From 7d3051a3d597dfb4cafaf9030fe9029b8b51b589 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Thu, 16 May 2024 17:25:55 +0100 Subject: [PATCH 01/53] Add a WeightedCost --- examples/scripts/spm_weighted_cost.py | 66 ++++++++++++++++++ pybop/__init__.py | 2 +- pybop/costs/_likelihoods.py | 38 ++++++----- pybop/costs/base_cost.py | 96 +++++++++++++++++++++++++++ pybop/costs/design_costs.py | 2 + pybop/costs/fitting_costs.py | 53 ++++++++++----- 6 files changed, 223 insertions(+), 34 deletions(-) create mode 100644 examples/scripts/spm_weighted_cost.py diff --git a/examples/scripts/spm_weighted_cost.py b/examples/scripts/spm_weighted_cost.py new file mode 100644 index 000000000..4bd8380e9 --- /dev/null +++ b/examples/scripts/spm_weighted_cost.py @@ -0,0 +1,66 @@ +import numpy as np + +import pybop + +# Parameter set and model definition +parameter_set = pybop.ParameterSet.pybamm("Chen2020") +model = pybop.lithium_ion.SPM(parameter_set=parameter_set) + +# Fitting parameters +parameters = [ + pybop.Parameter( + "Negative electrode active material volume fraction", + prior=pybop.Gaussian(0.68, 0.05), + bounds=[0.5, 0.8], + ), + pybop.Parameter( + "Positive electrode active material volume fraction", + prior=pybop.Gaussian(0.58, 0.05), + bounds=[0.4, 0.7], + ), +] + +# Generate data +sigma = 0.001 +t_eval = np.arange(0, 900, 3) +values = model.predict(t_eval=t_eval) +corrupt_values = values["Voltage [V]"].data + np.random.normal(0, sigma, len(t_eval)) + +# Form dataset +dataset = pybop.Dataset( + { + "Time [s]": t_eval, + "Current function [A]": values["Current [A]"].data, + "Voltage [V]": corrupt_values, + } +) + +# Generate problem, cost function, and optimisation class +problem = pybop.FittingProblem(model, parameters, dataset) +cost1 = pybop.SumSquaredError(problem) +cost2 = pybop.RootMeanSquaredError(problem) +weighted_cost = pybop.WeightedCost(cost_list=[cost1, cost2], weights=[1, 100]) + +for cost in [weighted_cost, cost1, cost2]: + optim = pybop.Optimisation(cost, optimiser=pybop.IRPropMin) + optim.set_max_iterations(60) + + # Run the optimisation + x, final_cost = optim.run() + print( + "True parameters:", + [ + parameters[0].true_value, + parameters[1].true_value, + ], + ) + print("Estimated parameters:", x) + + # Plot the timeseries output + pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison") + + # Plot convergence + pybop.plot_convergence(optim) + + # Plot the cost landscape with optimisation path + pybop.plot2d(optim, steps=15) diff --git a/pybop/__init__.py b/pybop/__init__.py index 034dcb4a4..6419f8f42 100644 --- a/pybop/__init__.py +++ b/pybop/__init__.py @@ -55,7 +55,7 @@ # # Cost function class # -from .costs.base_cost import BaseCost +from .costs.base_cost import BaseCost, WeightedCost from .costs.fitting_costs import ( RootMeanSquaredError, SumSquaredError, diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index 91374cc07..848c34c2d 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -62,20 +62,22 @@ def __init__(self, problem, sigma): def _evaluate(self, x, grad=None): """ - Calls the problem.evaluate method and calculates - the log-likelihood + Calculates the log-likelihood. 
""" - y = self.problem.evaluate(x) - for key in self.signal: - if len(y.get(key, [])) != len(self._target.get(key, [])): + if len(self._current_prediction.get(key, [])) != len( + self._target.get(key, []) + ): return -np.float64(np.inf) # prediction doesn't match target e = np.array( [ np.sum( self._offset - + self._multip * np.sum((self._target[signal] - y[signal]) ** 2) + + self._multip + * np.sum( + (self._target[signal] - self._current_prediction[signal]) ** 2 + ) ) for signal in self.signal ] @@ -88,20 +90,26 @@ def _evaluate(self, x, grad=None): def _evaluateS1(self, x, grad=None): """ - Calls the problem.evaluateS1 method and calculates - the log-likelihood + Calculates the log-likelihood and sensitivities. """ - y, dy = self.problem.evaluateS1(x) - for key in self.signal: - if len(y.get(key, [])) != len(self._target.get(key, [])): + if len(self._current_prediction.get(key, [])) != len( + self._target.get(key, []) + ): likelihood = np.float64(np.inf) dl = self._dl * np.ones(self.n_parameters) return -likelihood, -dl - r = np.array([self._target[signal] - y[signal] for signal in self.signal]) + r = np.array( + [ + self._target[signal] - self._current_prediction[signal] + for signal in self.signal + ] + ) likelihood = self._evaluate(x) - dl = np.sum((self.sigma2 * np.sum((r * dy.T), axis=2)), axis=1) + dl = np.sum( + (self.sigma2 * np.sum((r * self._current_sensitivities.T), axis=2)), axis=1 + ) return likelihood, dl @@ -119,6 +127,7 @@ def __init__(self, problem): super(GaussianLogLikelihood, self).__init__(problem) self._logpi = -0.5 * self.n_time_data * np.log(2 * np.pi) self._dl = np.ones(self._n_parameters + self.n_outputs) + self._fixed_problem = False # keep problem evaluation within _evaluate def _evaluate(self, x, grad=None): """ @@ -161,8 +170,7 @@ def _evaluate(self, x, grad=None): def _evaluateS1(self, x, grad=None): """ - Calls the problem.evaluateS1 method and calculates - the log-likelihood + Calculates the log-likelihood and sensitivities. """ sigma = np.asarray(x[-self.n_outputs :]) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index f3ac95170..28fdff7f7 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -39,6 +39,7 @@ def __init__(self, problem=None, sigma=None): self.bounds = None self.sigma0 = sigma self._minimising = True + self._fixed_problem = True if isinstance(self.problem, BaseProblem): self._target = problem._target self.parameters = problem.parameters @@ -82,6 +83,9 @@ def evaluate(self, x, grad=None): If an error occurs during the calculation of the cost. """ try: + if self._fixed_problem: + self._current_prediction = self.problem.evaluate(x) + if self._minimising: return self._evaluate(x, grad) else: # minimise the negative cost @@ -140,6 +144,11 @@ def evaluateS1(self, x): If an error occurs during the calculation of the cost or gradient. """ try: + if self._fixed_problem: + self._current_prediction, self._current_sensitivities = ( + self.problem.evaluateS1(x) + ) + if self._minimising: return self._evaluateS1(x) else: # minimise the negative cost @@ -173,3 +182,90 @@ def _evaluateS1(self, x): If the method has not been implemented by the subclass. """ raise NotImplementedError + + +class WeightedCost(BaseCost): + """ + A subclass for constructing a linear combination of cost functions as + a single weighted cost function. + + Inherits all parameters and attributes from ``BaseCost``. 
+ """ + + def __init__(self, cost_list, weights=None): + self.cost_list = cost_list + self.weights = weights + self._different_problems = False + + if not isinstance(self.cost_list, list): + raise TypeError("Expected a list of costs.") + if self.weights is None: + self.weights = np.ones(len(cost_list)) + elif isinstance(self.weights, list): + self.weights = np.array(self.weights) + if not isinstance(self.weights, np.ndarray): + raise TypeError( + "Expected a list or array of weights the same length as cost_list." + ) + if not len(self.weights) == len(self.cost_list): + raise ValueError( + "Expected a list or array of weights the same length as cost_list." + ) + + # Check if all costs depend on the same problem + for cost in self.cost_list: + if hasattr(cost, "problem") and ( + not cost._fixed_problem or cost.problem is not self.cost_list[0].problem + ): + self._different_problems = True + + if not self._different_problems: + super(WeightedCost, self).__init__(self.cost_list[0].problem) + self._fixed_problem = False + else: + super(WeightedCost, self).__init__() + + def _evaluate(self, x, grad=None): + """ + Calculate the weighted cost for a given set of parameters. + """ + e = np.empty_like(self.cost_list) + + if not self._different_problems: + current_prediction = self.problem.evaluate(x) + + for i, cost in enumerate(self.cost_list): + if self._different_problems: + cost._current_prediction = cost.problem.evaluate(x) + else: + cost._current_prediction = current_prediction + e[i] = cost._evaluate(x, grad) + + return np.dot(e, self.weights) + + def _evaluateS1(self, x): + """ + Compute the cost and its gradient with respect to the parameters. + """ + e = np.empty_like(self.cost_list) + de = np.empty((len(self.parameters), len(self.cost_list))) + + if not self._different_problems: + current_prediction, current_sensitivities = self.problem.evaluateS1(x) + + for i, cost in enumerate(self.cost_list): + if self._different_problems: + cost._current_prediction, cost._current_sensitivities = ( + cost.problem.evaluateS1(x) + ) + else: + cost._current_prediction, cost._current_sensitivities = ( + current_prediction, + current_sensitivities, + ) + e[i], de[:, i] = cost._evaluateS1(x) + + e = np.dot(e, self.weights) + de = np.dot(de, self.weights) + + return e, de diff --git a/pybop/costs/design_costs.py b/pybop/costs/design_costs.py index b02940bf0..aff621910 100644 --- a/pybop/costs/design_costs.py +++ b/pybop/costs/design_costs.py @@ -98,6 +98,7 @@ class GravimetricEnergyDensity(DesignCost): def __init__(self, problem, update_capacity=False): super(GravimetricEnergyDensity, self).__init__(problem, update_capacity) + self._fixed_problem = False # keep problem evaluation within _evaluate def _evaluate(self, x, grad=None): """ @@ -157,6 +158,7 @@ class VolumetricEnergyDensity(DesignCost): def __init__(self, problem, update_capacity=False): super(VolumetricEnergyDensity, self).__init__(problem, update_capacity) + self._fixed_problem = False # keep problem evaluation within _evaluate def _evaluate(self, x, grad=None): """ diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index 0665454b0..24c78fff9 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -41,15 +41,19 @@ def _evaluate(self, x, grad=None): The root mean square error. 
""" - prediction = self.problem.evaluate(x) - for key in self.signal: - if len(prediction.get(key, [])) != len(self._target.get(key, [])): + if len(self._current_prediction.get(key, [])) != len( + self._target.get(key, []) + ): return np.float64(np.inf) # prediction doesn't match target e = np.array( [ - np.sqrt(np.mean((prediction[signal] - self._target[signal]) ** 2)) + np.sqrt( + np.mean( + (self._current_prediction[signal] - self._target[signal]) ** 2 + ) + ) for signal in self.signal ] ) @@ -79,18 +83,24 @@ def _evaluateS1(self, x): ValueError If an error occurs during the calculation of the cost or gradient. """ - y, dy = self.problem.evaluateS1(x) - for key in self.signal: - if len(y.get(key, [])) != len(self._target.get(key, [])): + if len(self._current_prediction.get(key, [])) != len( + self._target.get(key, []) + ): e = np.float64(np.inf) de = self._de * np.ones(self.n_parameters) return e, de - r = np.array([y[signal] - self._target[signal] for signal in self.signal]) + r = np.array( + [ + self._current_prediction[signal] - self._target[signal] + for signal in self.signal + ] + ) e = np.sqrt(np.mean(r**2, axis=1)) - de = np.mean((r * dy.T), axis=2) / ( - np.sqrt(np.mean((r * dy.T) ** 2, axis=2)) + np.finfo(float).eps + de = np.mean((r * self._current_sensitivities.T), axis=2) / ( + np.sqrt(np.mean((r * self._current_sensitivities.T) ** 2, axis=2)) + + np.finfo(float).eps ) if self.n_outputs == 1: @@ -155,15 +165,15 @@ def _evaluate(self, x, grad=None): float The sum of squared errors. """ - prediction = self.problem.evaluate(x) - for key in self.signal: - if len(prediction.get(key, [])) != len(self._target.get(key, [])): + if len(self._current_prediction.get(key, [])) != len( + self._target.get(key, []) + ): return np.float64(np.inf) # prediction doesn't match target e = np.array( [ - np.sum(((prediction[signal] - self._target[signal]) ** 2)) + np.sum(((self._current_prediction[signal] - self._target[signal]) ** 2)) for signal in self.signal ] ) @@ -192,16 +202,22 @@ def _evaluateS1(self, x): ValueError If an error occurs during the calculation of the cost or gradient. 
""" - y, dy = self.problem.evaluateS1(x) for key in self.signal: - if len(y.get(key, [])) != len(self._target.get(key, [])): + if len(self._current_prediction.get(key, [])) != len( + self._target.get(key, []) + ): e = np.float64(np.inf) de = self._de * np.ones(self.n_parameters) return e, de - r = np.array([y[signal] - self._target[signal] for signal in self.signal]) + r = np.array( + [ + self._current_prediction[signal] - self._target[signal] + for signal in self.signal + ] + ) e = np.sum(np.sum(r**2, axis=0), axis=0) - de = 2 * np.sum(np.sum((r * dy.T), axis=2), axis=1) + de = 2 * np.sum(np.sum((r * self._current_sensitivities.T), axis=2), axis=1) return e, de @@ -235,6 +251,7 @@ class ObserverCost(BaseCost): def __init__(self, observer: Observer): super().__init__(problem=observer) self._observer = observer + self._fixed_problem = False # keep problem evaluation within _evaluate def _evaluate(self, x, grad=None): """ From 7898e8868ae55e2a853728998fd3c1465b7c52e6 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Thu, 16 May 2024 19:06:48 +0100 Subject: [PATCH 02/53] Fix setting --- pybop/costs/base_cost.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 28fdff7f7..b6fa4885d 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -221,9 +221,9 @@ def __init__(self, cost_list, weights=None): if not self._different_problems: super(WeightedCost, self).__init__(self.cost_list[0].problem) - self._fixed_problem = False else: super(WeightedCost, self).__init__() + self._fixed_problem = False def _evaluate(self, x, grad=None): """ From 7a9bcc4f7d59be349c12630826bdaed56b62133f Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Thu, 16 May 2024 19:08:14 +0100 Subject: [PATCH 03/53] Add tests --- tests/unit/test_cost.py | 50 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index 6ffeb58ea..3ed5245e6 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -1,3 +1,5 @@ +from copy import copy + import numpy as np import pytest @@ -227,3 +229,51 @@ def test_energy_density_costs( # Compute after updating nominal capacity cost = cost_class(problem, update_capacity=True) cost([0.4]) + + @pytest.mark.unit + def test_weighted_cost(self, problem, x0): + cost1 = pybop.SumSquaredError(problem) + cost2 = pybop.RootMeanSquaredError(problem) + + # Test with and without weights + weighted_cost = pybop.WeightedCost(cost_list=[cost1, cost2]) + np.testing.assert_array_equal(weighted_cost.weights, np.ones(2)) + weighted_cost = pybop.WeightedCost(cost_list=[cost1, cost2], weights=[1, 1]) + np.testing.assert_array_equal(weighted_cost.weights, np.ones(2)) + weighted_cost = pybop.WeightedCost( + cost_list=[cost1, cost2], weights=np.array([1, 1]) + ) + np.testing.assert_array_equal(weighted_cost.weights, np.ones(2)) + with pytest.raises( + TypeError, + match="Expected a list or array of weights the same length as cost_list.", + ): + weighted_cost = pybop.WeightedCost( + cost_list=[cost1, cost2], weights="Invalid string" + ) + with pytest.raises( + ValueError, + match="Expected a list or array of weights the same length as cost_list.", + ): + weighted_cost = pybop.WeightedCost(cost_list=[cost1, cost2], weights=[1]) + + # Test with and without different problems + weighted_cost_2 = pybop.WeightedCost(cost_list=[cost1, cost2], 
weights=[1, 100]) + assert weighted_cost_2._different_problems is False + assert weighted_cost_2.problem is problem + assert weighted_cost_2(x0) >= 0 + + cost3 = pybop.RootMeanSquaredError(copy(problem)) + weighted_cost_3 = pybop.WeightedCost(cost_list=[cost1, cost3], weights=[1, 100]) + assert weighted_cost_3._different_problems is True + assert weighted_cost_3.problem is None + assert weighted_cost_3(x0) >= 0 + + np.testing.assert_allclose( + weighted_cost_2._evaluate(x0), weighted_cost_3._evaluate(x0), atol=1e-5 + ) + weighted_cost_3.parameters = problem.parameters + errors_2, sensitivities_2 = weighted_cost_2._evaluateS1(x0) + errors_3, sensitivities_3 = weighted_cost_3._evaluateS1(x0) + np.testing.assert_allclose(errors_2, errors_3, atol=1e-5) + np.testing.assert_allclose(sensitivities_2, sensitivities_3, atol=1e-5) From a1c2ec6c15754e06a962864b234bcd83184fa399 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Thu, 23 May 2024 17:26:26 +0100 Subject: [PATCH 04/53] Update base_cost.py --- pybop/costs/base_cost.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 9c2ec6926..a734ccbd5 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -38,7 +38,7 @@ def __init__(self, problem=None, sigma=None): self.x0 = None self.bounds = None self.sigma0 = sigma - self._fixed_problem = True + self._fixed_problem = False if isinstance(self.problem, BaseProblem): self._target = problem._target self.parameters = problem.parameters @@ -48,6 +48,7 @@ def __init__(self, problem=None, sigma=None): self.signal = problem.signal self._n_parameters = problem.n_parameters self.sigma0 = sigma or problem.sigma0 or np.zeros(self._n_parameters) + self._fixed_problem = True @property def n_parameters(self): From 501064d37bd5b7c2a4c3444d78277707cf82a41d Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Fri, 24 May 2024 17:10:01 +0100 Subject: [PATCH 05/53] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9e26270dc..27e3b8ce5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ## Features +- [#327](https://github.com/pybop-team/PyBOP/issues/327) - Adds the `WeightedCost` subclass, defines when to evaluate a problem and adds the `spm_weighted_cost` example script. - [#236](https://github.com/pybop-team/PyBOP/issues/236) - Restructures the optimiser classes, adds a new optimisation API through direct construction and keyword arguments, and fixes the setting of `max_iterations`, and `_minimising`. Introduces `pybop.BaseOptimiser`, `pybop.BasePintsOptimiser`, and `pybop.BaseSciPyOptimiser` classes. - [#321](https://github.com/pybop-team/PyBOP/pull/321) - Updates Prior classes with BaseClass, adds a `problem.sample_initial_conditions` method to improve stability of SciPy.Minimize optimiser. - [#249](https://github.com/pybop-team/PyBOP/pull/249) - Add WeppnerHuggins model and GITT example. 
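Note on the combination rule introduced above: WeightedCost._evaluate (PATCH 01) collects each member cost value into an array e and returns np.dot(e, self.weights), re-using a single problem prediction whenever every cost shares the same problem. A minimal standalone sketch of that dot-product rule, with plain callables standing in for the pybop cost objects (the weighted_cost helper and the sse/rmse stand-ins are illustrative, not part of these patches):

import numpy as np

def weighted_cost(costs, weights, x):
    # Evaluate every member cost at the same parameter vector x,
    # mirroring how WeightedCost shares one problem prediction.
    e = np.array([cost(x) for cost in costs])
    # Combine as a weighted sum, as in np.dot(e, self.weights) above.
    return np.dot(e, np.asarray(weights))

def sse(x):  # stand-in for SumSquaredError around a target of 0.6
    return float(np.sum((np.asarray(x) - 0.6) ** 2))

def rmse(x):  # stand-in for RootMeanSquaredError around the same target
    return float(np.sqrt(np.mean((np.asarray(x) - 0.6) ** 2)))

# With weights=[1, 100] as in the example script: 0.02 + 100 * 0.1 = 10.02
print(weighted_cost([sse, rmse], [1, 100], [0.5, 0.7]))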
From 403a280c210fd37006ae165b453238a257d6a804 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Thu, 30 May 2024 17:22:31 +0100 Subject: [PATCH 06/53] feat: initial transformation class --- examples/scripts/spm_CMAES.py | 4 +- examples/standalone/cost.py | 4 +- pybop/__init__.py | 6 + pybop/costs/_likelihoods.py | 8 +- pybop/costs/base_cost.py | 16 +-- pybop/costs/design_costs.py | 12 +- pybop/costs/fitting_costs.py | 11 +- pybop/optimisers/base_optimiser.py | 4 + pybop/optimisers/base_pints_optimiser.py | 55 ++++++-- pybop/transformations/__init__.py | 157 ++++++++++++++++++++++ pybop/transformations/_transformations.py | 56 ++++++++ 11 files changed, 288 insertions(+), 45 deletions(-) create mode 100644 pybop/transformations/__init__.py create mode 100644 pybop/transformations/_transformations.py diff --git a/examples/scripts/spm_CMAES.py b/examples/scripts/spm_CMAES.py index b60bc0194..a71de0bb5 100644 --- a/examples/scripts/spm_CMAES.py +++ b/examples/scripts/spm_CMAES.py @@ -42,7 +42,9 @@ # Generate problem, cost function, and optimisation class problem = pybop.FittingProblem(model, parameters, dataset, signal=signal) cost = pybop.SumSquaredError(problem) -optim = pybop.CMAES(cost, max_iterations=100) +optim = pybop.CMAES( + cost, max_iterations=100, transformation=pybop.IdentityTransformation(2) +) # Run the optimisation x, final_cost = optim.run() diff --git a/examples/standalone/cost.py b/examples/standalone/cost.py index c3763ff4c..7befc6f76 100644 --- a/examples/standalone/cost.py +++ b/examples/standalone/cost.py @@ -27,7 +27,7 @@ class StandaloneCost(pybop.BaseCost): Methods ------- - __call__(x, grad=None) + __call__(x) Calculate the cost for a given parameter value. """ @@ -48,7 +48,7 @@ def __init__(self, problem=None): upper=[10], ) - def _evaluate(self, x, grad=None): + def _evaluate(self, x): """ Calculate the cost for a given parameter value. diff --git a/pybop/__init__.py b/pybop/__init__.py index ecd420198..b020a0c51 100644 --- a/pybop/__init__.py +++ b/pybop/__init__.py @@ -73,6 +73,12 @@ GaussianLogLikelihoodKnownSigma, ) +# +# Transformation classes +# +from .transformations import Transformation +from .transformations._transformations import IdentityTransformation + # # Dataset class # diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index 91374cc07..20ef32bc4 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -60,7 +60,7 @@ def __init__(self, problem, sigma): self.sigma2 = self.sigma0**-2 self._dl = np.ones(self._n_parameters) - def _evaluate(self, x, grad=None): + def _evaluate(self, x): """ Calls the problem.evaluate method and calculates the log-likelihood @@ -86,7 +86,7 @@ def _evaluate(self, x, grad=None): else: return np.sum(e) - def _evaluateS1(self, x, grad=None): + def _evaluateS1(self, x): """ Calls the problem.evaluateS1 method and calculates the log-likelihood @@ -120,7 +120,7 @@ def __init__(self, problem): self._logpi = -0.5 * self.n_time_data * np.log(2 * np.pi) self._dl = np.ones(self._n_parameters + self.n_outputs) - def _evaluate(self, x, grad=None): + def _evaluate(self, x): """ Evaluates the Gaussian log-likelihood for the given parameters. 
@@ -159,7 +159,7 @@ def _evaluate(self, x, grad=None): else: return np.sum(e) - def _evaluateS1(self, x, grad=None): + def _evaluateS1(self, x): """ Calls the problem.evaluateS1 method and calculates the log-likelihood diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 025cb2e42..85eb55d98 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -52,13 +52,13 @@ def __init__(self, problem=None, sigma=None): def n_parameters(self): return self._n_parameters - def __call__(self, x, grad=None): + def __call__(self, x): """ Call the evaluate function for a given set of parameters. """ - return self.evaluate(x, grad) + return self.evaluate(x) - def evaluate(self, x, grad=None): + def evaluate(self, x): """ Call the evaluate function for a given set of parameters. @@ -66,9 +66,6 @@ def evaluate(self, x, grad=None): ---------- x : array-like The parameters for which to evaluate the cost. - grad : array-like, optional - An array to store the gradient of the cost function with respect - to the parameters. Returns ------- @@ -81,7 +78,7 @@ def evaluate(self, x, grad=None): If an error occurs during the calculation of the cost. """ try: - return self._evaluate(x, grad) + return self._evaluate(x) except NotImplementedError as e: raise e @@ -89,7 +86,7 @@ def evaluate(self, x, grad=None): except Exception as e: raise ValueError(f"Error in cost calculation: {e}") - def _evaluate(self, x, grad=None): + def _evaluate(self, x): """ Calculate the cost function value for a given set of parameters. @@ -99,9 +96,6 @@ def _evaluate(self, x, grad=None): ---------- x : array-like The parameters for which to evaluate the cost. - grad : array-like, optional - An array to store the gradient of the cost function with respect - to the parameters. Returns ------- diff --git a/pybop/costs/design_costs.py b/pybop/costs/design_costs.py index f6364cdc6..811f0c381 100644 --- a/pybop/costs/design_costs.py +++ b/pybop/costs/design_costs.py @@ -65,7 +65,7 @@ def update_simulation_data(self, initial_conditions): self.problem._target = {key: solution[key] for key in self.problem.signal} self.dt = solution["Time [s]"][1] - solution["Time [s]"][0] - def _evaluate(self, x, grad=None): + def _evaluate(self, x): """ Computes the value of the cost function. @@ -75,8 +75,6 @@ def _evaluate(self, x, grad=None): ---------- x : array The parameter set for which to compute the cost. - grad : array, optional - Gradient information, not used in this method. Raises ------ @@ -99,7 +97,7 @@ class GravimetricEnergyDensity(DesignCost): def __init__(self, problem, update_capacity=False): super(GravimetricEnergyDensity, self).__init__(problem, update_capacity) - def _evaluate(self, x, grad=None): + def _evaluate(self, x): """ Computes the cost function for the energy density. @@ -107,8 +105,6 @@ def _evaluate(self, x, grad=None): ---------- x : array The parameter set for which to compute the cost. - grad : array, optional - Gradient information, not used in this method. Returns ------- @@ -158,7 +154,7 @@ class VolumetricEnergyDensity(DesignCost): def __init__(self, problem, update_capacity=False): super(VolumetricEnergyDensity, self).__init__(problem, update_capacity) - def _evaluate(self, x, grad=None): + def _evaluate(self, x): """ Computes the cost function for the energy density. @@ -166,8 +162,6 @@ def _evaluate(self, x, grad=None): ---------- x : array The parameter set for which to compute the cost. - grad : array, optional - Gradient information, not used in this method. 
Returns ------- diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index b7b266591..d4fd23311 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -23,7 +23,7 @@ def __init__(self, problem): # Default fail gradient self._de = 1.0 - def _evaluate(self, x, grad=None): + def _evaluate(self, x): """ Calculate the root mean square error for a given set of parameters. @@ -138,7 +138,7 @@ def __init__(self, problem): # Default fail gradient self._de = 1.0 - def _evaluate(self, x, grad=None): + def _evaluate(self, x): """ Calculate the sum of squared errors for a given set of parameters. @@ -146,9 +146,6 @@ def _evaluate(self, x, grad=None): ---------- x : array-like The parameters for which to evaluate the cost. - grad : array-like, optional - An array to store the gradient of the cost function with respect - to the parameters. Returns ------- @@ -236,7 +233,7 @@ def __init__(self, observer: Observer): super().__init__(problem=observer) self._observer = observer - def _evaluate(self, x, grad=None): + def _evaluate(self, x): """ Calculate the observer cost for a given set of parameters. @@ -314,7 +311,7 @@ def __init__(self, problem, likelihood, sigma=None): ): raise ValueError(f"{self.likelihood} must be a subclass of BaseLikelihood") - def _evaluate(self, x, grad=None): + def _evaluate(self, x): """ Calculate the maximum a posteriori cost for a given set of parameters. diff --git a/pybop/optimisers/base_optimiser.py b/pybop/optimisers/base_optimiser.py index 713df4d49..9d4cd919d 100644 --- a/pybop/optimisers/base_optimiser.py +++ b/pybop/optimisers/base_optimiser.py @@ -54,6 +54,7 @@ def __init__( self.verbose = False self.log = [] self.minimising = True + self.transformation = None self.physical_viability = False self.allow_infeasible_solutions = False self.default_max_iterations = 1000 @@ -103,6 +104,9 @@ def set_base_options(self): self.sigma0 = self.unset_options.pop("sigma0", self.sigma0) self.verbose = self.unset_options.pop("verbose", self.verbose) self.minimising = self.unset_options.pop("minimising", self.minimising) + self.transformation = self.unset_options.pop( + "transformation", self.transformation + ) if "allow_infeasible_solutions" in self.unset_options.keys(): self.set_allow_infeasible_solutions( self.unset_options.pop("allow_infeasible_solutions") diff --git a/pybop/optimisers/base_pints_optimiser.py b/pybop/optimisers/base_pints_optimiser.py index 543d32b64..074719e3e 100644 --- a/pybop/optimisers/base_pints_optimiser.py +++ b/pybop/optimisers/base_pints_optimiser.py @@ -1,7 +1,7 @@ import numpy as np import pints -from pybop import BaseOptimiser +from pybop import BaseLikelihood, BaseOptimiser, Transformation class BasePintsOptimiser(BaseOptimiser): @@ -38,11 +38,11 @@ def __init__(self, cost, pints_optimiser, **optimiser_kwargs): self._evaluations = None self._iterations = None - # PyBOP doesn't currently support the PINTS transformation class - self._transformation = None - self.pints_optimiser = pints_optimiser super().__init__(cost, **optimiser_kwargs) + self.f = self.cost + if self.transformation is not None: + self.set_transformation(self.transformation) def _set_up_optimiser(self): """ @@ -145,6 +145,39 @@ def _sanitise_inputs(self): self.bounds["lower"], self.bounds["upper"] ) + def set_transformation(self, transformation: Transformation): + """ + Apply the given transformation to the optimizer's settings. 
+ + Initial credit: Pints team + + Parameters + ---------- + transformation : pybop.Transformation + The transformation object to be applied. + """ + # Convert cost or log pdf + if isinstance(self.cost, BaseLikelihood): + self.f = transformation.convert_log_pdf(self.cost) + else: + self.f = transformation.convert_cost(self.cost) + + # Convert initial position + self.x0 = transformation.to_search(self.x0) + + # Convert sigma0, if provided + if self.sigma0 is not None: + self.sigma0 = transformation.convert_standard_deviation( + self.sigma0, self.x0 + ) + + # Convert boundaries, if provided + if self._boundaries: + self._boundaries = transformation.convert_boundaries(self._boundaries) + + # Store the transformation for later detransformation + self.transformation = transformation + def name(self): """ Provides the name of the optimisation strategy. @@ -191,12 +224,12 @@ def _run(self): if self._needs_sensitivities: def f(x): - L, dl = self.cost.evaluateS1(x) + L, dl = self.f.evaluateS1(x) return (L, dl) if self.minimising else (-L, -dl) else: - def f(x, grad=None): - return self.cost(x, grad) if self.minimising else -self.cost(x, grad) + def f(x): + return self.f(x) if self.minimising else -self.f(x) # Create evaluator object if self._parallel: @@ -316,8 +349,8 @@ def f(x, grad=None): # Show current parameters x_user = self.pints_optimiser.x_guessed() - if self._transformation is not None: - x_user = self._transformation.to_model(x_user) + if self.transformation is not None: + x_user = self.transformation.to_model(x_user) for p in x_user: print(pints.strfloat(p)) print("-" * 40) @@ -339,8 +372,8 @@ def f(x, grad=None): f = self.pints_optimiser.f_best() # Inverse transform search parameters - if self._transformation is not None: - x = self._transformation.to_model(x) + if self.transformation is not None: + x = self.transformation.to_model(x) # Store result final_cost = f if self.minimising else -f diff --git a/pybop/transformations/__init__.py b/pybop/transformations/__init__.py new file mode 100644 index 000000000..074693b98 --- /dev/null +++ b/pybop/transformations/__init__.py @@ -0,0 +1,157 @@ +from abc import ABC, abstractmethod +from typing import Tuple, Union, Sequence +import numpy as np + +from pybop import BaseCost + +class Transformation(ABC): + """ + Abstract base class for transformations between two parameter spaces: the model + parameter space and a search space. + + If `trans` is an instance of a `Transformation` class, you can apply the + transformation of a parameter vector from the model space `p` to the search + space `q` using `q = trans.to_search(p)` and the inverse using `p = trans.to_model(q)`. + + Based on pints.transformation method. + + References + ---------- + .. [1] Erik Jorgensen and Asger Roer Pedersen. "How to Obtain Those Nasty Standard Errors From Transformed Data." + http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.47.9023 + .. [2] Kaare Brandt Petersen and Michael Syskind Pedersen. "The Matrix Cookbook." 2012. 
+ """ + + def convert_log_pdf(self, log_pdf): + """Returns a transformed log-PDF class.""" + return TransformedLogPDF(log_pdf, self) + + def convert_log_prior(self, log_prior): + """Returns a transformed log-prior class.""" + return TransformedLogPrior(log_prior, self) + + def convert_cost(self, cost): + """Returns a transformed cost class.""" + return TransformedCost(cost, self) + + def convert_boundaries(self, boundaries): + """Returns a transformed boundaries class.""" + return TransformedBoundaries(boundaries, self) + + def convert_covariance_matrix(self, covariance: np.ndarray, q: np.ndarray) -> np.ndarray: + """ + Converts a covariance matrix `covariance` from the model space to the search space + around a parameter vector `q` in the search space. + """ + jac_inv = np.linalg.pinv(self.jacobian(q)) + return jac_inv @ covariance @ jac_inv.T + + def convert_standard_deviation(self, std: Union[float, np.ndarray], q: np.ndarray) -> np.ndarray: + """ + Converts standard deviation `std`, either a scalar or a vector, from the model space + to the search space around a parameter vector `q` in the search space. + """ + jac_inv = np.linalg.pinv(self.jacobian(q)) + cov = jac_inv @ jac_inv.T + return std * np.sqrt(np.diagonal(cov)) + + @abstractmethod + def jacobian(self, q: np.ndarray) -> np.ndarray: + """Returns the Jacobian matrix of the transformation at the parameter vector `q`.""" + pass + + def jacobian_S1(self, q: np.ndarray) -> Tuple[np.ndarray, Sequence[np.ndarray]]: + """ + Computes the Jacobian matrix and its partial derivatives at the parameter vector `q`. + + Returns a tuple `(jacobian, jacobian_derivatives)`. + """ + raise NotImplementedError("jacobian_S1 method must be implemented if used.") + + def log_jacobian_det(self, q: np.ndarray) -> float: + """ + Returns the logarithm of the absolute value of the determinant of the Jacobian matrix + at the parameter vector `q`. + """ + return np.log(np.abs(np.linalg.det(self.jacobian(q)))) + + def log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray]: + """ + Computes the logarithm of the absolute value of the determinant of the Jacobian, + and returns it along with its partial derivatives. + """ + jacobian, jacobian_derivatives = self.jacobian_S1(q) + jacobian_inv = np.linalg.pinv(jacobian) + derivatives = np.array([np.trace(jacobian_inv @ djac) for djac in jacobian_derivatives]) + return self.log_jacobian_det(q), derivatives + + @abstractmethod + def n_parameters(self) -> int: + """Returns the dimension of the parameter space this transformation is defined over.""" + pass + + @abstractmethod + def to_model(self, q: np.ndarray) -> np.ndarray: + """Transforms a parameter vector `q` from the search space to the model space.""" + pass + + @abstractmethod + def to_search(self, p: np.ndarray) -> np.ndarray: + """Transforms a parameter vector `p` from the model space to the search space.""" + pass + + def is_elementwise(self) -> bool: + """ + Returns `True` if the transformation is element-wise, meaning it can be applied + element-by-element independently. 
+ """ + raise NotImplementedError("is_elementwise method must be implemented if used.") + +class TransformedLogPDF(BaseCost): + """Transformed log-PDF class.""" + def __init__(self, log_pdf, transformation): + self._log_pdf = log_pdf + self._transformation = transformation + + def __call__(self, q): + p = self._transformation.to_model(q) + log_pdf = self._log_pdf(p) + + # Calculate the PDF using change of variable + # Wikipedia: https://w.wiki/UsJ + log_jacobian_det = self._transformation.log_jacobian_det(q) + return log_pdf + log_jacobian_det + + def _evaluateS1(self, x): + p = self._transformation.to_model(x) + log_pdf, log_pdf_derivatives = self._log_pdf._evaluateS1(p) + log_jacobian_det, log_jacobian_det_derivatives = self._transformation.log_jacobian_det_S1(x) + return log_pdf + log_jacobian_det, log_pdf_derivatives + log_jacobian_det_derivatives + +class TransformedLogPrior: + """Transformed log-prior class.""" + def __init__(self, log_prior, transformation): + self._log_prior = log_prior + self._transformation = transformation + + def __call__(self, q): + return self + +class TransformedCost(BaseCost): + """Transformed cost class.""" + def __init__(self, cost, transformation): + self._cost = cost + self._transformation = transformation + + def __call__(self, q ): + p = self._transformation.to_model(q) + return self._cost(p) + + def _evaluateS1(self, x): + p = self._transformation.to_model(x) + return self._cost._evaluateS1(x) + +class TransformedBoundaries: + """Transformed boundaries class.""" + def __init__(self, boundaries, transformation): + self._boundaries = boundaries diff --git a/pybop/transformations/_transformations.py b/pybop/transformations/_transformations.py new file mode 100644 index 000000000..a7a1541c5 --- /dev/null +++ b/pybop/transformations/_transformations.py @@ -0,0 +1,56 @@ +from typing import Sequence, Tuple, Union + +import numpy as np + +from pybop import Transformation + + +class IdentityTransformation(Transformation): + """ + Identity transformation between two parameter spaces: the model parameter space and a search space. + + Based on pints.IdentityTransformation method. 
+ """ + + def __init__(self, n_parameters: int): + self._n_parameters = n_parameters + + def elementwise(self) -> bool: + """See :meth:`Transformation.elementwise()`.""" + return True + + def jacobian(self, q: np.ndarray) -> np.ndarray: + """See :meth:`Transformation.jacobian()`.""" + return np.eye(self._n_parameters) + + def jacobian_S1(self, q: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """See :meth:`Transformation.jacobian_S1()`.""" + n = self._n_parameters + return self.jacobian(q), np.zeros((n, n, n)) + + def log_jacobian_det(self, q: np.ndarray) -> float: + """See :meth:`Transformation.log_jacobian_det()`.""" + return 0.0 + + def log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray]: + """See :meth:`Transformation.log_jacobian_det_S1()`.""" + return self.log_jacobian_det(q), np.zeros(self._n_parameters) + + def n_parameters(self) -> int: + """See :meth:`Transformation.n_parameters()`.""" + return self._n_parameters + + def to_model(self, q: Union[np.ndarray, Sequence, float, int]) -> np.ndarray: + """See :meth:`Transformation.to_model()`.""" + return np.asarray(q) + + def to_search(self, p: Union[np.ndarray, Sequence, float, int]) -> np.ndarray: + """See :meth:`Transformation.to_search()`.""" + return np.asarray(p) + + +# TODO: Implement the following classes: +# class LogTransformation(Transformation): +# class LogitTransformation(Transformation): +# class ComposedTransformation(Transformation): +# class ScaledTransformation(Transformation): From 5fa3db4e84b65641edc48cb1bdbc582331b106d5 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Mon, 10 Jun 2024 10:41:30 +0100 Subject: [PATCH 07/53] Update imports --- pybop/costs/base_cost.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index e6c2f5408..558a96359 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -1,3 +1,5 @@ +import numpy as np + from pybop import BaseProblem From 23b10992a87894b3d99bd1078d6280d2e707807c Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Mon, 10 Jun 2024 11:19:42 +0100 Subject: [PATCH 08/53] Update x0 to [0.5] --- tests/unit/test_cost.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index fc643ef44..3f230fdab 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -234,7 +234,7 @@ def test_design_costs( cost([0.4]) @pytest.mark.unit - def test_weighted_cost(self, problem, x0): + def test_weighted_cost(self, problem): cost1 = pybop.SumSquaredError(problem) cost2 = pybop.RootMeanSquaredError(problem) @@ -264,19 +264,19 @@ def test_weighted_cost(self, problem, x0): weighted_cost_2 = pybop.WeightedCost(cost_list=[cost1, cost2], weights=[1, 100]) assert weighted_cost_2._different_problems is False assert weighted_cost_2.problem is problem - assert weighted_cost_2(x0) >= 0 + assert weighted_cost_2([0.5]) >= 0 cost3 = pybop.RootMeanSquaredError(copy(problem)) weighted_cost_3 = pybop.WeightedCost(cost_list=[cost1, cost3], weights=[1, 100]) assert weighted_cost_3._different_problems is True assert weighted_cost_3.problem is None - assert weighted_cost_3(x0) >= 0 + assert weighted_cost_3([0.5]) >= 0 np.testing.assert_allclose( - weighted_cost_2._evaluate(x0), weighted_cost_3._evaluate(x0), atol=1e-5 + weighted_cost_2._evaluate([0.5]), weighted_cost_3._evaluate([0.5]), atol=1e-5 ) weighted_cost_3.parameters = 
problem.parameters - errors_2, sensitivities_2 = weighted_cost_2._evaluateS1(x0) - errors_3, sensitivities_3 = weighted_cost_3._evaluateS1(x0) + errors_2, sensitivities_2 = weighted_cost_2._evaluateS1([0.5]) + errors_3, sensitivities_3 = weighted_cost_3._evaluateS1([0.5]) np.testing.assert_allclose(errors_2, errors_3, atol=1e-5) np.testing.assert_allclose(sensitivities_2, sensitivities_3, atol=1e-5) From d6708d6e90a1e07998e257849c5ff481eb208b20 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 10 Jun 2024 10:20:01 +0000 Subject: [PATCH 09/53] style: pre-commit fixes --- tests/unit/test_cost.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index 3f230fdab..f1e1fc4b9 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -273,7 +273,9 @@ def test_weighted_cost(self, problem): assert weighted_cost_3([0.5]) >= 0 np.testing.assert_allclose( - weighted_cost_2._evaluate([0.5]), weighted_cost_3._evaluate([0.5]), atol=1e-5 + weighted_cost_2._evaluate([0.5]), + weighted_cost_3._evaluate([0.5]), + atol=1e-5, ) weighted_cost_3.parameters = problem.parameters errors_2, sensitivities_2 = weighted_cost_2._evaluateS1([0.5]) From 37ac6e2e0898b14470c020c30a89e7574c65c54e Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 10 Jun 2024 19:03:53 +0000 Subject: [PATCH 10/53] style: pre-commit fixes --- pybop/costs/fitting_costs.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index 069875c21..c63784e60 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -98,7 +98,9 @@ def _evaluateS1(self, x): ] ) e = np.sqrt(np.mean(r**2, axis=1)) - de = np.mean((r * self._current_sensitivities.T), axis=2) / (e + np.finfo(float).eps) + de = np.mean((r * self._current_sensitivities.T), axis=2) / ( + e + np.finfo(float).eps + ) if self.n_outputs == 1: return e.item(), de.flatten() From 1c540d95514c5469e2c1bb5c5098461259ca4c87 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Tue, 11 Jun 2024 14:42:14 +0100 Subject: [PATCH 11/53] Update spm_weighted_cost.py --- examples/scripts/spm_weighted_cost.py | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/examples/scripts/spm_weighted_cost.py b/examples/scripts/spm_weighted_cost.py index 4bd8380e9..14ba213d8 100644 --- a/examples/scripts/spm_weighted_cost.py +++ b/examples/scripts/spm_weighted_cost.py @@ -7,18 +7,20 @@ model = pybop.lithium_ion.SPM(parameter_set=parameter_set) # Fitting parameters -parameters = [ +parameters = pybop.Parameters( pybop.Parameter( "Negative electrode active material volume fraction", prior=pybop.Gaussian(0.68, 0.05), bounds=[0.5, 0.8], + true_value=parameter_set["Negative electrode active material volume fraction"], ), pybop.Parameter( "Positive electrode active material volume fraction", prior=pybop.Gaussian(0.58, 0.05), bounds=[0.4, 0.7], + true_value=parameter_set["Positive electrode active material volume fraction"], ), -] +) # Generate data sigma = 0.001 @@ -42,18 +44,11 @@ weighted_cost = pybop.WeightedCost(cost_list=[cost1, cost2], weights=[1, 100]) for cost in [weighted_cost, cost1, cost2]: - optim = pybop.Optimisation(cost, optimiser=pybop.IRPropMin) - optim.set_max_iterations(60) + optim = pybop.IRPropMin(cost, max_iterations=60) # 
Run the optimisation x, final_cost = optim.run() - print( - "True parameters:", - [ - parameters[0].true_value, - parameters[1].true_value, - ], - ) + print("True parameters:", parameters.true_value()) print("Estimated parameters:", x) # Plot the timeseries output From 3a9e10a08700b3fe4f3778a4b8d15b80443204a9 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Tue, 11 Jun 2024 15:21:58 +0100 Subject: [PATCH 12/53] Update TypeError with test --- pybop/costs/base_cost.py | 4 +++- tests/unit/test_cost.py | 5 +++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 558a96359..c385a51f1 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -179,7 +179,9 @@ def __init__(self, cost_list, weights=None): self._different_problems = False if not isinstance(self.cost_list, list): - raise TypeError("Expected a list of costs.") + raise TypeError( + f"Expected a list of costs. Received {type(self.cost_list)}" + ) if self.weights is None: self.weights = np.ones(len(cost_list)) elif isinstance(self.weights, list): diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index f1e1fc4b9..972862552 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -247,6 +247,11 @@ def test_weighted_cost(self, problem): cost_list=[cost1, cost2], weights=np.array([1, 1]) ) np.testing.assert_array_equal(weighted_cost.weights, np.ones(2)) + with pytest.raises( + TypeError, + match=r"Expected a list of costs.", + ): + weighted_cost = pybop.WeightedCost(cost_list="Invalid string") with pytest.raises( TypeError, match="Expected a list or array of weights the same length as cost_list.", From 8530224c83c3c8a0ca0e13e75f648402eae32c32 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Tue, 18 Jun 2024 09:08:40 +0100 Subject: [PATCH 13/53] feat: Integrate transformations into Parameters API, adds log, scaled, and composed transformations, updts for example API usage --- examples/scripts/spm_CMAES.py | 6 +- pybop/__init__.py | 17 +- pybop/costs/base_cost.py | 22 +- pybop/optimisers/base_optimiser.py | 3 + pybop/optimisers/base_pints_optimiser.py | 40 +--- pybop/parameters/parameter.py | 52 ++++- pybop/transformations/__init__.py | 112 ++++----- pybop/transformations/_transformations.py | 265 ++++++++++++++++++++-- 8 files changed, 387 insertions(+), 130 deletions(-) diff --git a/examples/scripts/spm_CMAES.py b/examples/scripts/spm_CMAES.py index 43d816d54..ff4e0f3af 100644 --- a/examples/scripts/spm_CMAES.py +++ b/examples/scripts/spm_CMAES.py @@ -13,12 +13,14 @@ prior=pybop.Gaussian(6e-06, 0.1e-6), bounds=[1e-6, 9e-6], true_value=parameter_set["Negative particle radius [m]"], + transformation=pybop.ScaledTransformation(scale=2.0), ), pybop.Parameter( "Positive particle radius [m]", prior=pybop.Gaussian(4.5e-06, 0.1e-6), bounds=[1e-6, 9e-6], true_value=parameter_set["Positive particle radius [m]"], + transformation=pybop.LogTransformation(), ), ) @@ -42,9 +44,7 @@ # Generate problem, cost function, and optimisation class problem = pybop.FittingProblem(model, parameters, dataset, signal=signal) cost = pybop.SumSquaredError(problem) -optim = pybop.CMAES( - cost, max_iterations=100, transformation=pybop.IdentityTransformation(2) -) +optim = pybop.CMAES(cost, max_iterations=100, min_iterations=40) # Run the optimisation x, final_cost = optim.run() diff --git a/pybop/__init__.py b/pybop/__init__.py index 7d86b88e6..82d527b03 100644 --- a/pybop/__init__.py +++ b/pybop/__init__.py @@ 
-78,6 +78,17 @@ from .problems.fitting_problem import FittingProblem from .problems.design_problem import DesignProblem +# +# Transformation classes +# +from .transformations import Transformation +from .transformations._transformations import ( + IdentityTransformation, + ScaledTransformation, + LogTransformation, + ComposedTransformation, +) + # # Cost function class # @@ -99,12 +110,6 @@ GaussianLogLikelihoodKnownSigma, ) -# -# Transformation classes -# -from .transformations import Transformation -from .transformations._transformations import IdentityTransformation - # # Optimiser class # diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 7f316a72a..1d45b04b2 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -1,4 +1,4 @@ -from pybop import BaseProblem +from pybop import BaseProblem, ComposedTransformation, IdentityTransformation class BaseCost: @@ -25,6 +25,7 @@ class BaseCost: def __init__(self, problem=None): self.parameters = None + self.transformation = None self.problem = problem self.x0 = None if isinstance(self.problem, BaseProblem): @@ -34,6 +35,13 @@ def __init__(self, problem=None): self.n_outputs = self.problem.n_outputs self.signal = self.problem.signal + # Construct ComposedTransformation from list of transformations + self.transformations = [ + t if t is not None else IdentityTransformation() + for t in self.parameters.get_transformations() + ] + self.transformation = ComposedTransformation(self.transformations) + @property def n_parameters(self): return len(self.parameters) @@ -42,7 +50,11 @@ def __call__(self, x): """ Call the evaluate function for a given set of parameters. """ - return self.evaluate(x) + if self.transformation: + p = self.transformation.to_model(x) + return self.evaluate(p) + else: + return self.evaluate(x) def evaluate(self, x): """ @@ -116,7 +128,11 @@ def evaluateS1(self, x): If an error occurs during the calculation of the cost or gradient. 
""" try: - return self._evaluateS1(x) + if self.transformation: + p = self.transformation.to_model(x) + return self._evaluateS1(p) + else: + return self._evaluateS1(x) except NotImplementedError as e: raise e diff --git a/pybop/optimisers/base_optimiser.py b/pybop/optimisers/base_optimiser.py index 7fff3bc33..921bf2547 100644 --- a/pybop/optimisers/base_optimiser.py +++ b/pybop/optimisers/base_optimiser.py @@ -75,6 +75,9 @@ def __init__( # Set default initial standard deviation (for all or no parameters) self.sigma0 = cost.parameters.get_sigma0() or self.sigma0 + # Set transformation + self.transformation = cost.transformation + else: try: cost_test = cost(optimiser_kwargs.get("x0", [])) diff --git a/pybop/optimisers/base_pints_optimiser.py b/pybop/optimisers/base_pints_optimiser.py index 943766182..84a0ba71c 100644 --- a/pybop/optimisers/base_pints_optimiser.py +++ b/pybop/optimisers/base_pints_optimiser.py @@ -10,7 +10,10 @@ from pints import SequentialEvaluator as PintsSequentialEvaluator from pints import strfloat as PintsStrFloat -from pybop import BaseLikelihood, BaseOptimiser, Result, Transformation +from pybop import ( + BaseOptimiser, + Result, +) class BasePintsOptimiser(BaseOptimiser): @@ -51,8 +54,6 @@ def __init__(self, cost, pints_optimiser, **optimiser_kwargs): self.pints_optimiser = pints_optimiser super().__init__(cost, **optimiser_kwargs) self.f = self.cost - if self.transformation is not None: - self.set_transformation(self.transformation) def _set_up_optimiser(self): """ @@ -154,39 +155,6 @@ def _sanitise_inputs(self): self.bounds["lower"], self.bounds["upper"] ) - def set_transformation(self, transformation: Transformation): - """ - Apply the given transformation to the optimizer's settings. - - Initial credit: Pints team - - Parameters - ---------- - transformation : pybop.Transformation - The transformation object to be applied. - """ - # Convert cost or log pdf - if isinstance(self.cost, BaseLikelihood): - self.f = transformation.convert_log_pdf(self.cost) - else: - self.f = transformation.convert_cost(self.cost) - - # Convert initial position - self.x0 = transformation.to_search(self.x0) - - # Convert sigma0, if provided - if self.sigma0 is not None: - self.sigma0 = transformation.convert_standard_deviation( - self.sigma0, self.x0 - ) - - # Convert boundaries, if provided - if self._boundaries: - self._boundaries = transformation.convert_boundaries(self._boundaries) - - # Store the transformation for later detransformation - self.transformation = transformation - def name(self): """ Provides the name of the optimisation strategy. diff --git a/pybop/parameters/parameter.py b/pybop/parameters/parameter.py index 76754847d..94549a31d 100644 --- a/pybop/parameters/parameter.py +++ b/pybop/parameters/parameter.py @@ -32,7 +32,13 @@ class Parameter: """ def __init__( - self, name, initial_value=None, true_value=None, prior=None, bounds=None + self, + name, + initial_value=None, + true_value=None, + prior=None, + bounds=None, + transformation=None, ): """ Construct the parameter class with a name, initial value, prior, and bounds. 
@@ -42,8 +48,9 @@ def __init__( self.true_value = true_value self.initial_value = initial_value self.value = initial_value - self.set_bounds(bounds) + self.transformation = transformation self.margin = 1e-4 + self.set_bounds(bounds) def rvs(self, n_samples, random_state=None): """ @@ -64,6 +71,9 @@ def rvs(self, n_samples, random_state=None): """ samples = self.prior.rvs(n_samples, random_state=random_state) + if self.transformation is not None: + samples = self.transformation.to_search(samples) + # Constrain samples to be within bounds if self.bounds is not None: offset = self.margin * (self.upper_bound - self.lower_bound) @@ -142,11 +152,19 @@ def set_bounds(self, bounds=None): if bounds is not None: if bounds[0] >= bounds[1]: raise ValueError("Lower bound must be less than upper bound") + elif self.transformation is not None: + self.lower_bound = np.ndarray.item( + self.transformation.to_search(bounds[0]) + ) + self.upper_bound = np.ndarray.item( + self.transformation.to_search(bounds[1]) + ) else: self.lower_bound = bounds[0] self.upper_bound = bounds[1] - - self.bounds = bounds + self.bounds = [self.lower_bound, self.upper_bound] + else: + self.bounds = bounds class Parameters: @@ -166,6 +184,7 @@ def __init__(self, *args): self.param = OrderedDict() for param in args: self.add(param) + self.initial_value() def __getitem__(self, key: str) -> Parameter: """ @@ -309,12 +328,20 @@ def get_sigma0(self) -> List: for param in self.param.values(): if hasattr(param.prior, "sigma"): - sigma0.append(param.prior.sigma) + if param.transformation is None: + sigma0.append(param.prior.sigma) + else: + sigma0.append( + np.ndarray.item( + param.transformation.convert_standard_deviation( + param.prior.sigma, param.initial_value + ) + ) + ) else: all_have_sigma = False if not all_have_sigma: sigma0 = None - return sigma0 def initial_value(self) -> List: @@ -324,7 +351,7 @@ def initial_value(self) -> List: initial_values = [] for param in self.param.values(): - if param.initial_value is None: + if param.initial_value is None and param.prior is not None: initial_value = param.rvs(1) param.update(initial_value=initial_value[0]) initial_values.append(param.initial_value) @@ -353,6 +380,17 @@ def true_value(self) -> List: return true_values + def get_transformations(self): + """ + Get the transformations for each parameter. + """ + transformations = [] + + for param in self.param.values(): + transformations.append(param.transformation) + + return transformations + def get_bounds_for_plotly(self): """ Retrieve parameter bounds in the format expected by Plotly. diff --git a/pybop/transformations/__init__.py b/pybop/transformations/__init__.py index 074693b98..f1ad8cfc6 100644 --- a/pybop/transformations/__init__.py +++ b/pybop/transformations/__init__.py @@ -1,8 +1,8 @@ from abc import ABC, abstractmethod from typing import Tuple, Union, Sequence import numpy as np +from pints import TransformedBoundaries as PintsTransformedBoundaries -from pybop import BaseCost class Transformation(ABC): """ @@ -22,18 +22,10 @@ class Transformation(ABC): .. [2] Kaare Brandt Petersen and Michael Syskind Pedersen. "The Matrix Cookbook." 2012. 
""" - def convert_log_pdf(self, log_pdf): - """Returns a transformed log-PDF class.""" - return TransformedLogPDF(log_pdf, self) - def convert_log_prior(self, log_prior): """Returns a transformed log-prior class.""" return TransformedLogPrior(log_prior, self) - def convert_cost(self, cost): - """Returns a transformed cost class.""" - return TransformedCost(cost, self) - def convert_boundaries(self, boundaries): """Returns a transformed boundaries class.""" return TransformedBoundaries(boundaries, self) @@ -51,6 +43,8 @@ def convert_standard_deviation(self, std: Union[float, np.ndarray], q: np.ndarra Converts standard deviation `std`, either a scalar or a vector, from the model space to the search space around a parameter vector `q` in the search space. """ + if isinstance(q, (int, float)): + q = np.asarray([q]) jac_inv = np.linalg.pinv(self.jacobian(q)) cov = jac_inv @ jac_inv.T return std * np.sqrt(np.diagonal(cov)) @@ -64,7 +58,7 @@ def jacobian_S1(self, q: np.ndarray) -> Tuple[np.ndarray, Sequence[np.ndarray]]: """ Computes the Jacobian matrix and its partial derivatives at the parameter vector `q`. - Returns a tuple `(jacobian, jacobian_derivatives)`. + Returns a tuple `(jacobian, hessian)`. """ raise NotImplementedError("jacobian_S1 method must be implemented if used.") @@ -80,9 +74,9 @@ def log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray]: Computes the logarithm of the absolute value of the determinant of the Jacobian, and returns it along with its partial derivatives. """ - jacobian, jacobian_derivatives = self.jacobian_S1(q) + jacobian, hessian = self.jacobian_S1(q) jacobian_inv = np.linalg.pinv(jacobian) - derivatives = np.array([np.trace(jacobian_inv @ djac) for djac in jacobian_derivatives]) + derivatives = np.array([np.trace(jacobian_inv @ djac) for djac in hessian]) return self.log_jacobian_det(q), derivatives @abstractmethod @@ -90,14 +84,20 @@ def n_parameters(self) -> int: """Returns the dimension of the parameter space this transformation is defined over.""" pass - @abstractmethod def to_model(self, q: np.ndarray) -> np.ndarray: """Transforms a parameter vector `q` from the search space to the model space.""" - pass + return self._transform(q, method="to_model") - @abstractmethod def to_search(self, p: np.ndarray) -> np.ndarray: """Transforms a parameter vector `p` from the model space to the search space.""" + return self._transform(p, method="to_search") + + @abstractmethod + def _transform(self, x: np.ndarray, method: str) -> np.ndarray: + """ + Transforms a parameter vector `x` from the search space to the model space if `method` + is "to_model", or from the model space to the search space if `method` is "to_search". 
+ """ pass def is_elementwise(self) -> bool: @@ -107,51 +107,39 @@ def is_elementwise(self) -> bool: """ raise NotImplementedError("is_elementwise method must be implemented if used.") -class TransformedLogPDF(BaseCost): - """Transformed log-PDF class.""" - def __init__(self, log_pdf, transformation): - self._log_pdf = log_pdf - self._transformation = transformation - - def __call__(self, q): - p = self._transformation.to_model(q) - log_pdf = self._log_pdf(p) - - # Calculate the PDF using change of variable - # Wikipedia: https://w.wiki/UsJ - log_jacobian_det = self._transformation.log_jacobian_det(q) - return log_pdf + log_jacobian_det - - def _evaluateS1(self, x): - p = self._transformation.to_model(x) - log_pdf, log_pdf_derivatives = self._log_pdf._evaluateS1(p) - log_jacobian_det, log_jacobian_det_derivatives = self._transformation.log_jacobian_det_S1(x) - return log_pdf + log_jacobian_det, log_pdf_derivatives + log_jacobian_det_derivatives - -class TransformedLogPrior: - """Transformed log-prior class.""" - def __init__(self, log_prior, transformation): - self._log_prior = log_prior - self._transformation = transformation - - def __call__(self, q): - return self - -class TransformedCost(BaseCost): - """Transformed cost class.""" - def __init__(self, cost, transformation): - self._cost = cost - self._transformation = transformation - - def __call__(self, q ): - p = self._transformation.to_model(q) - return self._cost(p) - - def _evaluateS1(self, x): - p = self._transformation.to_model(x) - return self._cost._evaluateS1(x) - -class TransformedBoundaries: - """Transformed boundaries class.""" +class TransformedBoundaries(PintsTransformedBoundaries): + """Transformed boundaries class inherited from Pints TransformedBoundaries.""" def __init__(self, boundaries, transformation): - self._boundaries = boundaries + super().__init__(boundaries, transformation) + + +# ---- To be implemented with Monte Carlo PR ------ # +# class TransformedLogPDF(BaseCost): +# """Transformed log-PDF class.""" +# def __init__(self, log_pdf, transformation): +# self._log_pdf = log_pdf +# self._transformation = transformation + +# def __call__(self, q): +# p = self._transformation.to_model(q) +# log_pdf = self._log_pdf(p) + +# # Calculate the PDF using change of variable +# # Wikipedia: https://w.wiki/UsJ +# log_jacobian_det = self._transformation.log_jacobian_det(q) +# return log_pdf + log_jacobian_det + +# def _evaluateS1(self, x): +# p = self._transformation.to_model(x) +# log_pdf, log_pdf_derivatives = self._log_pdf._evaluateS1(p) +# log_jacobian_det, log_jacobian_det_derivatives = self._transformation.log_jacobian_det_S1(x) +# return log_pdf + log_jacobian_det, log_pdf_derivatives + log_jacobian_det_derivatives + +# class TransformedLogPrior: +# """Transformed log-prior class.""" +# def __init__(self, log_prior, transformation): +# self._log_prior = log_prior +# self._transformation = transformation + +# def __call__(self, q): +# return self diff --git a/pybop/transformations/_transformations.py b/pybop/transformations/_transformations.py index a7a1541c5..32f38327c 100644 --- a/pybop/transformations/_transformations.py +++ b/pybop/transformations/_transformations.py @@ -1,4 +1,4 @@ -from typing import Sequence, Tuple, Union +from typing import List, Tuple, Union import numpy as np @@ -12,11 +12,11 @@ class IdentityTransformation(Transformation): Based on pints.IdentityTransformation method. 
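A minimal sketch of the identity behaviour described above; every call is a pass-through, so it is safe to use as a placeholder when a transformation object is required but nothing should change:

import numpy as np
import pybop

t = pybop.IdentityTransformation()
q = np.array([5.0])
assert np.array_equal(t.to_search(q), q)  # y = x
assert np.array_equal(t.to_model(q), q)   # x = y
assert t.log_jacobian_det(q) == 0.0       # |det(I)| = 1, so its log is 0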
""" - def __init__(self, n_parameters: int): + def __init__(self, n_parameters: int = 1): self._n_parameters = n_parameters - def elementwise(self) -> bool: - """See :meth:`Transformation.elementwise()`.""" + def is_elementwise(self) -> bool: + """See :meth:`Transformation.is_elementwise()`.""" return True def jacobian(self, q: np.ndarray) -> np.ndarray: @@ -40,17 +40,256 @@ def n_parameters(self) -> int: """See :meth:`Transformation.n_parameters()`.""" return self._n_parameters - def to_model(self, q: Union[np.ndarray, Sequence, float, int]) -> np.ndarray: - """See :meth:`Transformation.to_model()`.""" - return np.asarray(q) + def _transform(self, x: np.ndarray, method: str) -> np.ndarray: + """See :meth:`Transformation._transform`.""" + return np.asarray(x) - def to_search(self, p: Union[np.ndarray, Sequence, float, int]) -> np.ndarray: - """See :meth:`Transformation.to_search()`.""" - return np.asarray(p) + +class ScaledTransformation(Transformation): + """ + Scaled transformation between two parameter spaces: the model parameter space and a search space. + + Based on pints.ScaledTransformation method. + """ + + def __init__( + self, + scale: Union[list, float, np.ndarray], + translate: Union[list, float, np.ndarray] = 0, + n_parameters: int = 1, + ): + self._translate = translate + self._scale = np.asarray([scale]) + self._inverse_scale = np.reciprocal(self._scale) + self._n_parameters = n_parameters + + def is_elementwise(self) -> bool: + """See :meth:`Transformation.is_elementwise()`.""" + return True + + def jacobian(self, q: np.ndarray) -> np.ndarray: + """See :meth:`Transformation.jacobian()`.""" + return np.diag(self._inverse_scale) + + def jacobian_S1(self, q: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """See :meth:`Transformation.jacobian_S1()`.""" + n = self._n_parameters + return self.jacobian(q), np.zeros((n, n, n)) + + def log_jacobian_det(self, q: np.ndarray) -> float: + """See :meth:`Transformation.log_jacobian_det()`.""" + return np.sum(np.log(np.abs(self._scale))) + + def log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray]: + """See :meth:`Transformation.log_jacobian_det_S1()`.""" + return self.log_jacobian_det(q), np.zeros(self._n_parameters) + + def n_parameters(self) -> int: + """See :meth:`Transformation.n_parameters()`.""" + return self._n_parameters + + def _transform(self, x: np.ndarray, method: str) -> np.ndarray: + """See :meth:`Transformation._transform`.""" + x = np.asarray(x) + if method == "to_model": + return x * self._inverse_scale - self._translate + elif method == "to_search": + return self._scale * (x + self._translate) + else: + raise ValueError(f"Unknown method: {method}") + + +class LogTransformation(Transformation): + """ + Log transformation between two parameter spaces: the model parameter space and a search space. + + Based on pints.LogTransformation method. 
+ """ + + def __init__(self, n_parameters: int = 1): + self._n_parameters = n_parameters + + def is_elementwise(self) -> bool: + """See :meth:`Transformation.is_elementwise()`.""" + return True + + def jacobian(self, q: np.ndarray) -> np.ndarray: + """See :meth:`Transformation.jacobian()`.""" + return np.diag(1 / q) + + def jacobian_S1(self, q: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """See :meth:`Transformation.jacobian_S1()`.""" + return np.diag(1 / q), np.diag(-1 / q**2) + + def log_jacobian_det(self, q: np.ndarray) -> float: + """See :meth:`Transformation.log_jacobian_det()`.""" + return np.sum(-np.log(q)) + + def log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray]: + """See :meth:`Transformation.log_jacobian_det_S1()`.""" + return self.log_jacobian_det(q), -1 / q + + def n_parameters(self) -> int: + """See :meth:`Transformation.n_parameters()`.""" + return self._n_parameters + + def _transform(self, x: np.ndarray, method: str) -> np.ndarray: + """See :meth:`Transformation._transform`.""" + if isinstance(x, (int, float)): + x = np.asarray([x]) + if method == "to_model": + return np.exp(x) + elif method == "to_search": + return np.log(x) + else: + raise ValueError(f"Unknown method: {method}") + + +class ComposedTransformation(Transformation): + """ + N-dimensional Transformation composed of one or more other N_i-dimensional + sub-transformations, where the sum of N_i equals N. + + The dimensionality of the individual transformations does not have to be + the same, i.e., N_i != N_j is allowed. + + Extends pybop.Transformation. Based on pints.ComposedTransformation method. + """ + + def __init__(self, transformations: Transformation): + if not [*transformations]: + raise ValueError("Must have at least one sub-transformation.") + self._transformations: List[Transformation] = transformations + self._n_parameters: int = len(transformations) + self._is_elementwise: bool = all(t.is_elementwise() for t in transformations) + + if self._is_elementwise: + self._jacobian = self._elementwise_jacobian + self._log_jacobian_det = self._elementwise_log_jacobian_det + self._log_jacobian_det_S1 = self._elementwise_log_jacobian_det_S1 + else: + self._jacobian = self._general_jacobian + self._log_jacobian_det = self._general_log_jacobian_det + self._log_jacobian_det_S1 = self._general_log_jacobian_det_S1 + + def is_elementwise(self) -> bool: + return self._is_elementwise + + def jacobian(self, q: np.ndarray) -> np.ndarray: + return self._jacobian(q) + + def jacobian_S1(self, q: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + q = np.asarray(q) + output_S1 = np.zeros( + (self._n_parameters, self._n_parameters, self._n_parameters) + ) + lo = 0 + + for transformation in self._transformations: + hi = lo + transformation.n_parameters() + _, jac_S1 = transformation.jacobian_S1(q[lo:hi]) + for i, jac_S1_i in enumerate(jac_S1): + output_S1[lo + i, lo:hi, lo:hi] = jac_S1_i + lo = hi + + return self.jacobian(q), output_S1 + + def log_jacobian_det(self, q: np.ndarray) -> float: + return self._log_jacobian_det(q) + + def log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray]: + return self._log_jacobian_det_S1(q) + + def _transform(self, data: np.ndarray, method: str) -> np.ndarray: + data = np.asarray(data) + output = np.zeros_like(data) + lo = 0 + + for transformation in self._transformations: + hi = lo + transformation.n_parameters() + output[lo:hi] = getattr(transformation, method)(data[lo:hi]) + lo = hi + + return output + + def n_parameters(self) -> int: + return 
self._n_parameters + + def _elementwise_jacobian(self, q: np.ndarray) -> np.ndarray: + q = np.asarray(q) + diag = np.zeros(self._n_parameters) + lo = 0 + + for transformation in self._transformations: + hi = lo + transformation.n_parameters() + diag[lo:hi] = np.diagonal(transformation.jacobian(q[lo:hi])) + lo = hi + + return np.diag(diag) + + def _elementwise_log_jacobian_det(self, q: np.ndarray) -> float: + q = np.asarray(q) + return sum( + transformation.log_jacobian_det(q[lo : lo + transformation.n_parameters()]) + for lo, transformation in self._iter_transformations() + ) + + def _elementwise_log_jacobian_det_S1( + self, q: np.ndarray + ) -> Tuple[float, np.ndarray]: + q = np.asarray(q) + output = 0.0 + output_S1 = np.zeros(self._n_parameters) + lo = 0 + + for transformation in self._transformations: + hi = lo + transformation.n_parameters() + j, j_S1 = transformation.log_jacobian_det_S1(q[lo:hi]) + output += j + output_S1[lo:hi] = j_S1 + lo = hi + + return output, output_S1 + + def _general_jacobian(self, q: np.ndarray) -> np.ndarray: + q = np.asarray(q) + jacobian_blocks = [] + lo = 0 + + for transformation in self._transformations: + hi = lo + transformation.n_parameters() + jacobian_blocks.append(transformation.jacobian(q[lo:hi])) + lo = hi + + return np.block( + [ + [ + b if i == j else np.zeros_like(b) + for j, b in enumerate(jacobian_blocks) + ] + for i, _ in enumerate(jacobian_blocks) + ] + ) + + def _general_log_jacobian_det(self, q: np.ndarray) -> float: + return np.log(np.abs(np.linalg.det(self.jacobian(q)))) + + def _general_log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray]: + q = np.asarray(q) + jac, jac_S1 = self.jacobian_S1(q) + out_S1 = np.zeros(self._n_parameters) + + for i, jac_S1_i in enumerate(jac_S1): + out_S1[i] = np.trace(np.matmul(np.linalg.pinv(jac), jac_S1_i)) + + return self.log_jacobian_det(q), out_S1 + + def _iter_transformations(self): + lo = 0 + for transformation in self._transformations: + yield lo, transformation + lo += transformation.n_parameters() # TODO: Implement the following classes: -# class LogTransformation(Transformation): # class LogitTransformation(Transformation): -# class ComposedTransformation(Transformation): -# class ScaledTransformation(Transformation): From 7a12d6b866ea8934fddef903aa82b95b2cb0670d Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Fri, 21 Jun 2024 17:28:54 +0100 Subject: [PATCH 14/53] tests: add tests, remove redundant methods, add catch on x0==ground_truth --- pybop/transformations/__init__.py | 20 ++-- pybop/transformations/_transformations.py | 2 +- .../integration/test_optimisation_options.py | 2 + .../integration/test_spm_parameterisations.py | 2 + .../test_thevenin_parameterisation.py | 2 + tests/integration/test_transformation.py | 102 ++++++++++++++++++ tests/unit/test_transformations.py | 71 ++++++++++++ 7 files changed, 187 insertions(+), 14 deletions(-) create mode 100644 tests/integration/test_transformation.py create mode 100644 tests/unit/test_transformations.py diff --git a/pybop/transformations/__init__.py b/pybop/transformations/__init__.py index f1ad8cfc6..ab87fd93d 100644 --- a/pybop/transformations/__init__.py +++ b/pybop/transformations/__init__.py @@ -9,9 +9,9 @@ class Transformation(ABC): Abstract base class for transformations between two parameter spaces: the model parameter space and a search space. 
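A usage sketch of the composition above, pairing an identity and a scaled sub-transformation so that each block acts on its own slice of the parameter vector (argument names follow the later rename to coefficient/intercept):

import numpy as np
import pybop

composed = pybop.ComposedTransformation(
    [
        pybop.IdentityTransformation(),
        pybop.ScaledTransformation(coefficient=2.0, intercept=1),
    ]
)
q = np.array([5.0, 2.5])
print(composed.to_model(q))   # [5.0, 0.25]: each sub-transformation handles its slice
print(composed.jacobian(q))   # [[1.0, 0.0], [0.0, 0.5]]: block-diagonal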
- If `trans` is an instance of a `Transformation` class, you can apply the + If `transform` is an instance of a `Transformation` class, you can apply the transformation of a parameter vector from the model space `p` to the search - space `q` using `q = trans.to_search(p)` and the inverse using `p = trans.to_model(q)`. + space `q` using `q = transform.to_search(p)` and the inverse using `p = transform.to_model(q)`. Based on pints.transformation method. @@ -21,14 +21,14 @@ class Transformation(ABC): http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.47.9023 .. [2] Kaare Brandt Petersen and Michael Syskind Pedersen. "The Matrix Cookbook." 2012. """ - - def convert_log_prior(self, log_prior): - """Returns a transformed log-prior class.""" - return TransformedLogPrior(log_prior, self) + # ---- To be implemented with Monte Carlo PR ------ # + # def convert_log_prior(self, log_prior): + # """Returns a transformed log-prior class.""" + # return TransformedLogPrior(log_prior, self) def convert_boundaries(self, boundaries): """Returns a transformed boundaries class.""" - return TransformedBoundaries(boundaries, self) + return PintsTransformedBoundaries(boundaries, self) def convert_covariance_matrix(self, covariance: np.ndarray, q: np.ndarray) -> np.ndarray: """ @@ -107,12 +107,6 @@ def is_elementwise(self) -> bool: """ raise NotImplementedError("is_elementwise method must be implemented if used.") -class TransformedBoundaries(PintsTransformedBoundaries): - """Transformed boundaries class inherited from Pints TransformedBoundaries.""" - def __init__(self, boundaries, transformation): - super().__init__(boundaries, transformation) - - # ---- To be implemented with Monte Carlo PR ------ # # class TransformedLogPDF(BaseCost): # """Transformed log-PDF class.""" diff --git a/pybop/transformations/_transformations.py b/pybop/transformations/_transformations.py index 32f38327c..8b9dd8b33 100644 --- a/pybop/transformations/_transformations.py +++ b/pybop/transformations/_transformations.py @@ -59,7 +59,7 @@ def __init__( n_parameters: int = 1, ): self._translate = translate - self._scale = np.asarray([scale]) + self._scale = np.asarray([float(scale)]) self._inverse_scale = np.reciprocal(self._scale) self._n_parameters = n_parameters diff --git a/tests/integration/test_optimisation_options.py b/tests/integration/test_optimisation_options.py index dcd942764..3103ef35a 100644 --- a/tests/integration/test_optimisation_options.py +++ b/tests/integration/test_optimisation_options.py @@ -104,6 +104,8 @@ def test_optimisation_f_guessed(self, f_guessed, spm_costs): assert initial_cost > final_cost else: assert initial_cost < final_cost + else: + raise ValueError("Initial value is the same as the ground truth value.") np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) def get_data(self, model, parameters, x, init_soc): diff --git a/tests/integration/test_spm_parameterisations.py b/tests/integration/test_spm_parameterisations.py index 9ae2b4215..60ef43810 100644 --- a/tests/integration/test_spm_parameterisations.py +++ b/tests/integration/test_spm_parameterisations.py @@ -122,6 +122,8 @@ def test_spm_optimisers(self, optimiser, spm_costs): assert initial_cost > final_cost else: assert initial_cost < final_cost + else: + raise ValueError("Initial value is the same as the ground truth value.") np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) @pytest.fixture diff --git a/tests/integration/test_thevenin_parameterisation.py b/tests/integration/test_thevenin_parameterisation.py index 
57bb06898..37d67b62c 100644 --- a/tests/integration/test_thevenin_parameterisation.py +++ b/tests/integration/test_thevenin_parameterisation.py @@ -90,6 +90,8 @@ def test_optimisers_on_simple_model(self, optimiser, cost): assert initial_cost > final_cost else: assert initial_cost < final_cost + else: + raise ValueError("Initial value is the same as the ground truth value.") np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) def get_data(self, model, parameters, x): diff --git a/tests/integration/test_transformation.py b/tests/integration/test_transformation.py new file mode 100644 index 000000000..45feff80a --- /dev/null +++ b/tests/integration/test_transformation.py @@ -0,0 +1,102 @@ +import numpy as np +import pytest + +import pybop + + +class TestTransformation: + """ + A class for integration tests of the transformation methods. + """ + + @pytest.fixture(autouse=True) + def setup(self): + self.ground_truth = np.array([0.5, 0.2]) + np.random.normal( + loc=0.0, scale=0.05, size=2 + ) + + @pytest.fixture + def model(self): + parameter_set = pybop.ParameterSet.pybamm("Chen2020") + return pybop.lithium_ion.SPMe(parameter_set=parameter_set) + + @pytest.fixture + def parameters(self): + return pybop.Parameters( + pybop.Parameter( + "Positive electrode active material volume fraction", + prior=pybop.Uniform(0.4, 0.7), + bounds=[0.375, 0.725], + transformation=pybop.LogTransformation(), + ), + pybop.Parameter( + "Positive electrode conductivity [S.m-1]", + prior=pybop.Uniform(0.05, 0.45), + bounds=[0.04, 0.5], + transformation=pybop.LogTransformation(), + ), + ) + + @pytest.fixture(params=[0.4, 0.7]) + def init_soc(self, request): + return request.param + + def noise(self, sigma, values): + return np.random.normal(0, sigma, values) + + @pytest.fixture + def cost(self, model, parameters, init_soc): + # Form dataset + solution = self.get_data(model, parameters, self.ground_truth, init_soc) + dataset = pybop.Dataset( + { + "Time [s]": solution["Time [s]"].data, + "Current function [A]": solution["Current [A]"].data, + "Voltage [V]": solution["Voltage [V]"].data + + self.noise(0.002, len(solution["Time [s]"].data)), + } + ) + + # Define the cost to optimise + problem = pybop.FittingProblem(model, parameters, dataset, init_soc=init_soc) + return pybop.RootMeanSquaredError(problem) + + @pytest.mark.parametrize( + "optimiser", + [ + pybop.AdamW, + pybop.CMAES, + ], + ) + @pytest.mark.integration + def test_spm_optimisers(self, optimiser, cost): + x0 = cost.parameters.initial_value() + optim = optimiser( + cost=cost, + max_iterations=125, + ) + + initial_cost = optim.cost(x0) + x, final_cost = optim.run() + + # Assertions + if not np.allclose(x0, self.ground_truth, atol=1e-5): + assert initial_cost > final_cost + np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) + else: + raise ValueError("Initial value is the same as the ground truth value.") + + def get_data(self, model, parameters, x, init_soc): + model.parameters = parameters + experiment = pybop.Experiment( + [ + ( + "Discharge at 1C for 3 minutes (4 second period)", + "Charge at 1C for 3 minutes (4 second period)", + ), + ] + ) + sim = model.predict( + init_soc=init_soc, experiment=experiment, inputs=parameters.as_dict(x) + ) + return sim diff --git a/tests/unit/test_transformations.py b/tests/unit/test_transformations.py new file mode 100644 index 000000000..60dd318e9 --- /dev/null +++ b/tests/unit/test_transformations.py @@ -0,0 +1,71 @@ +import numpy as np +import pytest + +import pybop + + +class TestTransformations: + 
""" + A class to test the transformations. + """ + + @pytest.fixture + def parameters(self): + return pybop.Parameters( + pybop.Parameter( + "Identity", + transformation=pybop.IdentityTransformation(), + ), + pybop.Parameter( + "Scaled", + transformation=pybop.ScaledTransformation(scale=2.0, translate=1), + ), + pybop.Parameter( + "Log", + transformation=pybop.LogTransformation(), + ), + ) + + @pytest.mark.unit + def test_identity_transformation(self, parameters): + q = np.array([5.0]) + transformation = parameters["Identity"].transformation + assert np.array_equal(transformation.to_model(q), q) + assert np.array_equal(transformation.to_search(q), q) + assert transformation.log_jacobian_det(q) == 0.0 + + jac, jac_S1 = transformation.jacobian_S1(q) + assert np.array_equal(jac, np.eye(1)) + assert np.array_equal(jac_S1, np.zeros((1, 1, 1))) + + @pytest.mark.unit + def test_scaled_transformation(self, parameters): + q = np.array([2.5]) + transformation = parameters["Scaled"].transformation + p = transformation.to_model(q) + assert np.allclose(p, (q / 2.0) - 1.0) + + q_transformed = transformation.to_search(p) + assert np.allclose(q_transformed, q) + assert np.allclose( + transformation.log_jacobian_det(q), np.sum(np.log(np.abs(2.0))) + ) + + jac, jac_S1 = transformation.jacobian_S1(q) + assert np.array_equal(jac, np.diag([0.5])) + assert np.array_equal(jac_S1, np.zeros((1, 1, 1))) + + @pytest.mark.unit + def test_log_transformation(self, parameters): + q = np.array([10]) + transformation = parameters["Log"].transformation + p = transformation.to_model(q) + assert np.allclose(p, np.exp(q)) + + q_transformed = transformation.to_search(p) + assert np.allclose(q_transformed, q) + assert np.allclose(transformation.log_jacobian_det(q), -np.sum(np.log(q))) + + jac, jac_S1 = transformation.jacobian_S1(q) + assert np.array_equal(jac, np.diag(1 / q)) + assert np.array_equal(jac_S1, np.diag(-1 / q**2)) From 9a4b34bf6d3e6666b71efa9ed31e9106b3650e23 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Sat, 22 Jun 2024 11:37:35 +0100 Subject: [PATCH 15/53] fix: add _verify_inputs, increase coverage, add temp fix to GaussianLogLikelihood --- pybop/__init__.py | 4 +- pybop/costs/_likelihoods.py | 4 + .../__init__.py | 38 ++++---- .../_transformation.py} | 86 ++++++++++--------- tests/unit/test_likelihoods.py | 6 +- tests/unit/test_transformations.py | 64 +++++++++++++- 6 files changed, 139 insertions(+), 63 deletions(-) rename pybop/{transformations => transformation}/__init__.py (82%) rename pybop/{transformations/_transformations.py => transformation/_transformation.py} (82%) diff --git a/pybop/__init__.py b/pybop/__init__.py index 82d527b03..5755a4b3b 100644 --- a/pybop/__init__.py +++ b/pybop/__init__.py @@ -81,8 +81,8 @@ # # Transformation classes # -from .transformations import Transformation -from .transformations._transformations import ( +from .transformation import Transformation +from .transformation._transformation import ( IdentityTransformation, ScaledTransformation, LogTransformation, diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index 67ad97ec2..615184bcd 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -1,5 +1,6 @@ import numpy as np +from pybop import IdentityTransformation from pybop.costs.base_cost import BaseCost @@ -124,6 +125,9 @@ def __init__(self, problem): super(GaussianLogLikelihood, self).__init__(problem) self._logpi = -0.5 * self.n_time_data * np.log(2 * np.pi) self._dl = np.ones(self.n_parameters + self.n_outputs) + 
self.transformation.append( + IdentityTransformation() + ) # Temporary fix, ahead of #338 def _evaluate(self, x): """ diff --git a/pybop/transformations/__init__.py b/pybop/transformation/__init__.py similarity index 82% rename from pybop/transformations/__init__.py rename to pybop/transformation/__init__.py index ab87fd93d..20c7819fa 100644 --- a/pybop/transformations/__init__.py +++ b/pybop/transformation/__init__.py @@ -1,7 +1,6 @@ from abc import ABC, abstractmethod -from typing import Tuple, Union, Sequence +from typing import Tuple, Union, Sequence, List import numpy as np -from pints import TransformedBoundaries as PintsTransformedBoundaries class Transformation(ABC): @@ -26,17 +25,13 @@ class Transformation(ABC): # """Returns a transformed log-prior class.""" # return TransformedLogPrior(log_prior, self) - def convert_boundaries(self, boundaries): - """Returns a transformed boundaries class.""" - return PintsTransformedBoundaries(boundaries, self) - - def convert_covariance_matrix(self, covariance: np.ndarray, q: np.ndarray) -> np.ndarray: + def convert_covariance_matrix(self, cov: np.ndarray, q: np.ndarray) -> np.ndarray: """ Converts a covariance matrix `covariance` from the model space to the search space around a parameter vector `q` in the search space. """ jac_inv = np.linalg.pinv(self.jacobian(q)) - return jac_inv @ covariance @ jac_inv.T + return jac_inv @ cov @ jac_inv.T def convert_standard_deviation(self, std: Union[float, np.ndarray], q: np.ndarray) -> np.ndarray: """ @@ -67,22 +62,18 @@ def log_jacobian_det(self, q: np.ndarray) -> float: Returns the logarithm of the absolute value of the determinant of the Jacobian matrix at the parameter vector `q`. """ - return np.log(np.abs(np.linalg.det(self.jacobian(q)))) + raise NotImplementedError("log_jacobian_det method must be implemented if used.") def log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray]: """ Computes the logarithm of the absolute value of the determinant of the Jacobian, and returns it along with its partial derivatives. 
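A numeric check of the covariance conversion above, assuming the log transformation's to_model Jacobian diag(1/q): the model-space covariance is pre- and post-multiplied by the inverse Jacobian, which in this case multiplies it by q squared (the unit tests added later in this series assert exactly this).

import numpy as np

q = np.array([10.0])
jac = np.diag(1 / q)              # Jacobian of to_model for a log transformation
jac_inv = np.linalg.pinv(jac)     # diag(q)
cov = np.array([[0.5]])
print(jac_inv @ cov @ jac_inv.T)  # [[50.]] == cov * q**2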
""" - jacobian, hessian = self.jacobian_S1(q) - jacobian_inv = np.linalg.pinv(jacobian) - derivatives = np.array([np.trace(jacobian_inv @ djac) for djac in hessian]) - return self.log_jacobian_det(q), derivatives + raise NotImplementedError("log_jacobian_det_S1 method must be implemented if used.") - @abstractmethod - def n_parameters(self) -> int: - """Returns the dimension of the parameter space this transformation is defined over.""" - pass + @property + def n_parameters(self): + return self._n_parameters def to_model(self, q: np.ndarray) -> np.ndarray: """Transforms a parameter vector `q` from the search space to the model space.""" @@ -107,6 +98,19 @@ def is_elementwise(self) -> bool: """ raise NotImplementedError("is_elementwise method must be implemented if used.") + def _verify_input(self, input: Union[List[float], float, np.ndarray]) -> None: + """Set and validate the translate parameter.""" + if isinstance(input, (float, int)): + input = np.full(self._n_parameters, float(input)) + elif isinstance(input, (list, np.ndarray)): + if len(input) != self._n_parameters: + raise ValueError(f"Translate must have {self._n_parameters} elements") + input = np.asarray(input, dtype=float) + else: + raise TypeError("Translate must be a float, list, or numpy array") + + return input + # ---- To be implemented with Monte Carlo PR ------ # # class TransformedLogPDF(BaseCost): # """Transformed log-PDF class.""" diff --git a/pybop/transformations/_transformations.py b/pybop/transformation/_transformation.py similarity index 82% rename from pybop/transformations/_transformations.py rename to pybop/transformation/_transformation.py index 8b9dd8b33..37db5a3e7 100644 --- a/pybop/transformations/_transformations.py +++ b/pybop/transformation/_transformation.py @@ -36,10 +36,6 @@ def log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray]: """See :meth:`Transformation.log_jacobian_det_S1()`.""" return self.log_jacobian_det(q), np.zeros(self._n_parameters) - def n_parameters(self) -> int: - """See :meth:`Transformation.n_parameters()`.""" - return self._n_parameters - def _transform(self, x: np.ndarray, method: str) -> np.ndarray: """See :meth:`Transformation._transform`.""" return np.asarray(x) @@ -58,10 +54,10 @@ def __init__( translate: Union[list, float, np.ndarray] = 0, n_parameters: int = 1, ): - self._translate = translate - self._scale = np.asarray([float(scale)]) - self._inverse_scale = np.reciprocal(self._scale) self._n_parameters = n_parameters + self._translate = self._verify_input(translate) + self._scale = self._verify_input(scale) + self._inverse_scale = np.reciprocal(self._scale) def is_elementwise(self) -> bool: """See :meth:`Transformation.is_elementwise()`.""" @@ -84,13 +80,9 @@ def log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray]: """See :meth:`Transformation.log_jacobian_det_S1()`.""" return self.log_jacobian_det(q), np.zeros(self._n_parameters) - def n_parameters(self) -> int: - """See :meth:`Transformation.n_parameters()`.""" - return self._n_parameters - def _transform(self, x: np.ndarray, method: str) -> np.ndarray: """See :meth:`Transformation._transform`.""" - x = np.asarray(x) + x = self._verify_input(x) if method == "to_model": return x * self._inverse_scale - self._translate elif method == "to_search": @@ -129,14 +121,9 @@ def log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray]: """See :meth:`Transformation.log_jacobian_det_S1()`.""" return self.log_jacobian_det(q), -1 / q - def n_parameters(self) -> int: - """See 
:meth:`Transformation.n_parameters()`.""" - return self._n_parameters - def _transform(self, x: np.ndarray, method: str) -> np.ndarray: """See :meth:`Transformation._transform`.""" - if isinstance(x, (int, float)): - x = np.asarray([x]) + x = self._verify_input(x) if method == "to_model": return np.exp(x) elif method == "to_search": @@ -156,13 +143,24 @@ class ComposedTransformation(Transformation): Extends pybop.Transformation. Based on pints.ComposedTransformation method. """ - def __init__(self, transformations: Transformation): - if not [*transformations]: + def __init__(self, transformations: List[Transformation]): + if not transformations: raise ValueError("Must have at least one sub-transformation.") - self._transformations: List[Transformation] = transformations - self._n_parameters: int = len(transformations) - self._is_elementwise: bool = all(t.is_elementwise() for t in transformations) - + self._transformations = [] + self._n_parameters = 0 + self._is_elementwise = True + for transformation in transformations: + self._append_transformation(transformation) + self._update_methods() + + def _append_transformation(self, transformation: Transformation): + if not isinstance(transformation, Transformation): + raise ValueError("The appended object must be a Transformation.") + self._transformations.append(transformation) + self._n_parameters += transformation.n_parameters + self._is_elementwise = self._is_elementwise and transformation.is_elementwise() + + def _update_methods(self): if self._is_elementwise: self._jacobian = self._elementwise_jacobian self._log_jacobian_det = self._elementwise_log_jacobian_det @@ -172,6 +170,16 @@ def __init__(self, transformations: Transformation): self._log_jacobian_det = self._general_log_jacobian_det self._log_jacobian_det_S1 = self._general_log_jacobian_det_S1 + def append(self, transformation: Transformation): + """ + Append a new transformation to the existing composition. + + Args: + transformation (Transformation): The transformation to append. 
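A sketch of growing a composition with append(), mirroring the unit test added later in this series: the parameter count and the elementwise flag are refreshed on every append.

import numpy as np
import pybop

composed = pybop.ComposedTransformation(
    [
        pybop.IdentityTransformation(),
        pybop.ScaledTransformation(coefficient=2.0, intercept=1),
    ]
)
composed.append(pybop.LogTransformation())  # composition now spans three parameters
q = np.array([5.0, 2.5, 10.0])
print(composed.to_model(q))                 # [5.0, 0.25, exp(10)]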
+ """ + self._append_transformation(transformation) + self._update_methods() + def is_elementwise(self) -> bool: return self._is_elementwise @@ -179,14 +187,14 @@ def jacobian(self, q: np.ndarray) -> np.ndarray: return self._jacobian(q) def jacobian_S1(self, q: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: - q = np.asarray(q) + q = self._verify_input(q) output_S1 = np.zeros( (self._n_parameters, self._n_parameters, self._n_parameters) ) lo = 0 for transformation in self._transformations: - hi = lo + transformation.n_parameters() + hi = lo + transformation.n_parameters _, jac_S1 = transformation.jacobian_S1(q[lo:hi]) for i, jac_S1_i in enumerate(jac_S1): output_S1[lo + i, lo:hi, lo:hi] = jac_S1_i @@ -195,40 +203,38 @@ def jacobian_S1(self, q: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: return self.jacobian(q), output_S1 def log_jacobian_det(self, q: np.ndarray) -> float: + """See :meth:`Transformation.log_jacobian_det()`.""" return self._log_jacobian_det(q) def log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray]: return self._log_jacobian_det_S1(q) def _transform(self, data: np.ndarray, method: str) -> np.ndarray: - data = np.asarray(data) + data = self._verify_input(data) output = np.zeros_like(data) lo = 0 for transformation in self._transformations: - hi = lo + transformation.n_parameters() + hi = lo + transformation.n_parameters output[lo:hi] = getattr(transformation, method)(data[lo:hi]) lo = hi return output - def n_parameters(self) -> int: - return self._n_parameters - def _elementwise_jacobian(self, q: np.ndarray) -> np.ndarray: - q = np.asarray(q) + q = self._verify_input(q) diag = np.zeros(self._n_parameters) lo = 0 for transformation in self._transformations: - hi = lo + transformation.n_parameters() + hi = lo + transformation.n_parameters diag[lo:hi] = np.diagonal(transformation.jacobian(q[lo:hi])) lo = hi return np.diag(diag) def _elementwise_log_jacobian_det(self, q: np.ndarray) -> float: - q = np.asarray(q) + q = self._verify_input(q) return sum( transformation.log_jacobian_det(q[lo : lo + transformation.n_parameters()]) for lo, transformation in self._iter_transformations() @@ -237,13 +243,13 @@ def _elementwise_log_jacobian_det(self, q: np.ndarray) -> float: def _elementwise_log_jacobian_det_S1( self, q: np.ndarray ) -> Tuple[float, np.ndarray]: - q = np.asarray(q) + q = self._verify_input(q) output = 0.0 output_S1 = np.zeros(self._n_parameters) lo = 0 for transformation in self._transformations: - hi = lo + transformation.n_parameters() + hi = lo + transformation.n_parameters j, j_S1 = transformation.log_jacobian_det_S1(q[lo:hi]) output += j output_S1[lo:hi] = j_S1 @@ -252,12 +258,12 @@ def _elementwise_log_jacobian_det_S1( return output, output_S1 def _general_jacobian(self, q: np.ndarray) -> np.ndarray: - q = np.asarray(q) + q = self._verify_input(q) jacobian_blocks = [] lo = 0 for transformation in self._transformations: - hi = lo + transformation.n_parameters() + hi = lo + transformation.n_parameters jacobian_blocks.append(transformation.jacobian(q[lo:hi])) lo = hi @@ -275,7 +281,7 @@ def _general_log_jacobian_det(self, q: np.ndarray) -> float: return np.log(np.abs(np.linalg.det(self.jacobian(q)))) def _general_log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray]: - q = np.asarray(q) + q = self._verify_input(q) jac, jac_S1 = self.jacobian_S1(q) out_S1 = np.zeros(self._n_parameters) @@ -288,7 +294,7 @@ def _iter_transformations(self): lo = 0 for transformation in self._transformations: yield lo, transformation - lo += 
transformation.n_parameters() + lo += transformation.n_parameters # TODO: Implement the following classes: diff --git a/tests/unit/test_likelihoods.py b/tests/unit/test_likelihoods.py index 41ee36673..cc55ca73d 100644 --- a/tests/unit/test_likelihoods.py +++ b/tests/unit/test_likelihoods.py @@ -86,7 +86,7 @@ def test_base_likelihood_call_raises_not_implemented_error( ): likelihood = pybop.BaseLikelihood(one_signal_problem) with pytest.raises(NotImplementedError): - likelihood(np.array([0.5, 0.5])) + likelihood(np.array([0.5])) @pytest.mark.unit def test_set_get_sigma(self, one_signal_problem): @@ -128,8 +128,8 @@ def test_gaussian_log_likelihood_known_sigma(self, problem_name, request): @pytest.mark.unit def test_gaussian_log_likelihood(self, one_signal_problem): likelihood = pybop.GaussianLogLikelihood(one_signal_problem) - result = likelihood(np.array([0.5, 0.5])) - grad_result, grad_likelihood = likelihood.evaluateS1(np.array([0.5, 0.5])) + result = likelihood(np.array([0.8, 0.2])) + grad_result, grad_likelihood = likelihood.evaluateS1(np.array([0.8, 0.2])) assert isinstance(result, float) np.testing.assert_allclose(result, grad_result, atol=1e-5) assert np.all(grad_likelihood <= 0) diff --git a/tests/unit/test_transformations.py b/tests/unit/test_transformations.py index 60dd318e9..5b917ceae 100644 --- a/tests/unit/test_transformations.py +++ b/tests/unit/test_transformations.py @@ -1,10 +1,12 @@ +import inspect + import numpy as np import pytest import pybop -class TestTransformations: +class TestTransformation: """ A class to test the transformations. """ @@ -33,17 +35,25 @@ def test_identity_transformation(self, parameters): assert np.array_equal(transformation.to_model(q), q) assert np.array_equal(transformation.to_search(q), q) assert transformation.log_jacobian_det(q) == 0.0 + assert transformation.n_parameters == 1 jac, jac_S1 = transformation.jacobian_S1(q) assert np.array_equal(jac, np.eye(1)) assert np.array_equal(jac_S1, np.zeros((1, 1, 1))) + # Test covariance transformation + cov = np.array([[0.5]]) + q = np.array([5.0]) + cov_transformed = transformation.convert_covariance_matrix(cov, q) + assert np.array_equal(cov_transformed, cov) + @pytest.mark.unit def test_scaled_transformation(self, parameters): q = np.array([2.5]) transformation = parameters["Scaled"].transformation p = transformation.to_model(q) assert np.allclose(p, (q / 2.0) - 1.0) + assert transformation.n_parameters == 1 q_transformed = transformation.to_search(p) assert np.allclose(q_transformed, q) @@ -55,12 +65,18 @@ def test_scaled_transformation(self, parameters): assert np.array_equal(jac, np.diag([0.5])) assert np.array_equal(jac_S1, np.zeros((1, 1, 1))) + # Test covariance transformation + cov = np.array([[0.5]]) + cov_transformed = transformation.convert_covariance_matrix(cov, q) + assert np.array_equal(cov_transformed, cov * transformation._scale**2) + @pytest.mark.unit def test_log_transformation(self, parameters): q = np.array([10]) transformation = parameters["Log"].transformation p = transformation.to_model(q) assert np.allclose(p, np.exp(q)) + assert transformation.n_parameters == 1 q_transformed = transformation.to_search(p) assert np.allclose(q_transformed, q) @@ -69,3 +85,49 @@ def test_log_transformation(self, parameters): jac, jac_S1 = transformation.jacobian_S1(q) assert np.array_equal(jac, np.diag(1 / q)) assert np.array_equal(jac_S1, np.diag(-1 / q**2)) + + # Test covariance transformation + cov = np.array([[0.5]]) + cov_transformed = transformation.convert_covariance_matrix(cov, q) + 
assert np.array_equal(cov_transformed, cov * q**2) + + +class TestBaseTransformation: + """ + A class to test the abstract base transformation class. + """ + + @pytest.mark.unit + def test_abstract_base_transformation(self): + with pytest.raises(TypeError): + pybop.Transformation() + + @pytest.mark.unit + def test_abstract_methods(self): + abstract_methods = ["jacobian", "_transform"] + for method in abstract_methods: + assert hasattr(pybop.Transformation, method) + assert getattr(pybop.Transformation, method).__isabstractmethod__ + + @pytest.mark.unit + def test_concrete_methods(self): + concrete_methods = [ + "convert_covariance_matrix", + "convert_standard_deviation", + "log_jacobian_det", + "to_model", + "to_search", + ] + for method in concrete_methods: + assert hasattr(pybop.Transformation, method) + assert not inspect.isabstract(getattr(pybop.Transformation, method)) + + @pytest.mark.unit + def test_not_implemented_methods(self): + not_implemented_methods = [ + "jacobian_S1", + "log_jacobian_det_S1", + "is_elementwise", + ] + for method in not_implemented_methods: + assert hasattr(pybop.Transformation, method) From aab8995fc7bd6911af7ead9946f34f48ae3ba5ad Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Sat, 22 Jun 2024 19:16:25 +0100 Subject: [PATCH 16/53] docs: update docstrings --- pybop/transformation/_transformation.py | 208 +++++++++++++++++++++--- 1 file changed, 186 insertions(+), 22 deletions(-) diff --git a/pybop/transformation/_transformation.py b/pybop/transformation/_transformation.py index 37db5a3e7..7746ddc74 100644 --- a/pybop/transformation/_transformation.py +++ b/pybop/transformation/_transformation.py @@ -7,9 +7,29 @@ class IdentityTransformation(Transformation): """ - Identity transformation between two parameter spaces: the model parameter space and a search space. + This class implements a trivial transformation where the model parameter space + and the search space are identical. It extends the base Transformation class. - Based on pints.IdentityTransformation method. + The transformation is defined as: + - to_search: y = x + - to_model: x = y + + Key properties: + 1. Jacobian: Identity matrix + 2. Log determinant of Jacobian: Always 0 + 3. Elementwise: True (each output dimension depends only on the corresponding input dimension) + + Use cases: + 1. When no transformation is needed between spaces + 2. As a placeholder in composite transformations + 3. For testing and benchmarking other transformations + + Note: While this transformation doesn't change the parameters, it still provides + all the methods required by the Transformation interface, making it useful in + scenarios where a transformation object is expected but no actual transformation + is needed. + + Initially based on pints.IdentityTransformation method. """ def __init__(self, n_parameters: int = 1): @@ -43,21 +63,36 @@ def _transform(self, x: np.ndarray, method: str) -> np.ndarray: class ScaledTransformation(Transformation): """ - Scaled transformation between two parameter spaces: the model parameter space and a search space. + This class implements a linear transformation between the model parameter space + and a search space, using a coefficient (scale factor) and an intercept (offset). + It extends the base Transformation class. 
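The base class leaves jacobian and _transform abstract, so the smallest possible concrete subclass only has to supply those two hooks; a hypothetical sketch (NoOp is not part of pybop):

import numpy as np
import pybop

class NoOp(pybop.Transformation):
    # Minimal concrete Transformation: both abstract hooks supplied.
    def __init__(self, n_parameters: int = 1):
        self._n_parameters = n_parameters

    def jacobian(self, q: np.ndarray) -> np.ndarray:
        return np.eye(self._n_parameters)

    def _transform(self, x: np.ndarray, method: str) -> np.ndarray:
        return np.asarray(x)

t = NoOp()
print(t.to_search(np.array([1.0])))  # to_search delegates to _transform in the base class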
+ + The transformation is defined as: + - to_search: y = coefficient * (x + intercept) + - to_model: x = y / coefficient - intercept + + Where: + - x is in the model parameter space + - y is in the search space + - coefficient is the scaling factor + - intercept is the offset + + This transformation is useful for scaling and shifting parameters to a more + suitable range for optimisation algorithms. - Based on pints.ScaledTransformation method. + Based on pints.ScaledTransformation class. """ def __init__( self, - scale: Union[list, float, np.ndarray], - translate: Union[list, float, np.ndarray] = 0, + coefficient: Union[list, float, np.ndarray], + intercept: Union[list, float, np.ndarray] = 0, n_parameters: int = 1, ): self._n_parameters = n_parameters - self._translate = self._verify_input(translate) - self._scale = self._verify_input(scale) - self._inverse_scale = np.reciprocal(self._scale) + self._intercept = self._verify_input(intercept) + self._coefficient = self._verify_input(coefficient) + self._inverse_coeff = np.reciprocal(self._coefficient) def is_elementwise(self) -> bool: """See :meth:`Transformation.is_elementwise()`.""" @@ -65,7 +100,7 @@ def is_elementwise(self) -> bool: def jacobian(self, q: np.ndarray) -> np.ndarray: """See :meth:`Transformation.jacobian()`.""" - return np.diag(self._inverse_scale) + return np.diag(self._inverse_coeff) def jacobian_S1(self, q: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: """See :meth:`Transformation.jacobian_S1()`.""" @@ -74,7 +109,7 @@ def jacobian_S1(self, q: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: def log_jacobian_det(self, q: np.ndarray) -> float: """See :meth:`Transformation.log_jacobian_det()`.""" - return np.sum(np.log(np.abs(self._scale))) + return np.sum(np.log(np.abs(self._coefficient))) def log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray]: """See :meth:`Transformation.log_jacobian_det_S1()`.""" @@ -84,18 +119,38 @@ def _transform(self, x: np.ndarray, method: str) -> np.ndarray: """See :meth:`Transformation._transform`.""" x = self._verify_input(x) if method == "to_model": - return x * self._inverse_scale - self._translate + return x * self._inverse_coeff - self._intercept elif method == "to_search": - return self._scale * (x + self._translate) + return self._coefficient * (x + self._intercept) else: raise ValueError(f"Unknown method: {method}") class LogTransformation(Transformation): """ - Log transformation between two parameter spaces: the model parameter space and a search space. + This class implements a logarithmic transformation between the model parameter space + and a search space. It extends the base Transformation class. - Based on pints.LogTransformation method. + The transformation is defined as: + - to_search: y = log(x) + - to_model: x = exp(y) + + Where: + - x is in the model parameter space (strictly positive) + - y is in the search space (can be any real number) + + This transformation is particularly useful for: + 1. Parameters that are strictly positive and may span several orders of magnitude. + 2. Converting multiplicative processes to additive ones in the search space. + 3. Ensuring positivity constraints without explicit bounds in optimisation. + + Note: Care should be taken when using this transformation, as it can introduce + bias in the parameter estimates if not accounted for properly in the likelihood + or cost function. Simply, E[log(x)] <= log(E[x]) as per to Jensen's inequality. 
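The bias note above is easy to demonstrate numerically; for any non-degenerate positive random variable the mean of the logs sits strictly below the log of the mean:

import numpy as np

rng = np.random.default_rng(42)
x = rng.lognormal(mean=0.0, sigma=1.0, size=100_000)
print(np.mean(np.log(x)))  # ~0.0: E[log(x)]
print(np.log(np.mean(x)))  # ~0.5: log(E[x]), strictly larger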
+ For more information, see Jensen's inequality: + https://en.wikipedia.org/w/index.php?title=Jensen%27s_inequality&oldid=1212437916#Probabilistic_form + + Initially based on pints.LogTransformation class. """ def __init__(self, n_parameters: int = 1): @@ -137,10 +192,14 @@ class ComposedTransformation(Transformation): N-dimensional Transformation composed of one or more other N_i-dimensional sub-transformations, where the sum of N_i equals N. + This class allows for the composition of multiple transformations, each potentially + operating on a different number of dimensions. The total dimensionality of the + composed transformation is the sum of the dimensionalities of its components. + The dimensionality of the individual transformations does not have to be the same, i.e., N_i != N_j is allowed. - Extends pybop.Transformation. Based on pints.ComposedTransformation method. + Extends pybop.Transformation. Initially based on pints.ComposedTransformation class. """ def __init__(self, transformations: List[Transformation]): @@ -154,6 +213,19 @@ def __init__(self, transformations: List[Transformation]): self._update_methods() def _append_transformation(self, transformation: Transformation): + """ + Append a transformation to the internal list of transformations. + + Parameters + ---------- + transformation : Transformation + Transformation to append. + + Raises + ------ + ValueError + If the appended object is not a Transformation. + """ if not isinstance(transformation, Transformation): raise ValueError("The appended object must be a Transformation.") self._transformations.append(transformation) @@ -161,6 +233,9 @@ def _append_transformation(self, transformation: Transformation): self._is_elementwise = self._is_elementwise and transformation.is_elementwise() def _update_methods(self): + """ + Update the internal methods based on whether the transformation is elementwise. + """ if self._is_elementwise: self._jacobian = self._elementwise_jacobian self._log_jacobian_det = self._elementwise_log_jacobian_det @@ -174,19 +249,24 @@ def append(self, transformation: Transformation): """ Append a new transformation to the existing composition. - Args: - transformation (Transformation): The transformation to append. + Parameters + ---------- + transformation : Transformation + The transformation to append. 
""" self._append_transformation(transformation) self._update_methods() def is_elementwise(self) -> bool: + """See :meth:`Transformation.is_elementwise()`.""" return self._is_elementwise def jacobian(self, q: np.ndarray) -> np.ndarray: + """See :meth:`Transformation.jacobian()`.""" return self._jacobian(q) def jacobian_S1(self, q: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """See :meth:`Transformation.jacobian_S1()`.""" q = self._verify_input(q) output_S1 = np.zeros( (self._n_parameters, self._n_parameters, self._n_parameters) @@ -207,9 +287,11 @@ def log_jacobian_det(self, q: np.ndarray) -> float: return self._log_jacobian_det(q) def log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray]: + """See :meth:`Transformation.log_jacobian_det_S1()`.""" return self._log_jacobian_det_S1(q) def _transform(self, data: np.ndarray, method: str) -> np.ndarray: + """See :meth:`Transformation._transform`.""" data = self._verify_input(data) output = np.zeros_like(data) lo = 0 @@ -222,6 +304,19 @@ def _transform(self, data: np.ndarray, method: str) -> np.ndarray: return output def _elementwise_jacobian(self, q: np.ndarray) -> np.ndarray: + """ + Compute the elementwise Jacobian of the composed transformation. + + Parameters + ---------- + q : np.ndarray + Input array. + + Returns + ------- + np.ndarray + Diagonal matrix representing the elementwise Jacobian. + """ q = self._verify_input(q) diag = np.zeros(self._n_parameters) lo = 0 @@ -234,6 +329,19 @@ def _elementwise_jacobian(self, q: np.ndarray) -> np.ndarray: return np.diag(diag) def _elementwise_log_jacobian_det(self, q: np.ndarray) -> float: + """ + Compute the elementwise logarithm of the determinant of the Jacobian. + + Parameters + ---------- + q : np.ndarray + Input array. + + Returns + ------- + float + Sum of log determinants of individual transformations. + """ q = self._verify_input(q) return sum( transformation.log_jacobian_det(q[lo : lo + transformation.n_parameters()]) @@ -243,6 +351,19 @@ def _elementwise_log_jacobian_det(self, q: np.ndarray) -> float: def _elementwise_log_jacobian_det_S1( self, q: np.ndarray ) -> Tuple[float, np.ndarray]: + """ + Compute the elementwise logarithm of the determinant of the Jacobian and its first-order sensitivities. + + Parameters + ---------- + q : np.ndarray + Input array. + + Returns + ------- + Tuple[float, np.ndarray] + Tuple of sum of log determinants and concatenated first-order sensitivities. + """ q = self._verify_input(q) output = 0.0 output_S1 = np.zeros(self._n_parameters) @@ -258,6 +379,19 @@ def _elementwise_log_jacobian_det_S1( return output, output_S1 def _general_jacobian(self, q: np.ndarray) -> np.ndarray: + """ + Compute the general Jacobian of the composed transformation. + + Parameters + ---------- + q : np.ndarray + Input array. + + Returns + ------- + np.ndarray + Block diagonal matrix of the Jacobian of each transformation. + """ q = self._verify_input(q) jacobian_blocks = [] lo = 0 @@ -278,9 +412,35 @@ def _general_jacobian(self, q: np.ndarray) -> np.ndarray: ) def _general_log_jacobian_det(self, q: np.ndarray) -> float: + """ + Compute the general logarithm of the determinant of the Jacobian. + + Parameters + ---------- + q : np.ndarray + Input array. + + Returns + ------- + float + Logarithm of the absolute value of the determinant of the Jacobian. 
+ """ return np.log(np.abs(np.linalg.det(self.jacobian(q)))) def _general_log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray]: + """ + Compute the general logarithm of the determinant of the Jacobian and its first-order sensitivities. + + Parameters + ---------- + q : np.ndarray + Input array. + + Returns + ------- + Tuple[float, np.ndarray] + Tuple of log determinant and its first-order sensitivities. + """ q = self._verify_input(q) jac, jac_S1 = self.jacobian_S1(q) out_S1 = np.zeros(self._n_parameters) @@ -291,11 +451,15 @@ def _general_log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray return self.log_jacobian_det(q), out_S1 def _iter_transformations(self): + """ + Iterate over the transformations in the composition. + + Yields + ------ + Tuple[int, Transformation] + Tuple of starting index and transformation object for each sub-transformation. + """ lo = 0 for transformation in self._transformations: yield lo, transformation lo += transformation.n_parameters - - -# TODO: Implement the following classes: -# class LogitTransformation(Transformation): From feae89fd58505cd6d3f0abf26237ca9fccc1518a Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Sun, 23 Jun 2024 17:52:20 +0100 Subject: [PATCH 17/53] fix: add catch for transformation == None, updt tests for arg rename --- pybop/costs/_likelihoods.py | 7 ++++--- pybop/costs/base_cost.py | 25 ++++++++++++++++--------- tests/unit/test_transformations.py | 4 ++-- 3 files changed, 22 insertions(+), 14 deletions(-) diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index 615184bcd..7aa027e28 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -125,9 +125,10 @@ def __init__(self, problem): super(GaussianLogLikelihood, self).__init__(problem) self._logpi = -0.5 * self.n_time_data * np.log(2 * np.pi) self._dl = np.ones(self.n_parameters + self.n_outputs) - self.transformation.append( - IdentityTransformation() - ) # Temporary fix, ahead of #338 + if self.transformation: + self.transformation.append( + IdentityTransformation() + ) # Temporary fix, ahead of #338 def _evaluate(self, x): """ diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 1d45b04b2..525ab9f52 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -34,17 +34,20 @@ def __init__(self, problem=None): self.x0 = self.problem.x0 self.n_outputs = self.problem.n_outputs self.signal = self.problem.signal + self.transformation = self.construct_transformation() - # Construct ComposedTransformation from list of transformations - self.transformations = [ - t if t is not None else IdentityTransformation() - for t in self.parameters.get_transformations() - ] - self.transformation = ComposedTransformation(self.transformations) + def construct_transformation(self): + """ + Create a ComposedTransformation object from the individual parameters transformations. + """ + transformations = self.parameters.get_transformations() + if not transformations or all(t is None for t in transformations): + return None - @property - def n_parameters(self): - return len(self.parameters) + valid_transformations = [ + t if t is not None else IdentityTransformation() for t in transformations + ] + return ComposedTransformation(valid_transformations) def __call__(self, x): """ @@ -161,3 +164,7 @@ def _evaluateS1(self, x): If the method has not been implemented by the subclass. 
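A standalone sketch of the padding behaviour in construct_transformation above: untransformed parameters are given identities so the composition lines up one-to-one with the parameter vector, while a fully untransformed parameter set yields no transformation object at all.

import pybop

mixed = [pybop.LogTransformation(), None, None]
if all(t is None for t in mixed):
    composed = None  # nothing to do: the cost keeps transformation=None
else:
    padded = [t if t is not None else pybop.IdentityTransformation() for t in mixed]
    composed = pybop.ComposedTransformation(padded)
print(composed.n_parameters)  # 3: one sub-transformation per parameter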
""" raise NotImplementedError + + @property + def n_parameters(self): + return len(self.parameters) diff --git a/tests/unit/test_transformations.py b/tests/unit/test_transformations.py index 5b917ceae..fe4f8898f 100644 --- a/tests/unit/test_transformations.py +++ b/tests/unit/test_transformations.py @@ -20,7 +20,7 @@ def parameters(self): ), pybop.Parameter( "Scaled", - transformation=pybop.ScaledTransformation(scale=2.0, translate=1), + transformation=pybop.ScaledTransformation(coefficient=2.0, intercept=1), ), pybop.Parameter( "Log", @@ -68,7 +68,7 @@ def test_scaled_transformation(self, parameters): # Test covariance transformation cov = np.array([[0.5]]) cov_transformed = transformation.convert_covariance_matrix(cov, q) - assert np.array_equal(cov_transformed, cov * transformation._scale**2) + assert np.array_equal(cov_transformed, cov * transformation._coefficient**2) @pytest.mark.unit def test_log_transformation(self, parameters): From 6c697efa3999b5823ebd33a2d3bffd7867cd113b Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Mon, 24 Jun 2024 11:20:22 +0100 Subject: [PATCH 18/53] fix: old args for ScaledTransformation --- examples/scripts/spm_CMAES.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/scripts/spm_CMAES.py b/examples/scripts/spm_CMAES.py index ff4e0f3af..1fac5d4fd 100644 --- a/examples/scripts/spm_CMAES.py +++ b/examples/scripts/spm_CMAES.py @@ -13,7 +13,7 @@ prior=pybop.Gaussian(6e-06, 0.1e-6), bounds=[1e-6, 9e-6], true_value=parameter_set["Negative particle radius [m]"], - transformation=pybop.ScaledTransformation(scale=2.0), + transformation=pybop.ScaledTransformation(coefficient=2.0), ), pybop.Parameter( "Positive particle radius [m]", From 4e1f8454f960b8464a4349478717b29ca6374b23 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Wed, 3 Jul 2024 09:51:32 +0100 Subject: [PATCH 19/53] tests: add ComposedTransformation unit tests, increase coverage --- tests/unit/test_transformations.py | 31 +++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/tests/unit/test_transformations.py b/tests/unit/test_transformations.py index fe4f8898f..953fee229 100644 --- a/tests/unit/test_transformations.py +++ b/tests/unit/test_transformations.py @@ -30,12 +30,13 @@ def parameters(self): @pytest.mark.unit def test_identity_transformation(self, parameters): - q = np.array([5.0]) + q = np.asarray([5.0]) transformation = parameters["Identity"].transformation assert np.array_equal(transformation.to_model(q), q) assert np.array_equal(transformation.to_search(q), q) assert transformation.log_jacobian_det(q) == 0.0 assert transformation.n_parameters == 1 + assert transformation.is_elementwise() jac, jac_S1 = transformation.jacobian_S1(q) assert np.array_equal(jac, np.eye(1)) @@ -49,11 +50,12 @@ def test_identity_transformation(self, parameters): @pytest.mark.unit def test_scaled_transformation(self, parameters): - q = np.array([2.5]) + q = np.asarray([2.5]) transformation = parameters["Scaled"].transformation p = transformation.to_model(q) assert np.allclose(p, (q / 2.0) - 1.0) assert transformation.n_parameters == 1 + assert transformation.is_elementwise() q_transformed = transformation.to_search(p) assert np.allclose(q_transformed, q) @@ -72,7 +74,7 @@ def test_scaled_transformation(self, parameters): @pytest.mark.unit def test_log_transformation(self, parameters): - q = np.array([10]) + q = np.asarray([10]) transformation = parameters["Log"].transformation p = transformation.to_model(q) assert np.allclose(p, np.exp(q)) @@ 
-91,6 +93,29 @@ def test_log_transformation(self, parameters): cov_transformed = transformation.convert_covariance_matrix(cov, q) assert np.array_equal(cov_transformed, cov * q**2) + @pytest.mark.unit + def test_composed_transformation(self, parameters): + q = np.asarray([5.0, 2.5]) + transformation = pybop.ComposedTransformation( + [parameters["Identity"].transformation, parameters["Scaled"].transformation] + ) + p = transformation.to_model(q) + np.testing.assert_allclose(p, np.asarray([5.0, ((q[1] / 2.0) - 1.0)])) + jac = transformation.jacobian(q) + jac_S1 = transformation.jacobian_S1(q) + np.testing.assert_allclose(jac, np.asarray([[1, 0], [0, 0.5]])) + np.testing.assert_allclose(jac_S1[0], jac) + np.testing.assert_allclose( + jac_S1[1], np.asarray([[[0, 0], [0, 0]], [[0, 0], [0, 0]]]) + ) + + transformation.append(parameters["Log"].transformation) + q = np.asarray([5.0, 2.5, 10]) + p = transformation.to_model(q) + np.testing.assert_allclose( + p, np.asarray([5.0, ((q[1] / 2.0) - 1.0), np.exp(q[2])]) + ) + class TestBaseTransformation: """ From be1c5662ec9d58d314ddcbfe21738d6ff654dc82 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Thu, 4 Jul 2024 21:42:35 +0100 Subject: [PATCH 20/53] Update spm_weighted_cost.py --- examples/scripts/spm_weighted_cost.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/scripts/spm_weighted_cost.py b/examples/scripts/spm_weighted_cost.py index 14ba213d8..557434613 100644 --- a/examples/scripts/spm_weighted_cost.py +++ b/examples/scripts/spm_weighted_cost.py @@ -52,7 +52,7 @@ print("Estimated parameters:", x) # Plot the timeseries output - pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison") + pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison") # Plot convergence pybop.plot_convergence(optim) From f608aa722b078ae8ae6ec3c930a96db2ab41a083 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Fri, 5 Jul 2024 11:06:05 +0100 Subject: [PATCH 21/53] Update evaluate and _evaluate --- pybop/costs/base_cost.py | 66 +++++++++++++++++++++++++++--------- pybop/costs/fitting_costs.py | 8 ++++- tests/unit/test_cost.py | 27 ++++++++++----- 3 files changed, 76 insertions(+), 25 deletions(-) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 4fb0fd010..7fae9ee9c 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -208,46 +208,80 @@ def __init__(self, cost_list, weights=None): else: super(WeightedCost, self).__init__() self._fixed_problem = False + for cost in self.cost_list: + self.parameters.join(cost.parameters) - def _evaluate(self, x, grad=None): + def _evaluate(self, inputs: Inputs, grad=None): """ Calculate the weighted cost for a given set of parameters. + + Parameters + ---------- + inputs : Inputs + The parameters for which to compute the cost. + grad : array-like, optional + An array to store the gradient of the cost function with respect + to the parameters. + + Returns + ------- + float + The weighted cost value. 
""" e = np.empty_like(self.cost_list) - if not self._different_problems: - current_prediction = self.problem.evaluate(x) + if not self._fixed_problem and self._different_problems: + self.parameters.update(values=list(inputs.values())) + elif not self._fixed_problem: + self._current_prediction = self.problem.evaluate(inputs) for i, cost in enumerate(self.cost_list): - if self._different_problems: - cost._current_prediction = cost.problem.evaluate(x) + if not self._fixed_problem and self._different_problems: + inputs = cost.parameters.as_dict() + cost._current_prediction = cost.problem.evaluate(inputs) else: - cost._current_prediction = current_prediction - e[i] = cost._evaluate(x, grad) + cost._current_prediction = self._current_prediction + e[i] = cost._evaluate(inputs, grad) return np.dot(e, self.weights) - def _evaluateS1(self, x): + def _evaluateS1(self, inputs: Inputs): """ - Compute the cost and its gradient with respect to the parameters. + Compute the weighted cost and its gradient with respect to the parameters. + + Parameters + ---------- + inputs : Inputs + The parameters for which to compute the cost and gradient. + + Returns + ------- + tuple + A tuple containing the cost and the gradient. The cost is a float, + and the gradient is an array-like of the same length as `x`. """ e = np.empty_like(self.cost_list) de = np.empty((len(self.parameters), len(self.cost_list))) - if not self._different_problems: - current_prediction, current_sensitivities = self.problem.evaluateS1(x) + if not self._fixed_problem and self._different_problems: + self.parameters.update(values=list(inputs.values())) + elif not self._fixed_problem: + self._current_prediction, self._current_sensitivities = ( + self.problem.evaluateS1(inputs) + ) for i, cost in enumerate(self.cost_list): - if self._different_problems: + if not self._fixed_problem and self._different_problems: + inputs = cost.parameters.as_dict() cost._current_prediction, cost._current_sensitivities = ( - cost.problem.evaluateS1(x) + cost.problem.evaluateS1(inputs) ) else: cost._current_prediction, cost._current_sensitivities = ( - current_prediction, - current_sensitivities, + self._current_prediction, + self._current_sensitivities, ) - e[i], de[:, i] = cost._evaluateS1(x) + e[i], de[:, i] = cost._evaluateS1(inputs) e = np.dot(e, self.weights) de = np.dot(de, self.weights) diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index d907c06ff..08eef2390 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -275,7 +275,7 @@ def _evaluate(self, inputs: Inputs, grad=None): ) return -log_likelihood - def evaluateS1(self, inputs: Inputs): + def _evaluateS1(self, inputs: Inputs): """ Compute the cost and its gradient with respect to the parameters. @@ -347,6 +347,9 @@ def _evaluate(self, inputs: Inputs, grad=None): float The maximum a posteriori cost. """ + if self._fixed_problem: + self.likelihood._current_prediction = self._current_prediction + log_likelihood = self.likelihood._evaluate(inputs) log_prior = sum( self.parameters[key].prior.logpdf(value) for key, value in inputs.items() @@ -376,6 +379,9 @@ def _evaluateS1(self, inputs: Inputs): ValueError If an error occurs during the calculation of the cost or gradient. 
""" + if self._fixed_problem: + self.likelihood._current_prediction = self._current_prediction + log_likelihood, dl = self.likelihood._evaluateS1(inputs) log_prior = sum( self.parameters[key].prior.logpdf(inputs[key]) for key in inputs.keys() diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index 990682eb9..cc45deed6 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -285,24 +285,35 @@ def test_weighted_cost(self, problem): weighted_cost = pybop.WeightedCost(cost_list=[cost1, cost2], weights=[1]) # Test with and without different problems - weighted_cost_2 = pybop.WeightedCost(cost_list=[cost1, cost2], weights=[1, 100]) + weight = 100 + weighted_cost_2 = pybop.WeightedCost( + cost_list=[cost1, cost2], weights=[1, weight] + ) assert weighted_cost_2._different_problems is False + assert weighted_cost_2._fixed_problem is True assert weighted_cost_2.problem is problem assert weighted_cost_2([0.5]) >= 0 + np.testing.assert_allclose( + weighted_cost_2.evaluate([0.6]), + cost1([0.6]) + weight * cost2([0.6]), + atol=1e-5, + ) cost3 = pybop.RootMeanSquaredError(copy(problem)) - weighted_cost_3 = pybop.WeightedCost(cost_list=[cost1, cost3], weights=[1, 100]) + weighted_cost_3 = pybop.WeightedCost( + cost_list=[cost1, cost3], weights=[1, weight] + ) assert weighted_cost_3._different_problems is True + assert weighted_cost_3._fixed_problem is False assert weighted_cost_3.problem is None assert weighted_cost_3([0.5]) >= 0 - np.testing.assert_allclose( - weighted_cost_2._evaluate([0.5]), - weighted_cost_3._evaluate([0.5]), + weighted_cost_3.evaluate([0.6]), + cost1([0.6]) + weight * cost3([0.6]), atol=1e-5, ) - weighted_cost_3.parameters = problem.parameters - errors_2, sensitivities_2 = weighted_cost_2._evaluateS1([0.5]) - errors_3, sensitivities_3 = weighted_cost_3._evaluateS1([0.5]) + + errors_2, sensitivities_2 = weighted_cost_2.evaluateS1([0.5]) + errors_3, sensitivities_3 = weighted_cost_3.evaluateS1([0.5]) np.testing.assert_allclose(errors_2, errors_3, atol=1e-5) np.testing.assert_allclose(sensitivities_2, sensitivities_3, atol=1e-5) From 68b763d2e92f8dd7d0269d811cd7309d72dc3912 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Fri, 5 Jul 2024 11:23:12 +0100 Subject: [PATCH 22/53] Pass current_sensitivities to MAP --- pybop/costs/fitting_costs.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index 08eef2390..8634e16f3 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -380,7 +380,10 @@ def _evaluateS1(self, inputs: Inputs): If an error occurs during the calculation of the cost or gradient. 
""" if self._fixed_problem: - self.likelihood._current_prediction = self._current_prediction + ( + self.likelihood._current_prediction, + self.likelihood._current_sensitivities, + ) = self._current_prediction, self._current_sensitivities log_likelihood, dl = self.likelihood._evaluateS1(inputs) log_prior = sum( From 8c2632dfd0b636488d2e6c8eac55002798914e08 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Fri, 5 Jul 2024 12:10:52 +0100 Subject: [PATCH 23/53] Add test_weighted_design_cost --- pybop/costs/base_cost.py | 6 ++++-- tests/unit/test_cost.py | 43 +++++++++++++++++++++++++--------------- 2 files changed, 31 insertions(+), 18 deletions(-) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 7fae9ee9c..b285ef261 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -198,13 +198,15 @@ def __init__(self, cost_list, weights=None): # Check if all costs depend on the same problem for cost in self.cost_list: - if hasattr(cost, "problem") and ( - not cost._fixed_problem or cost.problem is not self.cost_list[0].problem + if ( + hasattr(cost, "problem") + and cost.problem is not self.cost_list[0].problem ): self._different_problems = True if not self._different_problems: super(WeightedCost, self).__init__(self.cost_list[0].problem) + self._fixed_problem = self.cost_list[0]._fixed_problem else: super(WeightedCost, self).__init__() self._fixed_problem = False diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index cc45deed6..0db06b05e 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -200,6 +200,12 @@ def test_costs(self, cost): # Test treatment of simulations that terminated early # by variation of the cut-off voltage. + @pytest.fixture + def design_problem(self, model, parameters, experiment, signal): + return pybop.DesignProblem( + model, parameters, experiment, signal=signal, init_soc=0.5 + ) + @pytest.mark.parametrize( "cost_class", [ @@ -209,21 +215,9 @@ def test_costs(self, cost): ], ) @pytest.mark.unit - def test_design_costs( - self, - cost_class, - model, - parameters, - experiment, - signal, - ): - # Construct Problem - problem = pybop.DesignProblem( - model, parameters, experiment, signal=signal, init_soc=0.5 - ) - + def test_design_costs(self, cost_class, design_problem): # Construct Cost - cost = cost_class(problem) + cost = cost_class(design_problem) if cost_class in [pybop.DesignCost]: with pytest.raises(NotImplementedError): @@ -249,11 +243,11 @@ def test_design_costs( cost(["StringInputShouldNotWork"]) # Compute after updating nominal capacity - cost = cost_class(problem, update_capacity=True) + cost = cost_class(design_problem, update_capacity=True) cost([0.4]) @pytest.mark.unit - def test_weighted_cost(self, problem): + def test_weighted_fitting_cost(self, problem): cost1 = pybop.SumSquaredError(problem) cost2 = pybop.RootMeanSquaredError(problem) @@ -317,3 +311,20 @@ def test_weighted_cost(self, problem): errors_3, sensitivities_3 = weighted_cost_3.evaluateS1([0.5]) np.testing.assert_allclose(errors_2, errors_3, atol=1e-5) np.testing.assert_allclose(sensitivities_2, sensitivities_3, atol=1e-5) + + @pytest.mark.unit + def test_weighted_design_cost(self, design_problem): + cost1 = pybop.GravimetricEnergyDensity(design_problem) + cost2 = pybop.RootMeanSquaredError(design_problem) + + # Test with and without weights + weighted_cost = pybop.WeightedCost(cost_list=[cost1, cost2]) + assert weighted_cost._different_problems is False + assert 
weighted_cost._fixed_problem is False + assert weighted_cost.problem is design_problem + assert weighted_cost([0.5]) >= 0 + np.testing.assert_allclose( + weighted_cost.evaluate([0.6]), + cost1([0.6]) + cost2([0.6]), + atol=1e-5, + ) From c189d7a2ccc3ae6ab3c847ecd6c209e797d2b963 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 5 Jul 2024 12:35:53 +0000 Subject: [PATCH 24/53] style: pre-commit fixes --- pybop/costs/_likelihoods.py | 43 ++++++++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 10 deletions(-) diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index d05e0e9d4..696ae01db 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -45,7 +45,8 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo Evaluates the Gaussian log-likelihood for the given parameters with known sigma. """ if any( - len(self._current_prediction.get(key, [])) != len(self._target.get(key, [])) for key in self.signal + len(self._current_prediction.get(key, [])) != len(self._target.get(key, [])) + for key in self.signal ): return -np.inf # prediction length doesn't match target @@ -53,7 +54,10 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo [ np.sum( self._offset - + self._multip * np.sum((self._target[signal] - self._current_prediction[signal]) ** 2.0) + + self._multip + * np.sum( + (self._target[signal] - self._current_prediction[signal]) ** 2.0 + ) ) for signal in self.signal ] @@ -66,14 +70,22 @@ def _evaluateS1(self, inputs: Inputs) -> Tuple[float, np.ndarray]: Calculates the log-likelihood and gradient. """ if any( - len(self._current_prediction.get(key, [])) != len(self._target.get(key, [])) for key in self.signal + len(self._current_prediction.get(key, [])) != len(self._target.get(key, [])) + for key in self.signal ): return -np.inf, -self._dl likelihood = self._evaluate(inputs) - r = np.asarray([self._target[signal] - self._current_prediction[signal] for signal in self.signal]) - dl = np.sum((np.sum((r * self._current_sensitivities.T), axis=2) / self.sigma2), axis=1) + r = np.asarray( + [ + self._target[signal] - self._current_prediction[signal] + for signal in self.signal + ] + ) + dl = np.sum( + (np.sum((r * self._current_sensitivities.T), axis=2) / self.sigma2), axis=1 + ) return likelihood, dl @@ -193,7 +205,8 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo return -np.inf if any( - len(self._current_prediction.get(key, [])) != len(self._target.get(key, [])) for key in self.signal + len(self._current_prediction.get(key, [])) != len(self._target.get(key, [])) + for key in self.signal ): return -np.inf # prediction length doesn't match target @@ -202,7 +215,9 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo np.sum( self._logpi - self.n_time_data * np.log(sigma) - - np.sum((self._target[signal] - self._current_prediction[signal]) ** 2.0) + - np.sum( + (self._target[signal] - self._current_prediction[signal]) ** 2.0 + ) / (2.0 * sigma**2.0) ) for signal in self.signal @@ -232,14 +247,22 @@ def _evaluateS1(self, inputs: Inputs) -> Tuple[float, np.ndarray]: return -np.inf, -self._dl if any( - len(self._current_prediction.get(key, [])) != len(self._target.get(key, [])) for key in self.signal + len(self._current_prediction.get(key, [])) != len(self._target.get(key, [])) + for key in self.signal ): return -np.inf, -self._dl likelihood = 
self._evaluate(inputs) - r = np.asarray([self._target[signal] - self._current_prediction[signal] for signal in self.signal]) - dl = np.sum((np.sum((r * self._current_sensitivities.T), axis=2) / (sigma**2.0)), axis=1) + r = np.asarray( + [ + self._target[signal] - self._current_prediction[signal] + for signal in self.signal + ] + ) + dl = np.sum( + (np.sum((r * self._current_sensitivities.T), axis=2) / (sigma**2.0)), axis=1 + ) dsigma = ( -self.n_time_data / sigma + np.sum(r**2.0, axis=1) / (sigma**3.0) ) / self._dsigma_scale From 2b1ae4c6af54fc719a3c3dabea9d3594663bbb24 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Fri, 5 Jul 2024 14:08:51 +0100 Subject: [PATCH 25/53] Add evaluate back into GaussianLogLikelihood --- pybop/costs/_likelihoods.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index 696ae01db..8b867d4eb 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -204,6 +204,9 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo if np.any(sigma <= 0): return -np.inf + self._current_prediction = self.problem.evaluate( + self.problem.parameters.as_dict() + ) if any( len(self._current_prediction.get(key, [])) != len(self._target.get(key, [])) for key in self.signal @@ -246,6 +249,9 @@ def _evaluateS1(self, inputs: Inputs) -> Tuple[float, np.ndarray]: if np.any(sigma <= 0): return -np.inf, -self._dl + self._current_prediction, self._current_sensitivities = self.problem.evaluateS1( + self.problem.parameters.as_dict() + ) if any( len(self._current_prediction.get(key, [])) != len(self._target.get(key, [])) for key in self.signal From d12173d2b757e3595b6b212550ca2b55b03fe49b Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Tue, 9 Jul 2024 17:36:23 +0100 Subject: [PATCH 26/53] fix: leftover merge items --- pybop/costs/_likelihoods.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index ff15a79a0..fce0608c8 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -173,7 +173,7 @@ def dsigma_scale(self, new_value): raise ValueError("dsigma_scale must be non-negative") self._dsigma_scale = new_value - def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> float: + def _evaluate(self, inputs: Inputs) -> float: """ Evaluates the Gaussian log-likelihood for the given parameters. @@ -293,9 +293,6 @@ def _evaluate(self, inputs: Inputs) -> float: ---------- inputs : Inputs The parameters for which to evaluate the cost. - grad : array-like, optional - An array to store the gradient of the cost function with respect - to the parameters. 
Returns ------- From 87dae1c91965db05e05aac20ab8ddc13b07723f7 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Wed, 10 Jul 2024 11:13:28 +0100 Subject: [PATCH 27/53] tests: increase coverage, add condition on prior bounds creation, bugfix plotly bounds method --- pybop/parameters/parameter.py | 9 +- pybop/transformation/__init__.py | 31 ++++--- pybop/transformation/_transformation.py | 9 +- tests/unit/test_transformations.py | 111 ++++++++++++++++++++++-- 4 files changed, 131 insertions(+), 29 deletions(-) diff --git a/pybop/parameters/parameter.py b/pybop/parameters/parameter.py index c82b67cc1..7fe793ff0 100644 --- a/pybop/parameters/parameter.py +++ b/pybop/parameters/parameter.py @@ -172,16 +172,17 @@ def set_bounds(self, bounds=None, boundary_multiplier=6): else: self.lower_bound = bounds[0] self.upper_bound = bounds[1] - bounds = [self.lower_bound, self.upper_bound] elif self.prior is not None: self.applied_prior_bounds = True self.lower_bound = self.prior.mean - boundary_multiplier * self.prior.sigma self.upper_bound = self.prior.mean + boundary_multiplier * self.prior.sigma - bounds = [self.lower_bound, self.upper_bound] print("Default bounds applied based on prior distribution.") + else: + self.bounds = None + return - self.bounds = bounds + self.bounds = [self.lower_bound, self.upper_bound] def get_initial_value(self) -> float: """ @@ -467,7 +468,7 @@ def get_bounds_for_plotly(self): UserWarning, stacklevel=2, ) - elif param.bounds is not None: + if param.bounds is not None: bounds[i] = param.bounds else: raise ValueError("All parameters require bounds for plotting.") diff --git a/pybop/transformation/__init__.py b/pybop/transformation/__init__.py index e96e9fea4..08ba5e17d 100644 --- a/pybop/transformation/__init__.py +++ b/pybop/transformation/__init__.py @@ -47,7 +47,6 @@ def convert_standard_deviation(self, std: Union[float, np.ndarray], q: np.ndarra @abstractmethod def jacobian(self, q: np.ndarray) -> np.ndarray: """Returns the Jacobian matrix of the transformation at the parameter vector `q`.""" - pass def jacobian_S1(self, q: np.ndarray) -> Tuple[np.ndarray, Sequence[np.ndarray]]: """ @@ -89,7 +88,6 @@ def _transform(self, x: np.ndarray, method: str) -> np.ndarray: Transforms a parameter vector `x` from the search space to the model space if `method` is "to_model", or from the model space to the search space if `method` is "to_search". 
""" - pass def is_elementwise(self) -> bool: """ @@ -98,22 +96,23 @@ def is_elementwise(self) -> bool: """ raise NotImplementedError("is_elementwise method must be implemented if used.") - def _verify_input(self, input: Union[List[float], float, np.ndarray]) -> None: - """Set and validate the translate parameter.""" + def _verify_input(self, input: Union[float, int, List[float], np.ndarray, dict[str, float]]) -> np.ndarray: + """Set and validate the transformation parameter.""" if isinstance(input, (float, int)): - input = np.full(self._n_parameters, float(input)) + return np.full(self._n_parameters, float(input)) + if isinstance(input, dict): - if len(input) != self._n_parameters: - raise ValueError(f"Translate must have {self._n_parameters} elements") - input = np.asarray([k for k in input.values()], dtype=float) - elif isinstance(input, (list, np.ndarray)): - if len(input) != self._n_parameters: - raise ValueError(f"Translate must have {self._n_parameters} elements") - input = np.asarray(input, dtype=float) - else: - raise TypeError("Translate must be a float, list, or numpy array") - - return input + input = list(input.values()) + + try: + input_array = np.asarray(input, dtype=float) + except (ValueError, TypeError): + raise TypeError("Transform must be a float, int, list, numpy array, or dictionary") + + if input_array.size != self._n_parameters: + raise ValueError(f"Transform must have {self._n_parameters} elements") + + return input_array # ---- To be implemented with Monte Carlo PR ------ # # class TransformedLogPDF(BaseCost): diff --git a/pybop/transformation/_transformation.py b/pybop/transformation/_transformation.py index 7746ddc74..6af572600 100644 --- a/pybop/transformation/_transformation.py +++ b/pybop/transformation/_transformation.py @@ -227,15 +227,18 @@ def _append_transformation(self, transformation: Transformation): If the appended object is not a Transformation. """ if not isinstance(transformation, Transformation): - raise ValueError("The appended object must be a Transformation.") + raise TypeError("The appended object must be a Transformation.") self._transformations.append(transformation) self._n_parameters += transformation.n_parameters self._is_elementwise = self._is_elementwise and transformation.is_elementwise() - def _update_methods(self): + def _update_methods(self, elementwise: bool = None): """ Update the internal methods based on whether the transformation is elementwise. 
""" + if elementwise is not None: + self._is_elementwise = elementwise + if self._is_elementwise: self._jacobian = self._elementwise_jacobian self._log_jacobian_det = self._elementwise_log_jacobian_det @@ -344,7 +347,7 @@ def _elementwise_log_jacobian_det(self, q: np.ndarray) -> float: """ q = self._verify_input(q) return sum( - transformation.log_jacobian_det(q[lo : lo + transformation.n_parameters()]) + transformation.log_jacobian_det(q[lo : lo + transformation.n_parameters]) for lo, transformation in self._iter_transformations() ) diff --git a/tests/unit/test_transformations.py b/tests/unit/test_transformations.py index 953fee229..9131e622c 100644 --- a/tests/unit/test_transformations.py +++ b/tests/unit/test_transformations.py @@ -35,6 +35,7 @@ def test_identity_transformation(self, parameters): assert np.array_equal(transformation.to_model(q), q) assert np.array_equal(transformation.to_search(q), q) assert transformation.log_jacobian_det(q) == 0.0 + assert transformation.log_jacobian_det_S1(q) == (0.0, np.zeros(1)) assert transformation.n_parameters == 1 assert transformation.is_elementwise() @@ -62,6 +63,9 @@ def test_scaled_transformation(self, parameters): assert np.allclose( transformation.log_jacobian_det(q), np.sum(np.log(np.abs(2.0))) ) + log_jac_det_S1 = transformation.log_jacobian_det_S1(q) + assert log_jac_det_S1[0] == np.sum(np.log(np.abs(2.0))) + assert log_jac_det_S1[1] == np.zeros(1) jac, jac_S1 = transformation.jacobian_S1(q) assert np.array_equal(jac, np.diag([0.5])) @@ -72,6 +76,10 @@ def test_scaled_transformation(self, parameters): cov_transformed = transformation.convert_covariance_matrix(cov, q) assert np.array_equal(cov_transformed, cov * transformation._coefficient**2) + # Test incorrect transform + with pytest.raises(ValueError, match="Unknown method:"): + transformation._transform(q, "bad-string") + @pytest.mark.unit def test_log_transformation(self, parameters): q = np.asarray([10]) @@ -84,6 +92,10 @@ def test_log_transformation(self, parameters): assert np.allclose(q_transformed, q) assert np.allclose(transformation.log_jacobian_det(q), -np.sum(np.log(q))) + log_jac_det_S1 = transformation.log_jacobian_det_S1(q) + assert log_jac_det_S1[0] == -np.sum(np.log(q)) + assert log_jac_det_S1[1] == -1 / q + jac, jac_S1 = transformation.jacobian_S1(q) assert np.array_equal(jac, np.diag(1 / q)) assert np.array_equal(jac_S1, np.diag(-1 / q**2)) @@ -93,12 +105,38 @@ def test_log_transformation(self, parameters): cov_transformed = transformation.convert_covariance_matrix(cov, q) assert np.array_equal(cov_transformed, cov * q**2) + # Test incorrect transform + with pytest.raises(ValueError, match="Unknown method:"): + transformation._transform(q, "bad-string") + @pytest.mark.unit def test_composed_transformation(self, parameters): - q = np.asarray([5.0, 2.5]) + # Test elementwise transformations transformation = pybop.ComposedTransformation( [parameters["Identity"].transformation, parameters["Scaled"].transformation] ) + self._test_elementwise_transformations(transformation) + + # Test appending a non-elementwise transformation + transformation.append(parameters["Log"].transformation) + self._test_non_elementwise_transformations(transformation) + + # Test composed with no transformations + with pytest.raises( + ValueError, match="Must have at least one sub-transformation." + ): + pybop.ComposedTransformation([]) + + # Test incorrect append object + with pytest.raises( + TypeError, match="The appended object must be a Transformation." 
+ ): + pybop.ComposedTransformation( + [parameters["Identity"].transformation, "string"] + ) + + def _test_elementwise_transformations(self, transformation): + q = np.asarray([5.0, 2.5]) p = transformation.to_model(q) np.testing.assert_allclose(p, np.asarray([5.0, ((q[1] / 2.0) - 1.0)])) jac = transformation.jacobian(q) @@ -108,20 +146,77 @@ def test_composed_transformation(self, parameters): np.testing.assert_allclose( jac_S1[1], np.asarray([[[0, 0], [0, 0]], [[0, 0], [0, 0]]]) ) + assert transformation.is_elementwise() is True - transformation.append(parameters["Log"].transformation) + def _test_non_elementwise_transformations(self, transformation): q = np.asarray([5.0, 2.5, 10]) p = transformation.to_model(q) np.testing.assert_allclose( p, np.asarray([5.0, ((q[1] / 2.0) - 1.0), np.exp(q[2])]) ) + assert_log_jac_det = np.sum(np.log(np.abs(2.0))) + np.sum(-np.log(10)) + + log_jac_det = transformation.log_jacobian_det(q) + assert log_jac_det == assert_log_jac_det + + log_jac_det_S1 = transformation.log_jacobian_det_S1(q) + assert log_jac_det_S1[0] == assert_log_jac_det + np.testing.assert_allclose(log_jac_det_S1[1], np.asarray([0.0, 0.0, -0.1])) + + # Test general transformation + transformation._update_methods(elementwise=False) + assert transformation.is_elementwise() is False + + jac_general = transformation.jacobian(q) + assert np.array_equal(jac_general, np.diag([1, 0.5, 1 / 10])) + + assert_log_jac_det_general = -np.sum(np.log(np.abs(2.0))) + np.sum(-np.log(10)) + log_jac_det_general = transformation.log_jacobian_det(q) + np.testing.assert_almost_equal(log_jac_det_general, assert_log_jac_det_general) + + log_jac_det_S1_general = transformation.log_jacobian_det_S1(q) + np.testing.assert_almost_equal( + log_jac_det_S1_general[0], assert_log_jac_det_general + ) + np.testing.assert_allclose( + log_jac_det_S1_general[1], np.asarray([0.0, 0.0, -0.1]) + ) + + @pytest.mark.unit + def test_verify_input(self, parameters): + q = np.asarray([5.0]) + q_dict = {"Identity": q[0]} + transformation = parameters["Scaled"].transformation + assert transformation._verify_input(q) == q + assert transformation._verify_input(q.tolist()) == q + assert transformation._verify_input(q_dict) == q + + with pytest.raises( + TypeError, match="Transform must be a float, int, list, numpy array," + ): + transformation._verify_input("string") + + with pytest.raises(ValueError, match="Transform must have"): + transformation._verify_input(np.array([1.0, 2.0, 3.0])) + class TestBaseTransformation: """ A class to test the abstract base transformation class. 
""" + @pytest.fixture + def ConcreteTransformation(self): + class ConcreteTransformation(pybop.Transformation): + def jacobian(self, q): + pass + + def _transform(self, q, method): + pass + + return ConcreteTransformation() + @pytest.mark.unit def test_abstract_base_transformation(self): with pytest.raises(TypeError): @@ -148,11 +243,15 @@ def test_concrete_methods(self): assert not inspect.isabstract(getattr(pybop.Transformation, method)) @pytest.mark.unit - def test_not_implemented_methods(self): + def test_not_implemented_methods(self, ConcreteTransformation): not_implemented_methods = [ "jacobian_S1", "log_jacobian_det_S1", - "is_elementwise", ] - for method in not_implemented_methods: - assert hasattr(pybop.Transformation, method) + for method_name in not_implemented_methods: + with pytest.raises(NotImplementedError): + method = getattr(ConcreteTransformation, method_name) + method(np.array([1.0])) + + with pytest.raises(NotImplementedError): + ConcreteTransformation.is_elementwise() From 6a72568e7fb4e7237903ede011d851852c8af3cd Mon Sep 17 00:00:00 2001 From: Brady Planden <55357039+BradyPlanden@users.noreply.github.com> Date: Wed, 10 Jul 2024 11:35:27 +0100 Subject: [PATCH 28/53] Apply suggestions from code review --- examples/scripts/spm_CMAES.py | 2 +- pybop/optimisers/base_pints_optimiser.py | 10 +++------- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/examples/scripts/spm_CMAES.py b/examples/scripts/spm_CMAES.py index 7416f924d..91c4c3c33 100644 --- a/examples/scripts/spm_CMAES.py +++ b/examples/scripts/spm_CMAES.py @@ -44,7 +44,7 @@ # Generate problem, cost function, and optimisation class problem = pybop.FittingProblem(model, parameters, dataset, signal=signal) cost = pybop.SumSquaredError(problem) -optim = pybop.CMAES(cost, max_iterations=100, min_iterations=40) +optim = pybop.CMAES(cost, max_iterations=100) # Run the optimisation x, final_cost = optim.run() diff --git a/pybop/optimisers/base_pints_optimiser.py b/pybop/optimisers/base_pints_optimiser.py index cb8abd917..8bf856c4f 100644 --- a/pybop/optimisers/base_pints_optimiser.py +++ b/pybop/optimisers/base_pints_optimiser.py @@ -10,10 +10,7 @@ from pints import SequentialEvaluator as PintsSequentialEvaluator from pints import strfloat as PintsStrFloat -from pybop import ( - BaseOptimiser, - Result, -) +from pybop import BaseOptimiser, Result class BasePintsOptimiser(BaseOptimiser): @@ -53,7 +50,6 @@ def __init__(self, cost, pints_optimiser, **optimiser_kwargs): self.pints_optimiser = pints_optimiser super().__init__(cost, **optimiser_kwargs) - self.f = self.cost def _set_up_optimiser(self): """ @@ -197,12 +193,12 @@ def _run(self): if self._needs_sensitivities: def f(x): - L, dl = self.f.evaluateS1(x) + L, dl = self.cost.evaluateS1(x) return (L, dl) if self.minimising else (-L, -dl) else: def f(x): - return self.f(x) if self.minimising else -self.f(x) + return self.cost(x) if self.minimising else -self.cost(x) # Create evaluator object if self._parallel: From 4cec9fa3a94b8e97d6ab1d9ebfddbf127cdfe4f1 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Wed, 10 Jul 2024 12:38:09 +0100 Subject: [PATCH 29/53] refacotr: remove general transformation methods for ComposedTransformation class --- pybop/transformation/_transformation.py | 154 ++++-------------------- tests/unit/test_transformations.py | 78 ++++-------- 2 files changed, 51 insertions(+), 181 deletions(-) diff --git a/pybop/transformation/_transformation.py b/pybop/transformation/_transformation.py index 6af572600..29eb44930 100644 --- 
a/pybop/transformation/_transformation.py +++ b/pybop/transformation/_transformation.py @@ -210,7 +210,6 @@ def __init__(self, transformations: List[Transformation]): self._is_elementwise = True for transformation in transformations: self._append_transformation(transformation) - self._update_methods() def _append_transformation(self, transformation: Transformation): """ @@ -232,22 +231,6 @@ def _append_transformation(self, transformation: Transformation): self._n_parameters += transformation.n_parameters self._is_elementwise = self._is_elementwise and transformation.is_elementwise() - def _update_methods(self, elementwise: bool = None): - """ - Update the internal methods based on whether the transformation is elementwise. - """ - if elementwise is not None: - self._is_elementwise = elementwise - - if self._is_elementwise: - self._jacobian = self._elementwise_jacobian - self._log_jacobian_det = self._elementwise_log_jacobian_det - self._log_jacobian_det_S1 = self._elementwise_log_jacobian_det_S1 - else: - self._jacobian = self._general_jacobian - self._log_jacobian_det = self._general_log_jacobian_det - self._log_jacobian_det_S1 = self._general_log_jacobian_det_S1 - def append(self, transformation: Transformation): """ Append a new transformation to the existing composition. @@ -258,55 +241,12 @@ def append(self, transformation: Transformation): The transformation to append. """ self._append_transformation(transformation) - self._update_methods() def is_elementwise(self) -> bool: """See :meth:`Transformation.is_elementwise()`.""" return self._is_elementwise def jacobian(self, q: np.ndarray) -> np.ndarray: - """See :meth:`Transformation.jacobian()`.""" - return self._jacobian(q) - - def jacobian_S1(self, q: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: - """See :meth:`Transformation.jacobian_S1()`.""" - q = self._verify_input(q) - output_S1 = np.zeros( - (self._n_parameters, self._n_parameters, self._n_parameters) - ) - lo = 0 - - for transformation in self._transformations: - hi = lo + transformation.n_parameters - _, jac_S1 = transformation.jacobian_S1(q[lo:hi]) - for i, jac_S1_i in enumerate(jac_S1): - output_S1[lo + i, lo:hi, lo:hi] = jac_S1_i - lo = hi - - return self.jacobian(q), output_S1 - - def log_jacobian_det(self, q: np.ndarray) -> float: - """See :meth:`Transformation.log_jacobian_det()`.""" - return self._log_jacobian_det(q) - - def log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray]: - """See :meth:`Transformation.log_jacobian_det_S1()`.""" - return self._log_jacobian_det_S1(q) - - def _transform(self, data: np.ndarray, method: str) -> np.ndarray: - """See :meth:`Transformation._transform`.""" - data = self._verify_input(data) - output = np.zeros_like(data) - lo = 0 - - for transformation in self._transformations: - hi = lo + transformation.n_parameters - output[lo:hi] = getattr(transformation, method)(data[lo:hi]) - lo = hi - - return output - - def _elementwise_jacobian(self, q: np.ndarray) -> np.ndarray: """ Compute the elementwise Jacobian of the composed transformation. 
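For reference, a minimal sketch of the elementwise behaviour that ComposedTransformation standardises on after this refactor, using the constructors exercised in the unit tests (the no-argument LogTransformation() call is an assumption based on the test fixture):

import numpy as np

import pybop

# Compose one transformation per parameter; every sub-transformation here is
# elementwise, so the composed Jacobian is diagonal.
composed = pybop.ComposedTransformation(
    [
        pybop.IdentityTransformation(),
        pybop.ScaledTransformation(coefficient=2.0, intercept=1),
        pybop.LogTransformation(),  # assumed no-argument constructor
    ]
)

q = np.asarray([5.0, 2.5, 10.0])
p = composed.to_model(q)  # [5.0, 2.5 / 2.0 - 1.0, exp(10.0)]

# jacobian() stacks the per-parameter diagonals: diag([1.0, 0.5, 0.1])
jac = composed.jacobian(q)

# log_jacobian_det() sums the sub-transformations' log-determinants,
# giving log(2) - log(10) here, matching the unit-test expectation.
log_det = composed.log_jacobian_det(q)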
@@ -331,7 +271,24 @@ def _elementwise_jacobian(self, q: np.ndarray) -> np.ndarray: return np.diag(diag) - def _elementwise_log_jacobian_det(self, q: np.ndarray) -> float: + def jacobian_S1(self, q: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """See :meth:`Transformation.jacobian_S1()`.""" + q = self._verify_input(q) + output_S1 = np.zeros( + (self._n_parameters, self._n_parameters, self._n_parameters) + ) + lo = 0 + + for transformation in self._transformations: + hi = lo + transformation.n_parameters + _, jac_S1 = transformation.jacobian_S1(q[lo:hi]) + for i, jac_S1_i in enumerate(jac_S1): + output_S1[lo + i, lo:hi, lo:hi] = jac_S1_i + lo = hi + + return self.jacobian(q), output_S1 + + def log_jacobian_det(self, q: np.ndarray) -> float: """ Compute the elementwise logarithm of the determinant of the Jacobian. @@ -351,9 +308,7 @@ def _elementwise_log_jacobian_det(self, q: np.ndarray) -> float: for lo, transformation in self._iter_transformations() ) - def _elementwise_log_jacobian_det_S1( - self, q: np.ndarray - ) -> Tuple[float, np.ndarray]: + def log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray]: """ Compute the elementwise logarithm of the determinant of the Jacobian and its first-order sensitivities. @@ -381,77 +336,18 @@ def _elementwise_log_jacobian_det_S1( return output, output_S1 - def _general_jacobian(self, q: np.ndarray) -> np.ndarray: - """ - Compute the general Jacobian of the composed transformation. - - Parameters - ---------- - q : np.ndarray - Input array. - - Returns - ------- - np.ndarray - Block diagonal matrix of the Jacobian of each transformation. - """ - q = self._verify_input(q) - jacobian_blocks = [] + def _transform(self, data: np.ndarray, method: str) -> np.ndarray: + """See :meth:`Transformation._transform`.""" + data = self._verify_input(data) + output = np.zeros_like(data) lo = 0 for transformation in self._transformations: hi = lo + transformation.n_parameters - jacobian_blocks.append(transformation.jacobian(q[lo:hi])) + output[lo:hi] = getattr(transformation, method)(data[lo:hi]) lo = hi - return np.block( - [ - [ - b if i == j else np.zeros_like(b) - for j, b in enumerate(jacobian_blocks) - ] - for i, _ in enumerate(jacobian_blocks) - ] - ) - - def _general_log_jacobian_det(self, q: np.ndarray) -> float: - """ - Compute the general logarithm of the determinant of the Jacobian. - - Parameters - ---------- - q : np.ndarray - Input array. - - Returns - ------- - float - Logarithm of the absolute value of the determinant of the Jacobian. - """ - return np.log(np.abs(np.linalg.det(self.jacobian(q)))) - - def _general_log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray]: - """ - Compute the general logarithm of the determinant of the Jacobian and its first-order sensitivities. - - Parameters - ---------- - q : np.ndarray - Input array. - - Returns - ------- - Tuple[float, np.ndarray] - Tuple of log determinant and its first-order sensitivities. 
- """ - q = self._verify_input(q) - jac, jac_S1 = self.jacobian_S1(q) - out_S1 = np.zeros(self._n_parameters) - - for i, jac_S1_i in enumerate(jac_S1): - out_S1[i] = np.trace(np.matmul(np.linalg.pinv(jac), jac_S1_i)) - - return self.log_jacobian_det(q), out_S1 + return output def _iter_transformations(self): """ diff --git a/tests/unit/test_transformations.py b/tests/unit/test_transformations.py index 9131e622c..9a86d1f0e 100644 --- a/tests/unit/test_transformations.py +++ b/tests/unit/test_transformations.py @@ -115,73 +115,47 @@ def test_composed_transformation(self, parameters): transformation = pybop.ComposedTransformation( [parameters["Identity"].transformation, parameters["Scaled"].transformation] ) - self._test_elementwise_transformations(transformation) - - # Test appending a non-elementwise transformation + # Test appending transformations transformation.append(parameters["Log"].transformation) - self._test_non_elementwise_transformations(transformation) - - # Test composed with no transformations - with pytest.raises( - ValueError, match="Must have at least one sub-transformation." - ): - pybop.ComposedTransformation([]) - - # Test incorrect append object - with pytest.raises( - TypeError, match="The appended object must be a Transformation." - ): - pybop.ComposedTransformation( - [parameters["Identity"].transformation, "string"] - ) - - def _test_elementwise_transformations(self, transformation): - q = np.asarray([5.0, 2.5]) - p = transformation.to_model(q) - np.testing.assert_allclose(p, np.asarray([5.0, ((q[1] / 2.0) - 1.0)])) - jac = transformation.jacobian(q) - jac_S1 = transformation.jacobian_S1(q) - np.testing.assert_allclose(jac, np.asarray([[1, 0], [0, 0.5]])) - np.testing.assert_allclose(jac_S1[0], jac) - np.testing.assert_allclose( - jac_S1[1], np.asarray([[[0, 0], [0, 0]], [[0, 0], [0, 0]]]) - ) assert transformation.is_elementwise() is True - def _test_non_elementwise_transformations(self, transformation): q = np.asarray([5.0, 2.5, 10]) p = transformation.to_model(q) np.testing.assert_allclose( p, np.asarray([5.0, ((q[1] / 2.0) - 1.0), np.exp(q[2])]) ) + jac = transformation.jacobian(q) + jac_S1 = transformation.jacobian_S1(q) + np.testing.assert_allclose( + jac, np.asarray([[1, 0.0, 0.0], [0.0, 0.5, 0.0], [0.0, 0.0, 0.1]]) + ) + np.testing.assert_allclose(jac_S1[0], jac) + assert jac_S1[1].shape == (3, 3, 3) + assert jac_S1[1][2, 2, 2] == -0.01 + np.testing.assert_allclose(jac_S1[1][0, :, :], np.zeros((3, 3))) + np.testing.assert_allclose(jac_S1[1][1, :, :], np.zeros((3, 3))) - assert_log_jac_det = np.sum(np.log(np.abs(2.0))) + np.sum(-np.log(10)) - + correct_output = np.sum(np.log(np.abs(2.0))) + np.sum(-np.log(10)) log_jac_det = transformation.log_jacobian_det(q) - assert log_jac_det == assert_log_jac_det + assert log_jac_det == correct_output log_jac_det_S1 = transformation.log_jacobian_det_S1(q) - assert log_jac_det_S1[0] == assert_log_jac_det + assert log_jac_det_S1[0] == correct_output np.testing.assert_allclose(log_jac_det_S1[1], np.asarray([0.0, 0.0, -0.1])) - # Test general transformation - transformation._update_methods(elementwise=False) - assert transformation.is_elementwise() is False - - jac_general = transformation.jacobian(q) - assert np.array_equal(jac_general, np.diag([1, 0.5, 1 / 10])) - - assert_log_jac_det_general = -np.sum(np.log(np.abs(2.0))) + np.sum(-np.log(10)) - log_jac_det_general = transformation.log_jacobian_det(q) - np.testing.assert_almost_equal(log_jac_det_general, assert_log_jac_det_general) + # Test composed with no transformations 
+ with pytest.raises( + ValueError, match="Must have at least one sub-transformation." + ): + pybop.ComposedTransformation([]) - log_jac_det_S1_general = transformation.log_jacobian_det_S1(q) - np.testing.assert_almost_equal( - log_jac_det_S1_general[0], assert_log_jac_det_general - ) - np.testing.assert_allclose( - log_jac_det_S1_general[1], np.asarray([0.0, 0.0, -0.1]) - ) + # Test incorrect append object + with pytest.raises( + TypeError, match="The appended object must be a Transformation." + ): + pybop.ComposedTransformation( + [parameters["Identity"].transformation, "string"] + ) @pytest.mark.unit def test_verify_input(self, parameters): From 77a9f413ea9574285132f845f8085c10af99d1aa Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Thu, 11 Jul 2024 09:15:51 +0100 Subject: [PATCH 30/53] fix: apply ruff linting --- pybop/transformation/_transformation.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/pybop/transformation/_transformation.py b/pybop/transformation/_transformation.py index 29eb44930..782fe46b3 100644 --- a/pybop/transformation/_transformation.py +++ b/pybop/transformation/_transformation.py @@ -1,4 +1,4 @@ -from typing import List, Tuple, Union +from typing import Union import numpy as np @@ -43,7 +43,7 @@ def jacobian(self, q: np.ndarray) -> np.ndarray: """See :meth:`Transformation.jacobian()`.""" return np.eye(self._n_parameters) - def jacobian_S1(self, q: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + def jacobian_S1(self, q: np.ndarray) -> tuple[np.ndarray, np.ndarray]: """See :meth:`Transformation.jacobian_S1()`.""" n = self._n_parameters return self.jacobian(q), np.zeros((n, n, n)) @@ -52,7 +52,7 @@ def log_jacobian_det(self, q: np.ndarray) -> float: """See :meth:`Transformation.log_jacobian_det()`.""" return 0.0 - def log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray]: + def log_jacobian_det_S1(self, q: np.ndarray) -> tuple[float, np.ndarray]: """See :meth:`Transformation.log_jacobian_det_S1()`.""" return self.log_jacobian_det(q), np.zeros(self._n_parameters) @@ -102,7 +102,7 @@ def jacobian(self, q: np.ndarray) -> np.ndarray: """See :meth:`Transformation.jacobian()`.""" return np.diag(self._inverse_coeff) - def jacobian_S1(self, q: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + def jacobian_S1(self, q: np.ndarray) -> tuple[np.ndarray, np.ndarray]: """See :meth:`Transformation.jacobian_S1()`.""" n = self._n_parameters return self.jacobian(q), np.zeros((n, n, n)) @@ -111,7 +111,7 @@ def log_jacobian_det(self, q: np.ndarray) -> float: """See :meth:`Transformation.log_jacobian_det()`.""" return np.sum(np.log(np.abs(self._coefficient))) - def log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray]: + def log_jacobian_det_S1(self, q: np.ndarray) -> tuple[float, np.ndarray]: """See :meth:`Transformation.log_jacobian_det_S1()`.""" return self.log_jacobian_det(q), np.zeros(self._n_parameters) @@ -164,7 +164,7 @@ def jacobian(self, q: np.ndarray) -> np.ndarray: """See :meth:`Transformation.jacobian()`.""" return np.diag(1 / q) - def jacobian_S1(self, q: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + def jacobian_S1(self, q: np.ndarray) -> tuple[np.ndarray, np.ndarray]: """See :meth:`Transformation.jacobian_S1()`.""" return np.diag(1 / q), np.diag(-1 / q**2) @@ -172,7 +172,7 @@ def log_jacobian_det(self, q: np.ndarray) -> float: """See :meth:`Transformation.log_jacobian_det()`.""" return np.sum(-np.log(q)) - def log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray]: + def 
log_jacobian_det_S1(self, q: np.ndarray) -> tuple[float, np.ndarray]: """See :meth:`Transformation.log_jacobian_det_S1()`.""" return self.log_jacobian_det(q), -1 / q @@ -202,7 +202,7 @@ class ComposedTransformation(Transformation): Extends pybop.Transformation. Initially based on pints.ComposedTransformation class. """ - def __init__(self, transformations: List[Transformation]): + def __init__(self, transformations: list[Transformation]): if not transformations: raise ValueError("Must have at least one sub-transformation.") self._transformations = [] @@ -271,7 +271,7 @@ def jacobian(self, q: np.ndarray) -> np.ndarray: return np.diag(diag) - def jacobian_S1(self, q: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + def jacobian_S1(self, q: np.ndarray) -> tuple[np.ndarray, np.ndarray]: """See :meth:`Transformation.jacobian_S1()`.""" q = self._verify_input(q) output_S1 = np.zeros( @@ -308,7 +308,7 @@ def log_jacobian_det(self, q: np.ndarray) -> float: for lo, transformation in self._iter_transformations() ) - def log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray]: + def log_jacobian_det_S1(self, q: np.ndarray) -> tuple[float, np.ndarray]: """ Compute the elementwise logarithm of the determinant of the Jacobian and its first-order sensitivities. From 8c41327a302e6196074be3519f6673edee34ad57 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Thu, 11 Jul 2024 11:44:28 +0100 Subject: [PATCH 31/53] Update to super() --- pybop/costs/base_cost.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 322d082fc..0bd38192d 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -205,10 +205,10 @@ def __init__(self, cost_list, weights=None): self._different_problems = True if not self._different_problems: - super(WeightedCost, self).__init__(self.cost_list[0].problem) + super().__init__(self.cost_list[0].problem) self._fixed_problem = self.cost_list[0]._fixed_problem else: - super(WeightedCost, self).__init__() + super().__init__() self._fixed_problem = False for cost in self.cost_list: self.parameters.join(cost.parameters) From 80a803e4251a05a7d75239ea8d4f68e4814985f7 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 15 Jul 2024 16:42:55 +0000 Subject: [PATCH 32/53] style: pre-commit fixes --- pybop/costs/_likelihoods.py | 2 +- pybop/costs/base_cost.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index 308704153..43dc326aa 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -324,7 +324,7 @@ def _evaluate(self, inputs: Inputs, grad=None) -> float: if self._fixed_problem: self.likelihood._current_prediction = self._current_prediction log_likelihood = self.likelihood._evaluate(inputs) - + posterior = log_likelihood + log_prior return posterior diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 4226ed78f..eb2038b66 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -1,6 +1,7 @@ -import numpy as np from typing import Union +import numpy as np + from pybop import BaseProblem from pybop.parameters.parameter import Inputs, Parameters From f18552349c6b5447d48601bf1c64505ab157e450 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Mon, 15 Jul 2024 17:48:53 +0100 Subject: [PATCH 33/53] Update 
prediction to self._current_prediction --- pybop/costs/fitting_costs.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index 2ff465868..93c50a75c 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -239,7 +239,10 @@ def _evaluate(self, inputs: Inputs, grad=None): e = np.asarray( [ - np.sum(np.abs(prediction[signal] - self._target[signal]) ** self.p) + np.sum( + np.abs(self._current_prediction[signal] - self._target[signal]) + ** self.p + ) ** (1 / self.p) for signal in self.signal ] @@ -344,7 +347,10 @@ def _evaluate(self, inputs: Inputs, grad=None): e = np.asarray( [ - np.sum(np.abs(prediction[signal] - self._target[signal]) ** self.p) + np.sum( + np.abs(self._current_prediction[signal] - self._target[signal]) + ** self.p + ) for signal in self.signal ] ) From 23b77e895d9ee7aa23413ac1bad457192b4450f5 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Mon, 15 Jul 2024 17:52:01 +0100 Subject: [PATCH 34/53] Update y to self._current_prediction --- pybop/costs/fitting_costs.py | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index 93c50a75c..18cc752d6 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -273,16 +273,24 @@ def _evaluateS1(self, inputs): if not self.verify_prediction(self._current_prediction): return np.inf, self._de * np.ones(self.n_parameters) - r = np.asarray([y[signal] - self._target[signal] for signal in self.signal]) + r = np.asarray( + [ + self._current_prediction[signal] - self._target[signal] + for signal in self.signal + ] + ) e = np.asarray( [ - np.sum(np.abs(y[signal] - self._target[signal]) ** self.p) + np.sum( + np.abs(self._current_prediction[signal] - self._target[signal]) + ** self.p + ) ** (1 / self.p) for signal in self.signal ] ) de = np.sum( - np.sum(r ** (self.p - 1) * dy.T, axis=2) + np.sum(r ** (self.p - 1) * self._current_sensitivities.T, axis=2) / (e ** (self.p - 1) + np.finfo(float).eps), axis=1, ) @@ -380,9 +388,16 @@ def _evaluateS1(self, inputs): if not self.verify_prediction(self._current_prediction): return np.inf, self._de * np.ones(self.n_parameters) - r = np.asarray([y[signal] - self._target[signal] for signal in self.signal]) + r = np.asarray( + [ + self._current_prediction[signal] - self._target[signal] + for signal in self.signal + ] + ) e = np.sum(np.sum(np.abs(r) ** self.p)) - de = self.p * np.sum(np.sum(r ** (self.p - 1) * dy.T, axis=2), axis=1) + de = self.p * np.sum( + np.sum(r ** (self.p - 1) * self._current_sensitivities.T, axis=2), axis=1 + ) return e, de From 5157a8d8c96390e202877bb81af984ac50dda78d Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Tue, 16 Jul 2024 17:10:20 +0100 Subject: [PATCH 35/53] Update cost_list to args --- examples/scripts/spm_weighted_cost.py | 2 +- pybop/costs/base_cost.py | 48 ++++++++++++++------------- tests/unit/test_cost.py | 32 +++++++----------- 3 files changed, 38 insertions(+), 44 deletions(-) diff --git a/examples/scripts/spm_weighted_cost.py b/examples/scripts/spm_weighted_cost.py index 557434613..74c43a33c 100644 --- a/examples/scripts/spm_weighted_cost.py +++ b/examples/scripts/spm_weighted_cost.py @@ -41,7 +41,7 @@ problem = pybop.FittingProblem(model, parameters, dataset) cost1 = pybop.SumSquaredError(problem) cost2 = 
pybop.RootMeanSquaredError(problem)
-weighted_cost = pybop.WeightedCost(cost_list=[cost1, cost2], weights=[1, 100])
+weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[1, 100])

 for cost in [weighted_cost, cost1, cost2]:
     optim = pybop.IRPropMin(cost, max_iterations=60)
diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py
index eb2038b66..9a3590530 100644
--- a/pybop/costs/base_cost.py
+++ b/pybop/costs/base_cost.py
@@ -212,45 +212,47 @@ class WeightedCost(BaseCost):
     a single weighted cost function.

     Inherits all parameters and attributes from ``BaseCost``.
+
+    Additional Attributes
+    ---------------------
+    costs : List[pybop.BaseCost]
+        A list of PyBOP cost objects.
     """

-    def __init__(self, cost_list, weights=None):
-        self.cost_list = cost_list
+    def __init__(self, *args, weights=None):
+        self.costs = []
+        for cost in args:
+            if not isinstance(cost, BaseCost):
+                raise TypeError(f"Received {type(cost)} instead of cost object.")
+            self.costs.append(cost)
         self.weights = weights
         self._different_problems = False

-        if not isinstance(self.cost_list, list):
-            raise TypeError(
-                f"Expected a list of costs. Received {type(self.cost_list)}"
-            )
         if self.weights is None:
-            self.weights = np.ones(len(cost_list))
+            self.weights = np.ones(len(self.costs))
         elif isinstance(self.weights, list):
             self.weights = np.array(self.weights)
         if not isinstance(self.weights, np.ndarray):
             raise TypeError(
-                "Expected a list or array of weights the same length as cost_list."
+                "Expected a list or array of weights the same length as costs."
             )
-        if not len(self.weights) == len(self.cost_list):
+        if not len(self.weights) == len(self.costs):
             raise ValueError(
-                "Expected a list or array of weights the same length as cost_list."
+                "Expected a list or array of weights the same length as costs."
             )

         # Check if all costs depend on the same problem
-        for cost in self.cost_list:
-            if (
-                hasattr(cost, "problem")
-                and cost.problem is not self.cost_list[0].problem
-            ):
+        for cost in self.costs:
+            if hasattr(cost, "problem") and cost.problem is not self.costs[0].problem:
                 self._different_problems = True

         if not self._different_problems:
-            super().__init__(self.cost_list[0].problem)
-            self._fixed_problem = self.cost_list[0]._fixed_problem
+            super().__init__(self.costs[0].problem)
+            self._fixed_problem = self.costs[0]._fixed_problem
         else:
             super().__init__()
             self._fixed_problem = False
-            for cost in self.cost_list:
+            for cost in self.costs:
                 self.parameters.join(cost.parameters)

     def _evaluate(self, inputs: Inputs, grad=None):
@@ -270,14 +272,14 @@ def _evaluate(self, inputs: Inputs, grad=None):
         float
             The weighted cost value.
         """
-        e = np.empty_like(self.cost_list)
+        e = np.empty_like(self.costs)

         if not self._fixed_problem and self._different_problems:
             self.parameters.update(values=list(inputs.values()))
         elif not self._fixed_problem:
             self._current_prediction = self.problem.evaluate(inputs)

-        for i, cost in enumerate(self.cost_list):
+        for i, cost in enumerate(self.costs):
             if not self._fixed_problem and self._different_problems:
                 inputs = cost.parameters.as_dict()
                 cost._current_prediction = cost.problem.evaluate(inputs)
@@ -302,8 +304,8 @@ def _evaluateS1(self, inputs: Inputs):
             A tuple containing the cost and the gradient. The cost is a float,
             and the gradient is an array-like of the same length as `inputs`.
""" - e = np.empty_like(self.cost_list) - de = np.empty((len(self.parameters), len(self.cost_list))) + e = np.empty_like(self.costs) + de = np.empty((len(self.parameters), len(self.costs))) if not self._fixed_problem and self._different_problems: self.parameters.update(values=list(inputs.values())) @@ -312,7 +314,7 @@ def _evaluateS1(self, inputs: Inputs): self.problem.evaluateS1(inputs) ) - for i, cost in enumerate(self.cost_list): + for i, cost in enumerate(self.costs): if not self._fixed_problem and self._different_problems: inputs = cost.parameters.as_dict() cost._current_prediction, cost._current_sensitivities = ( diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index 2c9c2d5e5..ac6e58772 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -304,37 +304,31 @@ def test_weighted_fitting_cost(self, problem): cost2 = pybop.RootMeanSquaredError(problem) # Test with and without weights - weighted_cost = pybop.WeightedCost(cost_list=[cost1, cost2]) + weighted_cost = pybop.WeightedCost(cost1, cost2) np.testing.assert_array_equal(weighted_cost.weights, np.ones(2)) - weighted_cost = pybop.WeightedCost(cost_list=[cost1, cost2], weights=[1, 1]) + weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[1, 1]) np.testing.assert_array_equal(weighted_cost.weights, np.ones(2)) - weighted_cost = pybop.WeightedCost( - cost_list=[cost1, cost2], weights=np.array([1, 1]) - ) + weighted_cost = pybop.WeightedCost(cost1, cost2, weights=np.array([1, 1])) np.testing.assert_array_equal(weighted_cost.weights, np.ones(2)) with pytest.raises( TypeError, - match=r"Expected a list of costs.", + match=r"Received instead of cost object.", ): - weighted_cost = pybop.WeightedCost(cost_list="Invalid string") + weighted_cost = pybop.WeightedCost("Invalid string") with pytest.raises( TypeError, - match="Expected a list or array of weights the same length as cost_list.", + match="Expected a list or array of weights the same length as costs.", ): - weighted_cost = pybop.WeightedCost( - cost_list=[cost1, cost2], weights="Invalid string" - ) + weighted_cost = pybop.WeightedCost(cost1, cost2, weights="Invalid string") with pytest.raises( ValueError, - match="Expected a list or array of weights the same length as cost_list.", + match="Expected a list or array of weights the same length as costs.", ): - weighted_cost = pybop.WeightedCost(cost_list=[cost1, cost2], weights=[1]) + weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[1]) # Test with and without different problems weight = 100 - weighted_cost_2 = pybop.WeightedCost( - cost_list=[cost1, cost2], weights=[1, weight] - ) + weighted_cost_2 = pybop.WeightedCost(cost1, cost2, weights=[1, weight]) assert weighted_cost_2._different_problems is False assert weighted_cost_2._fixed_problem is True assert weighted_cost_2.problem is problem @@ -346,9 +340,7 @@ def test_weighted_fitting_cost(self, problem): ) cost3 = pybop.RootMeanSquaredError(copy(problem)) - weighted_cost_3 = pybop.WeightedCost( - cost_list=[cost1, cost3], weights=[1, weight] - ) + weighted_cost_3 = pybop.WeightedCost(cost1, cost3, weights=[1, weight]) assert weighted_cost_3._different_problems is True assert weighted_cost_3._fixed_problem is False assert weighted_cost_3.problem is None @@ -370,7 +362,7 @@ def test_weighted_design_cost(self, design_problem): cost2 = pybop.RootMeanSquaredError(design_problem) # Test with and without weights - weighted_cost = pybop.WeightedCost(cost_list=[cost1, cost2]) + weighted_cost = pybop.WeightedCost(cost1, cost2) assert 
weighted_cost._different_problems is False assert weighted_cost._fixed_problem is False assert weighted_cost.problem is design_problem From 3e220b16ccc670ce071f1f4019be2ab1de0a594e Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Tue, 16 Jul 2024 18:04:58 +0100 Subject: [PATCH 36/53] Remove repeated line --- pybop/costs/_likelihoods.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index 43dc326aa..1f96e2fb5 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -241,7 +241,6 @@ def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]: self._current_prediction, self._current_sensitivities = self.problem.evaluateS1( self.problem.parameters.as_dict() ) - y, dy = self.problem.evaluateS1(self.problem.parameters.as_dict()) if not self.verify_prediction(self._current_prediction): return -np.inf, -self._de * np.ones(self.n_parameters) From 95600be95bd5994630023efbb2d6581c9a3ea9d2 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Tue, 16 Jul 2024 18:05:15 +0100 Subject: [PATCH 37/53] Add descriptions --- pybop/costs/base_cost.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 9a3590530..b3cf7ae1e 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -1,4 +1,4 @@ -from typing import Union +from typing import Optional, Union import numpy as np @@ -24,9 +24,15 @@ class BaseCost: An array containing the target data to fit. n_outputs : int The number of outputs in the model. + + Additional Attributes + --------------------- + _fixed_problem : bool + If True, the problem does not need to be rebuilt before the cost is + calculated (default: False). """ - def __init__(self, problem=None): + def __init__(self, problem: Optional[BaseProblem] = None): self.parameters = Parameters() self.problem = problem self._fixed_problem = False @@ -215,11 +221,16 @@ class WeightedCost(BaseCost): Additional Attributes --------------------- - costs : List[pybop.BaseCost] + costs : list[pybop.BaseCost] A list of PyBOP cost objects. + weights : list[float] + A list of values with which to weight the cost values. + _different_problems : bool + If True, the problem for each cost is evaluated independently during + each evaluation of the cost (default: False). 
""" - def __init__(self, *args, weights=None): + def __init__(self, *args, weights: Optional[list[float]] = None): self.costs = [] for cost in args: if not isinstance(cost, BaseCost): From 8c7dddb76d87c23432a3fd1bd45e1f2b91015325 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Wed, 17 Jul 2024 17:49:17 +0100 Subject: [PATCH 38/53] refactor: Integrates DesignProblems into weighted_cost structure, adds error checking for different problem class, general refactor for improved clarity --- examples/scripts/spm_weighted_cost.py | 27 +++-- pybop/costs/_likelihoods.py | 55 +++------- pybop/costs/base_cost.py | 146 +++++++++++++------------- pybop/costs/design_costs.py | 114 ++++++++++---------- pybop/costs/fitting_costs.py | 73 ++++--------- pybop/optimisers/base_optimiser.py | 11 +- tests/unit/test_cost.py | 46 +++++--- 7 files changed, 228 insertions(+), 244 deletions(-) diff --git a/examples/scripts/spm_weighted_cost.py b/examples/scripts/spm_weighted_cost.py index 74c43a33c..43c5cd008 100644 --- a/examples/scripts/spm_weighted_cost.py +++ b/examples/scripts/spm_weighted_cost.py @@ -24,24 +24,37 @@ # Generate data sigma = 0.001 -t_eval = np.arange(0, 900, 3) -values = model.predict(t_eval=t_eval) -corrupt_values = values["Voltage [V]"].data + np.random.normal(0, sigma, len(t_eval)) +init_soc = 0.5 +experiment = pybop.Experiment( + [ + ( + "Discharge at 0.5C for 3 minutes (3 second period)", + "Charge at 0.5C for 3 minutes (3 second period)", + ), + ] + * 2 +) +values = model.predict(experiment=experiment, init_soc=init_soc) + + +def noise(sigma): + return np.random.normal(0, sigma, len(values["Voltage [V]"].data)) + # Form dataset dataset = pybop.Dataset( { - "Time [s]": t_eval, + "Time [s]": values["Time [s]"].data, "Current function [A]": values["Current [A]"].data, - "Voltage [V]": corrupt_values, + "Voltage [V]": values["Voltage [V]"].data + noise(sigma), } ) # Generate problem, cost function, and optimisation class -problem = pybop.FittingProblem(model, parameters, dataset) +problem = pybop.FittingProblem(model, parameters, dataset, init_soc=init_soc) cost1 = pybop.SumSquaredError(problem) cost2 = pybop.RootMeanSquaredError(problem) -weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[1, 100]) +weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[0.1, 1]) for cost in [weighted_cost, cost1, cost2]: optim = pybop.IRPropMin(cost, max_iterations=60) diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index 1f96e2fb5..0751b4331 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -43,7 +43,7 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo """ Evaluates the Gaussian log-likelihood for the given parameters with known sigma. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return -np.inf e = np.asarray( @@ -51,9 +51,7 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo np.sum( self._offset + self._multip - * np.sum( - (self._target[signal] - self._current_prediction[signal]) ** 2.0 - ) + * np.sum((self._target[signal] - self.y[signal]) ** 2.0) ) for signal in self.signal ] @@ -65,20 +63,15 @@ def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]: """ Calculates the log-likelihood and gradient. 
""" - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return -np.inf, -self._de * np.ones(self.n_parameters) likelihood = self._evaluate(inputs) r = np.asarray( - [ - self._target[signal] - self._current_prediction[signal] - for signal in self.signal - ] - ) - dl = np.sum( - (np.sum((r * self._current_sensitivities.T), axis=2) / self.sigma2), axis=1 + [self._target[signal] - self.y[signal] for signal in self.signal] ) + dl = np.sum((np.sum((r * self.dy.T), axis=2) / self.sigma2), axis=1) return likelihood, dl @@ -123,7 +116,7 @@ def __init__( super().__init__(problem) self._dsigma_scale = dsigma_scale self._logpi = -0.5 * self.n_time_data * np.log(2 * np.pi) - self._fixed_problem = False # keep problem evaluation within _evaluate + self._predict = False # keep problem evaluation within _evaluate self.sigma = Parameters() self._add_sigma_parameters(sigma0) @@ -196,10 +189,8 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo if np.any(sigma <= 0): return -np.inf - self._current_prediction = self.problem.evaluate( - self.problem.parameters.as_dict() - ) - if not self.verify_prediction(self._current_prediction): + self.y = self.problem.evaluate(self.problem.parameters.as_dict()) + if not self.verify_prediction(self.y): return -np.inf e = np.asarray( @@ -207,9 +198,7 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo np.sum( self._logpi - self.n_time_data * np.log(sigma) - - np.sum( - (self._target[signal] - self._current_prediction[signal]) ** 2.0 - ) + - np.sum((self._target[signal] - self.y[signal]) ** 2.0) / (2.0 * sigma**2.0) ) for signal in self.signal @@ -238,23 +227,16 @@ def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]: if np.any(sigma <= 0): return -np.inf, -self._de * np.ones(self.n_parameters) - self._current_prediction, self._current_sensitivities = self.problem.evaluateS1( - self.problem.parameters.as_dict() - ) - if not self.verify_prediction(self._current_prediction): + self.y, self.dy = self.problem.evaluateS1(self.problem.parameters.as_dict()) + if not self.verify_prediction(self.y): return -np.inf, -self._de * np.ones(self.n_parameters) likelihood = self._evaluate(inputs) r = np.asarray( - [ - self._target[signal] - self._current_prediction[signal] - for signal in self.signal - ] - ) - dl = np.sum( - (np.sum((r * self._current_sensitivities.T), axis=2) / (sigma**2.0)), axis=1 + [self._target[signal] - self.y[signal] for signal in self.signal] ) + dl = np.sum((np.sum((r * self.dy.T), axis=2) / (sigma**2.0)), axis=1) dsigma = ( -self.n_time_data / sigma + np.sum(r**2.0, axis=1) / (sigma**3.0) ) / self._dsigma_scale @@ -320,9 +302,7 @@ def _evaluate(self, inputs: Inputs, grad=None) -> float: if not np.isfinite(log_prior).any(): return -np.inf - if self._fixed_problem: - self.likelihood._current_prediction = self._current_prediction - log_likelihood = self.likelihood._evaluate(inputs) + log_likelihood = self.likelihood.evaluate(inputs) posterior = log_likelihood + log_prior return posterior @@ -354,12 +334,7 @@ def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]: if not np.isfinite(log_prior).any(): return -np.inf, -self._de * np.ones(self.n_parameters) - if self._fixed_problem: - ( - self.likelihood._current_prediction, - self.likelihood._current_sensitivities, - ) = self._current_prediction, self._current_sensitivities - log_likelihood, dl = self.likelihood._evaluateS1(inputs) + log_likelihood, dl = self.likelihood.evaluateS1(inputs) 
# Compute a finite difference approximation of the gradient of the log prior delta = self.parameters.initial_value() * self.gradient_step diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index b3cf7ae1e..974227f97 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -1,8 +1,10 @@ +import copy +import warnings from typing import Optional, Union import numpy as np -from pybop import BaseProblem +from pybop import BaseProblem, DesignProblem from pybop.parameters.parameter import Inputs, Parameters @@ -24,25 +26,25 @@ class BaseCost: An array containing the target data to fit. n_outputs : int The number of outputs in the model. - - Additional Attributes - --------------------- - _fixed_problem : bool - If True, the problem does not need to be rebuilt before the cost is - calculated (default: False). + _predict : bool + If False, the problem will be evaluated outside the self.evaluate() method + before the cost is calculated (default: False). """ def __init__(self, problem: Optional[BaseProblem] = None): self.parameters = Parameters() self.problem = problem - self._fixed_problem = False + self.verbose = False + self._predict = False + self.y = None + self.dy = None self.set_fail_gradient() if isinstance(self.problem, BaseProblem): self._target = self.problem._target self.parameters.join(self.problem.parameters) self.n_outputs = self.problem.n_outputs self.signal = self.problem.signal - self._fixed_problem = True + self._predict = True @property def n_parameters(self): @@ -79,8 +81,8 @@ def evaluate(self, inputs: Union[Inputs, list], grad=None): inputs = self.parameters.verify(inputs) try: - if self._fixed_problem: - self._current_prediction = self.problem.evaluate(inputs) + if self._predict: + self.y = self.problem.evaluate(inputs) return self._evaluate(inputs, grad) @@ -139,10 +141,8 @@ def evaluateS1(self, inputs: Union[Inputs, list]): inputs = self.parameters.verify(inputs) try: - if self._fixed_problem: - self._current_prediction, self._current_sensitivities = ( - self.problem.evaluateS1(inputs) - ) + if self._predict: + self.y, self.dy = self.problem.evaluateS1(inputs) return self._evaluateS1(inputs) @@ -219,52 +219,59 @@ class WeightedCost(BaseCost): Inherits all parameters and attributes from ``BaseCost``. - Additional Attributes + Attributes --------------------- costs : list[pybop.BaseCost] A list of PyBOP cost objects. weights : list[float] A list of values with which to weight the cost values. - _different_problems : bool + _has_different_problems : bool If True, the problem for each cost is evaluated independently during each evaluation of the cost (default: False). 
""" - def __init__(self, *args, weights: Optional[list[float]] = None): - self.costs = [] - for cost in args: - if not isinstance(cost, BaseCost): - raise TypeError(f"Received {type(cost)} instead of cost object.") - self.costs.append(cost) - self.weights = weights - self._different_problems = False - - if self.weights is None: + def __init__(self, *costs, weights: Optional[list[float]] = None): + if not all(isinstance(cost, BaseCost) for cost in costs): + raise TypeError("All costs must be instances of BaseCost.") + self.costs = [copy.copy(cost) for cost in costs] + self._has_different_problems = False + self.minimising = not any( + isinstance(cost.problem, DesignProblem) for cost in self.costs + ) + if len(set(type(cost.problem) for cost in self.costs)) > 1: + raise TypeError("All problems must be of the same class type.") + + # Check if weights are provided + if weights is not None: + try: + self.weights = np.asarray(weights, dtype=float) + except ValueError: + raise ValueError("Weights must be numeric values.") from None + + if self.weights.size != len(self.costs): + raise ValueError("Number of weights must match number of costs.") + else: self.weights = np.ones(len(self.costs)) - elif isinstance(self.weights, list): - self.weights = np.array(self.weights) - if not isinstance(self.weights, np.ndarray): - raise TypeError( - "Expected a list or array of weights the same length as costs." - ) - if not len(self.weights) == len(self.costs): - raise ValueError( - "Expected a list or array of weights the same length as costs." - ) # Check if all costs depend on the same problem - for cost in self.costs: - if hasattr(cost, "problem") and cost.problem is not self.costs[0].problem: - self._different_problems = True + self._has_different_problems = any( + hasattr(cost, "problem") and cost.problem is not self.costs[0].problem + for cost in self.costs[1:] + ) - if not self._different_problems: - super().__init__(self.costs[0].problem) - self._fixed_problem = self.costs[0]._fixed_problem - else: + if self._has_different_problems: super().__init__() - self._fixed_problem = False for cost in self.costs: self.parameters.join(cost.parameters) + else: + super().__init__(self.costs[0].problem) + self._predict = False + for cost in self.costs: + cost._predict = False + + # Catch UserWarnings as exceptions + if not self.minimising: + warnings.filterwarnings("error", category=UserWarning) def _evaluate(self, inputs: Inputs, grad=None): """ @@ -285,18 +292,22 @@ def _evaluate(self, inputs: Inputs, grad=None): """ e = np.empty_like(self.costs) - if not self._fixed_problem and self._different_problems: - self.parameters.update(values=list(inputs.values())) - elif not self._fixed_problem: - self._current_prediction = self.problem.evaluate(inputs) + if not self._predict: + if self._has_different_problems: + self.parameters.update(values=list(inputs.values())) + else: + try: + with warnings.catch_warnings(): + self.y = self.problem.evaluate(inputs) + except UserWarning as e: + if self.verbose: + print(f"Ignoring this sample due to: {e}") + return -np.inf for i, cost in enumerate(self.costs): - if not self._fixed_problem and self._different_problems: - inputs = cost.parameters.as_dict() - cost._current_prediction = cost.problem.evaluate(inputs) - else: - cost._current_prediction = self._current_prediction - e[i] = cost._evaluate(inputs, grad) + if not self._has_different_problems: + cost.y = self.y + e[i] = cost.evaluate(inputs) return np.dot(e, self.weights) @@ -318,25 +329,16 @@ def _evaluateS1(self, inputs: 
Inputs): e = np.empty_like(self.costs) de = np.empty((len(self.parameters), len(self.costs))) - if not self._fixed_problem and self._different_problems: - self.parameters.update(values=list(inputs.values())) - elif not self._fixed_problem: - self._current_prediction, self._current_sensitivities = ( - self.problem.evaluateS1(inputs) - ) + if not self._predict: + if self._has_different_problems: + self.parameters.update(values=list(inputs.values())) + else: + self.y, self.dy = self.problem.evaluateS1(inputs) for i, cost in enumerate(self.costs): - if not self._fixed_problem and self._different_problems: - inputs = cost.parameters.as_dict() - cost._current_prediction, cost._current_sensitivities = ( - cost.problem.evaluateS1(inputs) - ) - else: - cost._current_prediction, cost._current_sensitivities = ( - self._current_prediction, - self._current_sensitivities, - ) - e[i], de[:, i] = cost._evaluateS1(inputs) + if not self._has_different_problems: + cost.y, cost.dy = (self.y, self.dy) + e[i], de[:, i] = cost.evaluateS1(inputs) e = np.dot(e, self.weights) de = np.dot(de, self.weights) diff --git a/pybop/costs/design_costs.py b/pybop/costs/design_costs.py index 738dfe61e..06649cb64 100644 --- a/pybop/costs/design_costs.py +++ b/pybop/costs/design_costs.py @@ -1,4 +1,5 @@ import warnings +from typing import Union import numpy as np @@ -65,25 +66,56 @@ def update_simulation_data(self, inputs: Inputs): self.problem._target = {key: solution[key] for key in self.problem.signal} self.dt = solution["Time [s]"][1] - solution["Time [s]"][0] - def _evaluate(self, inputs: Inputs, grad=None): + def evaluate(self, inputs: Union[Inputs, list], grad=None): """ - Computes the value of the cost function. - - This method must be implemented by subclasses. + Call the evaluate function for a given set of parameters. Parameters ---------- - inputs : Inputs - The parameters for which to compute the cost. - grad : array, optional - Gradient information, not used in this method. + inputs : Inputs or array-like + The parameters for which to compute the cost and gradient. + grad : array-like, optional + An array to store the gradient of the cost function with respect + to the parameters. + + Returns + ------- + float + The calculated cost function value. Raises ------ - NotImplementedError - If the method has not been implemented by the subclass. + ValueError + If an error occurs during the calculation of the cost. 
""" - raise NotImplementedError + inputs = self.parameters.verify(inputs) + + try: + with warnings.catch_warnings(): + # Convert UserWarning to an exception + warnings.filterwarnings("error", category=UserWarning) + if self._predict: + if self.update_capacity: + self.problem.model.approximate_capacity(inputs) + self.y = self.problem.evaluate(inputs) + + return self._evaluate(inputs, grad) + + # Catch NotImplementedError and raise it + except NotImplementedError as e: + raise e + + # Catch infeasible solutions and return infinity + except UserWarning as e: + if self.verbose: + print(f"Ignoring this sample due to: {e}") + return -np.inf + + # Catch any other exception and return infinity + except Exception as e: + if self.verbose: + print(f"An error occurred during the evaluation: {e}") + return -np.inf class GravimetricEnergyDensity(DesignCost): @@ -98,7 +130,6 @@ class GravimetricEnergyDensity(DesignCost): def __init__(self, problem, update_capacity=False): super().__init__(problem, update_capacity) - self._fixed_problem = False # keep problem evaluation within _evaluate def _evaluate(self, inputs: Inputs, grad=None): """ @@ -116,31 +147,12 @@ def _evaluate(self, inputs: Inputs, grad=None): float The gravimetric energy density or -infinity in case of infeasible parameters. """ - try: - with warnings.catch_warnings(): - # Convert UserWarning to an exception - warnings.filterwarnings("error", category=UserWarning) - - if self.update_capacity: - self.problem.model.approximate_capacity(inputs) - solution = self.problem.evaluate(inputs) - - voltage, current = solution["Voltage [V]"], solution["Current [A]"] - energy_density = np.trapz(voltage * current, dx=self.dt) / ( - 3600 * self.problem.model.cell_mass(self.parameter_set) - ) + voltage, current = self.y["Voltage [V]"], self.y["Current [A]"] + energy_density = np.trapz(voltage * current, dx=self.dt) / ( + 3600 * self.problem.model.cell_mass(self.parameter_set) + ) - return energy_density - - # Catch infeasible solutions and return infinity - except UserWarning as e: - print(f"Ignoring this sample due to: {e}") - return -np.inf - - # Catch any other exception and return infinity - except Exception as e: - print(f"An error occurred during the evaluation: {e}") - return -np.inf + return energy_density class VolumetricEnergyDensity(DesignCost): @@ -155,7 +167,6 @@ class VolumetricEnergyDensity(DesignCost): def __init__(self, problem, update_capacity=False): super().__init__(problem, update_capacity) - self._fixed_problem = False # keep problem evaluation within _evaluate def _evaluate(self, inputs: Inputs, grad=None): """ @@ -173,28 +184,9 @@ def _evaluate(self, inputs: Inputs, grad=None): float The volumetric energy density or -infinity in case of infeasible parameters. 
""" - try: - with warnings.catch_warnings(): - # Convert UserWarning to an exception - warnings.filterwarnings("error", category=UserWarning) - - if self.update_capacity: - self.problem.model.approximate_capacity(inputs) - solution = self.problem.evaluate(inputs) + voltage, current = self.y["Voltage [V]"], self.y["Current [A]"] + energy_density = np.trapz(voltage * current, dx=self.dt) / ( + 3600 * self.problem.model.cell_volume(self.parameter_set) + ) - voltage, current = solution["Voltage [V]"], solution["Current [A]"] - energy_density = np.trapz(voltage * current, dx=self.dt) / ( - 3600 * self.problem.model.cell_volume(self.parameter_set) - ) - - return energy_density - - # Catch infeasible solutions and return infinity - except UserWarning as e: - print(f"Ignoring this sample due to: {e}") - return -np.inf - - # Catch any other exception and return infinity - except Exception as e: - print(f"An error occurred during the evaluation: {e}") - return -np.inf + return energy_density diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index 18cc752d6..cfcc32ef5 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -38,16 +38,12 @@ def _evaluate(self, inputs: Inputs, grad=None): The root mean square error. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return np.inf e = np.asarray( [ - np.sqrt( - np.mean( - (self._current_prediction[signal] - self._target[signal]) ** 2 - ) - ) + np.sqrt(np.mean((self.y[signal] - self._target[signal]) ** 2)) for signal in self.signal ] ) @@ -74,19 +70,14 @@ def _evaluateS1(self, inputs: Inputs): ValueError If an error occurs during the calculation of the cost or gradient. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return np.inf, self._de * np.ones(self.n_parameters) r = np.asarray( - [ - self._current_prediction[signal] - self._target[signal] - for signal in self.signal - ] + [self.y[signal] - self._target[signal] for signal in self.signal] ) e = np.sqrt(np.mean(r**2, axis=1)) - de = np.mean((r * self._current_sensitivities.T), axis=2) / ( - e + np.finfo(float).eps - ) + de = np.mean((r * self.dy.T), axis=2) / (e + np.finfo(float).eps) if self.n_outputs == 1: return e.item(), de.flatten() @@ -132,12 +123,12 @@ def _evaluate(self, inputs: Inputs, grad=None): float The Sum of Squared Error. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return np.inf e = np.asarray( [ - np.sum((self._current_prediction[signal] - self._target[signal]) ** 2) + np.sum((self.y[signal] - self._target[signal]) ** 2) for signal in self.signal ] ) @@ -164,17 +155,14 @@ def _evaluateS1(self, inputs: Inputs): ValueError If an error occurs during the calculation of the cost or gradient. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return np.inf, self._de * np.ones(self.n_parameters) r = np.asarray( - [ - self._current_prediction[signal] - self._target[signal] - for signal in self.signal - ] + [self.y[signal] - self._target[signal] for signal in self.signal] ) e = np.sum(np.sum(r**2, axis=0), axis=0) - de = 2 * np.sum(np.sum((r * self._current_sensitivities.T), axis=2), axis=1) + de = 2 * np.sum(np.sum((r * self.dy.T), axis=2), axis=1) return e, de @@ -234,15 +222,12 @@ def _evaluate(self, inputs: Inputs, grad=None): float The Minkowski cost. 
""" - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return np.inf e = np.asarray( [ - np.sum( - np.abs(self._current_prediction[signal] - self._target[signal]) - ** self.p - ) + np.sum(np.abs(self.y[signal] - self._target[signal]) ** self.p) ** (1 / self.p) for signal in self.signal ] @@ -270,27 +255,21 @@ def _evaluateS1(self, inputs): ValueError If an error occurs during the calculation of the cost or gradient. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return np.inf, self._de * np.ones(self.n_parameters) r = np.asarray( - [ - self._current_prediction[signal] - self._target[signal] - for signal in self.signal - ] + [self.y[signal] - self._target[signal] for signal in self.signal] ) e = np.asarray( [ - np.sum( - np.abs(self._current_prediction[signal] - self._target[signal]) - ** self.p - ) + np.sum(np.abs(self.y[signal] - self._target[signal]) ** self.p) ** (1 / self.p) for signal in self.signal ] ) de = np.sum( - np.sum(r ** (self.p - 1) * self._current_sensitivities.T, axis=2) + np.sum(r ** (self.p - 1) * self.dy.T, axis=2) / (e ** (self.p - 1) + np.finfo(float).eps), axis=1, ) @@ -350,15 +329,12 @@ def _evaluate(self, inputs: Inputs, grad=None): float The Sum of Power cost. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return np.inf e = np.asarray( [ - np.sum( - np.abs(self._current_prediction[signal] - self._target[signal]) - ** self.p - ) + np.sum(np.abs(self.y[signal] - self._target[signal]) ** self.p) for signal in self.signal ] ) @@ -385,19 +361,14 @@ def _evaluateS1(self, inputs): ValueError If an error occurs during the calculation of the cost or gradient. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return np.inf, self._de * np.ones(self.n_parameters) r = np.asarray( - [ - self._current_prediction[signal] - self._target[signal] - for signal in self.signal - ] + [self.y[signal] - self._target[signal] for signal in self.signal] ) e = np.sum(np.sum(np.abs(r) ** self.p)) - de = self.p * np.sum( - np.sum(r ** (self.p - 1) * self._current_sensitivities.T, axis=2), axis=1 - ) + de = self.p * np.sum(np.sum(r ** (self.p - 1) * self.dy.T, axis=2), axis=1) return e, de @@ -416,7 +387,7 @@ class ObserverCost(BaseCost): def __init__(self, observer: Observer): super().__init__(problem=observer) self._observer = observer - self._fixed_problem = False # keep problem evaluation within _evaluate + self._predict = False # evaluate problem within cost._evaluate() def _evaluate(self, inputs: Inputs, grad=None): """ diff --git a/pybop/optimisers/base_optimiser.py b/pybop/optimisers/base_optimiser.py index 2b00b2345..b1bbb0ab2 100644 --- a/pybop/optimisers/base_optimiser.py +++ b/pybop/optimisers/base_optimiser.py @@ -3,7 +3,14 @@ import numpy as np -from pybop import BaseCost, BaseLikelihood, DesignCost, Parameter, Parameters +from pybop import ( + BaseCost, + BaseLikelihood, + DesignCost, + Parameter, + Parameters, + WeightedCost, +) class BaseOptimiser: @@ -67,6 +74,8 @@ def __init__( self.cost = cost self.parameters.join(cost.parameters) self.set_allow_infeasible_solutions() + if isinstance(cost, WeightedCost): + self.minimising = cost.minimising if isinstance(cost, (BaseLikelihood, DesignCost)): self.minimising = False diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index ac6e58772..b44376b4e 100644 --- a/tests/unit/test_cost.py +++ 
b/tests/unit/test_cost.py @@ -312,25 +312,25 @@ def test_weighted_fitting_cost(self, problem): np.testing.assert_array_equal(weighted_cost.weights, np.ones(2)) with pytest.raises( TypeError, - match=r"Received instead of cost object.", + match="All costs must be instances of BaseCost.", ): - weighted_cost = pybop.WeightedCost("Invalid string") + weighted_cost = pybop.WeightedCost(cost1.problem) with pytest.raises( - TypeError, - match="Expected a list or array of weights the same length as costs.", + ValueError, + match="Weights must be numeric values.", ): weighted_cost = pybop.WeightedCost(cost1, cost2, weights="Invalid string") with pytest.raises( ValueError, - match="Expected a list or array of weights the same length as costs.", + match="Number of weights must match number of costs.", ): weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[1]) # Test with and without different problems weight = 100 weighted_cost_2 = pybop.WeightedCost(cost1, cost2, weights=[1, weight]) - assert weighted_cost_2._different_problems is False - assert weighted_cost_2._fixed_problem is True + assert weighted_cost_2._has_different_problems is False + assert weighted_cost_2._predict is False assert weighted_cost_2.problem is problem assert weighted_cost_2([0.5]) >= 0 np.testing.assert_allclose( @@ -341,8 +341,8 @@ def test_weighted_fitting_cost(self, problem): cost3 = pybop.RootMeanSquaredError(copy(problem)) weighted_cost_3 = pybop.WeightedCost(cost1, cost3, weights=[1, weight]) - assert weighted_cost_3._different_problems is True - assert weighted_cost_3._fixed_problem is False + assert weighted_cost_3._has_different_problems is True + assert weighted_cost_3._predict is False assert weighted_cost_3.problem is None assert weighted_cost_3([0.5]) >= 0 np.testing.assert_allclose( @@ -356,15 +356,27 @@ def test_weighted_fitting_cost(self, problem): np.testing.assert_allclose(errors_2, errors_3, atol=1e-5) np.testing.assert_allclose(sensitivities_2, sensitivities_3, atol=1e-5) + # Test MAP explicitly + cost4 = pybop.MAP(problem, pybop.GaussianLogLikelihoodKnownSigma) + weighted_cost_4 = pybop.WeightedCost(cost1, cost4, weights=[1, weight]) + assert weighted_cost_4._has_different_problems is False + assert weighted_cost_4._predict is False + assert weighted_cost_4([0.5]) <= 0 + np.testing.assert_allclose( + weighted_cost_4.evaluate([0.6]), + cost1([0.6]) + weight * cost4([0.6]), + atol=1e-5, + ) + @pytest.mark.unit def test_weighted_design_cost(self, design_problem): cost1 = pybop.GravimetricEnergyDensity(design_problem) - cost2 = pybop.RootMeanSquaredError(design_problem) + cost2 = pybop.VolumetricEnergyDensity(design_problem) # Test with and without weights weighted_cost = pybop.WeightedCost(cost1, cost2) - assert weighted_cost._different_problems is False - assert weighted_cost._fixed_problem is False + assert weighted_cost._has_different_problems is False + assert weighted_cost._predict is False assert weighted_cost.problem is design_problem assert weighted_cost([0.5]) >= 0 np.testing.assert_allclose( @@ -372,3 +384,13 @@ def test_weighted_design_cost(self, design_problem): cost1([0.6]) + cost2([0.6]), atol=1e-5, ) + + @pytest.mark.unit + def test_mixed_problem_classes(self, problem, design_problem): + cost1 = pybop.SumSquaredError(problem) + cost2 = pybop.GravimetricEnergyDensity(design_problem) + with pytest.raises( + TypeError, + match="All problems must be of the same class type.", + ): + pybop.WeightedCost(cost1, cost2) From 132f83c0e378cf14343b7a5ee52b0baf581a4a0a Mon Sep 17 00:00:00 2001 From: 
Brady Planden Date: Thu, 18 Jul 2024 13:34:33 +0100 Subject: [PATCH 39/53] feat: adds support for single DesignProblem optimisation, fixes for minimising, adds integration tests --- examples/scripts/spme_max_energy.py | 10 +- pybop/__init__.py | 3 +- pybop/costs/_weighted_cost.py | 137 ++++++++++++++++++ pybop/costs/base_cost.py | 141 +------------------ pybop/costs/design_costs.py | 31 ++--- pybop/problems/base_problem.py | 1 + pybop/problems/design_problem.py | 51 ++++--- pybop/problems/fitting_problem.py | 2 +- tests/integration/test_weighted_cost.py | 176 ++++++++++++++++++++++++ 9 files changed, 371 insertions(+), 181 deletions(-) create mode 100644 pybop/costs/_weighted_cost.py create mode 100644 tests/integration/test_weighted_cost.py diff --git a/examples/scripts/spme_max_energy.py b/examples/scripts/spme_max_energy.py index f5b7c827c..db521b688 100644 --- a/examples/scripts/spme_max_energy.py +++ b/examples/scripts/spme_max_energy.py @@ -45,13 +45,15 @@ model, parameters, experiment, signal=signal, init_soc=init_soc ) -# Generate cost function and optimisation class: -cost = pybop.GravimetricEnergyDensity(problem) +# Generate multiple cost functions and combine them. +cost1 = pybop.GravimetricEnergyDensity(problem, update_capacity=True) +cost2 = pybop.VolumetricEnergyDensity(problem, update_capacity=True) +cost = pybop.WeightedCost(cost1, cost2, weights=[1, 1]) + +# Run optimisation optim = pybop.PSO( cost, verbose=True, allow_infeasible_solutions=False, max_iterations=15 ) - -# Run optimisation x, final_cost = optim.run() print("Estimated parameters:", x) print(f"Initial gravimetric energy density: {cost(optim.x0):.2f} Wh.kg-1") diff --git a/pybop/__init__.py b/pybop/__init__.py index 4a99ebc48..922a64803 100644 --- a/pybop/__init__.py +++ b/pybop/__init__.py @@ -81,7 +81,7 @@ # # Cost function class # -from .costs.base_cost import BaseCost, WeightedCost +from .costs.base_cost import BaseCost from .costs.fitting_costs import ( RootMeanSquaredError, SumSquaredError, @@ -100,6 +100,7 @@ GaussianLogLikelihoodKnownSigma, MAP, ) +from .costs._weighted_cost import WeightedCost # # Optimiser class diff --git a/pybop/costs/_weighted_cost.py b/pybop/costs/_weighted_cost.py new file mode 100644 index 000000000..9555ece80 --- /dev/null +++ b/pybop/costs/_weighted_cost.py @@ -0,0 +1,137 @@ +import copy +from typing import Optional + +import numpy as np + +from pybop import BaseCost, BaseLikelihood, DesignCost +from pybop.parameters.parameter import Inputs + + +class WeightedCost(BaseCost): + """ + A subclass for constructing a linear combination of cost functions as + a single weighted cost function. + + Inherits all parameters and attributes from ``BaseCost``. + + Attributes + --------------------- + costs : list[pybop.BaseCost] + A list of PyBOP cost objects. + weights : list[float] + A list of values with which to weight the cost values. + _has_different_problems : bool + If True, the problem for each cost is evaluated independently during + each evaluation of the cost (default: False). 
+ """ + + def __init__(self, *costs, weights: Optional[list[float]] = None): + if not all(isinstance(cost, BaseCost) for cost in costs): + raise TypeError("All costs must be instances of BaseCost.") + self.costs = [copy.copy(cost) for cost in costs] + self._has_different_problems = False + self.minimising = not any( + isinstance(cost, (BaseLikelihood, DesignCost)) for cost in self.costs + ) + if len(set(type(cost.problem) for cost in self.costs)) > 1: + raise TypeError("All problems must be of the same class type.") + + # Check if weights are provided + if weights is not None: + try: + self.weights = np.asarray(weights, dtype=float) + except ValueError: + raise ValueError("Weights must be numeric values.") from None + + if self.weights.size != len(self.costs): + raise ValueError("Number of weights must match number of costs.") + else: + self.weights = np.ones(len(self.costs)) + + # Check if all costs depend on the same problem + self._has_different_problems = any( + hasattr(cost, "problem") and cost.problem is not self.costs[0].problem + for cost in self.costs[1:] + ) + + if self._has_different_problems: + super().__init__() + for cost in self.costs: + self.parameters.join(cost.parameters) + else: + super().__init__(self.costs[0].problem) + self._predict = False + for cost in self.costs: + cost._predict = False + + # Check if any cost function requires capacity update + if any(cost.update_capacity for cost in self.costs): + self.update_capacity = True + + def _evaluate(self, inputs: Inputs, grad=None): + """ + Calculate the weighted cost for a given set of parameters. + + Parameters + ---------- + inputs : Inputs + The parameters for which to compute the cost. + grad : array-like, optional + An array to store the gradient of the cost function with respect + to the parameters. + + Returns + ------- + float + The weighted cost value. + """ + e = np.empty_like(self.costs) + + if not self._predict: + if self._has_different_problems: + self.parameters.update(values=list(inputs.values())) + else: + self.y = self.problem.evaluate( + inputs, update_capacity=self.update_capacity + ) + + for i, cost in enumerate(self.costs): + if not self._has_different_problems: + cost.y = self.y + e[i] = cost.evaluate(inputs) + + return np.dot(e, self.weights) + + def _evaluateS1(self, inputs: Inputs): + """ + Compute the weighted cost and its gradient with respect to the parameters. + + Parameters + ---------- + inputs : Inputs + The parameters for which to compute the cost and gradient. + + Returns + ------- + tuple + A tuple containing the cost and the gradient. The cost is a float, + and the gradient is an array-like of the same length as `x`. 
+ """ + e = np.empty_like(self.costs) + de = np.empty((len(self.parameters), len(self.costs))) + + if not self._predict: + if self._has_different_problems: + self.parameters.update(values=list(inputs.values())) + else: + self.y, self.dy = self.problem.evaluateS1(inputs) + + for i, cost in enumerate(self.costs): + if not self._has_different_problems: + cost.y, cost.dy = (self.y, self.dy) + e[i], de[:, i] = cost.evaluateS1(inputs) + + e = np.dot(e, self.weights) + de = np.dot(de, self.weights) + + return e, de diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 974227f97..3ba818f52 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -1,10 +1,6 @@ -import copy -import warnings from typing import Optional, Union -import numpy as np - -from pybop import BaseProblem, DesignProblem +from pybop import BaseProblem from pybop.parameters.parameter import Inputs, Parameters @@ -36,6 +32,7 @@ def __init__(self, problem: Optional[BaseProblem] = None): self.problem = problem self.verbose = False self._predict = False + self.update_capacity = False self.y = None self.dy = None self.set_fail_gradient() @@ -210,137 +207,3 @@ def verify_prediction(self, y): return False return True - - -class WeightedCost(BaseCost): - """ - A subclass for constructing a linear combination of cost functions as - a single weighted cost function. - - Inherits all parameters and attributes from ``BaseCost``. - - Attributes - --------------------- - costs : list[pybop.BaseCost] - A list of PyBOP cost objects. - weights : list[float] - A list of values with which to weight the cost values. - _has_different_problems : bool - If True, the problem for each cost is evaluated independently during - each evaluation of the cost (default: False). - """ - - def __init__(self, *costs, weights: Optional[list[float]] = None): - if not all(isinstance(cost, BaseCost) for cost in costs): - raise TypeError("All costs must be instances of BaseCost.") - self.costs = [copy.copy(cost) for cost in costs] - self._has_different_problems = False - self.minimising = not any( - isinstance(cost.problem, DesignProblem) for cost in self.costs - ) - if len(set(type(cost.problem) for cost in self.costs)) > 1: - raise TypeError("All problems must be of the same class type.") - - # Check if weights are provided - if weights is not None: - try: - self.weights = np.asarray(weights, dtype=float) - except ValueError: - raise ValueError("Weights must be numeric values.") from None - - if self.weights.size != len(self.costs): - raise ValueError("Number of weights must match number of costs.") - else: - self.weights = np.ones(len(self.costs)) - - # Check if all costs depend on the same problem - self._has_different_problems = any( - hasattr(cost, "problem") and cost.problem is not self.costs[0].problem - for cost in self.costs[1:] - ) - - if self._has_different_problems: - super().__init__() - for cost in self.costs: - self.parameters.join(cost.parameters) - else: - super().__init__(self.costs[0].problem) - self._predict = False - for cost in self.costs: - cost._predict = False - - # Catch UserWarnings as exceptions - if not self.minimising: - warnings.filterwarnings("error", category=UserWarning) - - def _evaluate(self, inputs: Inputs, grad=None): - """ - Calculate the weighted cost for a given set of parameters. - - Parameters - ---------- - inputs : Inputs - The parameters for which to compute the cost. - grad : array-like, optional - An array to store the gradient of the cost function with respect - to the parameters. 
- - Returns - ------- - float - The weighted cost value. - """ - e = np.empty_like(self.costs) - - if not self._predict: - if self._has_different_problems: - self.parameters.update(values=list(inputs.values())) - else: - try: - with warnings.catch_warnings(): - self.y = self.problem.evaluate(inputs) - except UserWarning as e: - if self.verbose: - print(f"Ignoring this sample due to: {e}") - return -np.inf - - for i, cost in enumerate(self.costs): - if not self._has_different_problems: - cost.y = self.y - e[i] = cost.evaluate(inputs) - - return np.dot(e, self.weights) - - def _evaluateS1(self, inputs: Inputs): - """ - Compute the weighted cost and its gradient with respect to the parameters. - - Parameters - ---------- - inputs : Inputs - The parameters for which to compute the cost and gradient. - - Returns - ------- - tuple - A tuple containing the cost and the gradient. The cost is a float, - and the gradient is an array-like of the same length as `x`. - """ - e = np.empty_like(self.costs) - de = np.empty((len(self.parameters), len(self.costs))) - - if not self._predict: - if self._has_different_problems: - self.parameters.update(values=list(inputs.values())) - else: - self.y, self.dy = self.problem.evaluateS1(inputs) - - for i, cost in enumerate(self.costs): - if not self._has_different_problems: - cost.y, cost.dy = (self.y, self.dy) - e[i], de[:, i] = cost.evaluateS1(inputs) - - e = np.dot(e, self.weights) - de = np.dot(de, self.weights) - - return e, de diff --git a/pybop/costs/design_costs.py b/pybop/costs/design_costs.py index 06649cb64..290f1efb9 100644 --- a/pybop/costs/design_costs.py +++ b/pybop/costs/design_costs.py @@ -91,32 +91,17 @@ def evaluate(self, inputs: Union[Inputs, list], grad=None): inputs = self.parameters.verify(inputs) try: - with warnings.catch_warnings(): - # Convert UserWarning to an exception - warnings.filterwarnings("error", category=UserWarning) - if self._predict: - if self.update_capacity: - self.problem.model.approximate_capacity(inputs) - self.y = self.problem.evaluate(inputs) + if self._predict: + self.y = self.problem.evaluate( + inputs, update_capacity=self.update_capacity + ) - return self._evaluate(inputs, grad) + return self._evaluate(inputs, grad) # Catch NotImplementedError and raise it except NotImplementedError as e: raise e - # Catch infeasible solutions and return infinity - except UserWarning as e: - if self.verbose: - print(f"Ignoring this sample due to: {e}") - return -np.inf - - # Catch any other exception and return infinity - except Exception as e: - if self.verbose: - print(f"An error occurred during the evaluation: {e}") - return -np.inf - class GravimetricEnergyDensity(DesignCost): """ @@ -147,6 +132,9 @@ def _evaluate(self, inputs: Inputs, grad=None): float The gravimetric energy density or -infinity in case of infeasible parameters. """ + if not any(np.isfinite(self.y[signal][0]) for signal in self.signal): + return -np.inf + voltage, current = self.y["Voltage [V]"], self.y["Current [A]"] energy_density = np.trapz(voltage * current, dx=self.dt) / ( 3600 * self.problem.model.cell_mass(self.parameter_set) @@ -184,6 +172,9 @@ def _evaluate(self, inputs: Inputs, grad=None): float The volumetric energy density or -infinity in case of infeasible parameters. 
""" + if not any(np.isfinite(self.y[signal][0]) for signal in self.signal): + return -np.inf + voltage, current = self.y["Voltage [V]"], self.y["Current [A]"] energy_density = np.trapz(voltage * current, dx=self.dt) / ( 3600 * self.problem.model.cell_volume(self.parameter_set) diff --git a/pybop/problems/base_problem.py b/pybop/problems/base_problem.py index ee36a6bb4..cb3bc597a 100644 --- a/pybop/problems/base_problem.py +++ b/pybop/problems/base_problem.py @@ -64,6 +64,7 @@ def __init__( self.n_outputs = len(self.signal) self._time_data = None self._target = None + self.verbose = False if isinstance(model, BaseModel): self.additional_variables = additional_variables diff --git a/pybop/problems/design_problem.py b/pybop/problems/design_problem.py index 30e2d9c3a..74b8ae715 100644 --- a/pybop/problems/design_problem.py +++ b/pybop/problems/design_problem.py @@ -1,3 +1,5 @@ +import warnings + import numpy as np from pybop import BaseProblem @@ -45,7 +47,10 @@ def __init__( signal = ["Voltage [V]"] additional_variables.extend(["Time [s]", "Current [A]"]) additional_variables = list(set(additional_variables)) - + self.warning_patterns = [ + "Ah is greater than", + "Non-physical point encountered", + ] super().__init__( parameters, model, @@ -75,7 +80,7 @@ def __init__( self._target = {key: sol[key] for key in self.signal} self._dataset = None - def evaluate(self, inputs: Inputs): + def evaluate(self, inputs: Inputs, update_capacity=False): """ Evaluate the model with the given parameters and return the signal. @@ -90,19 +95,33 @@ def evaluate(self, inputs: Inputs): The model output y(t) simulated with inputs. """ inputs = self.parameters.verify(inputs) - - sol = self._model.predict( - inputs=inputs, - experiment=self.experiment, - init_soc=self.init_soc, - ) - - if sol == [np.inf]: - return sol - - else: - predictions = {} - for signal in self.signal + self.additional_variables: - predictions[signal] = sol[signal].data + if update_capacity: + self.model.approximate_capacity(inputs) + + try: + with warnings.catch_warnings(): + for pattern in self.warning_patterns: + warnings.filterwarnings( + "error", category=UserWarning, message=pattern + ) + + sol = self._model.predict( + inputs=inputs, + experiment=self.experiment, + init_soc=self.init_soc, + ) + + # Catch infeasible solutions and return infinity + except (UserWarning, Exception) as e: + if self.verbose: + print(f"Ignoring this sample due to: {e}") + return { + signal: np.asarray(np.ones(2) * -np.inf) + for signal in [*self.signal, *self.additional_variables] + } + + predictions = {} + for signal in self.signal + self.additional_variables: + predictions[signal] = sol[signal].data return predictions diff --git a/pybop/problems/fitting_problem.py b/pybop/problems/fitting_problem.py index 58d59e9ee..0ad636ce0 100644 --- a/pybop/problems/fitting_problem.py +++ b/pybop/problems/fitting_problem.py @@ -79,7 +79,7 @@ def __init__( init_soc=self.init_soc, ) - def evaluate(self, inputs: Inputs): + def evaluate(self, inputs: Inputs, update_capacity=False): """ Evaluate the model with the given parameters and return the signal. diff --git a/tests/integration/test_weighted_cost.py b/tests/integration/test_weighted_cost.py new file mode 100644 index 000000000..a533bd985 --- /dev/null +++ b/tests/integration/test_weighted_cost.py @@ -0,0 +1,176 @@ +import numpy as np +import pytest + +import pybop + + +class TestWeightedCost: + """ + A class to test the weighted cost function. 
+ """ + + @pytest.fixture(autouse=True) + def setup(self): + self.sigma0 = 0.002 + self.ground_truth = np.asarray([0.55, 0.55]) + np.random.normal( + loc=0.0, scale=0.05, size=2 + ) + + @pytest.fixture + def model(self): + parameter_set = pybop.ParameterSet.pybamm("Chen2020") + return pybop.lithium_ion.SPM(parameter_set=parameter_set) + + @pytest.fixture + def parameters(self): + return pybop.Parameters( + pybop.Parameter( + "Negative electrode active material volume fraction", + prior=pybop.Uniform(0.4, 0.75), + bounds=[0.375, 0.75], + ), + pybop.Parameter( + "Positive electrode active material volume fraction", + prior=pybop.Uniform(0.4, 0.75), + # no bounds + ), + ) + + @pytest.fixture(params=[0.4]) + def init_soc(self, request): + return request.param + + @pytest.fixture( + params=[ + ( + pybop.GaussianLogLikelihoodKnownSigma, + pybop.RootMeanSquaredError, + pybop.SumSquaredError, + pybop.MAP, + ) + ] + ) + def cost_class(self, request): + return request.param + + def noise(self, sigma, values): + return np.random.normal(0, sigma, values) + + @pytest.fixture + def weighted_fitting_cost(self, model, parameters, cost_class, init_soc): + # Form dataset + solution = self.get_data(model, parameters, self.ground_truth, init_soc) + dataset = pybop.Dataset( + { + "Time [s]": solution["Time [s]"].data, + "Current function [A]": solution["Current [A]"].data, + "Voltage [V]": solution["Voltage [V]"].data + + self.noise(self.sigma0, len(solution["Time [s]"].data)), + } + ) + + # Define the cost to optimise + problem = pybop.FittingProblem(model, parameters, dataset, init_soc=init_soc) + costs = [] + for cost in cost_class: + if issubclass(cost, pybop.MAP): + costs.append( + cost( + problem, + pybop.GaussianLogLikelihoodKnownSigma, + sigma0=self.sigma0, + ) + ) + elif issubclass(cost, pybop.BaseLikelihood): + costs.append(cost(problem, sigma0=self.sigma0)) + else: + costs.append(cost(problem)) + + return pybop.WeightedCost(*costs, weights=[0.1, 1, 0.5, 0.6]) + + @pytest.mark.integration + def test_fitting_costs(self, weighted_fitting_cost): + x0 = weighted_fitting_cost.parameters.initial_value() + optim = pybop.CuckooSearch( + cost=weighted_fitting_cost, + sigma0=0.03, + max_iterations=250, + max_unchanged_iterations=35, + ) + + initial_cost = optim.cost(optim.parameters.initial_value()) + x, final_cost = optim.run() + + # Assertions + if not np.allclose(x0, self.ground_truth, atol=1e-5): + if optim.minimising: + assert initial_cost > final_cost + else: + assert initial_cost < final_cost + np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) + + @pytest.fixture( + params=[ + ( + pybop.GravimetricEnergyDensity, + pybop.VolumetricEnergyDensity, + ) + ] + ) + def design_cost(self, request): + return request.param + + @pytest.fixture + def weighted_design_cost(self, model, design_cost): + init_soc = 1.0 + parameters = pybop.Parameters( + pybop.Parameter( + "Positive electrode thickness [m]", + prior=pybop.Gaussian(5e-05, 0.1e-05), + bounds=[50e-06, 10e-05], + ), + pybop.Parameter( + "Positive particle radius [m]", + prior=pybop.Gaussian(5.22e-06, 0.1e-06), + bounds=[2e-06, 9e-06], + ), + ) + experiment = pybop.Experiment( + ["Discharge at 1C until 3.3 V (5 seconds period)"], + ) + + problem = pybop.DesignProblem( + model, parameters, experiment=experiment, init_soc=init_soc + ) + costs = [] + for cost in design_cost: + costs.append(cost(problem, update_capacity=True)) + + return pybop.WeightedCost(*costs, weights=[100, 1]) + + @pytest.mark.integration + def test_design_costs(self, 
weighted_design_cost): + optim = pybop.CuckooSearch( + weighted_design_cost, + max_iterations=15, + allow_infeasible_solutions=False, + ) + + initial_cost = optim.cost(optim.parameters.initial_value()) + _, final_cost = optim.run() + + # Assertions + assert initial_cost < final_cost + + def get_data(self, model, parameters, x, init_soc): + model.classify_and_update_parameters(parameters) + experiment = pybop.Experiment( + [ + ( + "Discharge at 0.5C for 3 minutes (4 second period)", + "Charge at 0.5C for 3 minutes (4 second period)", + ), + ] + ) + sim = model.predict(init_soc=init_soc, experiment=experiment, inputs=x) + return sim From d739642faee723cb872384753fa733877922fc32 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Thu, 18 Jul 2024 13:43:11 +0100 Subject: [PATCH 40/53] refactor: move WeightedCost into separate file --- pybop/__init__.py | 3 +- pybop/costs/_weighted_cost.py | 138 ++++++++++++++++++++++++++++ pybop/costs/base_cost.py | 134 --------------------------------- 3 files changed, 140 insertions(+), 135 deletions(-) create mode 100644 pybop/costs/_weighted_cost.py diff --git a/pybop/__init__.py b/pybop/__init__.py index 4a99ebc48..922a64803 100644 --- a/pybop/__init__.py +++ b/pybop/__init__.py @@ -81,7 +81,7 @@ # # Cost function class # -from .costs.base_cost import BaseCost, WeightedCost +from .costs.base_cost import BaseCost from .costs.fitting_costs import ( RootMeanSquaredError, SumSquaredError, @@ -100,6 +100,7 @@ GaussianLogLikelihoodKnownSigma, MAP, ) +from .costs._weighted_cost import WeightedCost # # Optimiser class diff --git a/pybop/costs/_weighted_cost.py b/pybop/costs/_weighted_cost.py new file mode 100644 index 000000000..effa5a510 --- /dev/null +++ b/pybop/costs/_weighted_cost.py @@ -0,0 +1,138 @@ +from typing import Optional + +import numpy as np + +from pybop import BaseCost +from pybop.parameters.parameter import Inputs + + +class WeightedCost(BaseCost): + """ + A subclass for constructing a linear combination of cost functions as + a single weighted cost function. + + Inherits all parameters and attributes from ``BaseCost``. + + Additional Attributes + --------------------- + costs : list[pybop.BaseCost] + A list of PyBOP cost objects. + weights : list[float] + A list of values with which to weight the cost values. + _different_problems : bool + If True, the problem for each cost is evaluated independently during + each evaluation of the cost (default: False). + """ + + def __init__(self, *args, weights: Optional[list[float]] = None): + self.costs = [] + for cost in args: + if not isinstance(cost, BaseCost): + raise TypeError(f"Received {type(cost)} instead of cost object.") + self.costs.append(cost) + self.weights = weights + self._different_problems = False + + if self.weights is None: + self.weights = np.ones(len(self.costs)) + elif isinstance(self.weights, list): + self.weights = np.array(self.weights) + if not isinstance(self.weights, np.ndarray): + raise TypeError( + "Expected a list or array of weights the same length as costs." + ) + if not len(self.weights) == len(self.costs): + raise ValueError( + "Expected a list or array of weights the same length as costs."
+ ) + + # Check if all costs depend on the same problem + for cost in self.costs: + if hasattr(cost, "problem") and cost.problem is not self.costs[0].problem: + self._different_problems = True + + if not self._different_problems: + super().__init__(self.costs[0].problem) + self._fixed_problem = self.costs[0]._fixed_problem + else: + super().__init__() + self._fixed_problem = False + for cost in self.costs: + self.parameters.join(cost.parameters) + + def _evaluate(self, inputs: Inputs, grad=None): + """ + Calculate the weighted cost for a given set of parameters. + + Parameters + ---------- + inputs : Inputs + The parameters for which to compute the cost. + grad : array-like, optional + An array to store the gradient of the cost function with respect + to the parameters. + + Returns + ------- + float + The weighted cost value. + """ + e = np.empty_like(self.costs) + + if not self._fixed_problem and self._different_problems: + self.parameters.update(values=list(inputs.values())) + elif not self._fixed_problem: + self._current_prediction = self.problem.evaluate(inputs) + + for i, cost in enumerate(self.costs): + if not self._fixed_problem and self._different_problems: + inputs = cost.parameters.as_dict() + cost._current_prediction = cost.problem.evaluate(inputs) + else: + cost._current_prediction = self._current_prediction + e[i] = cost._evaluate(inputs, grad) + + return np.dot(e, self.weights) + + def _evaluateS1(self, inputs: Inputs): + """ + Compute the weighted cost and its gradient with respect to the parameters. + + Parameters + ---------- + inputs : Inputs + The parameters for which to compute the cost and gradient. + + Returns + ------- + tuple + A tuple containing the cost and the gradient. The cost is a float, + and the gradient is an array-like of the same length as `x`. + """ + e = np.empty_like(self.costs) + de = np.empty((len(self.parameters), len(self.costs))) + + if not self._fixed_problem and self._different_problems: + self.parameters.update(values=list(inputs.values())) + elif not self._fixed_problem: + self._current_prediction, self._current_sensitivities = ( + self.problem.evaluateS1(inputs) + ) + + for i, cost in enumerate(self.costs): + if not self._fixed_problem and self._different_problems: + inputs = cost.parameters.as_dict() + cost._current_prediction, cost._current_sensitivities = ( + cost.problem.evaluateS1(inputs) + ) + else: + cost._current_prediction, cost._current_sensitivities = ( + self._current_prediction, + self._current_sensitivities, + ) + e[i], de[:, i] = cost._evaluateS1(inputs) + + e = np.dot(e, self.weights) + de = np.dot(de, self.weights) + + return e, de diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index b3cf7ae1e..eedbbc2c0 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -1,7 +1,5 @@ from typing import Optional, Union -import numpy as np - from pybop import BaseProblem from pybop.parameters.parameter import Inputs, Parameters @@ -210,135 +208,3 @@ def verify_prediction(self, y): return False return True - - -class WeightedCost(BaseCost): - """ - A subclass for constructing a linear combination of cost functions as - a single weighted cost function. - - Inherits all parameters and attributes from ``BaseCost``. - - Additional Attributes - --------------------- - costs : list[pybop.BaseCost] - A list of PyBOP cost objects. - weights : list[float] - A list of values with which to weight the cost values. 
- _different_problems : bool - If True, the problem for each cost is evaluated independently during - each evaluation of the cost (default: False). - """ - - def __init__(self, *args, weights: Optional[list[float]] = None): - self.costs = [] - for cost in args: - if not isinstance(cost, BaseCost): - raise TypeError(f"Received {type(cost)} instead of cost object.") - self.costs.append(cost) - self.weights = weights - self._different_problems = False - - if self.weights is None: - self.weights = np.ones(len(self.costs)) - elif isinstance(self.weights, list): - self.weights = np.array(self.weights) - if not isinstance(self.weights, np.ndarray): - raise TypeError( - "Expected a list or array of weights the same length as costs." - ) - if not len(self.weights) == len(self.costs): - raise ValueError( - "Expected a list or array of weights the same length as costs." - ) - - # Check if all costs depend on the same problem - for cost in self.costs: - if hasattr(cost, "problem") and cost.problem is not self.costs[0].problem: - self._different_problems = True - - if not self._different_problems: - super().__init__(self.costs[0].problem) - self._fixed_problem = self.costs[0]._fixed_problem - else: - super().__init__() - self._fixed_problem = False - for cost in self.costs: - self.parameters.join(cost.parameters) - - def _evaluate(self, inputs: Inputs, grad=None): - """ - Calculate the weighted cost for a given set of parameters. - - Parameters - ---------- - inputs : Inputs - The parameters for which to compute the cost. - grad : array-like, optional - An array to store the gradient of the cost function with respect - to the parameters. - - Returns - ------- - float - The weighted cost value. - """ - e = np.empty_like(self.costs) - - if not self._fixed_problem and self._different_problems: - self.parameters.update(values=list(inputs.values())) - elif not self._fixed_problem: - self._current_prediction = self.problem.evaluate(inputs) - - for i, cost in enumerate(self.costs): - if not self._fixed_problem and self._different_problems: - inputs = cost.parameters.as_dict() - cost._current_prediction = cost.problem.evaluate(inputs) - else: - cost._current_prediction = self._current_prediction - e[i] = cost._evaluate(inputs, grad) - - return np.dot(e, self.weights) - - def _evaluateS1(self, inputs: Inputs): - """ - Compute the weighted cost and its gradient with respect to the parameters. - - Parameters - ---------- - inputs : Inputs - The parameters for which to compute the cost and gradient. - - Returns - ------- - tuple - A tuple containing the cost and the gradient. The cost is a float, - and the gradient is an array-like of the same length as `x`. 
- """ - e = np.empty_like(self.costs) - de = np.empty((len(self.parameters), len(self.costs))) - - if not self._fixed_problem and self._different_problems: - self.parameters.update(values=list(inputs.values())) - elif not self._fixed_problem: - self._current_prediction, self._current_sensitivities = ( - self.problem.evaluateS1(inputs) - ) - - for i, cost in enumerate(self.costs): - if not self._fixed_problem and self._different_problems: - inputs = cost.parameters.as_dict() - cost._current_prediction, cost._current_sensitivities = ( - cost.problem.evaluateS1(inputs) - ) - else: - cost._current_prediction, cost._current_sensitivities = ( - self._current_prediction, - self._current_sensitivities, - ) - e[i], de[:, i] = cost._evaluateS1(inputs) - - e = np.dot(e, self.weights) - de = np.dot(de, self.weights) - - return e, de From df0e0178e49e5942a5d8f8d2b746e9a3a7f63bb4 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Mon, 22 Jul 2024 09:04:29 +0100 Subject: [PATCH 41/53] fix: add MAP catches for weightedcost implementation, update .gitignore --- .gitignore | 3 +++ pybop/costs/_likelihoods.py | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/.gitignore b/.gitignore index 3c3bb708d..2bafcca63 100644 --- a/.gitignore +++ b/.gitignore @@ -314,3 +314,6 @@ $RECYCLE.BIN/ # Airspeed Velocity *.asv/ results/ + +# Pycharm +*.idea/ diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index 0751b4331..638b4bf9b 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -302,6 +302,8 @@ def _evaluate(self, inputs: Inputs, grad=None) -> float: if not np.isfinite(log_prior).any(): return -np.inf + if not self._predict: + self.likelihood.y = self.y log_likelihood = self.likelihood.evaluate(inputs) posterior = log_likelihood + log_prior @@ -334,6 +336,8 @@ def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]: if not np.isfinite(log_prior).any(): return -np.inf, -self._de * np.ones(self.n_parameters) + if not self._predict: + self.likelihood.y, self.likelihood.dy = (self.y, self.dy) log_likelihood, dl = self.likelihood.evaluateS1(inputs) # Compute a finite difference approximation of the gradient of the log prior From 589a5f6bcff5181a1d113a250eeb01172e202bd7 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Tue, 23 Jul 2024 16:58:33 +0100 Subject: [PATCH 42/53] Clarify evaluation sequence --- examples/standalone/problem.py | 2 +- pybop/costs/_likelihoods.py | 9 ++-- pybop/costs/_weighted_cost.py | 80 ++++++++++++++++++---------------- pybop/costs/base_cost.py | 18 ++++---- pybop/costs/design_costs.py | 39 ----------------- pybop/costs/fitting_costs.py | 2 +- tests/unit/test_cost.py | 25 ++++++----- 7 files changed, 73 insertions(+), 102 deletions(-) diff --git a/examples/standalone/problem.py b/examples/standalone/problem.py index 18bf1f7d4..5fd33e0f9 100644 --- a/examples/standalone/problem.py +++ b/examples/standalone/problem.py @@ -42,7 +42,7 @@ def __init__( ) self._target = {signal: self._dataset[signal] for signal in self.signal} - def evaluate(self, inputs): + def evaluate(self, inputs, **kwargs): """ Evaluate the model with the given parameters and return the signal. 
diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index 638b4bf9b..728e80957 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -116,7 +116,7 @@ def __init__( super().__init__(problem) self._dsigma_scale = dsigma_scale self._logpi = -0.5 * self.n_time_data * np.log(2 * np.pi) - self._predict = False # keep problem evaluation within _evaluate + self._has_separable_problem = False self.sigma = Parameters() self._add_sigma_parameters(sigma0) @@ -278,6 +278,9 @@ def __init__(self, problem, likelihood, sigma0=None, gradient_step=1e-3): ): raise ValueError(f"{self.likelihood} must be a subclass of BaseLikelihood") + self.parameters = self.likelihood.parameters + self._has_separable_problem = self.likelihood._has_separable_problem + def _evaluate(self, inputs: Inputs, grad=None) -> float: """ Calculate the maximum a posteriori cost for a given set of parameters. @@ -302,7 +305,7 @@ def _evaluate(self, inputs: Inputs, grad=None) -> float: if not np.isfinite(log_prior).any(): return -np.inf - if not self._predict: + if self._has_separable_problem: self.likelihood.y = self.y log_likelihood = self.likelihood.evaluate(inputs) @@ -336,7 +339,7 @@ def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]: if not np.isfinite(log_prior).any(): return -np.inf, -self._de * np.ones(self.n_parameters) - if not self._predict: + if self._has_separable_problem: self.likelihood.y, self.likelihood.dy = (self.y, self.dy) log_likelihood, dl = self.likelihood.evaluateS1(inputs) diff --git a/pybop/costs/_weighted_cost.py b/pybop/costs/_weighted_cost.py index 9555ece80..a3f850533 100644 --- a/pybop/costs/_weighted_cost.py +++ b/pybop/costs/_weighted_cost.py @@ -1,4 +1,3 @@ -import copy from typing import Optional import numpy as np @@ -20,21 +19,20 @@ class WeightedCost(BaseCost): A list of PyBOP cost objects. weights : list[float] A list of values with which to weight the cost values. - _has_different_problems : bool - If True, the problem for each cost is evaluated independently during - each evaluation of the cost (default: False). + _has_identical_problems : bool + If True, the shared problem will be evaluated once and saved before the + self._evaluate() method of each cost is called (default: False). 
""" def __init__(self, *costs, weights: Optional[list[float]] = None): if not all(isinstance(cost, BaseCost) for cost in costs): raise TypeError("All costs must be instances of BaseCost.") - self.costs = [copy.copy(cost) for cost in costs] - self._has_different_problems = False + self.costs = [cost for cost in costs] + if len(set(type(cost.problem) for cost in self.costs)) > 1: + raise TypeError("All problems must be of the same class type.") self.minimising = not any( isinstance(cost, (BaseLikelihood, DesignCost)) for cost in self.costs ) - if len(set(type(cost.problem) for cost in self.costs)) > 1: - raise TypeError("All problems must be of the same class type.") # Check if weights are provided if weights is not None: @@ -49,24 +47,26 @@ def __init__(self, *costs, weights: Optional[list[float]] = None): self.weights = np.ones(len(self.costs)) # Check if all costs depend on the same problem - self._has_different_problems = any( - hasattr(cost, "problem") and cost.problem is not self.costs[0].problem - for cost in self.costs[1:] + self._has_identical_problems = all( + cost._has_separable_problem and cost.problem is self.costs[0].problem + for cost in self.costs ) - if self._has_different_problems: + # Check if any cost function requires capacity update + self.update_capacity = False + if any(cost.update_capacity for cost in self.costs): + self.update_capacity = True + + if self._has_identical_problems: + super().__init__(self.costs[0].problem) + else: super().__init__() for cost in self.costs: self.parameters.join(cost.parameters) - else: - super().__init__(self.costs[0].problem) - self._predict = False - for cost in self.costs: - cost._predict = False + cost.update_capacity = self.update_capacity - # Check if any cost function requires capacity update - if any(cost.update_capacity for cost in self.costs): - self.update_capacity = True + # Weighted costs do not use this functionality + self._has_separable_problem = False def _evaluate(self, inputs: Inputs, grad=None): """ @@ -85,20 +85,22 @@ def _evaluate(self, inputs: Inputs, grad=None): float The weighted cost value. """ - e = np.empty_like(self.costs) + self.parameters.update(values=list(inputs.values())) - if not self._predict: - if self._has_different_problems: - self.parameters.update(values=list(inputs.values())) - else: - self.y = self.problem.evaluate( - inputs, update_capacity=self.update_capacity - ) + if self._has_identical_problems: + self.y = self.problem.evaluate(inputs, update_capacity=self.update_capacity) + + e = np.empty_like(self.costs) for i, cost in enumerate(self.costs): - if not self._has_different_problems: + inputs = cost.parameters.as_dict() + if self._has_identical_problems: cost.y = self.y - e[i] = cost.evaluate(inputs) + elif cost._has_separable_problem: + cost.y = cost.problem.evaluate( + inputs, update_capacity=self.update_capacity + ) + e[i] = cost._evaluate(inputs) return np.dot(e, self.weights) @@ -117,19 +119,21 @@ def _evaluateS1(self, inputs: Inputs): A tuple containing the cost and the gradient. The cost is a float, and the gradient is an array-like of the same length as `x`. 
""" + self.parameters.update(values=list(inputs.values())) + + if self._has_identical_problems: + self.y, self.dy = self.problem.evaluateS1(inputs) + e = np.empty_like(self.costs) de = np.empty((len(self.parameters), len(self.costs))) - if not self._predict: - if self._has_different_problems: - self.parameters.update(values=list(inputs.values())) - else: - self.y, self.dy = self.problem.evaluateS1(inputs) - for i, cost in enumerate(self.costs): - if not self._has_different_problems: + inputs = cost.parameters.as_dict() + if self._has_identical_problems: cost.y, cost.dy = (self.y, self.dy) - e[i], de[:, i] = cost.evaluateS1(inputs) + elif cost._has_separable_problem: + cost.y, cost.dy = cost.problem.evaluateS1(inputs) + e[i], de[:, i] = cost._evaluateS1(inputs) e = np.dot(e, self.weights) de = np.dot(de, self.weights) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 3ba818f52..a03b4a267 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -22,16 +22,16 @@ class BaseCost: An array containing the target data to fit. n_outputs : int The number of outputs in the model. - _predict : bool - If False, the problem will be evaluated outside the self.evaluate() method - before the cost is calculated (default: False). + _has_separable_problem : bool + If True, the problem is independent from the cost function and will be + evaluated in advance of the call to self._evaluate() (default: False). """ def __init__(self, problem: Optional[BaseProblem] = None): self.parameters = Parameters() self.problem = problem self.verbose = False - self._predict = False + self._has_separable_problem = False self.update_capacity = False self.y = None self.dy = None @@ -41,7 +41,7 @@ def __init__(self, problem: Optional[BaseProblem] = None): self.parameters.join(self.problem.parameters) self.n_outputs = self.problem.n_outputs self.signal = self.problem.signal - self._predict = True + self._has_separable_problem = True @property def n_parameters(self): @@ -78,8 +78,10 @@ def evaluate(self, inputs: Union[Inputs, list], grad=None): inputs = self.parameters.verify(inputs) try: - if self._predict: - self.y = self.problem.evaluate(inputs) + if self._has_separable_problem: + self.y = self.problem.evaluate( + inputs, update_capacity=self.update_capacity + ) return self._evaluate(inputs, grad) @@ -138,7 +140,7 @@ def evaluateS1(self, inputs: Union[Inputs, list]): inputs = self.parameters.verify(inputs) try: - if self._predict: + if self._has_separable_problem: self.y, self.dy = self.problem.evaluateS1(inputs) return self._evaluateS1(inputs) diff --git a/pybop/costs/design_costs.py b/pybop/costs/design_costs.py index a01af6038..d0bc8fc89 100644 --- a/pybop/costs/design_costs.py +++ b/pybop/costs/design_costs.py @@ -1,5 +1,4 @@ import warnings -from typing import Union import numpy as np @@ -66,42 +65,6 @@ def update_simulation_data(self, inputs: Inputs): self.problem._target = {key: solution[key] for key in self.problem.signal} self.dt = solution["Time [s]"][1] - solution["Time [s]"][0] - def evaluate(self, inputs: Union[Inputs, list], grad=None): - """ - Call the evaluate function for a given set of parameters. - - Parameters - ---------- - inputs : Inputs or array-like - The parameters for which to compute the cost and gradient. - grad : array-like, optional - An array to store the gradient of the cost function with respect - to the parameters. - - Returns - ------- - float - The calculated cost function value. 
- - Raises - ------ - ValueError - If an error occurs during the calculation of the cost. - """ - inputs = self.parameters.verify(inputs) - - try: - if self._predict: - self.y = self.problem.evaluate( - inputs, update_capacity=self.update_capacity - ) - - return self._evaluate(inputs, grad) - - # Catch NotImplementedError and raise it - except NotImplementedError as e: - raise e - class GravimetricEnergyDensity(DesignCost): """ @@ -115,7 +78,6 @@ class GravimetricEnergyDensity(DesignCost): def __init__(self, problem, update_capacity=False): super().__init__(problem, update_capacity) - self._fixed_problem = False # keep problem evaluation within _evaluate def _evaluate(self, inputs: Inputs, grad=None): """ @@ -156,7 +118,6 @@ class VolumetricEnergyDensity(DesignCost): def __init__(self, problem, update_capacity=False): super().__init__(problem, update_capacity) - self._fixed_problem = False # keep problem evaluation within _evaluate def _evaluate(self, inputs: Inputs, grad=None): """ diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index cfcc32ef5..15922d8c5 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -387,7 +387,7 @@ class ObserverCost(BaseCost): def __init__(self, observer: Observer): super().__init__(problem=observer) self._observer = observer - self._predict = False # evaluate problem within cost._evaluate() + self._has_separable_problem = False def _evaluate(self, inputs: Inputs, grad=None): """ diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index a104b21ec..db9679c3e 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -329,8 +329,8 @@ def test_weighted_fitting_cost(self, problem): # Test with and without different problems weight = 100 weighted_cost_2 = pybop.WeightedCost(cost1, cost2, weights=[1, weight]) - assert weighted_cost_2._has_different_problems is False - assert weighted_cost_2._predict is False + assert weighted_cost_2._has_identical_problems is True + assert weighted_cost_2._has_separable_problem is False assert weighted_cost_2.problem is problem assert weighted_cost_2([0.5]) >= 0 np.testing.assert_allclose( @@ -341,8 +341,8 @@ def test_weighted_fitting_cost(self, problem): cost3 = pybop.RootMeanSquaredError(copy(problem)) weighted_cost_3 = pybop.WeightedCost(cost1, cost3, weights=[1, weight]) - assert weighted_cost_3._has_different_problems is True - assert weighted_cost_3._predict is False + assert weighted_cost_3._has_identical_problems is False + assert weighted_cost_3._has_separable_problem is False assert weighted_cost_3.problem is None assert weighted_cost_3([0.5]) >= 0 np.testing.assert_allclose( @@ -357,14 +357,15 @@ def test_weighted_fitting_cost(self, problem): np.testing.assert_allclose(sensitivities_2, sensitivities_3, atol=1e-5) # Test MAP explicitly - cost4 = pybop.MAP(problem, pybop.GaussianLogLikelihoodKnownSigma) + cost4 = pybop.MAP(problem, pybop.GaussianLogLikelihood) weighted_cost_4 = pybop.WeightedCost(cost1, cost4, weights=[1, weight]) - assert weighted_cost_4._has_different_problems is False - assert weighted_cost_4._predict is False - assert weighted_cost_4([0.5]) <= 0 + assert weighted_cost_4._has_identical_problems is False + assert weighted_cost_4._has_separable_problem is False + sigma = 0.05 + assert weighted_cost_4([0.5, sigma]) <= 0 np.testing.assert_allclose( - weighted_cost_4.evaluate([0.6]), - cost1([0.6]) + weight * cost4([0.6]), + weighted_cost_4.evaluate([0.6, sigma]), + cost1([0.6, sigma]) + weight * cost4([0.6, sigma]), atol=1e-5, ) @@ 
-375,8 +376,8 @@ def test_weighted_design_cost(self, design_problem): # Test with and without weights weighted_cost = pybop.WeightedCost(cost1, cost2) - assert weighted_cost._has_different_problems is False - assert weighted_cost._predict is False + assert weighted_cost._has_identical_problems is True + assert weighted_cost._has_separable_problem is False assert weighted_cost.problem is design_problem assert weighted_cost([0.5]) >= 0 np.testing.assert_allclose( From deb4ebe551f0a98a96cf652e59524e598705f8c2 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Tue, 23 Jul 2024 17:47:30 +0100 Subject: [PATCH 43/53] Update spme_max_energy output --- examples/scripts/spme_max_energy.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/examples/scripts/spme_max_energy.py b/examples/scripts/spme_max_energy.py index db521b688..8542b4ad4 100644 --- a/examples/scripts/spme_max_energy.py +++ b/examples/scripts/spme_max_energy.py @@ -9,12 +9,6 @@ # electrode widths, particle radii, volume fractions and # separator width. -# NOTE: This script can be easily adjusted to consider the volumetric -# (instead of gravimetric) energy density by changing the line which -# defines the cost and changing the output to: -# print(f"Initial volumetric energy density: {cost(optim.x0):.2f} Wh.m-3") -# print(f"Optimised volumetric energy density: {final_cost:.2f} Wh.m-3") - # Define parameter set and model parameter_set = pybop.ParameterSet.pybamm("Chen2020", formation_concentrations=True) model = pybop.lithium_ion.SPMe(parameter_set=parameter_set) @@ -56,8 +50,10 @@ ) x, final_cost = optim.run() print("Estimated parameters:", x) -print(f"Initial gravimetric energy density: {cost(optim.x0):.2f} Wh.kg-1") -print(f"Optimised gravimetric energy density: {final_cost:.2f} Wh.kg-1") +print(f"Initial gravimetric energy density: {cost1(optim.x0):.2f} Wh.kg-1") +print(f"Optimised gravimetric energy density: {cost1(x):.2f} Wh.kg-1") +print(f"Initial volumetric energy density: {cost2(optim.x0):.2f} Wh.m-3") +print(f"Optimised volumetric energy density: {cost2(x):.2f} Wh.m-3") # Plot the timeseries output if cost.update_capacity: From f2bcfecf4b645189c1134c7b129acaf6db129759 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Wed, 31 Jul 2024 16:45:47 +0100 Subject: [PATCH 44/53] fix: catch update_capacity overwrite on super(), add UserWarning for global update_capacity() usage, add tests for side-effects on underlying objects --- pybop/costs/_weighted_cost.py | 19 ++++++---- tests/integration/test_weighted_cost.py | 35 ++++++++++++------- tests/unit/test_cost.py | 46 +++++++++++++++++++++++-- 3 files changed, 79 insertions(+), 21 deletions(-) diff --git a/pybop/costs/_weighted_cost.py b/pybop/costs/_weighted_cost.py index a3f850533..f9088b7fd 100644 --- a/pybop/costs/_weighted_cost.py +++ b/pybop/costs/_weighted_cost.py @@ -1,3 +1,4 @@ +import warnings from typing import Optional import numpy as np @@ -52,18 +53,24 @@ def __init__(self, *costs, weights: Optional[list[float]] = None): for cost in self.costs ) - # Check if any cost function requires capacity update - self.update_capacity = False - if any(cost.update_capacity for cost in self.costs): - self.update_capacity = True - if self._has_identical_problems: super().__init__(self.costs[0].problem) else: super().__init__() for cost in self.costs: self.parameters.join(cost.parameters) - cost.update_capacity = self.update_capacity + + # Check if any cost function requires capacity update + 
self.update_capacity = False + if any(cost.update_capacity for cost in self.costs): + self.update_capacity = True + + warnings.warn( + "WeightedCost doesn't currently support DesignCosts with different `update_capacity` attributes,\n" + f"Using global `DesignCost.update_capacity` attribute as: {self.update_capacity}", + UserWarning, + stacklevel=2, + ) # Weighted costs do not use this functionality self._has_separable_problem = False diff --git a/tests/integration/test_weighted_cost.py b/tests/integration/test_weighted_cost.py index a533bd985..e964e9703 100644 --- a/tests/integration/test_weighted_cost.py +++ b/tests/integration/test_weighted_cost.py @@ -126,41 +126,50 @@ def weighted_design_cost(self, model, design_cost): parameters = pybop.Parameters( pybop.Parameter( "Positive electrode thickness [m]", - prior=pybop.Gaussian(5e-05, 0.1e-05), - bounds=[50e-06, 10e-05], + prior=pybop.Gaussian(5e-05, 5e-06), + bounds=[2e-06, 10e-05], ), pybop.Parameter( - "Positive particle radius [m]", - prior=pybop.Gaussian(5.22e-06, 0.1e-06), - bounds=[2e-06, 9e-06], + "Negative electrode thickness [m]", + prior=pybop.Gaussian(5e-05, 5e-06), + bounds=[2e-06, 10e-05], ), ) experiment = pybop.Experiment( - ["Discharge at 1C until 3.3 V (5 seconds period)"], + ["Discharge at 1C until 3.5 V (5 seconds period)"], ) problem = pybop.DesignProblem( model, parameters, experiment=experiment, init_soc=init_soc ) + costs_update_capacity = [] costs = [] for cost in design_cost: - costs.append(cost(problem, update_capacity=True)) + costs_update_capacity.append(cost(problem, update_capacity=True)) + costs.append(cost(problem)) - return pybop.WeightedCost(*costs, weights=[100, 1]) + return [ + pybop.WeightedCost(*costs, weights=[1.0, 0.1]), + pybop.WeightedCost(*costs_update_capacity, weights=[0.1, 1.0]), + ] @pytest.mark.integration - def test_design_costs(self, weighted_design_cost): + @pytest.mark.parametrize("cost_index", [0, 1]) + def test_design_costs(self, weighted_design_cost, cost_index): + cost = weighted_design_cost[cost_index] optim = pybop.CuckooSearch( - weighted_design_cost, + cost, max_iterations=15, allow_infeasible_solutions=False, ) - - initial_cost = optim.cost(optim.parameters.initial_value()) - _, final_cost = optim.run() + initial_values = optim.parameters.initial_value() + initial_cost = optim.cost(initial_values) + x, final_cost = optim.run() # Assertions assert initial_cost < final_cost + for i, _ in enumerate(x): + assert x[i] > initial_values[i] def get_data(self, model, parameters, x, init_soc): model.classify_and_update_parameters(parameters) diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index db9679c3e..562094d08 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -326,7 +326,7 @@ def test_weighted_fitting_cost(self, problem): ): weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[1]) - # Test with and without different problems + # Test with identical problems weight = 100 weighted_cost_2 = pybop.WeightedCost(cost1, cost2, weights=[1, weight]) assert weighted_cost_2._has_identical_problems is True @@ -339,6 +339,7 @@ def test_weighted_fitting_cost(self, problem): atol=1e-5, ) + # Test with different problems cost3 = pybop.RootMeanSquaredError(copy(problem)) weighted_cost_3 = pybop.WeightedCost(cost1, cost3, weights=[1, weight]) assert weighted_cost_3._has_identical_problems is False @@ -374,7 +375,7 @@ def test_weighted_design_cost(self, design_problem): cost1 = pybop.GravimetricEnergyDensity(design_problem) cost2 = 
pybop.VolumetricEnergyDensity(design_problem) - # Test with and without weights + # Test DesignCosts with identical problems weighted_cost = pybop.WeightedCost(cost1, cost2) assert weighted_cost._has_identical_problems is True assert weighted_cost._has_separable_problem is False @@ -386,6 +387,47 @@ def test_weighted_design_cost(self, design_problem): atol=1e-5, ) + # Test DesignCosts with different problems + cost3 = pybop.VolumetricEnergyDensity(copy(design_problem)) + weighted_cost = pybop.WeightedCost(cost1, cost3) + assert weighted_cost._has_identical_problems is False + assert weighted_cost._has_separable_problem is False + for i, _ in enumerate(weighted_cost.costs): + assert isinstance(weighted_cost.costs[i].problem, pybop.DesignProblem) + + # Ensure attributes are set correctly and not modified via side-effects + assert weighted_cost.update_capacity is False + assert weighted_cost.costs[0].update_capacity is False + assert weighted_cost.costs[1].update_capacity is False + + assert weighted_cost([0.5]) >= 0 + np.testing.assert_allclose( + weighted_cost.evaluate([0.6]), + cost1([0.6]) + cost2([0.6]), + atol=1e-5, + ) + + @pytest.mark.unit + def test_weighted_design_cost_with_update_capacity(self, design_problem): + cost1 = pybop.GravimetricEnergyDensity(design_problem, update_capacity=True) + cost2 = pybop.VolumetricEnergyDensity(design_problem) + weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[1, 1]) + + # Ensure attributes are set correctly and not modified via side-effects + assert weighted_cost.update_capacity is True + assert weighted_cost.costs[0].update_capacity is True + assert weighted_cost.costs[1].update_capacity is False + + assert weighted_cost._has_identical_problems is True + assert weighted_cost._has_separable_problem is False + assert weighted_cost.problem is design_problem + assert weighted_cost([0.5]) >= 0 + np.testing.assert_allclose( + weighted_cost.evaluate([0.6]), + cost1([0.6]) + cost2([0.6]), + atol=1e-5, + ) + @pytest.mark.unit def test_mixed_problem_classes(self, problem, design_problem): cost1 = pybop.SumSquaredError(problem) From 1899c0b9fc4841a85f5bf91981c9d2677b864711 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Wed, 31 Jul 2024 16:52:37 +0100 Subject: [PATCH 45/53] fix: update warning location --- pybop/costs/_weighted_cost.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pybop/costs/_weighted_cost.py b/pybop/costs/_weighted_cost.py index f9088b7fd..32d48404b 100644 --- a/pybop/costs/_weighted_cost.py +++ b/pybop/costs/_weighted_cost.py @@ -65,12 +65,12 @@ def __init__(self, *costs, weights: Optional[list[float]] = None): if any(cost.update_capacity for cost in self.costs): self.update_capacity = True - warnings.warn( - "WeightedCost doesn't currently support DesignCosts with different `update_capacity` attributes,\n" - f"Using global `DesignCost.update_capacity` attribute as: {self.update_capacity}", - UserWarning, - stacklevel=2, - ) + warnings.warn( + "WeightedCost doesn't currently support DesignCosts with different `update_capacity` attributes,\n" + f"Using global `DesignCost.update_capacity` attribute as: {self.update_capacity}", + UserWarning, + stacklevel=2, + ) # Weighted costs do not use this functionality self._has_separable_problem = False From 105bda25c56d97a98d4ec9fef67d97bfb38d7923 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Wed, 31 Jul 2024 17:04:37 +0100 Subject: [PATCH 46/53] fix: review suggestions. 
--- examples/standalone/cost.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/examples/standalone/cost.py b/examples/standalone/cost.py index bf5d600f6..1103ea894 100644 --- a/examples/standalone/cost.py +++ b/examples/standalone/cost.py @@ -54,9 +54,6 @@ def _evaluate(self, inputs): ---------- inputs : Dict The parameters for which to evaluate the cost. - grad : array-like, optional - Unused parameter, present for compatibility with gradient-based - optimizers. Returns ------- From 53d544cc08be3d3b036c2b3cf4a8602f5e0523f3 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Wed, 31 Jul 2024 17:26:39 +0100 Subject: [PATCH 47/53] fix: remainder suggestions. --- examples/scripts/spm_CMAES.py | 2 +- pybop/__init__.py | 22 ++++++++++----------- pybop/costs/base_cost.py | 37 ++++++++++------------------------- pybop/parameters/parameter.py | 15 +++++++++++++- 4 files changed, 36 insertions(+), 40 deletions(-) diff --git a/examples/scripts/spm_CMAES.py b/examples/scripts/spm_CMAES.py index 91c4c3c33..4376fb46f 100644 --- a/examples/scripts/spm_CMAES.py +++ b/examples/scripts/spm_CMAES.py @@ -44,7 +44,7 @@ # Generate problem, cost function, and optimisation class problem = pybop.FittingProblem(model, parameters, dataset, signal=signal) cost = pybop.SumSquaredError(problem) -optim = pybop.CMAES(cost, max_iterations=100) +optim = pybop.CMAES(cost, sigma0=0.25, max_unchanged_iterations=20, max_iterations=50) # Run the optimisation x, final_cost = optim.run() diff --git a/pybop/__init__.py b/pybop/__init__.py index f3a7bb571..24c392c1c 100644 --- a/pybop/__init__.py +++ b/pybop/__init__.py @@ -55,6 +55,17 @@ # from ._dataset import Dataset +# +# Transformation classes +# +from .transformation import Transformation +from .transformation._transformation import ( + IdentityTransformation, + ScaledTransformation, + LogTransformation, + ComposedTransformation, +) + # # Parameter classes # @@ -78,17 +89,6 @@ from .problems.fitting_problem import FittingProblem from .problems.design_problem import DesignProblem -# -# Transformation classes -# -from .transformation import Transformation -from .transformation._transformation import ( - IdentityTransformation, - ScaledTransformation, - LogTransformation, - ComposedTransformation, -) - # # Cost function class # diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 733ef2022..f86306783 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -1,4 +1,4 @@ -from pybop import BaseProblem, ComposedTransformation, IdentityTransformation +from pybop import BaseProblem from pybop.parameters.parameter import Inputs, Parameters @@ -31,30 +31,13 @@ def __init__(self, problem=None): self.parameters.join(self.problem.parameters) self.n_outputs = self.problem.n_outputs self.signal = self.problem.signal - self.transformation = self.construct_transformation() - - def construct_transformation(self): - """ - Create a ComposedTransformation object from the individual parameters transformations. - """ - transformations = self.parameters.get_transformations() - if not transformations or all(t is None for t in transformations): - return None - - valid_transformations = [ - t if t is not None else IdentityTransformation() for t in transformations - ] - return ComposedTransformation(valid_transformations) + self.transformation = self.parameters.construct_transformation() def __call__(self, x): """ Call the evaluate function for a given set of parameters. 
""" - if self.transformation: - p = self.transformation.to_model(x) - return self.evaluate(p) - else: - return self.evaluate(x) + return self.evaluate(x) def evaluate(self, x): """ @@ -75,7 +58,9 @@ def evaluate(self, x): ValueError If an error occurs during the calculation of the cost. """ - inputs = self.parameters.verify(x) + if self.transformation: + p = self.transformation.to_model(x) + inputs = self.parameters.verify(p if self.transformation else x) try: return self._evaluate(inputs) @@ -129,14 +114,12 @@ def evaluateS1(self, x): ValueError If an error occurs during the calculation of the cost or gradient. """ - inputs = self.parameters.verify(x) + if self.transformation: + p = self.transformation.to_model(x) + inputs = self.parameters.verify(p if self.transformation else x) try: - if self.transformation: - p = self.transformation.to_model(inputs) - return self._evaluateS1(p) - else: - return self._evaluateS1(inputs) + return self._evaluateS1(inputs) except NotImplementedError as e: raise e diff --git a/pybop/parameters/parameter.py b/pybop/parameters/parameter.py index 0ac472431..d219df938 100644 --- a/pybop/parameters/parameter.py +++ b/pybop/parameters/parameter.py @@ -4,6 +4,7 @@ import numpy as np +from pybop import ComposedTransformation, IdentityTransformation from pybop._utils import is_numeric Inputs = dict[str, float] @@ -57,7 +58,6 @@ def __init__( self.applied_prior_bounds = False self.set_bounds(bounds) self.margin = 1e-4 - self.set_bounds(bounds) def rvs(self, n_samples, random_state=None): """ @@ -449,6 +449,19 @@ def get_transformations(self): return transformations + def construct_transformation(self): + """ + Create a ComposedTransformation object from the individual parameter transformations. + """ + transformations = self.get_transformations() + if not transformations or all(t is None for t in transformations): + return None + + valid_transformations = [ + t if t is not None else IdentityTransformation() for t in transformations + ] + return ComposedTransformation(valid_transformations) + def get_bounds_for_plotly(self): """ Retrieve parameter bounds in the format expected by Plotly. From 0a5216c9ef537068f4ddc147eb344be514467d80 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Wed, 31 Jul 2024 21:20:57 +0100 Subject: [PATCH 48/53] tests: update transformation integration parameters, remove sigma0 value for multi_signal w/ gaussian likelihood --- pybop/costs/_weighted_cost.py | 7 ++----- tests/integration/test_spm_parameterisations.py | 1 - tests/integration/test_transformation.py | 1 + 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/pybop/costs/_weighted_cost.py b/pybop/costs/_weighted_cost.py index effa5a510..216e0303c 100644 --- a/pybop/costs/_weighted_cost.py +++ b/pybop/costs/_weighted_cost.py @@ -60,7 +60,7 @@ def __init__(self, *args, weights: Optional[list[float]] = None): for cost in self.costs: self.parameters.join(cost.parameters) - def _evaluate(self, inputs: Inputs, grad=None): + def _evaluate(self, inputs: Inputs): """ Calculate the weighted cost for a given set of parameters. @@ -68,9 +68,6 @@ def _evaluate(self, inputs: Inputs, grad=None): ---------- inputs : Inputs The parameters for which to compute the cost. - grad : array-like, optional - An array to store the gradient of the cost function with respect - to the parameters. 
Returns ------- @@ -90,7 +87,7 @@ def _evaluate(self, inputs: Inputs, grad=None): cost._current_prediction = cost.problem.evaluate(inputs) else: cost._current_prediction = self._current_prediction - e[i] = cost._evaluate(inputs, grad) + e[i] = cost._evaluate(inputs) return np.dot(e, self.weights) diff --git a/tests/integration/test_spm_parameterisations.py b/tests/integration/test_spm_parameterisations.py index 5e9d6b005..b3188e198 100644 --- a/tests/integration/test_spm_parameterisations.py +++ b/tests/integration/test_spm_parameterisations.py @@ -196,7 +196,6 @@ def test_multiple_signals(self, multi_optimiser, spm_two_signal_cost): # Test each optimiser optim = multi_optimiser( cost=spm_two_signal_cost, - sigma0=0.03, max_iterations=250, ) diff --git a/tests/integration/test_transformation.py b/tests/integration/test_transformation.py index 45feff80a..4ca5a0c1f 100644 --- a/tests/integration/test_transformation.py +++ b/tests/integration/test_transformation.py @@ -73,6 +73,7 @@ def test_spm_optimisers(self, optimiser, cost): x0 = cost.parameters.initial_value() optim = optimiser( cost=cost, + max_unchanged_iterations=35, max_iterations=125, ) From 6f461643971af4946b4241218dee53899efeed4d Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Thu, 1 Aug 2024 07:51:20 +0100 Subject: [PATCH 49/53] feat: adds @property for new WeightedCost attributes, adds to docstring --- pybop/costs/_weighted_cost.py | 8 ++++++++ pybop/costs/base_cost.py | 4 ++++ tests/unit/test_cost.py | 24 ++++++++++++------------ 3 files changed, 24 insertions(+), 12 deletions(-) diff --git a/pybop/costs/_weighted_cost.py b/pybop/costs/_weighted_cost.py index 32d48404b..ca5ae4614 100644 --- a/pybop/costs/_weighted_cost.py +++ b/pybop/costs/_weighted_cost.py @@ -23,6 +23,10 @@ class WeightedCost(BaseCost): _has_identical_problems : bool If True, the shared problem will be evaluated once and saved before the self._evaluate() method of each cost is called (default: False). + _has_seperable_problem: bool + If True, the shared problem is seperable from the cost function and + will be evaluated for each problem before the cost evaluation is + called (default: False). """ def __init__(self, *costs, weights: Optional[list[float]] = None): @@ -146,3 +150,7 @@ def _evaluateS1(self, inputs: Inputs): de = np.dot(de, self.weights) return e, de + + @property + def has_identical_problems(self): + return self._has_identical_problems diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index a03b4a267..813e27bef 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -47,6 +47,10 @@ def __init__(self, problem: Optional[BaseProblem] = None): def n_parameters(self): return len(self.parameters) + @property + def has_separable_problem(self): + return self._has_separable_problem + def __call__(self, inputs: Union[Inputs, list], grad=None): """ Call the evaluate function for a given set of parameters. 
diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index 562094d08..5a8bc6306 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -329,8 +329,8 @@ def test_weighted_fitting_cost(self, problem): # Test with identical problems weight = 100 weighted_cost_2 = pybop.WeightedCost(cost1, cost2, weights=[1, weight]) - assert weighted_cost_2._has_identical_problems is True - assert weighted_cost_2._has_separable_problem is False + assert weighted_cost_2.has_identical_problems is True + assert weighted_cost_2.has_separable_problem is False assert weighted_cost_2.problem is problem assert weighted_cost_2([0.5]) >= 0 np.testing.assert_allclose( @@ -342,8 +342,8 @@ def test_weighted_fitting_cost(self, problem): # Test with different problems cost3 = pybop.RootMeanSquaredError(copy(problem)) weighted_cost_3 = pybop.WeightedCost(cost1, cost3, weights=[1, weight]) - assert weighted_cost_3._has_identical_problems is False - assert weighted_cost_3._has_separable_problem is False + assert weighted_cost_3.has_identical_problems is False + assert weighted_cost_3.has_separable_problem is False assert weighted_cost_3.problem is None assert weighted_cost_3([0.5]) >= 0 np.testing.assert_allclose( @@ -360,8 +360,8 @@ def test_weighted_fitting_cost(self, problem): # Test MAP explicitly cost4 = pybop.MAP(problem, pybop.GaussianLogLikelihood) weighted_cost_4 = pybop.WeightedCost(cost1, cost4, weights=[1, weight]) - assert weighted_cost_4._has_identical_problems is False - assert weighted_cost_4._has_separable_problem is False + assert weighted_cost_4.has_identical_problems is False + assert weighted_cost_4.has_separable_problem is False sigma = 0.05 assert weighted_cost_4([0.5, sigma]) <= 0 np.testing.assert_allclose( @@ -377,8 +377,8 @@ def test_weighted_design_cost(self, design_problem): # Test DesignCosts with identical problems weighted_cost = pybop.WeightedCost(cost1, cost2) - assert weighted_cost._has_identical_problems is True - assert weighted_cost._has_separable_problem is False + assert weighted_cost.has_identical_problems is True + assert weighted_cost.has_separable_problem is False assert weighted_cost.problem is design_problem assert weighted_cost([0.5]) >= 0 np.testing.assert_allclose( @@ -390,8 +390,8 @@ def test_weighted_design_cost(self, design_problem): # Test DesignCosts with different problems cost3 = pybop.VolumetricEnergyDensity(copy(design_problem)) weighted_cost = pybop.WeightedCost(cost1, cost3) - assert weighted_cost._has_identical_problems is False - assert weighted_cost._has_separable_problem is False + assert weighted_cost.has_identical_problems is False + assert weighted_cost.has_separable_problem is False for i, _ in enumerate(weighted_cost.costs): assert isinstance(weighted_cost.costs[i].problem, pybop.DesignProblem) @@ -418,8 +418,8 @@ def test_weighted_design_cost_with_update_capacity(self, design_problem): assert weighted_cost.costs[0].update_capacity is True assert weighted_cost.costs[1].update_capacity is False - assert weighted_cost._has_identical_problems is True - assert weighted_cost._has_separable_problem is False + assert weighted_cost.has_identical_problems is True + assert weighted_cost.has_separable_problem is False assert weighted_cost.problem is design_problem assert weighted_cost([0.5]) >= 0 np.testing.assert_allclose( From 54c0afe55cefd05e84b21da183d5e48ae112850f Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Thu, 1 Aug 2024 10:21:48 +0100 Subject: [PATCH 50/53] Apply suggestions from review --- pybop/parameters/parameter.py | 13 
++++++++++--- tests/unit/test_parameters.py | 17 +++++++++++++++++ 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/pybop/parameters/parameter.py b/pybop/parameters/parameter.py index 6976f31e1..cb1a19369 100644 --- a/pybop/parameters/parameter.py +++ b/pybop/parameters/parameter.py @@ -409,9 +409,16 @@ def initial_value(self) -> np.ndarray: initial_values = [] for param in self.param.values(): - if param.initial_value is None and param.prior is not None: - initial_value = param.rvs(1)[0] - param.update(initial_value=initial_value) + if param.initial_value is None: + if param.prior is not None: + initial_value = param.rvs(1)[0] + param.update(initial_value=initial_value) + else: + warnings.warn( + "Initial value or Prior are None, proceeding without initial value.", + UserWarning, + stacklevel=2, + ) initial_values.append(param.initial_value) return np.asarray(initial_values) diff --git a/tests/unit/test_parameters.py b/tests/unit/test_parameters.py index 90c43622c..e48cda8ed 100644 --- a/tests/unit/test_parameters.py +++ b/tests/unit/test_parameters.py @@ -1,3 +1,4 @@ +import numpy as np import pytest import pybop @@ -212,3 +213,19 @@ def test_get_sigma(self, parameter): parameter.prior = None assert params.get_sigma0() is None + + @pytest.mark.unit + def test_initial_values_without_attributes(self): + # Test without initial values + parameter = pybop.Parameters( + pybop.Parameter( + "Negative electrode conductivity [S.m-1]", + ) + ) + with pytest.warns( + UserWarning, + match="Initial value or Prior are None, proceeding without initial value.", + ): + sample = parameter.initial_value() + + np.testing.assert_equal(sample, np.array([None])) From 0e503ae6210fea5878466e451e1fedf8505cef3a Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Thu, 1 Aug 2024 10:28:25 +0100 Subject: [PATCH 51/53] Suggestions from review --- pybop/costs/_weighted_cost.py | 5 +++-- pybop/costs/base_cost.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/pybop/costs/_weighted_cost.py b/pybop/costs/_weighted_cost.py index ca5ae4614..a435c002f 100644 --- a/pybop/costs/_weighted_cost.py +++ b/pybop/costs/_weighted_cost.py @@ -23,10 +23,11 @@ class WeightedCost(BaseCost): _has_identical_problems : bool If True, the shared problem will be evaluated once and saved before the self._evaluate() method of each cost is called (default: False). - _has_seperable_problem: bool + _has_separable_problem: bool If True, the shared problem is seperable from the cost function and will be evaluated for each problem before the cost evaluation is - called (default: False). + called (default: False). This attribute is used for sub-cost objects; + however, the top-level WeightedCost attribute is not used (i.e. == False). """ def __init__(self, *costs, weights: Optional[list[float]] = None): diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 813e27bef..e853ceba5 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -23,7 +23,7 @@ class BaseCost: n_outputs : int The number of outputs in the model. _has_separable_problem : bool - If True, the problem is independent from the cost function and will be + If True, the problem is separable from the cost function and will be evaluated in advance of the call to self._evaluate() (default: False). 
""" From 8dca6d1818192f7ec0e1b756a88a521d95f46453 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Thu, 1 Aug 2024 14:45:49 +0100 Subject: [PATCH 52/53] Update file names, add changelog entry --- CHANGELOG.md | 1 + pybop/__init__.py | 4 +- .../{__init__.py => base_transformation.py} | 42 ++++++++++++------- ...{_transformation.py => transformations.py} | 0 4 files changed, 31 insertions(+), 16 deletions(-) rename pybop/transformation/{__init__.py => base_transformation.py} (82%) rename pybop/transformation/{_transformation.py => transformations.py} (100%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 80ed158f7..567f88c89 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ## Features +- [#357](https://github.com/pybop-team/PyBOP/pull/357) - Adds `Transformation()` class with `LogTransformation()`, `IdentityTransformation()`, and `ScaledTransformation()`, `ComposedTransformation()` implementations with corresponding examples and tests. - [#427](https://github.com/pybop-team/PyBOP/issues/427) - Adds the nbstripout pre-commit hook to remove unnecessary metadata from notebooks. - [#327](https://github.com/pybop-team/PyBOP/issues/327) - Adds the `WeightedCost` subclass, defines when to evaluate a problem and adds the `spm_weighted_cost` example script. - [#393](https://github.com/pybop-team/PyBOP/pull/383) - Adds Minkowski and SumofPower cost classes, with an example and corresponding tests. diff --git a/pybop/__init__.py b/pybop/__init__.py index 66c93e33d..ff8c08a81 100644 --- a/pybop/__init__.py +++ b/pybop/__init__.py @@ -58,8 +58,8 @@ # # Transformation classes # -from .transformation import Transformation -from .transformation._transformation import ( +from .transformation.base_transformation import Transformation +from .transformation.transformations import ( IdentityTransformation, ScaledTransformation, LogTransformation, diff --git a/pybop/transformation/__init__.py b/pybop/transformation/base_transformation.py similarity index 82% rename from pybop/transformation/__init__.py rename to pybop/transformation/base_transformation.py index 08ba5e17d..c99057752 100644 --- a/pybop/transformation/__init__.py +++ b/pybop/transformation/base_transformation.py @@ -1,5 +1,7 @@ from abc import ABC, abstractmethod -from typing import Tuple, Union, Sequence, List +from collections.abc import Sequence +from typing import Union + import numpy as np @@ -20,6 +22,7 @@ class Transformation(ABC): http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.47.9023 .. [2] Kaare Brandt Petersen and Michael Syskind Pedersen. "The Matrix Cookbook." 2012. """ + # ---- To be implemented with Monte Carlo PR ------ # # def convert_log_prior(self, log_prior): # """Returns a transformed log-prior class.""" @@ -33,7 +36,9 @@ def convert_covariance_matrix(self, cov: np.ndarray, q: np.ndarray) -> np.ndarra jac_inv = np.linalg.pinv(self.jacobian(q)) return jac_inv @ cov @ jac_inv.T - def convert_standard_deviation(self, std: Union[float, np.ndarray], q: np.ndarray) -> np.ndarray: + def convert_standard_deviation( + self, std: Union[float, np.ndarray], q: np.ndarray + ) -> np.ndarray: """ Converts standard deviation `std`, either a scalar or a vector, from the model space to the search space around a parameter vector `q` in the search space. 
@@ -48,7 +53,7 @@ def convert_standard_deviation(self, std: Union[float, np.ndarra

     def jacobian(self, q: np.ndarray) -> np.ndarray:
         """Returns the Jacobian matrix of the transformation at the parameter vector `q`."""

-    def jacobian_S1(self, q: np.ndarray) -> Tuple[np.ndarray, Sequence[np.ndarray]]:
+    def jacobian_S1(self, q: np.ndarray) -> tuple[np.ndarray, Sequence[np.ndarray]]:
         """
         Computes the Jacobian matrix and its partial derivatives at the parameter vector `q`.

@@ -61,14 +66,18 @@ def log_jacobian_det(self, q: np.ndarray) -> float:
         Returns the logarithm of the absolute value of the determinant of the Jacobian matrix at
         the parameter vector `q`.
         """
-        raise NotImplementedError("log_jacobian_det method must be implemented if used.")
+        raise NotImplementedError(
+            "log_jacobian_det method must be implemented if used."
+        )

-    def log_jacobian_det_S1(self, q: np.ndarray) -> Tuple[float, np.ndarray]:
+    def log_jacobian_det_S1(self, q: np.ndarray) -> tuple[float, np.ndarray]:
         """
         Computes the logarithm of the absolute value of the determinant of the Jacobian,
         and returns it along with its partial derivatives.
         """
-        raise NotImplementedError("log_jacobian_det_S1 method must be implemented if used.")
+        raise NotImplementedError(
+            "log_jacobian_det_S1 method must be implemented if used."
+        )

     @property
     def n_parameters(self):
@@ -96,24 +105,29 @@ def is_elementwise(self) -> bool:
         """
         raise NotImplementedError("is_elementwise method must be implemented if used.")

-    def _verify_input(self, input: Union[float, int, List[float], np.ndarray, dict[str, float]]) -> np.ndarray:
+    def _verify_input(
+        self, inputs: Union[float, int, list[float], np.ndarray, dict[str, float]]
+    ) -> np.ndarray:
         """Set and validate the transformation parameter."""
-        if isinstance(input, (float, int)):
-            return np.full(self._n_parameters, float(input))
+        if isinstance(inputs, (float, int)):
+            return np.full(self._n_parameters, float(inputs))

-        if isinstance(input, dict):
-            input = list(input.values())
+        if isinstance(inputs, dict):
+            inputs = list(inputs.values())

         try:
-            input_array = np.asarray(input, dtype=float)
-        except (ValueError, TypeError):
-            raise TypeError("Transform must be a float, int, list, numpy array, or dictionary")
+            input_array = np.asarray(inputs, dtype=float)
+        except (ValueError, TypeError) as e:
+            raise TypeError(
+                "Transform must be a float, int, list, numpy array, or dictionary"
+            ) from e

         if input_array.size != self._n_parameters:
             raise ValueError(f"Transform must have {self._n_parameters} elements")

         return input_array

+
 # ---- To be implemented with Monte Carlo PR ------ #
 # class TransformedLogPDF(BaseCost):
 #     """Transformed log-PDF class."""
diff --git a/pybop/transformation/_transformation.py b/pybop/transformation/transformations.py
similarity index 100%
rename from pybop/transformation/_transformation.py
rename to pybop/transformation/transformations.py

From 1bedcd722d5ed6476ae3ee6a25807e3815a7b6bc Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Thu, 1 Aug 2024 17:13:52 +0100
Subject: [PATCH 53/53] Add changelog entry

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 567f88c89..f2ba3f2bf 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,7 @@

 ## Features

+- [#413](https://github.com/pybop-team/PyBOP/pull/413) - Adds `DesignCost` functionality to `WeightedCost` class with additional tests.
- [#357](https://github.com/pybop-team/PyBOP/pull/357) - Adds `Transformation()` class with `LogTransformation()`, `IdentityTransformation()`, and `ScaledTransformation()`, `ComposedTransformation()` implementations with corresponding examples and tests. - [#427](https://github.com/pybop-team/PyBOP/issues/427) - Adds the nbstripout pre-commit hook to remove unnecessary metadata from notebooks. - [#327](https://github.com/pybop-team/PyBOP/issues/327) - Adds the `WeightedCost` subclass, defines when to evaluate a problem and adds the `spm_weighted_cost` example script.
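
For reference, the `WeightedCost` API that this series settles on can be exercised end-to-end as follows. This is a minimal sketch assembled from the diffs and tests above; the parameter choice, data window and optimiser settings are illustrative, not prescribed by the patches.

import numpy as np

import pybop

# Build a fitting problem (mirrors the integration-test fixtures in this series)
parameter_set = pybop.ParameterSet.pybamm("Chen2020")
model = pybop.lithium_ion.SPM(parameter_set=parameter_set)
parameters = pybop.Parameters(
    pybop.Parameter(
        "Negative electrode active material volume fraction",
        prior=pybop.Uniform(0.4, 0.75),
        bounds=[0.375, 0.75],
    ),
)

t_eval = np.arange(0, 900, 3)
solution = model.predict(t_eval=t_eval)
dataset = pybop.Dataset(
    {
        "Time [s]": solution["Time [s]"].data,
        "Current function [A]": solution["Current [A]"].data,
        "Voltage [V]": solution["Voltage [V]"].data,
    }
)
problem = pybop.FittingProblem(model, parameters, dataset)

# Costs are now passed positionally, with an optional `weights` keyword
cost1 = pybop.SumSquaredError(problem)
cost2 = pybop.RootMeanSquaredError(problem)
weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[1, 100])

# Both sub-costs share one problem, so it is evaluated once per call
assert weighted_cost.has_identical_problems
assert not weighted_cost.has_separable_problem

optim = pybop.CuckooSearch(cost=weighted_cost, max_iterations=50)
x, final_cost = optim.run()

Design costs combine through the same constructor; when any sub-cost requests `update_capacity`, the weighted cost applies it globally to the shared evaluation and raises the `UserWarning` added in PATCH 44.
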