From c802e6e45eb3b5161f2a93b9ec91da138ffafd69 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Wed, 11 Sep 2024 18:59:10 +0100 Subject: [PATCH 1/6] fix: reapply transformation for joined parameters in GaussianLogLikelihood --- pybop/costs/_likelihoods.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index 153e27f15..e648817a7 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -111,9 +111,11 @@ def __init__( self._dsigma_scale = dsigma_scale self._logpi = -0.5 * self.n_data * np.log(2 * np.pi) + # Add sigma parameter, join with self.parameters, reapply transformations self.sigma = Parameters() self._add_sigma_parameters(sigma0) self.parameters.join(self.sigma) + self.transformation = self.parameters.construct_transformation() def _add_sigma_parameters(self, sigma0): sigma0 = [sigma0] if not isinstance(sigma0, list) else sigma0 From 6a0d83814bfd6b2cefd8b8b17b19611d5a9e896d Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Thu, 12 Sep 2024 11:59:27 +0100 Subject: [PATCH 2/6] fix: updates fail_gradient for GaussianLogLikelihood, fixes missed cost transformation arg in scipy_optimisers.py, updates transformation integration tests --- pybop/costs/_likelihoods.py | 1 + pybop/optimisers/scipy_optimisers.py | 18 ++-- pybop/parameters/parameter.py | 16 +-- .../test_thevenin_parameterisation.py | 1 + tests/integration/test_transformation.py | 101 +++++++++++++----- tests/unit/test_likelihoods.py | 7 ++ tests/unit/test_parameters.py | 17 +++ 7 files changed, 120 insertions(+), 41 deletions(-) diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index e648817a7..d83e7ab20 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -116,6 +116,7 @@ def __init__( self._add_sigma_parameters(sigma0) self.parameters.join(self.sigma) self.transformation = self.parameters.construct_transformation() + self.set_fail_gradient() def _add_sigma_parameters(self, sigma0): sigma0 = [sigma0] if not isinstance(sigma0, list) else sigma0 diff --git a/pybop/optimisers/scipy_optimisers.py b/pybop/optimisers/scipy_optimisers.py index 3f9bf9e7b..c5b1c611d 100644 --- a/pybop/optimisers/scipy_optimisers.py +++ b/pybop/optimisers/scipy_optimisers.py @@ -88,7 +88,7 @@ def _run(self): x=self._transformation.to_model(result.x) if self._transformation else result.x, - final_cost=self.cost(result.x), + final_cost=self.cost(result.x, apply_transform=True), n_iterations=nit, scipy_result=result, ) @@ -165,13 +165,13 @@ def cost_wrapper(self, x): self.log_update(x=[x]) if not self._options["jac"]: - cost = self.cost(x) / self._cost0 + cost = self.cost(x, apply_transform=True) / self._cost0 if np.isinf(cost): self.inf_count += 1 cost = 1 + 0.9**self.inf_count # for fake finite gradient return cost if self.minimising else -cost - L, dl = self.cost(x, calculate_grad=True) + L, dl = self.cost(x, calculate_grad=True, apply_transform=True) return (L, dl) if self.minimising else (-L, -dl) def _run_optimiser(self): @@ -198,7 +198,7 @@ def base_callback(intermediate_result: Union[OptimizeResult, np.ndarray]): cost = intermediate_result.fun else: x_best = intermediate_result - cost = self.cost(x_best) + cost = self.cost(x_best, apply_transform=True) self.log_update( x_best=x_best, @@ -212,7 +212,7 @@ def base_callback(intermediate_result: Union[OptimizeResult, np.ndarray]): ) # Compute the absolute initial cost and resample if required - self._cost0 = np.abs(self.cost(self.x0)) + self._cost0 = 
np.abs(self.cost(self.x0, apply_transform=True)) if np.isinf(self._cost0): for _i in range(1, self.num_resamples): try: @@ -224,7 +224,7 @@ def base_callback(intermediate_result: Union[OptimizeResult, np.ndarray]): stacklevel=2, ) break - self._cost0 = np.abs(self.cost(self.x0)) + self._cost0 = np.abs(self.cost(self.x0, apply_transform=True)) if not np.isinf(self._cost0): break if np.isinf(self._cost0): @@ -352,7 +352,11 @@ def callback(intermediate_result: OptimizeResult): def cost_wrapper(x): self.log_update(x=[x]) - return self.cost(x) if self.minimising else -self.cost(x) + return ( + self.cost(x, apply_transform=True) + if self.minimising + else -self.cost(x, apply_transform=True) + ) return differential_evolution( cost_wrapper, diff --git a/pybop/parameters/parameter.py b/pybop/parameters/parameter.py index e95cd698b..9d7736452 100644 --- a/pybop/parameters/parameter.py +++ b/pybop/parameters/parameter.py @@ -341,12 +341,16 @@ def get_bounds(self, apply_transform: bool = False) -> dict: for param in self.param.values(): if param.bounds is not None: if apply_transform and param.transformation is not None: - bounds["lower"].append( - float(param.transformation.to_search(param.bounds[0])) - ) - bounds["upper"].append( - float(param.transformation.to_search(param.bounds[1])) - ) + lower = float(param.transformation.to_search(param.bounds[0])) + upper = float(param.transformation.to_search(param.bounds[1])) + if np.isnan(lower) or np.isnan(upper): + raise ValueError( + "Transformed bounds resulted in NaN values.\n" + "If you've not applied bounds, this is due to the defaults applied from the prior distribution,\n" + "consider bounding the parameters to avoid this error." + ) + bounds["lower"].append(lower) + bounds["upper"].append(upper) else: bounds["lower"].append(param.bounds[0]) bounds["upper"].append(param.bounds[1]) diff --git a/tests/integration/test_thevenin_parameterisation.py b/tests/integration/test_thevenin_parameterisation.py index 75530c892..48ca4ea2d 100644 --- a/tests/integration/test_thevenin_parameterisation.py +++ b/tests/integration/test_thevenin_parameterisation.py @@ -107,6 +107,7 @@ def get_data(self, model): [ ( "Discharge at 0.5C for 2 minutes (4 seconds period)", + "Rest for 20 seconds (4 seconds period)", "Charge at 0.5C for 2 minutes (4 seconds period)", ), ] diff --git a/tests/integration/test_transformation.py b/tests/integration/test_transformation.py index 8939e28e5..5c616a5b7 100644 --- a/tests/integration/test_transformation.py +++ b/tests/integration/test_transformation.py @@ -11,39 +11,43 @@ class TestTransformation: @pytest.fixture(autouse=True) def setup(self): + self.sigma0 = 2e-3 self.ground_truth = np.clip( - np.array([0.5, 0.1]) + np.random.normal(loc=0.0, scale=0.05, size=2), - a_min=[0.375, 0.02], - a_max=[0.7, 0.5], + np.asarray([0.05, 0.05]) + np.random.normal(loc=0.0, scale=0.01, size=2), + a_min=0.0, + a_max=0.1, ) @pytest.fixture def model(self): - parameter_set = pybop.ParameterSet.pybamm("Chen2020") - x = self.ground_truth - parameter_set.update( + parameter_set = pybop.ParameterSet( + json_path="examples/scripts/parameters/initial_ecm_parameters.json" + ) + parameter_set.import_parameters() + parameter_set.params.update( { - "Positive electrode active material volume fraction": x[0], - "Positive electrode conductivity [S.m-1]": x[1], + "C1 [F]": 1000, + "R0 [Ohm]": self.ground_truth[0], + "R1 [Ohm]": self.ground_truth[1], } ) - return pybop.lithium_ion.SPMe(parameter_set=parameter_set) + return 
pybop.empirical.Thevenin(parameter_set=parameter_set) @pytest.fixture def parameters(self): return pybop.Parameters( pybop.Parameter( - "Positive electrode active material volume fraction", - prior=pybop.Uniform(0.4, 0.7), - bounds=[0.375, 0.725], + "R0 [Ohm]", + prior=pybop.Uniform(0.001, 0.1), + bounds=[0, 0.1], transformation=pybop.ScaledTransformation( coefficient=1 / 0.35, intercept=-0.375 ), ), pybop.Parameter( - "Positive electrode conductivity [S.m-1]", - prior=pybop.Uniform(0.05, 0.45), - bounds=[0.02, 0.5], + "R1 [Ohm]", + prior=pybop.Uniform(0.001, 0.1), + bounds=[0, 0.1], transformation=pybop.LogTransformation(), ), ) @@ -55,8 +59,22 @@ def init_soc(self, request): def noise(self, sigma, values): return np.random.normal(0, sigma, values) + @pytest.fixture( + params=[ + pybop.GaussianLogLikelihoodKnownSigma, + pybop.GaussianLogLikelihood, + pybop.RootMeanSquaredError, + pybop.SumSquaredError, + pybop.SumofPower, + pybop.Minkowski, + pybop.LogPosterior, + ] + ) + def cost_cls(self, request): + return request.param + @pytest.fixture - def cost(self, model, parameters, init_soc): + def cost(self, model, parameters, init_soc, cost_cls): # Form dataset solution = self.get_data(model, init_soc) dataset = pybop.Dataset( @@ -64,27 +82,40 @@ def cost(self, model, parameters, init_soc): "Time [s]": solution["Time [s]"].data, "Current function [A]": solution["Current [A]"].data, "Voltage [V]": solution["Voltage [V]"].data - + self.noise(0.002, len(solution["Time [s]"].data)), + + self.noise(self.sigma0, len(solution["Time [s]"].data)), } ) - # Define the cost to optimise + # Construct problem problem = pybop.FittingProblem(model, parameters, dataset) - return pybop.RootMeanSquaredError(problem) + + # Construct the cost + if cost_cls is pybop.GaussianLogLikelihoodKnownSigma: + return cost_cls(problem, sigma0=self.sigma0) + elif cost_cls is pybop.GaussianLogLikelihood: + return cost_cls(problem) + elif cost_cls is pybop.LogPosterior: + return cost_cls( + pybop.GaussianLogLikelihoodKnownSigma(problem, sigma0=self.sigma0) + ) + else: + return cost_cls(problem) @pytest.mark.parametrize( "optimiser", [ - pybop.AdamW, - pybop.CMAES, + pybop.IRPropMin, + pybop.NelderMead, ], ) @pytest.mark.integration - def test_spm_transformation(self, optimiser, cost): + def test_thevenin_transformation(self, optimiser, cost): x0 = cost.parameters.initial_value() optim = optimiser( cost=cost, - sigma0=0.1, + sigma0=[0.03, 0.03, 1e-3] + if isinstance(cost, pybop.GaussianLogLikelihood) + else [0.03, 0.03], max_unchanged_iterations=35, absolute_tolerance=1e-6, max_iterations=250, @@ -93,20 +124,34 @@ def test_spm_transformation(self, optimiser, cost): initial_cost = optim.cost(x0) x, final_cost = optim.run() + # Add sigma0 to ground truth for GaussianLogLikelihood + if isinstance(optim.cost, pybop.GaussianLogLikelihood): + self.ground_truth = np.concatenate( + (self.ground_truth, np.asarray([self.sigma0])) + ) + # Assertions - if not np.allclose(x0, self.ground_truth, atol=1e-5): - assert initial_cost > final_cost + if np.allclose(x0, self.ground_truth, atol=1e-5): + raise AssertionError("Initial guess is too close to ground truth") + + if isinstance(cost, pybop.GaussianLogLikelihood): np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) + np.testing.assert_allclose(x[-1], self.sigma0, atol=5e-4) else: - raise ValueError("Initial value is the same as the ground truth value.") + assert ( + (initial_cost > final_cost) + if optim.minimising + else (initial_cost < final_cost) + ) + 
np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2)

     def get_data(self, model, init_soc):
         initial_state = {"Initial SoC": init_soc}
         experiment = pybop.Experiment(
             [
                 (
                     "Discharge at 0.5C for 2 minutes (4 second period)",
                     "Rest for 1 minute (4 second period)",
                 ),
             ]
         )
diff --git a/tests/unit/test_likelihoods.py b/tests/unit/test_likelihoods.py
index a22c63e35..6e0f22561 100644
--- a/tests/unit/test_likelihoods.py
+++ b/tests/unit/test_likelihoods.py
@@ -148,6 +148,13 @@ def test_gaussian_log_likelihood(self, one_signal_problem):
                 one_signal_problem, sigma0="Invalid string"
             )

+        # Test that transformation failure returns an infinite cost and a correctly sized gradient
+        cost_fail, grad_fail = likelihood(
+            [0.01, 0.01], calculate_grad=True, apply_transform=True
+        )
+        assert not np.isfinite(cost_fail)
+        assert grad_fail.shape == (2,)
+
     @pytest.mark.unit
     def test_gaussian_log_likelihood_dsigma_scale(self, one_signal_problem):
         likelihood = pybop.GaussianLogLikelihood(one_signal_problem, dsigma_scale=0.05)
diff --git a/tests/unit/test_parameters.py b/tests/unit/test_parameters.py
index 775180f28..409c1079f 100644
--- a/tests/unit/test_parameters.py
+++ b/tests/unit/test_parameters.py
@@ -196,6 +196,23 @@ def test_parameters_naming(self, parameter):
         ):
             params["Positive electrode active material volume fraction"]

+    @pytest.mark.unit
+    def test_parameters_transformation(self):
+        # Test that an unbounded transformation raises ValueError due to NaN bounds
+        params = pybop.Parameters(
+            pybop.Parameter(
+                "Negative electrode active material volume fraction",
+                prior=pybop.Gaussian(0.01, 0.2),
+                transformation=pybop.LogTransformation(),
+            )
+        )
+
+        with pytest.raises(
+            ValueError,
+            match="Transformed bounds resulted in NaN values.\nIf you've not applied bounds",
+        ):
+            params.get_bounds(apply_transform=True)
+
     @pytest.mark.unit
     def test_parameters_update(self, parameter):
         params = pybop.Parameters(parameter)

From a3293edf343521802a0764b65f3907b1ec1065a0 Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Thu, 12 Sep 2024 16:48:28 +0100
Subject: [PATCH 3/6] refactor: adds join_parameters() setter in BaseCost to
 replace direct calls to parameters.join(). Updates fail_gradient within the
 setter.
--- pybop/costs/_likelihoods.py | 3 +-- pybop/costs/_weighted_cost.py | 2 +- pybop/costs/base_cost.py | 9 +++++++++ pybop/optimisers/base_optimiser.py | 2 +- 4 files changed, 12 insertions(+), 4 deletions(-) diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index d83e7ab20..2e3df2edc 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -114,9 +114,8 @@ def __init__( # Add sigma parameter, join with self.parameters, reapply transformations self.sigma = Parameters() self._add_sigma_parameters(sigma0) - self.parameters.join(self.sigma) + self.join_parameters(self.sigma) self.transformation = self.parameters.construct_transformation() - self.set_fail_gradient() def _add_sigma_parameters(self, sigma0): sigma0 = [sigma0] if not isinstance(sigma0, list) else sigma0 diff --git a/pybop/costs/_weighted_cost.py b/pybop/costs/_weighted_cost.py index 10d7d5440..474907384 100644 --- a/pybop/costs/_weighted_cost.py +++ b/pybop/costs/_weighted_cost.py @@ -62,7 +62,7 @@ def __init__(self, *costs, weights: Optional[list[float]] = None): super().__init__() for cost in self.costs: - self.parameters.join(cost.parameters) + self.join_parameters(cost.parameters) # Weighted costs do not use this functionality self._has_separable_problem = False diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 71de3f35e..23d82b89d 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -183,3 +183,12 @@ def verify_args(self, dy: ndarray, calculate_grad: bool): raise ValueError( "Forward model sensitivities need to be provided alongside `calculate_grad=True` for `cost.compute`." ) + + def join_parameters(self, parameters): + """ + Setter for joining parameters. This method sets the fail gradient if the join adds parameters. + """ + original_n_params = self.n_parameters + self.parameters.join(parameters) + if original_n_params != self.n_parameters: + self.set_fail_gradient() diff --git a/pybop/optimisers/base_optimiser.py b/pybop/optimisers/base_optimiser.py index 6963c9251..b8335cbb9 100644 --- a/pybop/optimisers/base_optimiser.py +++ b/pybop/optimisers/base_optimiser.py @@ -73,8 +73,8 @@ def __init__( if isinstance(cost, BaseCost): self.cost = cost + self.parameters = self.cost.parameters self._transformation = self.cost.transformation - self.parameters.join(cost.parameters) self.set_allow_infeasible_solutions() if isinstance(cost, WeightedCost): self.minimising = cost.minimising From 0ca7fa6a83406f4c7879b160f93c103e58d11c90 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Thu, 12 Sep 2024 17:12:42 +0100 Subject: [PATCH 4/6] removes redundant commented-out code. 
---
 pybop/transformation/base_transformation.py | 32 ---------------------
 1 file changed, 32 deletions(-)

diff --git a/pybop/transformation/base_transformation.py b/pybop/transformation/base_transformation.py
index 40488949e..a36ba8081 100644
--- a/pybop/transformation/base_transformation.py
+++ b/pybop/transformation/base_transformation.py
@@ -126,35 +126,3 @@ def verify_input(
             raise ValueError(f"Transform must have {self._n_parameters} elements")

         return input_array
-
-
-# ---- To be implemented with Monte Carlo PR ------ #
-# class TransformedLogPDF(BaseCost):
-#     """Transformed log-PDF class."""
-#     def __init__(self, log_pdf, transformation):
-#         self._log_pdf = log_pdf
-#         self._transformation = transformation
-
-#     def __call__(self, q):
-#         p = self._transformation.to_model(q)
-#         log_pdf = self._log_pdf(p)
-
-#         # Calculate the PDF using change of variable
-#         # Wikipedia: https://w.wiki/UsJ
-#         log_jacobian_det = self._transformation.log_jacobian_det(q)
-#         return log_pdf + log_jacobian_det
-
-#     def _evaluateS1(self, x):
-#         p = self._transformation.to_model(x)
-#         log_pdf, log_pdf_derivatives = self._log_pdf._evaluateS1(p)
-#         log_jacobian_det, log_jacobian_det_derivatives = self._transformation.log_jacobian_det_S1(x)
-#         return log_pdf + log_jacobian_det, log_pdf_derivatives + log_jacobian_det_derivatives
-
-# class TransformedLogPrior:
-#     """Transformed log-prior class."""
-#     def __init__(self, log_prior, transformation):
-#         self._log_prior = log_prior
-#         self._transformation = transformation
-
-#     def __call__(self, q):
-#         return self

From e978cda785ae858ab0dde03154a70c359f434afd Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Fri, 13 Sep 2024 20:59:29 +0100
Subject: [PATCH 5/6] tests: adds test_cost_transformations.py unit tests, adds
 `BaseModel.build()` initial_state dict keys to docstring

---
 pybop/models/base_model.py | 1 +
 tests/unit/test_cost_transformations.py | 138 ++++++++++++++++++++++++
 2 files changed, 139 insertions(+)
 create mode 100644 tests/unit/test_cost_transformations.py

diff --git a/pybop/models/base_model.py b/pybop/models/base_model.py
index ad51010dd..bf6260d65 100644
--- a/pybop/models/base_model.py
+++ b/pybop/models/base_model.py
@@ -148,6 +148,7 @@ def build(
             A valid initial state, e.g. the initial state of charge or open-circuit voltage.
             Defaults to None, indicating that the existing initial state of charge (for an ECM)
             or initial concentrations (for an EChem model) will be used.
+            Accepted keys are either `"Initial open-circuit voltage [V]"` or `"Initial SoC"`.
         dataset : pybop.Dataset or dict, optional
             The dataset to be used in the model construction.
         check_model : bool, optional
diff --git a/tests/unit/test_cost_transformations.py b/tests/unit/test_cost_transformations.py
new file mode 100644
index 000000000..3d069528c
--- /dev/null
+++ b/tests/unit/test_cost_transformations.py
@@ -0,0 +1,138 @@
+import numpy as np
+import pytest
+
+import pybop
+
+
+class TestCostsTransformations:
+    """
+    Class for unit testing cost functions with transformations applied.
+ """ + + @pytest.fixture(autouse=True) + def setup(self): + self.x1 = [0.5, 1] + self.x2 = [(self.x1[0] + 1) * -2.5, np.log(1)] + + @pytest.fixture + def model(self): + return pybop.lithium_ion.SPM() + + @pytest.fixture + def parameters(self): + parameters = pybop.Parameters( + pybop.Parameter( + "Negative electrode active material volume fraction", + prior=pybop.Gaussian(0.5, 0.01), + bounds=[0.375, 0.625], + transformation=pybop.ScaledTransformation( + coefficient=-2.5, intercept=1 + ), + ), + pybop.Parameter( + "Positive electrode Bruggeman coefficient (electrode)", + prior=pybop.Gaussian(1, 0.1), + transformation=pybop.LogTransformation(), + ), + ) + return parameters + + @pytest.fixture + def experiment(self): + return pybop.Experiment( + [ + ("Discharge at 1C for 1 minutes (6 second period)"), + ] + ) + + @pytest.fixture + def dataset(self, model, experiment): + solution = model.predict(experiment=experiment) + return pybop.Dataset( + { + "Time [s]": solution["Time [s]"].data, + "Current function [A]": solution["Current [A]"].data, + "Voltage [V]": solution["Terminal voltage [V]"].data, + } + ) + + @pytest.fixture + def problem(self, model, parameters, dataset, request): + problem = pybop.FittingProblem(model, parameters, dataset) + return problem + + @pytest.fixture( + params=[ + pybop.RootMeanSquaredError, + pybop.SumSquaredError, + pybop.Minkowski, + pybop.SumofPower, + pybop.ObserverCost, + pybop.LogPosterior, + pybop.GaussianLogLikelihood, + pybop.GaussianLogLikelihoodKnownSigma, + ] + ) + def cost(self, problem, request): + cls = request.param + if cls in [pybop.SumSquaredError, pybop.RootMeanSquaredError]: + return cls(problem) + elif cls is pybop.LogPosterior: + return cls(pybop.GaussianLogLikelihoodKnownSigma(problem, sigma0=0.002)) + elif cls is pybop.GaussianLogLikelihoodKnownSigma: + return pybop.GaussianLogLikelihoodKnownSigma(problem, sigma0=0.002) + elif cls is pybop.ObserverCost: + inputs = problem.parameters.initial_value() + state = problem.model.reinit(inputs) + n = len(state) + sigma_diag = [0.0] * n + sigma_diag[0] = 1e-4 + sigma_diag[1] = 1e-4 + process_diag = [0.0] * n + process_diag[0] = 1e-4 + process_diag[1] = 1e-4 + sigma0 = np.diag(sigma_diag) + process = np.diag(process_diag) + dataset = pybop.Dataset(data_dictionary=problem.dataset) + return cls( + pybop.UnscentedKalmanFilterObserver( + problem.parameters, + problem.model, + sigma0=sigma0, + process=process, + measure=1e-4, + dataset=dataset, + signal=problem.signal, + ), + ) + else: + return cls(problem) + + @pytest.mark.unit + def test_cost_transformations(self, cost): + if isinstance(cost, pybop.GaussianLogLikelihood): + self.x1.append(0.002) + self.x2.append(0.002) + + # Asserts + non_transformed_cost = cost(self.x1) + transformed_cost = cost(self.x2, apply_transform=True) + np.testing.assert_allclose(non_transformed_cost, transformed_cost) + + @pytest.mark.unit + def test_cost_gradient_transformed(self, cost): + # Gradient transformations are not implmented on ObserverCost + if isinstance(cost, pybop.ObserverCost): + return + + if isinstance(cost, pybop.GaussianLogLikelihood): + self.x1.append(0.002) + self.x2.append(0.002) + # Asserts + non_transformed_cost, grad = cost(self.x1, calculate_grad=True) + transformed_cost, transformed_gradient = cost( + self.x2, calculate_grad=True, apply_transform=True + ) + np.testing.assert_allclose(transformed_cost, non_transformed_cost) + with pytest.raises(AssertionError): + np.testing.assert_allclose(transformed_gradient, grad) From 
6569eaaecd9205aa49c84203f9a62f58413aea23 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Sat, 14 Sep 2024 13:42:25 +0100 Subject: [PATCH 6/6] refactor: cost.parameters as private attribute -> cost._parameters --- examples/standalone/cost.py | 6 +++--- pybop/costs/_likelihoods.py | 16 ++++++++-------- pybop/costs/base_cost.py | 18 +++++++++++------- pybop/costs/fitting_costs.py | 2 +- tests/unit/test_sampling.py | 4 ++-- 5 files changed, 25 insertions(+), 21 deletions(-) diff --git a/examples/standalone/cost.py b/examples/standalone/cost.py index 1d76c88f8..98f5d932b 100644 --- a/examples/standalone/cost.py +++ b/examples/standalone/cost.py @@ -36,14 +36,14 @@ def __init__(self, problem=None): """ super().__init__(problem) - self.parameters = pybop.Parameters( + self._parameters = pybop.Parameters( pybop.Parameter( "x", initial_value=4.2, bounds=[-1, 10], ), ) - self.x0 = self.parameters.initial_value() + self.x0 = self._parameters.initial_value() def compute( self, y: dict = None, dy: np.ndarray = None, calculate_grad: bool = False @@ -59,4 +59,4 @@ def compute( float The calculated cost value for the given parameter. """ - return self.parameters["x"].value ** 2 + 42 + return self._parameters["x"].value ** 2 + 42 diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index 2e3df2edc..4a17a3e05 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -115,7 +115,7 @@ def __init__( self.sigma = Parameters() self._add_sigma_parameters(sigma0) self.join_parameters(self.sigma) - self.transformation = self.parameters.construct_transformation() + self.transformation = self._parameters.construct_transformation() def _add_sigma_parameters(self, sigma0): sigma0 = [sigma0] if not isinstance(sigma0, list) else sigma0 @@ -237,11 +237,11 @@ def __init__( # Store the likelihood and prior self._log_likelihood = log_likelihood - self.parameters = self._log_likelihood.parameters + self._parameters = self._log_likelihood.parameters self._has_separable_problem = self._log_likelihood.has_separable_problem if log_prior is None: - self._prior = JointLogPrior(*self.parameters.priors()) + self._prior = JointLogPrior(*self._parameters.priors()) else: self._prior = log_prior @@ -273,16 +273,16 @@ def compute( if calculate_grad: if isinstance(self._prior, BasePrior): - log_prior, dp = self._prior.logpdfS1(self.parameters.current_value()) + log_prior, dp = self._prior.logpdfS1(self._parameters.current_value()) else: # Compute log prior first - log_prior = self._prior.logpdf(self.parameters.current_value()) + log_prior = self._prior.logpdf(self._parameters.current_value()) # Compute a finite difference approximation of the gradient of the log prior - delta = self.parameters.initial_value() * self.gradient_step + delta = self._parameters.initial_value() * self.gradient_step dp = [] - for parameter, step_size in zip(self.parameters, delta): + for parameter, step_size in zip(self._parameters, delta): param_value = parameter.value upper_value = param_value * (1 + step_size) lower_value = param_value * (1 - step_size) @@ -295,7 +295,7 @@ def compute( ) dp.append(gradient) else: - log_prior = self._prior.logpdf(self.parameters.current_value()) + log_prior = self._prior.logpdf(self._parameters.current_value()) if not np.isfinite(log_prior).any(): return (-np.inf, -self.grad_fail) if calculate_grad else -np.inf diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 23d82b89d..fe68e5b17 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -34,7 +34,7 @@ 
class BaseCost: """ def __init__(self, problem: Optional[BaseProblem] = None): - self.parameters = Parameters() + self._parameters = Parameters() self.transformation = None self.problem = problem self.verbose = False @@ -44,17 +44,17 @@ def __init__(self, problem: Optional[BaseProblem] = None): self._de = 1.0 if isinstance(self.problem, BaseProblem): self._target = self.problem.target - self.parameters.join(self.problem.parameters) + self._parameters.join(self.problem.parameters) self.n_outputs = self.problem.n_outputs self.signal = self.problem.signal - self.transformation = self.parameters.construct_transformation() + self.transformation = self._parameters.construct_transformation() self._has_separable_problem = True self.grad_fail = None self.set_fail_gradient() @property def n_parameters(self): - return len(self.parameters) + return len(self._parameters) @property def has_separable_problem(self): @@ -95,8 +95,8 @@ def __call__( self.has_transform = self.transformation is not None and apply_transform if self.has_transform: inputs = self.transformation.to_model(inputs) - inputs = self.parameters.verify(inputs) - self.parameters.update(values=list(inputs.values())) + inputs = self._parameters.verify(inputs) + self._parameters.update(values=list(inputs.values())) y, dy = None, None if self._has_separable_problem: @@ -189,6 +189,10 @@ def join_parameters(self, parameters): Setter for joining parameters. This method sets the fail gradient if the join adds parameters. """ original_n_params = self.n_parameters - self.parameters.join(parameters) + self._parameters.join(parameters) if original_n_params != self.n_parameters: self.set_fail_gradient() + + @property + def parameters(self): + return self._parameters diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index 9e8c83d37..19b8f458a 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -329,7 +329,7 @@ def compute( float The observer cost (negative of the log likelihood). """ - inputs = self.parameters.as_dict() + inputs = self._parameters.as_dict() log_likelihood = self._observer.log_likelihood( self._target, self._observer.domain_data, inputs ) diff --git a/tests/unit/test_sampling.py b/tests/unit/test_sampling.py index e854d24ab..76f1a668f 100644 --- a/tests/unit/test_sampling.py +++ b/tests/unit/test_sampling.py @@ -215,8 +215,8 @@ def test_multi_log_pdf(self, log_posterior, x0, chains): # Test incorrect number of parameters new_multi_log_posterior = copy.copy(log_posterior) - new_multi_log_posterior.parameters = [ - new_multi_log_posterior.parameters[ + new_multi_log_posterior._parameters = [ + new_multi_log_posterior._parameters[ "Positive electrode active material volume fraction" ] ]
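
The series is easiest to read end-to-end. What follows is a minimal usage sketch, illustrative only and not part of the patches above, assuming just the public API they exercise: parameters carry transformations, the cost builds a composed transformation from them, and a call with apply_transform=True evaluates the cost at a search-space point equivalent to a model-space point. The model, parameter names, and fixture values mirror tests/unit/test_cost_transformations.py.

    import numpy as np

    import pybop

    # Build a small fitting problem (values mirror the unit-test fixtures).
    model = pybop.lithium_ion.SPM()
    parameters = pybop.Parameters(
        pybop.Parameter(
            "Negative electrode active material volume fraction",
            prior=pybop.Gaussian(0.5, 0.01),
            bounds=[0.375, 0.625],
            transformation=pybop.ScaledTransformation(coefficient=-2.5, intercept=1),
        ),
        pybop.Parameter(
            "Positive electrode Bruggeman coefficient (electrode)",
            prior=pybop.Gaussian(1, 0.1),
            transformation=pybop.LogTransformation(),
        ),
    )
    solution = model.predict(
        experiment=pybop.Experiment(["Discharge at 1C for 1 minutes (6 second period)"])
    )
    dataset = pybop.Dataset(
        {
            "Time [s]": solution["Time [s]"].data,
            "Current function [A]": solution["Current [A]"].data,
            "Voltage [V]": solution["Terminal voltage [V]"].data,
        }
    )
    problem = pybop.FittingProblem(model, parameters, dataset)
    cost = pybop.SumSquaredError(problem)

    # Equivalent points: the scaled parameter maps to search space as
    # -2.5 * (x + 1) and the log parameter as log(x), matching the fixtures.
    x_model = [0.5, 1]
    x_search = [(x_model[0] + 1) * -2.5, np.log(1)]
    np.testing.assert_allclose(cost(x_model), cost(x_search, apply_transform=True))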
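
A second sketch, under the same assumptions, of why PATCH 1 rebuilds the transformation and PATCH 3 resets the fail gradient inside join_parameters(): GaussianLogLikelihood joins one sigma parameter per output signal after BaseCost.__init__ has already built the composed transformation, so the parameter vector, the transformation, and grad_fail each gain a dimension. The sigma value 0.002 is the unit-test fixture value; sigma carries no transformation, so the same value is appended in both spaces. This continues from the snippet above.

    # One output signal ("Voltage [V]") means one extra sigma parameter.
    likelihood = pybop.GaussianLogLikelihood(problem)
    assert likelihood.n_parameters == len(parameters) + 1

    # The rebuilt transformation covers the sigma dimension; sigma itself is
    # untransformed, so the same value is appended in both spaces.
    np.testing.assert_allclose(
        likelihood(x_model + [0.002]),
        likelihood(x_search + [0.002], apply_transform=True),
    )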
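
Finally, a sketch of the guard PATCH 2 adds to Parameters.get_bounds(): when no explicit bounds are given, defaults are derived from the prior, and a Gaussian prior can produce a negative lower default that LogTransformation maps to NaN — get_bounds now raises a ValueError rather than returning unusable bounds. Values mirror test_parameters_transformation above.

    params = pybop.Parameters(
        pybop.Parameter(
            "Negative electrode active material volume fraction",
            prior=pybop.Gaussian(0.01, 0.2),
            transformation=pybop.LogTransformation(),
        )
    )

    # The default lower bound from Gaussian(0.01, 0.2) is negative, and
    # log(negative) is NaN, so the transformed bounds are rejected.
    try:
        params.get_bounds(apply_transform=True)
    except ValueError as err:
        print(err)  # Transformed bounds resulted in NaN values. ...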