Commit 6e34754

Merge branch 'develop' into 418-parameter-info
NicolaCourtier authored Aug 1, 2024
2 parents 199ca3b + 5b163ae commit 6e34754
Showing 29 changed files with 1,499 additions and 345 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -3,6 +3,8 @@
## Features

+- [#418](https://github.com/pybop-team/PyBOP/issues/418) - Wraps the `get_parameter_info` method from PyBaMM to get a dictionary of parameter names and types.
+- [#413](https://github.com/pybop-team/PyBOP/pull/413) - Adds `DesignCost` functionality to `WeightedCost` class with additional tests.
- [#357](https://github.com/pybop-team/PyBOP/pull/357) - Adds the `Transformation()` class with `LogTransformation()`, `IdentityTransformation()`, `ScaledTransformation()`, and `ComposedTransformation()` implementations, with corresponding examples and tests.
- [#427](https://github.com/pybop-team/PyBOP/issues/427) - Adds the nbstripout pre-commit hook to remove unnecessary metadata from notebooks.
- [#327](https://github.com/pybop-team/PyBOP/issues/327) - Adds the `WeightedCost` subclass, defines when to evaluate a problem and adds the `spm_weighted_cost` example script.
- [#393](https://github.com/pybop-team/PyBOP/pull/383) - Adds Minkowski and SumofPower cost classes, with an example and corresponding tests.
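As a quick orientation for the headline entry, here is a minimal sketch of how the #418 wrapper might be called. The changelog only says it wraps PyBaMM's `get_parameter_info` and returns a dictionary of parameter names and types; exposing it as a model method is an assumption here, not confirmed by this diff.

```python
import pybop

# Assumed usage of the #418 wrapper: the call site (model method) and the
# exact return structure are assumptions based on the changelog wording.
model = pybop.lithium_ion.SPM()
for name, parameter_type in model.get_parameter_info().items():
    print(f"{name}: {parameter_type}")
```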
4 changes: 3 additions & 1 deletion examples/scripts/spm_CMAES.py
@@ -13,12 +13,14 @@
prior=pybop.Gaussian(6e-06, 0.1e-6),
bounds=[1e-6, 9e-6],
true_value=parameter_set["Negative particle radius [m]"],
+        transformation=pybop.LogTransformation(),
),
pybop.Parameter(
"Positive particle radius [m]",
prior=pybop.Gaussian(4.5e-06, 0.1e-6),
bounds=[1e-6, 9e-6],
true_value=parameter_set["Positive particle radius [m]"],
+        transformation=pybop.LogTransformation(),
),
)

@@ -42,7 +44,7 @@
# Generate problem, cost function, and optimisation class
problem = pybop.FittingProblem(model, parameters, dataset, signal=signal)
cost = pybop.SumSquaredError(problem)
-optim = pybop.CMAES(cost, max_iterations=100)
+optim = pybop.CMAES(cost, sigma0=0.25, max_unchanged_iterations=20, max_iterations=50)

# Run the optimisation
x, final_cost = optim.run()
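Why the new `transformation` arguments matter here: the particle radii sit near 1e-6 m, so an untransformed search is badly scaled. Below is a standalone sketch (plain NumPy, not PyBOP's API) of the log-space idea, under the assumption that the optimiser steps in the transformed space and maps back before evaluating the model.

```python
import numpy as np

# Log-transformation sketch: a step of size sigma0 in search (log) space
# becomes a multiplicative change in model space, keeping the search
# well-scaled for parameters of order 1e-6.
def to_search(x_model):
    return np.log(x_model)

def to_model(x_search):
    return np.exp(x_search)

radius = 6e-6                                  # initial radius guess [m]
stepped = to_model(to_search(radius) + 0.25)   # one sigma0-sized step
print(stepped / radius)                        # ~1.28x, independent of scale
```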
27 changes: 20 additions & 7 deletions examples/scripts/spm_weighted_cost.py
@@ -24,24 +24,37 @@

# Generate data
sigma = 0.001
-t_eval = np.arange(0, 900, 3)
-values = model.predict(t_eval=t_eval)
-corrupt_values = values["Voltage [V]"].data + np.random.normal(0, sigma, len(t_eval))
+init_soc = 0.5
+experiment = pybop.Experiment(
+    [
+        (
+            "Discharge at 0.5C for 3 minutes (3 second period)",
+            "Charge at 0.5C for 3 minutes (3 second period)",
+        ),
+    ]
+    * 2
+)
+values = model.predict(experiment=experiment, init_soc=init_soc)


+def noise(sigma):
+    return np.random.normal(0, sigma, len(values["Voltage [V]"].data))


# Form dataset
dataset = pybop.Dataset(
{
"Time [s]": t_eval,
"Time [s]": values["Time [s]"].data,
"Current function [A]": values["Current [A]"].data,
"Voltage [V]": corrupt_values,
"Voltage [V]": values["Voltage [V]"].data + noise(sigma),
}
)

# Generate problem, cost function, and optimisation class
-problem = pybop.FittingProblem(model, parameters, dataset)
+problem = pybop.FittingProblem(model, parameters, dataset, init_soc=init_soc)
cost1 = pybop.SumSquaredError(problem)
cost2 = pybop.RootMeanSquaredError(problem)
-weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[1, 100])
+weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[0.1, 1])

for cost in [weighted_cost, cost1, cost2]:
optim = pybop.IRPropMin(cost, max_iterations=60)
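The weighted cost used above is, conceptually, a fixed linear combination of its components: L(x) = sum_i w_i * L_i(x). A toy sketch of that contract, with plain callables standing in for the pybop cost objects:

```python
# WeightedCost sketch: the lambdas below are stand-ins for
# SumSquaredError and RootMeanSquaredError, not PyBOP code.
def weighted_cost(costs, weights, x):
    return sum(w * c(x) for c, w in zip(costs, weights))

sse = lambda x: (x - 2.0) ** 2
rmse = lambda x: abs(x - 2.0)
print(weighted_cost([sse, rmse], [0.1, 1.0], 1.5))  # 0.1*0.25 + 1*0.5 = 0.525
```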
22 changes: 10 additions & 12 deletions examples/scripts/spme_max_energy.py
@@ -9,12 +9,6 @@
# electrode widths, particle radii, volume fractions and
# separator width.

-# NOTE: This script can be easily adjusted to consider the volumetric
-# (instead of gravimetric) energy density by changing the line which
-# defines the cost and changing the output to:
-# print(f"Initial volumetric energy density: {cost(optim.x0):.2f} Wh.m-3")
-# print(f"Optimised volumetric energy density: {final_cost:.2f} Wh.m-3")

# Define parameter set and model
parameter_set = pybop.ParameterSet.pybamm("Chen2020", formation_concentrations=True)
model = pybop.lithium_ion.SPMe(parameter_set=parameter_set)
@@ -45,17 +39,21 @@
model, parameters, experiment, signal=signal, init_soc=init_soc
)

-# Generate cost function and optimisation class:
-cost = pybop.GravimetricEnergyDensity(problem)
+# Generate multiple cost functions and combine them.
+cost1 = pybop.GravimetricEnergyDensity(problem, update_capacity=True)
+cost2 = pybop.VolumetricEnergyDensity(problem, update_capacity=True)
+cost = pybop.WeightedCost(cost1, cost2, weights=[1, 1])

-# Run optimisation
optim = pybop.PSO(
cost, verbose=True, allow_infeasible_solutions=False, max_iterations=15
)

+# Run optimisation
x, final_cost = optim.run()
print("Estimated parameters:", x)
print(f"Initial gravimetric energy density: {cost(optim.x0):.2f} Wh.kg-1")
print(f"Optimised gravimetric energy density: {final_cost:.2f} Wh.kg-1")
print(f"Initial gravimetric energy density: {cost1(optim.x0):.2f} Wh.kg-1")
print(f"Optimised gravimetric energy density: {cost1(x):.2f} Wh.kg-1")
print(f"Initial volumetric energy density: {cost2(optim.x0):.2f} Wh.m-3")
print(f"Optimised volumetric energy density: {cost2(x):.2f} Wh.m-3")

# Plot the timeseries output
if cost.update_capacity:
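The two design objectives combined above report the same stored energy per unit mass and per unit volume. A back-of-envelope illustration with made-up cell numbers (not Chen2020 values):

```python
# For a fixed usable energy E [Wh], cell mass m [kg] and volume V [m^3]:
# gravimetric density is E/m (Wh.kg-1), volumetric density is E/V (Wh.m-3).
E, m, V = 15.0, 0.065, 2.4e-5   # illustrative numbers only
print(f"Gravimetric: {E / m:.1f} Wh.kg-1")   # ~230.8
print(f"Volumetric: {E / V:.1f} Wh.m-3")     # ~625000.0
```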
7 changes: 2 additions & 5 deletions examples/standalone/cost.py
@@ -21,7 +21,7 @@ class StandaloneCost(pybop.BaseCost):
Methods
-------
-    __call__(x, grad=None)
+    __call__(x)
Calculate the cost for a given parameter value.
"""

@@ -43,7 +43,7 @@ def __init__(self, problem=None):
)
self.x0 = self.parameters.initial_value()

-    def _evaluate(self, inputs, grad=None):
+    def _evaluate(self, inputs):
"""
Calculate the cost for a given parameter value.
@@ -54,9 +54,6 @@ def _evaluate(self, inputs, grad=None):
----------
inputs : Dict
The parameters for which to evaluate the cost.
-        grad : array-like, optional
-            Unused parameter, present for compatibility with gradient-based
-            optimizers.
Returns
-------
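The change above removes the unused `grad` plumbing, so a standalone cost now implements only `_evaluate(inputs)`. A toy stand-in (not a `pybop.BaseCost` subclass) showing the slimmed-down contract:

```python
# Minimal sketch of the updated contract: no grad placeholder, just the
# inputs dictionary mapping parameter names to values.
class StandaloneCostSketch:
    def _evaluate(self, inputs):
        x = inputs["x"]
        return (x - 3.0) ** 2    # arbitrary toy cost with minimum at x = 3

print(StandaloneCostSketch()._evaluate({"x": 2.5}))  # 0.25
```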
2 changes: 1 addition & 1 deletion examples/standalone/problem.py
@@ -42,7 +42,7 @@ def __init__(
)
self._target = {signal: self._dataset[signal] for signal in self.signal}

-    def evaluate(self, inputs):
+    def evaluate(self, inputs, **kwargs):
"""
Evaluate the model with the given parameters and return the signal.
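Adding `**kwargs` lets the base interface pass optional arguments that a standalone problem is free to ignore. A hypothetical illustration (not the PyBOP interface itself) of why that keeps subclasses forward-compatible:

```python
# Extra keyword options are accepted for compatibility and ignored here.
def evaluate(inputs, **kwargs):
    _ = kwargs
    return {"y": [2.0 * inputs["x"]]}

print(evaluate({"x": 1.5}, some_future_option=True))  # {'y': [3.0]}
```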
11 changes: 11 additions & 0 deletions pybop/__init__.py
@@ -55,6 +55,17 @@
#
from ._dataset import Dataset

+#
+# Transformation classes
+#
+from .transformation.base_transformation import Transformation
+from .transformation.transformations import (
+    IdentityTransformation,
+    ScaledTransformation,
+    LogTransformation,
+    ComposedTransformation,
+)

#
# Parameter classes
#
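A conceptual sketch of what the newly exported classes provide, using plain NumPy rather than the constructors (whose exact signatures are not shown in this diff): identity leaves a point unchanged, log maps between model and log space, scaling applies a linear map, and composition chains transformations.

```python
import numpy as np

# Composition sketch: apply log, then a 0.5x scaling, mirroring what a
# ComposedTransformation of LogTransformation and ScaledTransformation
# would plausibly do. Not PyBOP's API.
transforms = [np.log, lambda q: 0.5 * q]

def composed_to_search(x):
    for transform in transforms:
        x = transform(x)
    return x

print(composed_to_search(np.e**2))  # log -> 2.0, scaled -> 1.0
```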
71 changes: 25 additions & 46 deletions pybop/costs/_likelihoods.py
@@ -39,21 +39,19 @@ def __init__(self, problem: BaseProblem, sigma0: Union[list[float], float]):
self._offset = -0.5 * self.n_time_data * np.log(2 * np.pi * self.sigma2)
self._multip = -1 / (2.0 * self.sigma2)

-    def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> float:
+    def _evaluate(self, inputs: Inputs) -> float:
"""
Evaluates the Gaussian log-likelihood for the given parameters with known sigma.
"""
-        if not self.verify_prediction(self._current_prediction):
+        if not self.verify_prediction(self.y):
return -np.inf

e = np.asarray(
[
np.sum(
self._offset
+ self._multip
-                    * np.sum(
-                        (self._target[signal] - self._current_prediction[signal]) ** 2.0
-                    )
+                    * np.sum((self._target[signal] - self.y[signal]) ** 2.0)
)
for signal in self.signal
]
@@ -65,20 +63,15 @@ def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]:
"""
Calculates the log-likelihood and gradient.
"""
-        if not self.verify_prediction(self._current_prediction):
+        if not self.verify_prediction(self.y):
return -np.inf, -self._de * np.ones(self.n_parameters)

likelihood = self._evaluate(inputs)

r = np.asarray(
-            [
-                self._target[signal] - self._current_prediction[signal]
-                for signal in self.signal
-            ]
-        )
-        dl = np.sum(
-            (np.sum((r * self._current_sensitivities.T), axis=2) / self.sigma2), axis=1
+            [self._target[signal] - self.y[signal] for signal in self.signal]
        )
+        dl = np.sum((np.sum((r * self.dy.T), axis=2) / self.sigma2), axis=1)

return likelihood, dl
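For reference, the known-sigma likelihood evaluated above, written out as a standalone single-signal function built from the same cached terms (`_offset` and `_multip`) defined in `__init__`:

```python
import numpy as np

def gaussian_log_likelihood(target, prediction, sigma):
    # offset = -n/2 * log(2*pi*sigma^2); multip = -1/(2*sigma^2);
    # log-likelihood = offset + multip * sum(residuals^2).
    n = target.size
    residuals = target - prediction
    offset = -0.5 * n * np.log(2 * np.pi * sigma**2)
    multip = -1.0 / (2.0 * sigma**2)
    return offset + multip * np.sum(residuals**2)

rng = np.random.default_rng(8)
target = rng.normal(0.0, 0.1, size=100)
print(gaussian_log_likelihood(target, np.zeros(100), sigma=0.1))
```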

@@ -123,7 +116,7 @@ def __init__(
super().__init__(problem)
self._dsigma_scale = dsigma_scale
self._logpi = -0.5 * self.n_time_data * np.log(2 * np.pi)
-        self._fixed_problem = False  # keep problem evaluation within _evaluate
+        self._has_separable_problem = False

self.sigma = Parameters()
self._add_sigma_parameters(sigma0)
@@ -175,7 +168,7 @@ def dsigma_scale(self, new_value):
raise ValueError("dsigma_scale must be non-negative")
self._dsigma_scale = new_value

-    def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> float:
+    def _evaluate(self, inputs: Inputs) -> float:
"""
Evaluates the Gaussian log-likelihood for the given parameters.
@@ -196,20 +189,16 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> float:
if np.any(sigma <= 0):
return -np.inf

-        self._current_prediction = self.problem.evaluate(
-            self.problem.parameters.as_dict()
-        )
-        if not self.verify_prediction(self._current_prediction):
+        self.y = self.problem.evaluate(self.problem.parameters.as_dict())
+        if not self.verify_prediction(self.y):
return -np.inf

e = np.asarray(
[
np.sum(
self._logpi
- self.n_time_data * np.log(sigma)
-                    - np.sum(
-                        (self._target[signal] - self._current_prediction[signal]) ** 2.0
-                    )
+                    - np.sum((self._target[signal] - self.y[signal]) ** 2.0)
/ (2.0 * sigma**2.0)
)
for signal in self.signal
@@ -238,23 +227,16 @@ def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]:
if np.any(sigma <= 0):
return -np.inf, -self._de * np.ones(self.n_parameters)

-        self._current_prediction, self._current_sensitivities = self.problem.evaluateS1(
-            self.problem.parameters.as_dict()
-        )
-        if not self.verify_prediction(self._current_prediction):
+        self.y, self.dy = self.problem.evaluateS1(self.problem.parameters.as_dict())
+        if not self.verify_prediction(self.y):
return -np.inf, -self._de * np.ones(self.n_parameters)

likelihood = self._evaluate(inputs)

r = np.asarray(
-            [
-                self._target[signal] - self._current_prediction[signal]
-                for signal in self.signal
-            ]
-        )
-        dl = np.sum(
-            (np.sum((r * self._current_sensitivities.T), axis=2) / (sigma**2.0)), axis=1
+            [self._target[signal] - self.y[signal] for signal in self.signal]
        )
+        dl = np.sum((np.sum((r * self.dy.T), axis=2) / (sigma**2.0)), axis=1)
dsigma = (
-self.n_time_data / sigma + np.sum(r**2.0, axis=1) / (sigma**3.0)
) / self._dsigma_scale
@@ -296,17 +278,17 @@ def __init__(self, problem, likelihood, sigma0=None, gradient_step=1e-3):
):
raise ValueError(f"{self.likelihood} must be a subclass of BaseLikelihood")

-    def _evaluate(self, inputs: Inputs, grad=None) -> float:
+        self.parameters = self.likelihood.parameters
+        self._has_separable_problem = self.likelihood._has_separable_problem

+    def _evaluate(self, inputs: Inputs) -> float:
"""
Calculate the maximum a posteriori cost for a given set of parameters.
Parameters
----------
inputs : Inputs
The parameters for which to evaluate the cost.
-        grad : array-like, optional
-            An array to store the gradient of the cost function with respect
-            to the parameters.
Returns
-------
@@ -320,9 +302,9 @@ def _evaluate(self, inputs: Inputs, grad=None) -> float:
if not np.isfinite(log_prior).any():
return -np.inf

-        if self._fixed_problem:
-            self.likelihood._current_prediction = self._current_prediction
-        log_likelihood = self.likelihood._evaluate(inputs)
+        if self._has_separable_problem:
+            self.likelihood.y = self.y
+        log_likelihood = self.likelihood.evaluate(inputs)

posterior = log_likelihood + log_prior
return posterior
@@ -354,12 +336,9 @@ def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]:
if not np.isfinite(log_prior).any():
return -np.inf, -self._de * np.ones(self.n_parameters)

-        if self._fixed_problem:
-            (
-                self.likelihood._current_prediction,
-                self.likelihood._current_sensitivities,
-            ) = self._current_prediction, self._current_sensitivities
-        log_likelihood, dl = self.likelihood._evaluateS1(inputs)
+        if self._has_separable_problem:
+            self.likelihood.y, self.likelihood.dy = (self.y, self.dy)
+        log_likelihood, dl = self.likelihood.evaluateS1(inputs)

# Compute a finite difference approximation of the gradient of the log prior
delta = self.parameters.initial_value() * self.gradient_step
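The MAP gradient above splits into the likelihood gradient plus a finite-difference estimate of the log-prior gradient. A self-contained sketch of that second piece (central differences here for clarity; the class scales its step by each parameter's initial value):

```python
import numpy as np

# Finite-difference gradient of a log-prior, in the spirit of
# MAP._evaluateS1 (illustrative: central differences, relative step).
def fd_grad(f, theta, step=1e-3):
    grad = np.empty_like(theta)
    for i in range(theta.size):
        delta = np.zeros_like(theta)
        delta[i] = step * max(abs(theta[i]), 1.0)
        grad[i] = (f(theta + delta) - f(theta - delta)) / (2.0 * delta[i])
    return grad

log_prior = lambda t: -0.5 * np.sum(t**2)          # standard-normal prior
print(fd_grad(log_prior, np.array([0.5, -1.0])))   # ~ [-0.5, 1.0]
```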
