
Commit

Merge branch 'main' into hotfix/2.0.4
sblauth committed Jun 9, 2023
2 parents d011f74 + 6c4f3be commit 5e5fba4
Showing 6 changed files with 64 additions and 17 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.rst
@@ -10,6 +10,8 @@ of the maintenance releases, please take a look at
2.1.0 (in development)
----------------------

* Add the keyword arguments :py:`pre_callback` and :py:`post_callback` to define callbacks when an optimization problem is instantiated.

* New configuration file parameters:

* Section LineSearch
@@ -19,7 +19,7 @@

from __future__ import annotations

from typing import List, Optional, TYPE_CHECKING, Union
from typing import Callable, List, Optional, TYPE_CHECKING, Union

import fenics
import numpy as np
@@ -87,6 +87,8 @@ def __init__(
]
] = None,
preconditioner_forms: Optional[Union[List[ufl.Form], ufl.Form]] = None,
pre_callback: Optional[Callable] = None,
post_callback: Optional[Callable] = None,
) -> None:
r"""Initializes self.
@@ -142,6 +144,10 @@ def __init__(
preconditioner_forms: The list of forms for the preconditioner. The default
is `None`, so that the preconditioner matrix is the same as the system
matrix.
pre_callback: A function (without arguments) that will be called before each
solve of the state system.
post_callback: A function (without arguments) that will be called after the
computation of the gradient.
Examples:
Examples of how to use this class can be found in the :ref:`tutorial
@@ -161,6 +167,8 @@ def __init__(
gradient_ksp_options=gradient_ksp_options,
desired_weights=desired_weights,
preconditioner_forms=preconditioner_forms,
pre_callback=pre_callback,
post_callback=post_callback,
)

self.db.function_db.controls = _utils.enlist(controls)
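As a minimal illustrative sketch (not part of this commit), the two new keyword arguments documented above could be used as follows; the names `F`, `bcs`, `J`, `y`, `u`, and `p` are hypothetical placeholders for an existing state form, boundary conditions, cost functional, state, control, and adjoint variable:

    import cashocs

    def pre_callback():
        # Called before each solve of the state system.
        print("About to solve the state system.")

    def post_callback():
        # Called after each computation of the gradient.
        print("Finished a gradient computation.")

    # F, bcs, J, y, u, p are placeholders for a concrete problem setup.
    ocp = cashocs.OptimalControlProblem(
        F,
        bcs,
        J,
        y,
        u,
        p,
        pre_callback=pre_callback,
        post_callback=post_callback,
    )
    ocp.solve()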
10 changes: 10 additions & 0 deletions cashocs/_optimization/optimization_problem.py
@@ -104,6 +104,8 @@ def __init__(
temp_dict: Optional[Dict] = None,
initial_function_values: Optional[List[float]] = None,
preconditioner_forms: Optional[Union[List[ufl.Form], ufl.Form]] = None,
pre_callback: Optional[Callable] = None,
post_callback: Optional[Callable] = None,
) -> None:
r"""Initializes self.
@@ -154,6 +156,10 @@ def __init__(
preconditioner_forms: The list of forms for the preconditioner. The default
is `None`, so that the preconditioner matrix is the same as the system
matrix.
pre_callback: A function (without arguments) that will be called before each
solve of the state system.
post_callback: A function (without arguments) that will be called after the
computation of the gradient.
Notes:
If one uses a single PDE constraint, the inputs can be the objects
@@ -220,6 +226,10 @@ def __init__(
self.bcs_list,
self.preconditioner_forms,
)

self.db.callback.pre_callback = pre_callback
self.db.callback.post_callback = post_callback

if temp_dict is not None:
self.db.parameter_db.temp_dict.update(temp_dict)
self.db.parameter_db.is_remeshed = True
@@ -94,6 +94,8 @@ def __init__(
temp_dict: Optional[Dict] = None,
initial_function_values: Optional[List[float]] = None,
preconditioner_forms: Optional[Union[List[ufl.Form], ufl.Form]] = None,
pre_callback: Optional[Callable] = None,
post_callback: Optional[Callable] = None,
) -> None:
"""Initializes self.
@@ -152,6 +154,10 @@ def __init__(
preconditioner_forms: The list of forms for the preconditioner. The default
is `None`, so that the preconditioner matrix is the same as the system
matrix.
pre_callback: A function (without arguments) that will be called before each
solve of the state system.
post_callback: A function (without arguments) that will be called after the
computation of the gradient.
"""
super().__init__(
@@ -169,6 +175,8 @@ def __init__(
temp_dict=temp_dict,
initial_function_values=initial_function_values,
preconditioner_forms=preconditioner_forms,
pre_callback=pre_callback,
post_callback=post_callback,
)

if shape_scalar_product is None:
@@ -81,6 +81,8 @@ def __init__( # pylint: disable=unused-argument
] = None,
desired_weights: list[float] | None = None,
preconditioner_forms: Optional[Union[List[ufl.Form], ufl.Form]] = None,
pre_callback: Optional[Callable] = None,
post_callback: Optional[Callable] = None,
) -> None:
r"""Initializes the topology optimization problem.
@@ -136,6 +138,10 @@ def __init__( # pylint: disable=unused-argument
preconditioner_forms: The list of forms for the preconditioner. The default
is `None`, so that the preconditioner matrix is the same as the system
matrix.
pre_callback: A function (without arguments) that will be called before each
solve of the state system.
post_callback: A function (without arguments) that will be called after the
computation of the gradient.
"""
super().__init__(
@@ -151,6 +157,8 @@ def __init__( # pylint: disable=unused-argument
gradient_ksp_options=gradient_ksp_options,
desired_weights=desired_weights,
preconditioner_forms=preconditioner_forms,
pre_callback=pre_callback,
post_callback=post_callback,
)

self.db.parameter_db.problem_type = "topology"
@@ -142,10 +142,8 @@ def pre_callback():


# where we solve the Navier-Stokes equations with a lower Reynolds number of
# {python}`Re / 10.0`. Later on, we inject this {python}`pre_callback` into cashocs by
# using the
# {py:meth}`inject_pre_callback <cashocs.OptimalControlProblem.inject_pre_callback>`
# method.
# {python}`Re / 10.0`. Later on, we use this function as a keyword argument when defining
# the optimization problem.
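#
# As a rough, hypothetical sketch (this is not the demo's actual code), such a
# {python}`pre_callback` could simply solve the state system with the lowered Reynolds
# number, assuming a corresponding residual form {python}`F_low_re` has been set up with
# {python}`Re / 10.0`:
#
# :::{code-block} python
# def pre_callback():
#     # use the solution at Re / 10.0 as initial guess for the actual state solve
#     fenics.solve(F_low_re == 0, up, bcs)
# :::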
#
# Additionally, cashocs implements the functionality of also performing a pre-defined
# action after each gradient computation, given by a so-called {python}`post_callback`.
@@ -157,26 +155,39 @@ def post_callback():
print("Performing an action after computing the gradient.")


# Next, before we can inject these two callbacks, we first have to define the optimal
# control problem
# Next, we define the optimization problem and use the keyword arguments to define the
# callbacks via

ocp = cashocs.OptimalControlProblem(e, bcs, J, up, c, vq, config=config)

# Finally, we can inject both hooks via

ocp.inject_pre_callback(pre_callback)
ocp.inject_post_callback(post_callback)
ocp = cashocs.OptimalControlProblem(
e,
bcs,
J,
up,
c,
vq,
config=config,
pre_callback=pre_callback,
post_callback=post_callback,
)

# ::::{note}
# We can also save one line and use the code
# Alternatively, the pre- and post-callbacks can be injected into an already defined
# optimization problem with the code
#
# :::{code-block} python
# ocp.inject_pre_callback(pre_callback)
# ocp.inject_post_callback(post_callback)
# :::
#
# or, equivalently,
#
# :::{code-block} python
# ocp.inject_pre_post_hook(pre_hook, post_hook)
# :::
#
# which is equivalent to the above two lines.
# ::::
#
# And in the end, we solve the problem with

# In the end, we solve the problem with

ocp.solve()

