
Recommend a candidate when possible #668

Merged · 5 commits · May 11, 2020
4 changes: 4 additions & 0 deletions CHANGELOG.md
@@ -2,6 +2,10 @@
 
 ## master
 
+- `recommend` now provides an evaluated candidate when possible. For non-deterministic parametrizations such as `Choice`, this means we won't
+  resample: we will actually recommend the best evaluated past candidate [#668](https://github.com/facebookresearch/nevergrad/pull/668).
+  Still, some optimizers (like `TBPSA`) may recommend a non-evaluated point.
+
 
 ## 0.4.1 (2020-05-07)
 
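To make the new behavior concrete, here is a minimal sketch using the public API (the objective, budget, and choice values are illustrative, and `OnePlusOne` stands in for any optimizer that does not override `recommend`):

```python
import nevergrad as ng

def objective(lr: float) -> float:
    # Hypothetical objective over a discrete set of learning rates.
    return (lr - 0.1) ** 2

# `Choice` is non-deterministic: sampling it again can yield a different
# value, which is what made pre-PR recommendations resample.
param = ng.p.Choice([0.001, 0.01, 0.1, 0.3])
optimizer = ng.optimizers.OnePlusOne(parametrization=param, budget=50)

for _ in range(50):
    candidate = optimizer.ask()
    optimizer.tell(candidate, objective(*candidate.args))

# With this PR, the recommendation is the best evaluated candidate rather
# than a fresh (possibly never-evaluated) sample.
recommendation = optimizer.recommend()
print(recommendation.value)
```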
24 changes: 14 additions & 10 deletions nevergrad/optimization/base.py
@@ -326,12 +326,11 @@ def ask(self) -> p.Parameter:
             # only register actual asked points
             if candidate.satisfies_constraints():
                 break  # good to go!
-            else:
-                if self._penalize_cheap_violations or k == MAX_TENTATIVES - 2:  # a tell may help before last tentative
-                    self._internal_tell_candidate(candidate, float("Inf"))
-                self._num_ask += 1  # this is necessary for some algorithms which need new num to ask another point
-                if k == MAX_TENTATIVES - 1:
-                    warnings.warn(f"Could not bypass the constraint after {MAX_TENTATIVES} tentatives, sending candidate anyway.")
+            if self._penalize_cheap_violations or k == MAX_TENTATIVES - 2:  # a tell may help before last tentative
+                self._internal_tell_candidate(candidate, float("Inf"))
+            self._num_ask += 1  # this is necessary for some algorithms which need new num to ask another point
+            if k == MAX_TENTATIVES - 1:
+                warnings.warn(f"Could not bypass the constraint after {MAX_TENTATIVES} tentatives, sending candidate anyway.")
         if not is_suggestion:
             if candidate.uid in self._asked:
                 raise RuntimeError(
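For context, the retry loop above is driven by constraints registered on the parametrization. A small usage sketch (the constraint itself is made up):

```python
import nevergrad as ng

param = ng.p.Array(shape=(2,))
# Candidates violating the predicate make `ask` retry, up to MAX_TENTATIVES times.
param.register_cheap_constraint(lambda x: float(x[0]) > 0)
optimizer = ng.optimizers.OnePlusOne(parametrization=param, budget=20)
candidate = optimizer.ask()  # satisfies the constraint, or is sent anyway after a warning
```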
@@ -364,7 +363,10 @@ def recommend(self) -> p.Parameter:
         The candidate with minimal value. :code:`p.Parameters` have field :code:`args` and :code:`kwargs` which can be directly used
         on the function (:code:`objective_function(*candidate.args, **candidate.kwargs)`).
         """
-        return self.parametrization.spawn_child().set_standardized_data(self._internal_provide_recommendation(), deterministic=True)
+        recom_data = self._internal_provide_recommendation()  # pylint: disable=assignment-from-none
+        if recom_data is None:
+            return self.current_bests["pessimistic"].parameter
+        return self.parametrization.spawn_child().set_standardized_data(recom_data, deterministic=True)
 
     def _internal_tell_not_asked(self, candidate: p.Parameter, value: float) -> None:
         """Called whenever calling :code:`tell` on a candidate that was not "asked".
@@ -388,8 +390,10 @@ def _internal_tell(self, x: ArrayLike, value: float) -> None:
     def _internal_ask(self) -> ArrayLike:
         raise RuntimeError("Not implemented, should not be called.")
 
-    def _internal_provide_recommendation(self) -> ArrayLike:
-        return self.current_bests["pessimistic"].x
+    def _internal_provide_recommendation(self) -> tp.Optional[ArrayLike]:
+        """Override to provide a recommendation in standardized space
+        """
+        return None
 
     def minimize(
         self,
@@ -447,7 +451,7 @@ def minimize(
             func = func.multiobjective_function  # type: ignore
         #
         while remaining_budget or self._running_jobs or self._finished_jobs:
-            # # # # # Update optimizer with finished jobs # # # # #
+            # # # # # Update optimizer with finished jobs # # # # #
             # this is the first thing to do when resuming an existing optimization run
             # process finished
             if self._finished_jobs:
9 changes: 5 additions & 4 deletions nevergrad/optimization/differentialevolution.py
@@ -94,14 +94,15 @@ def __init__(
         self.population: tp.Dict[str, p.Parameter] = {}
         self.sampler: tp.Optional[sequences.Sampler] = None
 
-    def _internal_provide_recommendation(self) -> np.ndarray:  # This is NOT the naive version. We deal with noise.
+    def recommend(self) -> p.Parameter:  # This is NOT the naive version. We deal with noise.
         if self._config.recommendation != "noisy":
-            return self.current_bests[self._config.recommendation].x
+            return self.current_bests[self._config.recommendation].parameter
         med_fitness = np.median([p._meta["value"] for p in self.population.values() if "value" in p._meta])
         good_guys = [p for p in self.population.values() if p._meta.get("value", med_fitness + 1) < med_fitness]
         if not good_guys:
-            return self.current_bests["pessimistic"].x
-        return sum([g.get_standardized_data(reference=self.parametrization) for g in good_guys]) / len(good_guys)  # type: ignore
+            return self.current_bests["pessimistic"].parameter
+        data: tp.Any = sum([g.get_standardized_data(reference=self.parametrization) for g in good_guys]) / len(good_guys)
+        return self.parametrization.spawn_child().set_standardized_data(data, deterministic=True)
 
     def _internal_ask_candidate(self) -> p.Parameter:
         if len(self.population) < self.llambda:  # initialization phase
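The "noisy" branch above can be read as: average the standardized data of all population members whose observed value beats the population median. A standalone numpy sketch of that rule with toy data:

```python
import numpy as np

values = np.array([2.0, 0.4, 1.5, 0.3, 5.0])  # noisy fitness, one per member
data = np.random.randn(5, 3)                  # standardized-space data, one row per member
median = np.median(values)
good_guys = data[values < median]
# Average the better-than-median members; fall back to the single best otherwise.
recommendation = good_guys.mean(axis=0) if len(good_guys) else data[values.argmin()]
```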
20 changes: 10 additions & 10 deletions nevergrad/optimization/oneshot.py
@@ -21,14 +21,14 @@ def convex_limit(struct_points: np.ndarray) -> int:
     Returns the length of the maximum initial segment of points such that quasiconvexity is verified."""
     points = []
     d = len(struct_points[0])
-    if len(struct_points) < 2*d + 2:
+    if len(struct_points) < 2 * d + 2:
         return len(struct_points) // 2
-    for i in range(0, min(2*d + 2, len(struct_points)), 2):
+    for i in range(0, min(2 * d + 2, len(struct_points)), 2):
         points += [struct_points[i]]
     hull = ConvexHull(points, incremental=True)
     k = len(points)
-    for i in range(d+1, len(points)):
-        hull.add_points(points[i:(i+1)])
+    for i in range(d + 1, len(points)):
+        hull.add_points(points[i:(i + 1)])
         if i not in hull.vertices:
             k = i - 1
             break
@@ -66,7 +66,7 @@ def avg_of_k_best(archive: utils.Archive[utils.MultiValue], method: str = "dimfo
         k = max(1, int(len(archive) // (1.1 ** dimension)))
     elif method == "hull":
         k = convex_limit(np.concatenate(sorted(items, key=lambda indiv: archive[indiv[0]].get_estimation("pessimistic")), axis=0))
-        k = min(len(archive)// 4, min(k, int(len(archive) / (1.1 ** dimension))))
+        k = min(len(archive) // 4, min(k, int(len(archive) / (1.1 ** dimension))))
         # We might investigate the possibility to return the middle of the convex hull instead of averaging:
         # return hull_center(np.concatenate(sorted(items, key=lambda indiv: archive[indiv[0]].get_estimation("pessimistic")), axis=0), k)
     else:
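All the branches of `avg_of_k_best` reduce to "average the k best archive points", with "dimfourth", "exp" and "hull" only differing in how k is chosen. A toy sketch of the final averaging step:

```python
import numpy as np

def average_k_best(points: np.ndarray, losses: np.ndarray, k: int) -> np.ndarray:
    """Mean of the k points with the lowest loss -- the core of avg_of_k_best."""
    best = np.argsort(losses)[:k]
    return points[best].mean(axis=0)

points = np.random.randn(20, 4)
losses = (points ** 2).sum(axis=1)
print(average_k_best(points, losses, k=5))
```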
@@ -146,7 +146,7 @@ def _internal_ask(self) -> ArrayLike:
         self._opposable_data = scale * point
         return self._opposable_data  # type: ignore
 
-    def _internal_provide_recommendation(self) -> ArrayLike:
+    def _internal_provide_recommendation(self) -> tp.Optional[ArrayLike]:
         if self.stupid:
             return self._internal_ask()
         elif self.archive:
@@ -156,7 +156,7 @@ def _internal_provide_recommendation(self) -> ArrayLike:
                 return avg_of_k_best(self.archive, "exp")
             if self.recommendation_rule == "average_of_hull_best":
                 return avg_of_k_best(self.archive, "hull")
-        return super()._internal_provide_recommendation()
+        return None  # back to default
 
 
 class RandomSearchMaker(base.ConfiguredOptimizer):
@@ -246,7 +246,7 @@ def sampler(self) -> sequences.Sampler:
         samplers = {"Halton": sequences.HaltonSampler,
                     "Hammersley": sequences.HammersleySampler,
                     "LHS": sequences.LHSSampler,
-                    "Random":sequences.RandomSampler,
+                    "Random": sequences.RandomSampler,
                     }
         internal_budget = (budget + 1) // 2 if budget and (self.opposition_mode in ["quasi", "opposite"]) else budget
         self._sampler_instance = samplers[self._sampler](
@@ -283,10 +283,10 @@ def transf(x: np.ndarray) -> np.ndarray:
         assert self._opposable_data is not None
         return self._opposable_data
 
-    def _internal_provide_recommendation(self) -> ArrayLike:
+    def _internal_provide_recommendation(self) -> tp.Optional[ArrayLike]:
         if self.archive and self.recommendation_rule == "average_of_best":
             return avg_of_k_best(self.archive)
-        return super()._internal_provide_recommendation()
+        return None
 
 
 # pylint: disable=too-many-instance-attributes
36 changes: 14 additions & 22 deletions nevergrad/optimization/optimizerlib.py
@@ -32,7 +32,7 @@
 from .recastlib import *  # noqa: F403
 
 
-# # # # # optimizers # # # # #
+# # # # # optimizers # # # # #
 
 
 class _OnePlusOne(base.Optimizer):
@@ -189,7 +189,7 @@ def __init__(
 ).set_name("RecombiningPortfolioOptimisticNoisyDiscreteOnePlusOne", register=True)
 
 
-# pylint: too-many-arguments, too-many-instance-attributes
+# pylint: too-many-arguments,too-many-instance-attributes
 class _CMA(base.Optimizer):
 
     def __init__(
@@ -452,11 +452,12 @@ def __init__(self,
         self.parents: List[p.Parameter] = [self.parametrization]  # for transfering heritage (checkpoints in PBT)
         self.children: List[p.Parameter] = []
 
-    def _internal_provide_recommendation(self) -> ArrayLike:  # This is NOT the naive version. We deal with noise.
+    def recommend(self) -> p.Parameter:
         if self.naive:
-            return self.current_bests["optimistic"].x
+            return self.current_bests["optimistic"].parameter
         else:
-            return self.current_center
+            # This is NOT the naive version. We deal with noise.
+            return self.parametrization.spawn_child().set_standardized_data(self.current_center, deterministic=True)
 
     def _internal_ask_candidate(self) -> p.Parameter:
         mutated_sigma = self.sigma * np.exp(self._rng.normal(0, 1) / np.sqrt(self.dimension))
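This is one of the optimizers the changelog calls out: in the non-naive branch the recommendation is the distribution center, a point that typically was never evaluated. A quick check, assuming `TBPSA` defaults to the non-naive branch (`NaiveTBPSA` covers the other):

```python
import numpy as np
import nevergrad as ng

optimizer = ng.optimizers.TBPSA(parametrization=2, budget=100)
evaluated = []
for _ in range(100):
    cand = optimizer.ask()
    evaluated.append(cand.args[0])
    optimizer.tell(cand, float(np.sum(cand.args[0] ** 2)))

recom = optimizer.recommend()
# The center need not coincide with any evaluated candidate.
print(any(np.array_equal(recom.args[0], x) for x in evaluated))
```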
@@ -620,9 +621,6 @@ def _spawn_mutated_particle(self, particle: p.Parameter) -> p.Parameter:
         new_part.heritage["speed"] = speed
         return new_part
 
-    def _internal_provide_recommendation(self) -> ArrayLike:
-        return self._best.get_standardized_data(reference=self.parametrization)
-
     def _internal_tell_candidate(self, candidate: p.Parameter, value: float) -> None:
         uid = candidate.heritage["lineage"]
         if uid not in self.population:
@@ -852,9 +850,6 @@ def _internal_tell_candidate(self, candidate: p.Parameter, value: float) -> None
         local_candidate = opt.parametrization.spawn_child().set_standardized_data(local_data)
         opt.tell(local_candidate, value)
 
-    def _internal_provide_recommendation(self) -> ArrayLike:
-        return self.current_bests["pessimistic"].x
-
     def _internal_tell_not_asked(self, candidate: p.Parameter, value: float) -> None:
         raise base.TellNotAskedNotSupportedError
 
@@ -910,9 +905,6 @@ def _internal_tell_candidate(self, candidate: p.Parameter, value: float) -> None
         optim_index: int = candidate._meta["optim_index"]
         self.optims[optim_index].tell(candidate, value)
 
-    def _internal_provide_recommendation(self) -> ArrayLike:
-        return self.current_bests["pessimistic"].x
-
     def _internal_tell_not_asked(self, candidate: p.Parameter, value: float) -> None:
         raise base.TellNotAskedNotSupportedError
 
@@ -1274,11 +1266,11 @@ def _internal_tell_candidate(self, candidate: p.Parameter, value: float) -> None
             # so we should clean the "fake" function
             self._fake_function._registered.clear()
 
-    def _internal_provide_recommendation(self) -> ArrayLike:
+    def _internal_provide_recommendation(self) -> tp.Optional[ArrayLike]:
         if self.archive:
             return self._transform.backward(np.array([self.bo.max["params"][f"x{i}"] for i in range(self.dimension)]))
         else:
-            return super()._internal_provide_recommendation()
+            return None
 
 
 class ParametrizedBO(base.ConfiguredOptimizer):
@@ -1513,9 +1505,8 @@ def _internal_tell_candidate(self, candidate: p.Parameter, value: float) -> None
         optim_index = candidate._meta["optim_index"]
         self.optims[optim_index].tell(candidate, value)
 
-    def _internal_provide_recommendation(self) -> ArrayLike:
-        params = self.optims[0].provide_recommendation()
-        return params.get_standardized_data(reference=self.parametrization)
+    def recommend(self) -> p.Parameter:
+        return self.optims[0].recommend()
 
     def _internal_tell_not_asked(self, candidate: p.Parameter, value: float) -> None:
         raise base.TellNotAskedNotSupportedError
@@ -1570,11 +1561,12 @@ def __init__(
         self.parents: List[p.Parameter] = [self.parametrization]
         self.children: List[p.Parameter] = []
 
-    def _internal_provide_recommendation(self) -> ArrayLike:
+    def recommend(self) -> p.Parameter:
         if self.naive:
-            return self.current_bests["optimistic"].x
+            return self.current_bests["optimistic"].parameter
         else:
-            return self.current_center
+            # This is NOT the naive version. We deal with noise.
+            return self.parametrization.spawn_child().set_standardized_data(self.current_center, deterministic=True)
 
     def _internal_ask_candidate(self) -> p.Parameter:
         sigma_tmp = self.sigma
42 changes: 21 additions & 21 deletions nevergrad/optimization/recaster.py
@@ -6,7 +6,7 @@
 import time
 import warnings
 import threading
-from typing import Any, Callable, Dict, Optional, List
+import typing as tp
 import numpy as np
 from nevergrad.parametrization import parameter as p
 from nevergrad.common.typetools import ArrayLike
@@ -30,21 +30,21 @@ class Message:
     - "meta" attribute is only there for more implementation specific usages.
     """
 
-    def __init__(self, *args: Any, **kwargs: Any) -> None:
+    def __init__(self, *args: tp.Any, **kwargs: tp.Any) -> None:
         self.args = args
         self.kwargs = kwargs
-        self.meta: Dict[str, Any] = {}  # for none Thread caller purposes
-        self._result: Optional[Any] = None
+        self.meta: tp.Dict[str, tp.Any] = {}  # for none Thread caller purposes
+        self._result: tp.Optional[tp.Any] = None
         self.done = False
 
     @property
-    def result(self) -> Any:
+    def result(self) -> tp.Any:
         if not self.done:
             raise RuntimeError("Result was not provided (not done)")
         return self._result
 
     @result.setter
-    def result(self, value: Any) -> None:
+    def result(self, value: tp.Any) -> None:
         self.done = True
         self._result = value
 
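The `Message` handshake is small enough to exercise directly; a quick sketch of the property/setter behavior defined above:

```python
msg = Message(1.0, 2.0)  # the point to evaluate travels in args
assert not msg.done
try:
    _ = msg.result       # raises RuntimeError: no result was provided yet
except RuntimeError:
    pass
msg.result = 3.0         # the setter stores the value and flips `done`
assert msg.done and msg.result == 3.0
```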
@@ -70,16 +70,16 @@ class _MessagingThread(threading.Thread):
     """
 
     # pylint: disable=too-many-instance-attributes
-    def __init__(self, caller: Callable[..., Any], *args: Any, **kwargs: Any) -> None:
+    def __init__(self, caller: tp.Callable[..., tp.Any], *args: tp.Any, **kwargs: tp.Any) -> None:
         super().__init__()
-        self.messages: List[Message] = []
+        self.messages: tp.List[Message] = []
         self.call_count = 0
-        self.error: Optional[Exception] = None
+        self.error: tp.Optional[Exception] = None
         self._kill_order = False
         self._caller = caller
         self._args = args
         self._kwargs = kwargs
-        self.output: Optional[Any] = None  # TODO add a "done" attribute ?
+        self.output: tp.Optional[tp.Any] = None  # TODO add a "done" attribute ?
         self._last_evaluation_duration = 0.0001
 
     def run(self) -> None:
@@ -93,7 +93,7 @@ def run(self) -> None:
         except Exception as e:  # pylint: disable=broad-except
             self.error = e
 
-    def _fake_callable(self, *args: Any, **kwargs: Any) -> Any:
+    def _fake_callable(self, *args: tp.Any, **kwargs: tp.Any) -> tp.Any:
         """Appends a message in the messages attribute of the thread when
         the caller needs an evaluation, and wait for it to be provided
         to return it to the caller
@@ -122,23 +122,23 @@ class MessagingThread:
     """Encapsulate the inner thread, so that kill order is automatically called at deletion.
     """
 
-    def __init__(self, caller: Callable[..., Any], *args: Any, **kwargs: Any) -> None:
+    def __init__(self, caller: tp.Callable[..., tp.Any], *args: tp.Any, **kwargs: tp.Any) -> None:
         self._thread = _MessagingThread(caller, *args, **kwargs)
         self._thread.start()
 
     def is_alive(self) -> bool:
         return self._thread.is_alive()
 
     @property
-    def output(self) -> Any:
+    def output(self) -> tp.Any:
         return self._thread.output
 
     @property
-    def error(self) -> Optional[Exception]:
+    def error(self) -> tp.Optional[Exception]:
         return self._thread.error
 
     @property
-    def messages(self) -> List[Message]:
+    def messages(self) -> tp.List[Message]:
         return self._thread.messages
 
     def stop(self) -> None:
@@ -167,12 +167,12 @@ class RecastOptimizer(base.Optimizer):
 
     recast = True
 
-    def __init__(self, parametrization: IntOrParameter, budget: Optional[int] = None, num_workers: int = 1) -> None:
+    def __init__(self, parametrization: IntOrParameter, budget: tp.Optional[int] = None, num_workers: int = 1) -> None:
         super().__init__(parametrization, budget, num_workers=num_workers)
-        self._messaging_thread: Optional[MessagingThread] = None  # instantiate at runtime
+        self._messaging_thread: tp.Optional[MessagingThread] = None  # instantiate at runtime
         self._last_optimizer_duration = 0.0001
 
-    def get_optimization_function(self) -> Callable[[Callable[..., Any]], ArrayLike]:
+    def get_optimization_function(self) -> tp.Callable[[tp.Callable[..., tp.Any]], ArrayLike]:
         """Return an optimization procedure function (taking a function to optimize as input)
 
         Note
@@ -190,7 +190,7 @@ def _internal_ask_candidate(self) -> p.Parameter:
         if self._messaging_thread is None:
             self._messaging_thread = MessagingThread(self.get_optimization_function())
         # wait for a message
-        messages: List[Message] = []
+        messages: tp.List[Message] = []
         t0 = time.time()
         while not messages and self._messaging_thread.is_alive():
             messages = [m for m in self._messaging_thread.messages if not m.meta.get("asked", False)]
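For reference, a sketch of what a concrete subclass returns from `get_optimization_function`: a procedure that treats its single argument as the objective, which the thread replaces with `_fake_callable`. Here `scipy.optimize.minimize` stands in for whatever underlying solver is being recast, and the `x0` choice is illustrative:

```python
import numpy as np
import scipy.optimize

def get_optimization_function(self):  # method of a hypothetical RecastOptimizer subclass
    def optimization_procedure(objective):
        # Each call to `objective` blocks inside _fake_callable until the
        # main ask/tell loop provides the evaluation result.
        result = scipy.optimize.minimize(objective, x0=np.zeros(self.dimension), method="Nelder-Mead")
        return result.x
    return optimization_procedure
```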
@@ -233,15 +233,15 @@ def _internal_tell_candidate(self, candidate: p.Parameter, value: float) -> None
     def _internal_tell_not_asked(self, candidate: p.Parameter, value: float) -> None:
         raise base.TellNotAskedNotSupportedError
 
-    def _internal_provide_recommendation(self) -> base.ArrayLike:
+    def _internal_provide_recommendation(self) -> tp.Optional[base.ArrayLike]:
         """Returns the underlying optimizer output if provided (ie if the optimizer did finish)
         else the best pessimistic point.
         """
         if (self._messaging_thread is not None and
                 self._messaging_thread.output is not None):
             return self._messaging_thread.output  # type: ignore
         else:
-            return self.current_bests["pessimistic"].x
+            return None  # use default
 
     def __del__(self) -> None:
         # explicitly ask the thread to stop (better be safe :))