
Commit

Add a multiobjective adaptation for DE (#789)
jrapin authored Aug 11, 2020
1 parent 4f81eb8 commit e22212d
Showing 16 changed files with 188 additions and 71 deletions.
14 changes: 7 additions & 7 deletions .circleci/config.yml
@@ -28,9 +28,9 @@ commands:
- restore_cache:
name: "[all] Restore cache"
keys:
- v4-dependencies-{{ checksum "requirements/dev.txt" }}-{{ checksum "requirements/main.txt"}}-{{ checksum "requirements/bench.txt"}}
- v5-dependencies-{{ checksum "requirements/dev.txt" }}-{{ checksum "requirements/main.txt"}}-{{ checksum "requirements/bench.txt"}}
# fallback to installing main requirements
- v4-dependencies-main-{{ checksum "requirements/main.txt"}}
- v5-dependencies-main-{{ checksum "requirements/main.txt"}}

- run:
name: "[all] Install dependencies"
@@ -53,7 +53,7 @@ jobs:
- restore_cache:
name: "[no-extra] Restore cache"
keys:
- v4-dependencies-main-{{ checksum "requirements/main.txt"}}
- v5-dependencies-main-{{ checksum "requirements/main.txt"}}
# fallback to using the latest cache if no exact match is found

- run:
@@ -75,7 +75,7 @@ jobs:
name: "[no-extra] Save cache"
paths:
- ./venv
key: v4-dependencies-main-{{ checksum "requirements/main.txt"}}
key: v5-dependencies-main-{{ checksum "requirements/main.txt"}}

- run:
name: "[no-extra] Run basic tests (checking dependencies)"
@@ -90,7 +90,7 @@ jobs:
name: "[all] Save cache"
paths:
- ./venv
key: v4-dependencies-{{ checksum "requirements/dev.txt" }}-{{ checksum "requirements/main.txt"}}-{{ checksum "requirements/bench.txt"}}
key: v5-dependencies-{{ checksum "requirements/dev.txt" }}-{{ checksum "requirements/main.txt"}}-{{ checksum "requirements/bench.txt"}}

- run:
name: "[all] Print installation"
@@ -206,7 +206,7 @@ jobs:
- restore_cache:
name: "[all] Restore cache"
keys:
- v4-win-dependencies-{{ checksum "requirements/dev.txt" }}-{{ checksum "requirements/main.txt"}}-{{ checksum "requirements/bench.txt"}}
- v5-win-dependencies-{{ checksum "requirements/dev.txt" }}-{{ checksum "requirements/main.txt"}}-{{ checksum "requirements/bench.txt"}}

- run:
name: Setup nevergrad in virtualenv
@@ -221,7 +221,7 @@ jobs:
name: "[all] Save cache"
paths:
- ./venv
key: v4-win-dependencies-{{ checksum "requirements/dev.txt" }}-{{ checksum "requirements/main.txt"}}-{{ checksum "requirements/bench.txt"}}
key: v5-win-dependencies-{{ checksum "requirements/dev.txt" }}-{{ checksum "requirements/main.txt"}}-{{ checksum "requirements/bench.txt"}}

- run:
name: pytest
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -5,6 +5,7 @@
**Caution:** current `master` branch and `0.4.2.postX` version introduce tentative APIs which may be removed in the near future. Use version `0.4.2` for a more stable version.

- as an **experimental** feature, the `tell` method can now receive a list/array of losses for multi-objective optimization [#775](https://github.com/facebookresearch/nevergrad/pull/775). For now it is neither robust, nor scalable, nor stable, nor optimal, so be careful when using it. More information in the [documentation](https://facebookresearch.github.io/nevergrad/optimization.html#multiobjective-minimization-with-nevergrad).
- `DE` and its variants have been updated to make use of the multi-objective losses [#789](https://github.com/facebookresearch/nevergrad/pull/789). This is a **preliminary** fix since the initial `DE` implementation was ill-suited for this use case.
- `tell` argument `value` is renamed to `loss` for clarification [#774](https://github.com/facebookresearch/nevergrad/pull/774). This can be breaking when using named arguments!

## 0.4.2 (2020-08-04)
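A minimal sketch of the experimental multi-objective `tell` described in the changelog entry above, using `DE` (one of the optimizers adapted by this PR); the two quadratic losses and the budget are arbitrary choices for illustration:

    import nevergrad as ng

    # an int parametrization stands for an array of that dimension
    optimizer = ng.optimizers.DE(parametrization=2, budget=100)
    for _ in range(optimizer.budget):
        x = optimizer.ask()
        # experimental: a list/array of losses instead of a single float
        optimizer.tell(x, [sum(v**2 for v in x.value), sum((v - 1)**2 for v in x.value)])
    # the optimizer then exposes its current Pareto front
    for param in optimizer.pareto_front():
        print(param.value, param.losses)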
2 changes: 2 additions & 0 deletions docs/optimization.rst
@@ -245,6 +245,8 @@ We are currently working on a **new experimental API** allowing users to direct
:start-after: DOC_MULTIOBJ_OPT_0
:end-before: DOC_MULTIOBJ_OPT_1

Note that `DE` and its variants have been updated to make use of the multi-objective losses [#789](https://github.com/facebookresearch/nevergrad/pull/789). This is a **preliminary** fix since the initial `DE` implementation was ill-suited for this use case.


Reproducibility
---------------
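The note added to docs/optimization.rst above relies on the `multiobjective_adaptation` parameter of the `DifferentialEvolution` family, which this commit's benchmarks switch off for their no-adaptation baselines; a sketch (the name string is just a label):

    import nevergrad as ng

    # multiobjective_adaptation appears to default to True, given the explicit
    # opt-out used for the "-noadapt" baselines in the benchmarks
    de_noadapt = ng.families.DifferentialEvolution(multiobjective_adaptation=False).set_name("DE-noadapt")
    optimizer = de_noadapt(parametrization=2, budget=100)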
28 changes: 14 additions & 14 deletions nevergrad/benchmark/experiments.py
@@ -1131,25 +1131,25 @@ def multiobjective_example(seed: tp.Optional[int] = None) -> tp.Iterator[Experiment]:
def new_multiobjective_example(seed: tp.Optional[int] = None) -> tp.Iterator[Experiment]:
"""Optimization of 2 and 3 objective functions in Sphere, Ellipsoid, Cigar, Hm.
Dimension 6 and 7.
Budget 2000, 2400, 2800, 3200, 3600, 4000.
Budget 100 to 3200.
"""
seedg = create_seed_generator(seed)
optims = ["NaiveTBPSA", "PSO", "DE", "LhsDE", "RandomSearch", "NGO", "Shiwa", "DiagonalCMA",
"CMA", "OnePlusOne", "TwoPointsDE"]
optims: tp.List[tp.Any] = ["NaiveTBPSA", "PSO", "DE", "LhsDE", "RandomSearch", "NGO", "Shiwa", "DiagonalCMA",
"CMA", "OnePlusOne", "TwoPointsDE"]
optims += [ng.families.DifferentialEvolution(multiobjective_adaptation=False).set_name("DE-noadapt"),
ng.families.DifferentialEvolution(crossover="twopoints", multiobjective_adaptation=False).set_name("TwoPointsDE-noadapt")]
mofuncs: tp.List[MultiExperiment] = []
for name1 in ["sphere", "cigar"]:
for name2 in ["sphere", "cigar", "hm"]:
mofuncs.append(MultiExperiment([ArtificialFunction(name1, block_dimension=7),
ArtificialFunction(name2, block_dimension=7)],
upper_bounds=np.array((50., 50.))))
for name3 in ["sphere", "ellipsoid"]:
mofuncs.append(MultiExperiment([ArtificialFunction(name1, block_dimension=6),
ArtificialFunction(name3, block_dimension=6),
ArtificialFunction(name2, block_dimension=6)],
upper_bounds=np.array((100, 100, 1000.))))
for name1, name2 in itertools.product(["sphere"], ["sphere", "hm"]):
mofuncs.append(MultiExperiment([ArtificialFunction(name1, block_dimension=7),
ArtificialFunction(name2, block_dimension=7)],
upper_bounds=[100, 100]))
mofuncs.append(MultiExperiment([ArtificialFunction(name1, block_dimension=6),
ArtificialFunction("sphere", block_dimension=6),
ArtificialFunction(name2, block_dimension=6)],
upper_bounds=[100, 100, 100.]))
for mofunc in mofuncs:
for optim in optims:
for budget in list(range(2000, 4001, 400)):
for budget in [100, 200, 400, 800, 1600, 3200]:
for nw in [1, 100]:
yield Experiment(mofunc, optim, budget=budget, num_workers=nw, seed=next(seedg))

8 changes: 6 additions & 2 deletions nevergrad/benchmark/plotting.py
@@ -313,8 +313,12 @@ def create_plots(
description = description[:140] + hash_ + description[-140:]
out_filepath = output_folder / "xpresults{}{}.png".format("_" if description else "", description.replace(":", ""))
data = XpPlotter.make_data(subdf)
xpplotter = XpPlotter(data, title=description, name_style=name_style, xaxis=xpaxis)
xpplotter.save(out_filepath)
try:
xpplotter = XpPlotter(data, title=description, name_style=name_style, xaxis=xpaxis)
except Exception as e:
warnings.warn(f"Bypassing error in xpplotter:\n{e}", RuntimeWarning)
else:
xpplotter.save(out_filepath)
plt.close("all")


10 changes: 10 additions & 0 deletions nevergrad/benchmark/test_xpbase.py
@@ -130,3 +130,13 @@ def test_equality() -> None:
xp1 = xpbase.Experiment(func, optimizer="OnePlusOne", budget=300, num_workers=2)
xp2 = xpbase.Experiment(func, optimizer="RandomSearch", budget=300, num_workers=2)
assert xp1 != xp2


def test_multiobjective_experiment() -> None:
mofunc = MultiExperiment([ArtificialFunction("sphere", block_dimension=7),
ArtificialFunction("cigar", block_dimension=7)],
upper_bounds=np.array((50., 50.)))
xp = xpbase.Experiment(mofunc, optimizer="TwoPointsDE", budget=100, num_workers=1)
summary = xp.run()
loss: float = summary["loss"]
assert loss < 1e9
2 changes: 1 addition & 1 deletion nevergrad/benchmark/xpbase.py
@@ -246,7 +246,7 @@ def _run_with_error(self, callbacks: tp.Optional[tp.Dict[str, obase._OptimCallBa
self._optimizer,
pfunc,
batch_mode=executor.batch_mode,
executor=executor
executor=executor,
)
except Exception as e: # pylint: disable=broad-except
self.recommendation = self._optimizer.provide_recommendation() # get the recommendation anyway
29 changes: 19 additions & 10 deletions nevergrad/optimization/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
from nevergrad.common import tools as ngtools
from nevergrad.common.decorators import Registry
from . import utils
from .multiobjective import HypervolumePareto
from . import multiobjective as mobj


registry: Registry[tp.Union["ConfiguredOptimizer", tp.Type["Optimizer"]]] = Registry()
@@ -112,12 +112,14 @@ def __init__(self, parametrization: IntOrParameter, budget: tp.Optional[int] = None
num_workers=num_workers, dimension=self.parametrization.dimension
)
# multiobjective
self._hypervolume_pareto: tp.Optional[HypervolumePareto] = None
self._MULTIOBJECTIVE_AUTO_BOUND = mobj.AUTO_BOUND
self._hypervolume_pareto: tp.Optional[mobj.HypervolumePareto] = None
# instance state
self._asked: tp.Set[str] = set()
self._first_tell_done = False # set to True at the beginning of the first tell
self._suggestions: tp.Deque[p.Parameter] = deque()
self._num_ask = 0
self._num_tell = 0
self._num_tell = 0 # increases after each successful tell
self._num_tell_not_asked = 0
self._callbacks: tp.Dict[str, tp.List[tp.Any]] = {}
# to make optimize function stoppable halfway through
Expand All @@ -139,7 +141,7 @@ def dimension(self) -> int:

@property
def num_objectives(self) -> int:
if not self._num_tell and self._hypervolume_pareto is None:
if not self._first_tell_done:
raise RuntimeError('Unknown number of objectives, provide a "tell" first.')
return 1 if self._hypervolume_pareto is None else self._hypervolume_pareto.num_objectives

@@ -184,9 +186,13 @@ def pareto_front(
--------
list
the list of Parameter of the pareto front
Note
----
During non-multiobjective optimization, this returns the current pessimistic best
"""
if self._hypervolume_pareto is None:
raise RuntimeError("No pareto front with a single objective")
return [self.current_bests["pessimistic"].parameter]
return self._hypervolume_pareto.pareto_front(size=size, subset=subset, subset_tentatives=subset_tentatives)

def dump(self, filepath: tp.Union[str, Path]) -> None:
@@ -302,16 +308,14 @@ def tell(self, candidate: p.Parameter, loss: tp.Loss) -> None:
)
# checks are done, start processing
candidate.freeze() # make sure it is not modified somewhere
# call callbacks for logging etc...
for callback in self._callbacks.get("tell", []):
callback(self, candidate, loss)
self._first_tell_done = True
# add reference if provided
if isinstance(candidate, p.MultiobjectiveReference):
if self._hypervolume_pareto is not None:
raise RuntimeError("MultiobjectiveReference can only be provided before the first tell.")
if not isinstance(loss, np.ndarray):
raise RuntimeError("MultiobjectiveReference must only be used for multiobjective losses")
self._hypervolume_pareto = HypervolumePareto(upper_bounds=loss)
self._hypervolume_pareto = mobj.HypervolumePareto(upper_bounds=loss)
if candidate.value is None:
return
candidate = candidate.value
@@ -320,8 +324,13 @@ def tell(self, candidate: p.Parameter, loss: tp.Loss) -> None:
candidate._losses = loss
if not isinstance(loss, float):
loss = self._preprocess_multiobjective(candidate)
# call callbacks for logging etc...
candidate.loss = loss
assert isinstance(loss, float)
for callback in self._callbacks.get("tell", []):
# multiobjective reference is not handled :s
# but this allows obtaining both scalar and multiobjective loss (through losses)
callback(self, candidate, loss)
if isinstance(loss, float):
self._update_archive_and_bests(candidate, loss)
if candidate.uid in self._asked:
@@ -334,7 +343,7 @@ def tell(self, candidate: p.Parameter, loss: tp.Loss) -> None:

def _preprocess_multiobjective(self, candidate: p.Parameter) -> tp.FloatLoss:
if self._hypervolume_pareto is None:
self._hypervolume_pareto = HypervolumePareto()
self._hypervolume_pareto = mobj.HypervolumePareto(auto_bound=self._MULTIOBJECTIVE_AUTO_BOUND)
return self._hypervolume_pareto.add(candidate)

def _update_archive_and_bests(self, candidate: p.Parameter, loss: tp.FloatLoss) -> None:
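A sketch exercising the `MultiobjectiveReference` handling added above, with bounds borrowed from this commit's tests; note the loss must be an `np.ndarray` here, per the check in `tell`:

    import numpy as np
    import nevergrad as ng

    optimizer = ng.optimizers.TwoPointsDE(parametrization=2, budget=100)
    # optional upper bounds for the hypervolume; only valid before the first tell
    optimizer.tell(ng.p.MultiobjectiveReference(), np.array([50.0, 50.0]))
    x = optimizer.ask()
    optimizer.tell(x, [1.0, 2.5])  # from here on, num_objectives is 2
    print(optimizer.num_objectives)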
9 changes: 6 additions & 3 deletions nevergrad/optimization/callbacks.py
@@ -8,9 +8,9 @@
import warnings
import inspect
import datetime
import typing as tp
from pathlib import Path
import numpy as np
import nevergrad.common.typing as tp
from nevergrad.parametrization import parameter as p
from nevergrad.parametrization import helpers
from . import base
@@ -80,7 +80,7 @@ def __init__(self, filepath: tp.Union[str, Path], append: bool = True, order: int
self._filepath.unlink() # missing_ok argument added in python 3.8
self._filepath.parent.mkdir(exist_ok=True, parents=True)

def __call__(self, optimizer: base.Optimizer, candidate: p.Parameter, value: float) -> None:
def __call__(self, optimizer: base.Optimizer, candidate: p.Parameter, loss: tp.FloatLoss) -> None:
data = {"#parametrization": optimizer.parametrization.name,
"#optimizer": optimizer.name,
"#session": self._session,
@@ -91,7 +91,10 @@ def __call__(self, optimizer: base.Optimizer, candidate: p.Parameter, value: float
"#lineage": candidate.heritage["lineage"],
"#generation": candidate.generation,
"#parents_uids": [],
"#loss": value}
"#loss": loss}
if optimizer.num_objectives > 1: # multiobjective losses
data.update({f"#losses#{k}": val for k, val in enumerate(candidate.losses)})
data["#pareto-length"] = len(optimizer.pareto_front())
if hasattr(optimizer, "_configured_optimizer"):
configopt = optimizer._configured_optimizer # type: ignore
if isinstance(configopt, base.ConfiguredOptimizer):
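A sketch of how the new log columns would surface, assuming the usual way of attaching a `ParametersLogger` through `register_callback` and that `minimize` forwards the list of losses to `tell` as described in the changelog; the filename and losses are arbitrary:

    import nevergrad as ng

    logger = ng.callbacks.ParametersLogger("mo_log.json", append=False)
    optimizer = ng.optimizers.DE(parametrization=2, budget=100)
    optimizer.register_callback("tell", logger)
    optimizer.minimize(lambda x: [float(sum(x**2)), float(sum((x - 1)**2))])
    # with more than one objective, each record now carries "#losses#0",
    # "#losses#1" and "#pareto-length" in addition to the scalar "#loss"
    records = logger.load_flattened()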