[tune] Multi-objective support for Optuna (#20489)
This PR adds multi-objective support to the Optuna searcher, including a test and an example; a condensed usage sketch follows the change summary below.

Co-authored-by: gjoliver <[email protected]>
Yard1 and gjoliver authored Nov 18, 2021
1 parent 7143d5d commit 0b14f38
Showing 8 changed files with 231 additions and 21 deletions.
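
At a glance, ``OptunaSearch`` now accepts equal-length lists for ``metric`` and ``mode``. A minimal sketch of the new interface, condensed from the example and test added below (the toy trainable and search space here are illustrative, not part of the diff):

from ray import tune
from ray.tune.suggest import ConcurrencyLimiter
from ray.tune.suggest.optuna import OptunaSearch

def trainable(config):
    # Any trainable works, as long as it reports every listed metric.
    tune.report(loss=config["x"] ** 2, gain=config["x"])

# Lists for metric/mode switch the searcher into multi-objective mode.
algo = ConcurrencyLimiter(
    OptunaSearch(metric=["loss", "gain"], mode=["min", "max"]),
    max_concurrent=4)

analysis = tune.run(
    trainable,
    search_alg=algo,
    config={"x": tune.uniform(0, 1)},
    num_samples=10)

# With multiple objectives there is no single best trial; query per metric.
print(analysis.get_best_config("loss", "min"))
print(analysis.get_best_config("gain", "max"))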
1 change: 1 addition & 0 deletions doc/source/tune/examples/index.rst
@@ -45,6 +45,7 @@ Search Algorithm Examples
- :doc:`/tune/examples/nevergrad_example`: Example script showing usage of :ref:`NevergradSearch <nevergrad>` [`Nevergrad website <https://github.com/facebookresearch/nevergrad>`__]
- :doc:`/tune/examples/optuna_example`: Example script showing usage of :ref:`OptunaSearch <tune-optuna>` [`Optuna website <https://optuna.org/>`__]
- :doc:`/tune/examples/optuna_define_by_run_example`: Example script showing usage of :ref:`OptunaSearch <tune-optuna>` [`Optuna website <https://optuna.org/>`__] with a define-by-run function
- :doc:`/tune/examples/optuna_multiobjective_example`: Example script showing usage of :ref:`OptunaSearch <tune-optuna>` [`Optuna website <https://optuna.org/>`__] for multi-objective optimization
- :doc:`/tune/examples/zoopt_example`: Example script showing usage of :ref:`ZOOptSearch <zoopt>` [`ZOOpt website <https://github.com/polixir/ZOOpt>`__]
- :doc:`/tune/examples/sigopt_example`: Example script showing usage of :ref:`SigOptSearch <sigopt>` [`SigOpt website <https://sigopt.com/>`__]
- :doc:`/tune/examples/hebo_example`: Example script showing usage of :ref:`HEBOSearch <tune-hebo>` [`HEBO website <https://github.com/huawei-noah/HEBO/tree/master/HEBO>`__]
2 changes: 1 addition & 1 deletion doc/source/tune/examples/optuna_example.rst
@@ -1,6 +1,6 @@
:orphan:

optuna_example
~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~

.. literalinclude:: /../../python/ray/tune/examples/optuna_example.py
6 changes: 6 additions & 0 deletions doc/source/tune/examples/optuna_multiobjective_example.rst
@@ -0,0 +1,6 @@
:orphan:

optuna_multiobjective_example
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. literalinclude:: /../../python/ray/tune/examples/optuna_multiobjective_example.py
9 changes: 9 additions & 0 deletions python/ray/tune/BUILD
@@ -670,6 +670,15 @@ py_test(
    args = ["--smoke-test"]
)

py_test(
    name = "optuna_multiobjective_example",
    size = "small",
    srcs = ["examples/optuna_multiobjective_example.py"],
    deps = [":tune_lib"],
    tags = ["team:ml", "exclusive", "example"],
    args = ["--smoke-test"]
)

py_test(
    name = "pb2_example",
    size = "medium",
@@ -3,7 +3,7 @@
It also checks that it is usable with a separate scheduler.
For an example of using a Tune search space, see
:doc:`/tune/examples/optuna_example`.
:doc:`/tune/examples/hyperopt_example`.
"""
import time

74 changes: 74 additions & 0 deletions python/ray/tune/examples/optuna_multiobjective_example.py
@@ -0,0 +1,74 @@
"""This example demonstrates the usage of Optuna with Ray Tune for
multi-objective optimization.
Please note that schedulers may not work correctly with multi-objective
optimization.
"""
import time

import ray
from ray import tune
from ray.tune.suggest import ConcurrencyLimiter
from ray.tune.suggest.optuna import OptunaSearch


def evaluation_fn(step, width, height):
return (0.1 + width * step / 100)**(-1) + height * 0.1


def easy_objective(config):
# Hyperparameters
width, height = config["width"], config["height"]

for step in range(config["steps"]):
# Iterative training function - can be any arbitrary training procedure
intermediate_score = evaluation_fn(step, width, height)
# Feed the score back back to Tune.
tune.report(
iterations=step,
loss=intermediate_score,
gain=intermediate_score * width)
time.sleep(0.1)


def run_optuna_tune(smoke_test=False):
algo = OptunaSearch(metric=["loss", "gain"], mode=["min", "max"])
algo = ConcurrencyLimiter(algo, max_concurrent=4)
analysis = tune.run(
easy_objective,
search_alg=algo,
num_samples=10 if smoke_test else 100,
config={
"steps": 100,
"width": tune.uniform(0, 20),
"height": tune.uniform(-100, 100),
# This is an ignored parameter.
"activation": tune.choice(["relu", "tanh"])
})

print("Best hyperparameters for loss found were: ",
analysis.get_best_config("loss", "min"))
print("Best hyperparameters for gain found were: ",
analysis.get_best_config("gain", "max"))


if __name__ == "__main__":
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
parser.add_argument(
"--server-address",
type=str,
default=None,
required=False,
help="The address of server to connect to if using "
"Ray Client.")
args, _ = parser.parse_known_args()
if args.server_address is not None:
ray.init(f"ray://{args.server_address}")
else:
ray.init(configure_logging=False)

run_optuna_tune(smoke_test=args.smoke_test)
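
Because ``gain`` is ``loss`` scaled by ``width``, the two objectives pull against each other and no single trial wins on both. A hedged post-processing sketch for inspecting the trade-off, assuming the standard ``ExperimentAnalysis.results_df`` pandas DataFrame exposes the reported metrics as columns:

# Hypothetical follow-up after analysis = tune.run(...) above.
df = analysis.results_df[["loss", "gain"]]

# A trial is (approximately) Pareto-optimal if no other trial has
# strictly lower loss and strictly higher gain at the same time.
def is_dominated(row):
    return ((df["loss"] < row["loss"]) & (df["gain"] > row["gain"])).any()

pareto_front = df[~df.apply(is_dominated, axis=1)]
print(pareto_front.sort_values("loss"))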
106 changes: 87 additions & 19 deletions python/ray/tune/suggest/optuna.py
@@ -3,6 +3,7 @@
import pickle
import functools
import warnings
from packaging import version
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

from ray.tune.result import DEFAULT_METRIC, TRAINING_ITERATION
@@ -76,6 +77,8 @@ class OptunaSearch(Searcher):
    You can pass any Optuna sampler, which will be used to generate
    hyperparameter suggestions.

    Multi-objective optimization is supported.

    Args:
        space (dict|Callable): Hyperparameter search space definition for
            Optuna's sampler. This can be either a :class:`dict` with
@@ -92,18 +95,23 @@ class OptunaSearch(Searcher):
            function. Instead, put the training logic inside the function
            or class trainable passed to ``tune.run``.
        metric (str): The training result objective value attribute. If None
            but a mode was passed, the anonymous metric `_metric` will be used
            per default.
        mode (str): One of {min, max}. Determines whether objective is
            minimizing or maximizing the metric attribute.
        metric (str|list): The training result objective value attribute. If
            None but a mode was passed, the anonymous metric ``_metric``
            will be used per default. Can be a list of metrics for
            multi-objective optimization.
        mode (str|list): One of {min, max}. Determines whether objective is
            minimizing or maximizing the metric attribute. Can be a list of
            modes for multi-objective optimization (corresponding to
            ``metric``).
        points_to_evaluate (list): Initial parameter suggestions to be run
            first. This is for when you already have some good parameters
            you want to run first to help the algorithm make better suggestions
            for future parameters. Needs to be a list of dicts containing the
            configurations.
        sampler (optuna.samplers.BaseSampler): Optuna sampler used to
            draw hyperparameter configurations. Defaults to ``TPESampler``.
            draw hyperparameter configurations. Defaults to ``MOTPESampler``
            for multi-objective optimization with Optuna<2.9.0, and
            ``TPESampler`` in every other case.
        seed (int): Seed to initialize sampler with. This parameter is only
            used when ``sampler=None``. In all other cases, the sampler
            you pass should be initialized with the seed already.
@@ -173,6 +181,31 @@ def define_search_space(trial: optuna.Trial):
        tune.run(trainable, search_alg=optuna_search)

    Multi-objective optimization is supported:

    .. code-block:: python

        from ray.tune.suggest.optuna import OptunaSearch
        import optuna

        space = {
            "a": optuna.distributions.UniformDistribution(6, 8),
            "b": optuna.distributions.LogUniformDistribution(1e-4, 1e-2),
        }

        # Note you have to specify metric and mode here instead of
        # in tune.run
        optuna_search = OptunaSearch(
            space,
            metric=["loss1", "loss2"],
            mode=["min", "max"])

        # Do not specify metric and mode here!
        tune.run(
            trainable,
            search_alg=optuna_search
        )

    You can pass configs that will be evaluated first using
    ``points_to_evaluate``:
@@ -224,8 +257,8 @@ def __init__(self,
                 space: Optional[Union[Dict[str, "OptunaDistribution"], List[
                     Tuple], Callable[["OptunaTrial"], Optional[Dict[
                         str, Any]]]]] = None,
                 metric: Optional[str] = None,
                 mode: Optional[str] = None,
                 metric: Optional[Union[str, List[str]]] = None,
                 mode: Optional[Union[str, List[str]]] = None,
                 points_to_evaluate: Optional[List[Dict]] = None,
                 sampler: Optional["BaseSampler"] = None,
                 seed: Optional[int] = None,
@@ -261,33 +294,57 @@ def __init__(self,
"You passed an initialized sampler to `OptunaSearch`. The "
"`seed` parameter has to be passed to the sampler directly "
"and will be ignored.")
elif sampler:
assert isinstance(
sampler, BaseSampler), ("You can only pass an instance of "
"`optuna.samplers.BaseSampler` "
"as a sampler to `OptunaSearcher`.")

self._sampler = sampler or ot.samplers.TPESampler(seed=seed)

assert isinstance(self._sampler, BaseSampler), \
"You can only pass an instance of `optuna.samplers.BaseSampler` " \
"as a sampler to `OptunaSearcher`."
self._sampler = sampler
self._seed = seed

self._ot_trials = {}
self._ot_study = None
if self._space:
self._setup_study(mode)

def _setup_study(self, mode: str):
def _setup_study(self, mode: Union[str, list]):
if self._metric is None and self._mode:
if isinstance(self._mode, list):
raise ValueError(
"If ``mode`` is a list (multi-objective optimization "
"case), ``metric`` must be defined.")
# If only a mode was passed, use anonymous metric
self._metric = DEFAULT_METRIC

pruner = ot.pruners.NopPruner()
storage = ot.storages.InMemoryStorage()

if self._sampler:
sampler = self._sampler
elif isinstance(mode, list) and version.parse(
ot.__version__) < version.parse("2.9.0"):
# MOTPESampler deprecated in Optuna>=2.9.0
sampler = ot.samplers.MOTPESampler(seed=self._seed)
else:
sampler = ot.samplers.TPESampler(seed=self._seed)

if isinstance(mode, list):
study_direction_args = dict(
directions=[
"minimize" if m == "min" else "maximize" for m in mode
], )
else:
study_direction_args = dict(
direction="minimize" if mode == "min" else "maximize", )

self._ot_study = ot.study.create_study(
storage=storage,
sampler=self._sampler,
sampler=sampler,
pruner=pruner,
study_name=self._study_name,
direction="minimize" if mode == "min" else "maximize",
load_if_exists=True)
load_if_exists=True,
**study_direction_args)

if self._points_to_evaluate:
validate_warmstart(
@@ -314,7 +371,7 @@ def set_search_properties(self, metric: Optional[str], mode: Optional[str],
        if mode:
            self._mode = mode

        self._setup_study(mode)
        self._setup_study(self._mode)
        return True

    def _suggest_from_define_by_run_func(
@@ -360,6 +417,7 @@ def suggest(self, trial_id: str) -> Optional[Dict]:
                    metric=self._metric,
                    mode=self._mode))
        if callable(self._space):
            # Define-by-run case
            if trial_id not in self._ot_trials:
                self._ot_trials[trial_id] = self._ot_study.ask()

@@ -378,6 +436,10 @@ def suggest(self, trial_id: str) -> Optional[Dict]:
        return unflatten_dict(params)

    def on_trial_result(self, trial_id: str, result: Dict):
        if isinstance(self.metric, list):
            # Optuna doesn't support incremental results
            # for multi-objective optimization
            return
        metric = result[self.metric]
        step = result[TRAINING_ITERATION]
        ot_trial = self._ot_trials[trial_id]
@@ -389,7 +451,13 @@ def on_trial_complete(self,
                          error: bool = False):
        ot_trial = self._ot_trials[trial_id]

        val = result.get(self.metric, None) if result else None
        if result:
            if isinstance(self.metric, list):
                val = [result.get(metric, None) for metric in self.metric]
            else:
                val = result.get(self.metric, None)
        else:
            val = None
        ot_trial_state = OptunaTrialState.COMPLETE
        if val is None:
            if error:
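The heart of the ``_setup_study`` change is the mapping from a Tune ``mode`` list to Optuna study ``directions``. For reference, a standalone sketch of the equivalent plain-Optuna usage (assumes Optuna >= 2.4, where ``directions=`` and ``Study.best_trials`` are available; the toy objective is illustrative):

import optuna

# ["min", "max"] on the Tune side becomes ["minimize", "maximize"].
mode = ["min", "max"]
study = optuna.create_study(
    directions=["minimize" if m == "min" else "maximize" for m in mode])

def objective(trial):
    x = trial.suggest_float("x", 0.0, 1.0)
    # One return value per direction: the first is minimized,
    # the second maximized.
    return x, x * (1.0 - x)

study.optimize(objective, n_trials=20)

# Multi-objective studies expose a Pareto front rather than a single best
# trial, which is also why on_trial_result above skips intermediate results.
print([t.values for t in study.best_trials])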
52 changes: 52 additions & 0 deletions python/ray/tune/tests/test_searchers.py
@@ -23,6 +23,10 @@ def _invalid_objective(config):
    tune.report(float(config[metric]) or 0.1)


def _multi_objective(config):
    tune.report(a=config["a"] * 100, b=config["b"] * -100, c=config["c"])


class InvalidValuesTest(unittest.TestCase):
    """
    Test searcher handling of invalid values (NaN, -inf, inf).
@@ -602,6 +606,54 @@ def testZOOpt(self):
        self._restore(searcher)


class MultiObjectiveTest(unittest.TestCase):
    """
    Test multi-objective optimization in searchers that support it.
    """

    def setUp(self):
        self.config = {
            "a": tune.uniform(0, 1),
            "b": tune.uniform(0, 1),
            "c": tune.uniform(0, 1)
        }

    def tearDown(self):
        pass

    @classmethod
    def setUpClass(cls):
        ray.init(num_cpus=4, num_gpus=0, include_dashboard=False)

    @classmethod
    def tearDownClass(cls):
        ray.shutdown()

    def testOptuna(self):
        from ray.tune.suggest.optuna import OptunaSearch
        from optuna.samplers import RandomSampler

        np.random.seed(1000)

        out = tune.run(
            _multi_objective,
            search_alg=OptunaSearch(
                sampler=RandomSampler(seed=1234),
                metric=["a", "b", "c"],
                mode=["max", "min", "max"],
            ),
            config=self.config,
            num_samples=16,
            reuse_actors=False)

        # _multi_objective reports b as config["b"] * -100, so minimizing
        # the "b" metric drives config["b"] toward 1.
        best_trial_a = out.get_best_trial("a", "max")
        self.assertGreaterEqual(best_trial_a.config["a"], 0.8)
        best_trial_b = out.get_best_trial("b", "min")
        self.assertGreaterEqual(best_trial_b.config["b"], 0.8)
        best_trial_c = out.get_best_trial("c", "max")
        self.assertGreaterEqual(best_trial_c.config["c"], 0.8)


if __name__ == "__main__":
    import pytest
    import sys
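
``get_best_trial`` ranks one metric at a time, so the Pareto front itself has to come from the underlying Optuna study. A hedged sketch reusing ``_multi_objective`` and the config from the test above (``_ot_study`` is a private attribute of the searcher, not a stable API; assumes Optuna >= 2.4):

from ray import tune
from ray.tune.suggest.optuna import OptunaSearch

searcher = OptunaSearch(metric=["a", "b", "c"], mode=["max", "min", "max"])
tune.run(
    _multi_objective,
    search_alg=searcher,
    config={
        "a": tune.uniform(0, 1),
        "b": tune.uniform(0, 1),
        "c": tune.uniform(0, 1)
    },
    num_samples=16)

# Private attribute -- illustrative only; may change between Ray versions.
for trial in searcher._ot_study.best_trials:
    print(trial.params, trial.values)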
