Commit

style updates (ray-project#4120)
hershg committed May 28, 2019
1 parent e3a1848 commit 9cb851c
Showing 10 changed files with 59 additions and 120 deletions.
5 changes: 2 additions & 3 deletions python/ray/tune/schedulers/async_hyperband.py
@@ -25,11 +25,10 @@ class AsyncHyperBandScheduler(FIFOScheduler):
             Note that you can pass in something non-temporal such as
             `training_iteration` as a measure of progress, the only requirement
             is that the attribute should increase monotonically.
-        metric (str): The training result objective value attribute. As
-            with `time_attr`, this may refer to any objective value. Stopping
+        metric (str): The training result objective value attribute. Stopping
             procedures will use this attribute.
         mode (str): One of {min, max}. Determines whether objective is
-            minimizing or maximizing the metric attribute
+            minimizing or maximizing the metric attribute.
         max_t (float): max time units per trial. Trials will be stopped after
             max_t time units (determined by time_attr) have passed.
         grace_period (float): Only stop trials at least this old in time.
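
Note: across the four scheduler docstrings touched by this commit, `metric` plus `mode` is the documented way to name the objective attribute and its direction. A minimal usage sketch, assuming a trainable that reports `episode_reward_mean`; the trainable and the numeric values here are illustrative, not part of this diff:

    from ray import tune
    from ray.tune.schedulers import AsyncHyperBandScheduler

    # `metric` names the result attribute the scheduler compares trials on;
    # `mode` says whether smaller ("min") or larger ("max") values are better.
    scheduler = AsyncHyperBandScheduler(
        time_attr="training_iteration",
        metric="episode_reward_mean",
        mode="max",
        max_t=100,        # stop trials after 100 time units
        grace_period=10)  # never stop a trial younger than 10 units

    # my_trainable is assumed to be defined elsewhere.
    tune.run(my_trainable, scheduler=scheduler)
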
5 changes: 2 additions & 3 deletions python/ray/tune/schedulers/hyperband.py
@@ -63,11 +63,10 @@ class HyperBandScheduler(FIFOScheduler):
             Note that you can pass in something non-temporal such as
             `training_iteration` as a measure of progress, the only requirement
             is that the attribute should increase monotonically.
-        metric (str): The training result objective value attribute. As
-            with `time_attr`, this may refer to any objective value. Stopping
+        metric (str): The training result objective value attribute. Stopping
             procedures will use this attribute.
         mode (str): One of {min, max}. Determines whether objective is
-            minimizing or maximizing the metric attribute
+            minimizing or maximizing the metric attribute.
         max_t (int): max time units per trial. Trials will be stopped after
             max_t time units (determined by time_attr) have passed.
             The scheduler will terminate trials after this time has passed.
5 changes: 2 additions & 3 deletions python/ray/tune/schedulers/median_stopping_rule.py
@@ -22,11 +22,10 @@ class MedianStoppingRule(FIFOScheduler):
             Note that you can pass in something non-temporal such as
             `training_iteration` as a measure of progress, the only requirement
             is that the attribute should increase monotonically.
-        metric (str): The training result objective value attribute. As
-            with `time_attr`, this may refer to any objective value. Stopping
+        metric (str): The training result objective value attribute. Stopping
             procedures will use this attribute.
         mode (str): One of {min, max}. Determines whether objective is
-            minimizing or maximizing the metric attribute
+            minimizing or maximizing the metric attribute.
         grace_period (float): Only stop trials at least this old in time.
             The units are the same as the attribute named by `time_attr`.
         min_samples_required (int): Min samples to compute median over.
5 changes: 2 additions & 3 deletions python/ray/tune/schedulers/pbt.py
@@ -120,11 +120,10 @@ class PopulationBasedTraining(FIFOScheduler):
             Note that you can pass in something non-temporal such as
             `training_iteration` as a measure of progress, the only requirement
             is that the attribute should increase monotonically.
-        metric (str): The training result objective value attribute. As
-            with `time_attr`, this may refer to any objective value. Stopping
+        metric (str): The training result objective value attribute. Stopping
             procedures will use this attribute.
         mode (str): One of {min, max}. Determines whether objective is
-            minimizing or maximizing the metric attribute
+            minimizing or maximizing the metric attribute.
         perturbation_interval (float): Models will be considered for
             perturbation at this interval of `time_attr`. Note that
             perturbation incurs checkpoint overhead, so you shouldn't set this
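
PopulationBasedTraining uses the same `metric`/`mode` pair to rank the population when deciding which trials to exploit. A sketch under stated assumptions: the `hyperparam_mutations` dict and all numbers are illustrative; only `time_attr`, `metric`, `mode`, and `perturbation_interval` come from the docstring above:

    from ray.tune.schedulers import PopulationBasedTraining

    pbt = PopulationBasedTraining(
        time_attr="training_iteration",
        metric="episode_reward_mean",
        mode="max",
        # Keep this coarse: every perturbation pays checkpoint overhead.
        perturbation_interval=4,
        hyperparam_mutations={
            "lr": [1e-2, 1e-3, 1e-4],
        })
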
4 changes: 2 additions & 2 deletions python/ray/tune/suggest/bayesopt.py
@@ -18,7 +18,7 @@ class BayesOptSearch(SuggestionAlgorithm):
     """A wrapper around BayesOpt to provide trial suggestions.
 
     Requires BayesOpt to be installed. You can install BayesOpt with the
-    command: `pip install inclubayesian-optimization`.
+    command: `pip install bayesian-optimization`.
 
     Parameters:
         space (dict): Continuous search space. Parameters will be sampled from
@@ -27,7 +27,7 @@ class BayesOptSearch(SuggestionAlgorithm):
             to 10.
         metric (str): The training result objective value attribute.
         mode (str): One of {min, max}. Determines whether objective is
-            minimizing or maximizing the metric attribute
+            minimizing or maximizing the metric attribute.
         utility_kwargs (dict): Parameters to define the utility function. Must
             provide values for the keys `kind`, `kappa`, and `xi`.
         random_state (int): Used to initialize BayesOpt.
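
With the corrected install command, constructing the searcher looks like the following sketch, built only from the parameters documented above (the bounds and utility values are illustrative):

    # pip install bayesian-optimization
    from ray.tune.suggest.bayesopt import BayesOptSearch

    space = {"width": (0, 20), "height": (-100, 100)}  # continuous ranges
    algo = BayesOptSearch(
        space,
        max_concurrent=4,
        metric="mean_loss",
        mode="min",
        # The docstring requires the keys `kind`, `kappa`, and `xi`.
        utility_kwargs={"kind": "ucb", "kappa": 2.5, "xi": 0.0})
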
8 changes: 5 additions & 3 deletions python/ray/tune/suggest/hyperopt.py
@@ -6,15 +6,17 @@
 import copy
 import logging
 try:
-    logger = logging.getLogger(__name__)
-    logger.setLevel(logging.WARNING)
+    hyperopt_logger = logging.getLogger("hyperopt")
+    hyperopt_logger.setLevel(logging.WARNING)
     import hyperopt as hpo
 except ImportError:
     hpo = None
 
 from ray.tune.error import TuneError
 from ray.tune.suggest.suggestion import SuggestionAlgorithm
 
+logger = logging.getLogger(__name__)
+
 
 class HyperOptSearch(SuggestionAlgorithm):
     """A wrapper around HyperOpt to provide trial suggestions.
@@ -32,7 +34,7 @@ class HyperOptSearch(SuggestionAlgorithm):
             to 10.
         metric (str): The training result objective value attribute.
         mode (str): One of {min, max}. Determines whether objective is
-            minimizing or maximizing the metric attribute
+            minimizing or maximizing the metric attribute.
         points_to_evaluate (list): Initial parameter suggestions to be run
             first. This is for when you already have some good parameters
             you want hyperopt to run first to help the TPE algorithm
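
The logging change above fixes a real bug, not just style: the old code set this module's own logger (`__name__`) to WARNING inside the try block, which muted ray.tune's hyperopt wrapper rather than the hyperopt library itself. The fix targets the "hyperopt" logger by name and defines the module logger once at top level. The general pattern, as a standalone sketch:

    import logging

    # Quiet a chatty third-party package by configuring *its* named logger...
    logging.getLogger("hyperopt").setLevel(logging.WARNING)
    import hyperopt as hpo  # import after the logger config is intentional

    # ...while this module keeps its own logger under normal configuration.
    logger = logging.getLogger(__name__)
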
2 changes: 1 addition & 1 deletion python/ray/tune/suggest/nevergrad.py
@@ -33,7 +33,7 @@ class NevergradSearch(SuggestionAlgorithm):
             to 10.
         metric (str): The training result objective value attribute.
         mode (str): One of {min, max}. Determines whether objective is
-            minimizing or maximizing the metric attribute
+            minimizing or maximizing the metric attribute.
 
     Example:
         >>> from nevergrad.optimization import optimizerlib
2 changes: 1 addition & 1 deletion python/ray/tune/suggest/sigopt.py
@@ -30,7 +30,7 @@ class SigOptSearch(SuggestionAlgorithm):
             based on the user's SigOpt plan. Defaults to 1.
         metric (str): The training result objective value attribute.
         mode (str): One of {min, max}. Determines whether objective is
-            minimizing or maximizing the metric attribute
+            minimizing or maximizing the metric attribute.
 
     Example:
         >>> space = [
2 changes: 1 addition & 1 deletion python/ray/tune/suggest/skopt.py
@@ -57,7 +57,7 @@ class SkOptSearch(SuggestionAlgorithm):
             to 10.
         metric (str): The training result objective value attribute.
         mode (str): One of {min, max}. Determines whether objective is
-            minimizing or maximizing the metric attribute
+            minimizing or maximizing the metric attribute.
         points_to_evaluate (list of lists): A list of points you'd like to run
             first before sampling from the optimiser, e.g. these could be
             parameter configurations you already know work well to help
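
A construction sketch for SkOptSearch consistent with the parameters above. Hedged: the bounds, parameter names, and warm-start points are illustrative assumptions, as is the detail that the searcher wraps a `skopt.Optimizer` plus a list of parameter names:

    from skopt import Optimizer
    from ray.tune.suggest.skopt import SkOptSearch

    optimizer = Optimizer([(0, 20), (-100, 100)])  # one bounds tuple per parameter
    algo = SkOptSearch(
        optimizer,
        ["width", "height"],  # names matched to the bounds above
        max_concurrent=4,
        metric="mean_loss",
        mode="min",
        # Known-good configurations to evaluate before sampling from the optimiser.
        points_to_evaluate=[[10, 0], [15, -20]])
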
141 changes: 41 additions & 100 deletions python/ray/tune/tests/test_trial_scheduler.py
@@ -135,61 +135,42 @@ def testMedianStoppingSoftStop(self):
             rule.on_trial_result(None, t3, result(2, 260)),
             TrialScheduler.PAUSE)
 
-    def testAlternateMetrics(self):
-        def result2(t, rew):
-            return dict(training_iteration=t, neg_mean_loss=rew)
-
+    def _test_metrics(self, result_func, metric, mode):
         rule = MedianStoppingRule(
             grace_period=0,
             min_samples_required=1,
             time_attr="training_iteration",
-            metric="neg_mean_loss",
-            mode="max")
+            metric=metric,
+            mode=mode)
         t1 = Trial("PPO")  # mean is 450, max 900, t_max=10
         t2 = Trial("PPO")  # mean is 450, max 450, t_max=5
         for i in range(10):
             self.assertEqual(
-                rule.on_trial_result(None, t1, result2(i, i * 100)),
+                rule.on_trial_result(None, t1, result_func(i, i * 100)),
                 TrialScheduler.CONTINUE)
         for i in range(5):
             self.assertEqual(
-                rule.on_trial_result(None, t2, result2(i, 450)),
+                rule.on_trial_result(None, t2, result_func(i, 450)),
                 TrialScheduler.CONTINUE)
-        rule.on_trial_complete(None, t1, result2(10, 1000))
+        rule.on_trial_complete(None, t1, result_func(10, 1000))
         self.assertEqual(
-            rule.on_trial_result(None, t2, result2(5, 450)),
+            rule.on_trial_result(None, t2, result_func(5, 450)),
             TrialScheduler.CONTINUE)
         self.assertEqual(
-            rule.on_trial_result(None, t2, result2(6, 0)),
+            rule.on_trial_result(None, t2, result_func(6, 0)),
             TrialScheduler.CONTINUE)
 
+    def testAlternateMetrics(self):
+        def result2(t, rew):
+            return dict(training_iteration=t, neg_mean_loss=rew)
+
+        self._test_metrics(result2, "neg_mean_loss", "max")
+
     def testAlternateMetricsMin(self):
         def result2(t, rew):
             return dict(training_iteration=t, mean_loss=-rew)
 
-        rule = MedianStoppingRule(
-            grace_period=0,
-            min_samples_required=1,
-            time_attr="training_iteration",
-            metric="mean_loss",
-            mode="min")
-        t1 = Trial("PPO")  # mean is 450, max 900, t_max=10
-        t2 = Trial("PPO")  # mean is 450, max 450, t_max=5
-        for i in range(10):
-            self.assertEqual(
-                rule.on_trial_result(None, t1, result2(i, i * 100)),
-                TrialScheduler.CONTINUE)
-        for i in range(5):
-            self.assertEqual(
-                rule.on_trial_result(None, t2, result2(i, 450)),
-                TrialScheduler.CONTINUE)
-        rule.on_trial_complete(None, t1, result2(10, 1000))
-        self.assertEqual(
-            rule.on_trial_result(None, t2, result2(5, 450)),
-            TrialScheduler.CONTINUE)
-        self.assertEqual(
-            rule.on_trial_result(None, t2, result2(6, 0)),
-            TrialScheduler.CONTINUE)
+        self._test_metrics(result2, "mean_loss", "min")
 
 
 class _MockTrialExecutor(TrialExecutor):
@@ -524,14 +505,9 @@ def testAddAfterHalving(self):
                 TrialScheduler.PAUSE,
                 sched.on_trial_result(mock_runner, t, result(new_units, 12)))
 
-    def testAlternateMetrics(self):
-        """Checking that alternate metrics will pass."""
-
-        def result2(t, rew):
-            return dict(time_total_s=t, neg_mean_loss=rew)
-
+    def _test_metrics(self, result_func, metric, mode):
         sched = HyperBandScheduler(
-            time_attr="time_total_s", metric="neg_mean_loss", mode="max")
+            time_attr="time_total_s", metric=metric, mode=mode)
         stats = self.default_statistics()
 
         for i in range(stats["max_trials"]):
@@ -547,42 +523,28 @@ def result2(t, rew):
 
         # Provides results from 0 to 8 in order, keeping the last one running
         for i, trl in enumerate(big_bracket.current_trials()):
-            action = sched.on_trial_result(runner, trl, result2(1, i))
+            action = sched.on_trial_result(runner, trl, result_func(1, i))
             runner.process_action(trl, action)
 
         new_length = len(big_bracket.current_trials())
         self.assertEqual(action, TrialScheduler.CONTINUE)
         self.assertEqual(new_length, self.downscale(current_length, sched))
 
-    def testAlternateMetricsMin(self):
-        """Checking that alternate metrics will pass."""
-
-        def result2(t, rew):
-            return dict(time_total_s=t, mean_loss=-rew)
-
-        sched = HyperBandScheduler(
-            time_attr="time_total_s", metric="mean_loss", mode="min")
-        stats = self.default_statistics()
-
-        for i in range(stats["max_trials"]):
-            t = Trial("__fake")
-            sched.on_trial_add(None, t)
-        runner = _MockTrialRunner(sched)
-
-        big_bracket = sched._hyperbands[0][-1]
-
-        for trl in big_bracket.current_trials():
-            runner._launch_trial(trl)
-        current_length = len(big_bracket.current_trials())
-
-        # Provides results from 0 to 8 in order, keeping the last one running
-        for i, trl in enumerate(big_bracket.current_trials()):
-            action = sched.on_trial_result(runner, trl, result2(1, i))
-            runner.process_action(trl, action)
-
-        new_length = len(big_bracket.current_trials())
-        self.assertEqual(action, TrialScheduler.CONTINUE)
-        self.assertEqual(new_length, self.downscale(current_length, sched))
+    def testAlternateMetrics(self):
+        """Checking that alternate metrics will pass."""
+
+        def result2(t, rew):
+            return dict(time_total_s=t, neg_mean_loss=rew)
+
+        self._test_metrics(result2, "neg_mean_loss", "max")
+
+    def testAlternateMetricsMin(self):
+        """Checking that alternate metrics will pass."""
+
+        def result2(t, rew):
+            return dict(time_total_s=t, mean_loss=-rew)
+
+        self._test_metrics(result2, "mean_loss", "min")
 
     def testJumpingTime(self):
         sched, mock_runner = self.schedulerSetup(81)
@@ -1074,65 +1036,44 @@ def testAsyncHBUsesPercentile(self):
             scheduler.on_trial_result(None, t3, result(2, 260)),
             TrialScheduler.STOP)
 
-    def testAlternateMetrics(self):
-        def result2(t, rew):
-            return dict(training_iteration=t, neg_mean_loss=rew)
-
+    def _test_metrics(self, result_func, metric, mode):
         scheduler = AsyncHyperBandScheduler(
             grace_period=1,
             time_attr="training_iteration",
-            metric="neg_mean_loss",
-            mode="max",
+            metric=metric,
+            mode=mode,
             brackets=1)
         t1 = Trial("PPO")  # mean is 450, max 900, t_max=10
         t2 = Trial("PPO")  # mean is 450, max 450, t_max=5
         scheduler.on_trial_add(None, t1)
         scheduler.on_trial_add(None, t2)
         for i in range(10):
             self.assertEqual(
-                scheduler.on_trial_result(None, t1, result2(i, i * 100)),
+                scheduler.on_trial_result(None, t1, result_func(i, i * 100)),
                 TrialScheduler.CONTINUE)
         for i in range(5):
             self.assertEqual(
-                scheduler.on_trial_result(None, t2, result2(i, 450)),
+                scheduler.on_trial_result(None, t2, result_func(i, 450)),
                 TrialScheduler.CONTINUE)
-        scheduler.on_trial_complete(None, t1, result2(10, 1000))
+        scheduler.on_trial_complete(None, t1, result_func(10, 1000))
         self.assertEqual(
-            scheduler.on_trial_result(None, t2, result2(5, 450)),
+            scheduler.on_trial_result(None, t2, result_func(5, 450)),
             TrialScheduler.CONTINUE)
         self.assertEqual(
-            scheduler.on_trial_result(None, t2, result2(6, 0)),
+            scheduler.on_trial_result(None, t2, result_func(6, 0)),
             TrialScheduler.CONTINUE)
 
+    def testAlternateMetrics(self):
+        def result2(t, rew):
+            return dict(training_iteration=t, neg_mean_loss=rew)
+
+        self._test_metrics(result2, "neg_mean_loss", "max")
+
     def testAlternateMetricsMin(self):
         def result2(t, rew):
             return dict(training_iteration=t, mean_loss=-rew)
 
-        scheduler = AsyncHyperBandScheduler(
-            grace_period=1,
-            time_attr="training_iteration",
-            metric="mean_loss",
-            mode="min",
-            brackets=1)
-        t1 = Trial("PPO")  # mean is 450, max 900, t_max=10
-        t2 = Trial("PPO")  # mean is 450, max 450, t_max=5
-        scheduler.on_trial_add(None, t1)
-        scheduler.on_trial_add(None, t2)
-        for i in range(10):
-            self.assertEqual(
-                scheduler.on_trial_result(None, t1, result2(i, i * 100)),
-                TrialScheduler.CONTINUE)
-        for i in range(5):
-            self.assertEqual(
-                scheduler.on_trial_result(None, t2, result2(i, 450)),
-                TrialScheduler.CONTINUE)
-        scheduler.on_trial_complete(None, t1, result2(10, 1000))
-        self.assertEqual(
-            scheduler.on_trial_result(None, t2, result2(5, 450)),
-            TrialScheduler.CONTINUE)
-        self.assertEqual(
-            scheduler.on_trial_result(None, t2, result2(6, 0)),
-            TrialScheduler.CONTINUE)
+        self._test_metrics(result2, "mean_loss", "min")
 
 
 if __name__ == "__main__":
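
The test-file changes are a pure DRY refactor: each scheduler's duplicated max/min test bodies collapse into a shared `_test_metrics(result_func, metric, mode)` helper, and the named tests now supply only a result factory plus the metric/mode pair. The shape of the pattern, as a self-contained sketch (the class name and assertions here are illustrative, not from the diff):

    import unittest


    class AlternateMetricsTest(unittest.TestCase):
        def _test_metrics(self, result_func, metric, mode):
            # Shared assertions, parameterized by how results are produced
            # and by which objective/direction is under test.
            result = result_func(1, 100)
            self.assertIn(metric, result)
            self.assertIn(mode, ("min", "max"))

        def testAlternateMetrics(self):
            self._test_metrics(
                lambda t, rew: dict(training_iteration=t, neg_mean_loss=rew),
                "neg_mean_loss", "max")

        def testAlternateMetricsMin(self):
            self._test_metrics(
                lambda t, rew: dict(training_iteration=t, mean_loss=-rew),
                "mean_loss", "min")


    if __name__ == "__main__":
        unittest.main()
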
