From 79627e7c70390ea91e2b2be2279b4c76e7429a79 Mon Sep 17 00:00:00 2001 From: Elizabeth Santorella Date: Sun, 28 Jul 2024 08:29:26 -0700 Subject: [PATCH] Get rid of benchmark problem class constructors (#2605) Summary: Pull Request resolved: https://github.com/facebook/Ax/pull/2605 Context: Benchmark problems are sometimes created with class methods `SingleObjectiveBenchmarkProblem.from_botorch`, `SingleObjectiveBenchmarkProblem.from_botorch_synthetic`, and `MultiObjectiveBenchmarkProblem.from_botorch_multi_objective`, the former two now being identical. This creates the need for some tricky type annotations; to me, replacing these with functions is obviously cleaner. This will make it easier to consolidate classes in the future. This PR: * Replaces `SingleObjectiveBenchmarkProblem.from_botorch` and `SingleObjectiveBenchmarkProblem.from_botorch_synthetic` with `create_single_objective_problem_from_botorch` * Replaces `MultiObjectiveBenchmarkProblem.from_botorch_multi_objective` with `create_multi_objective_problem_from_botorch` Differential Revision: D60284484 Reviewed By: saitcakmak --- ax/benchmark/benchmark_problem.py | 368 +++++++++---------- ax/benchmark/problems/registry.py | 42 +-- ax/benchmark/tests/test_benchmark.py | 4 +- ax/benchmark/tests/test_benchmark_problem.py | 31 +- ax/utils/testing/benchmark_stubs.py | 10 +- 5 files changed, 216 insertions(+), 239 deletions(-) diff --git a/ax/benchmark/benchmark_problem.py b/ax/benchmark/benchmark_problem.py index b1543b7f01b..5d8766efe40 100644 --- a/ax/benchmark/benchmark_problem.py +++ b/ax/benchmark/benchmark_problem.py @@ -5,13 +5,8 @@ # pyre-strict -# NOTE: Do not add `from __future__ import annotations` to this file. Adding -# `annotations` postpones evaluation of types and will break FBLearner's usage of -# `BenchmarkProblem` as return type annotation, used for serialization and rendering -# in the UI. - from dataclasses import dataclass, field -from typing import Any, Dict, List, Optional, Type, TypeVar, Union +from typing import Any, Dict, List, Optional, Type, Union from ax.benchmark.metrics.base import BenchmarkMetricBase @@ -37,14 +32,6 @@ ) from botorch.test_functions.synthetic import SyntheticTestFunction -TBenchmarkProblem = TypeVar("TBenchmarkProblem", bound="BenchmarkProblem") -TSingleObjectiveBenchmarkProblem = TypeVar( - "TSingleObjectiveBenchmarkProblem", bound="SingleObjectiveBenchmarkProblem" -) -TMultiObjectiveBenchmarkProblem = TypeVar( - "TMultiObjectiveBenchmarkProblem", bound="MultiObjectiveBenchmarkProblem" -) - def _get_name( test_problem: BaseTestProblem, @@ -106,122 +93,116 @@ class BenchmarkProblem(Base): is_noiseless: bool -@dataclass(kw_only=True, repr=True) class SingleObjectiveBenchmarkProblem(BenchmarkProblem): - """ - Benchmark problem with a single objective. + """A `BenchmarkProblem` that supports a single objective.""" - For argument descriptions, see `BenchmarkProblem`; it additionally takes a - `Runner`. - """ + pass - @classmethod - def from_botorch_synthetic( - cls: Type[TSingleObjectiveBenchmarkProblem], - test_problem_class: Type[SyntheticTestFunction], - test_problem_kwargs: Dict[str, Any], - lower_is_better: bool, - num_trials: int, - observe_noise_sd: bool = False, - ) -> TSingleObjectiveBenchmarkProblem: - """ - Create a BenchmarkProblem from a BoTorch BaseTestProblem using - specialized Metrics and Runners. The test problem's result will be - computed on the Runner and retrieved by the Metric. 
- - Args: - test_problem_class: The BoTorch test problem class which will be used - to define the `search_space`, `optimization_config`, and `runner`. - test_problem_kwargs: Keyword arguments used to instantiate the - `test_problem_class`. - num_trials: Simply the `num_trials` of the `BenchmarkProblem` created. - observe_noise_sd: Whether the standard deviation of the observation noise is - observed or not (in which case it must be inferred by the model). - This is separate from whether synthetic noise is added to the - problem, which is controlled by the `noise_std` of the test problem. - """ - - # pyre-fixme [45]: Invalid class instantiation - test_problem = test_problem_class(**test_problem_kwargs) - is_constrained = isinstance(test_problem, ConstrainedBaseTestProblem) - - search_space = SearchSpace( - parameters=[ - RangeParameter( - name=f"x{i}", - parameter_type=ParameterType.FLOAT, - lower=lower, - upper=upper, - ) - for i, (lower, upper) in enumerate(test_problem._bounds) - ] - ) - dim = test_problem_kwargs.get("dim", None) - name = _get_name( - test_problem=test_problem, observe_noise_sd=observe_noise_sd, dim=dim - ) +def create_single_objective_problem_from_botorch( + test_problem_class: Type[SyntheticTestFunction], + test_problem_kwargs: Dict[str, Any], + lower_is_better: bool, + num_trials: int, + observe_noise_sd: bool = False, +) -> SingleObjectiveBenchmarkProblem: + """ + Create a BenchmarkProblem from a BoTorch BaseTestProblem using + specialized Metrics and Runners. The test problem's result will be + computed on the Runner and retrieved by the Metric. - # TODO: Support constrained MOO problems. + Args: + test_problem_class: The BoTorch test problem class which will be used + to define the `search_space`, `optimization_config`, and `runner`. + test_problem_kwargs: Keyword arguments used to instantiate the + `test_problem_class`. + num_trials: Simply the `num_trials` of the `BenchmarkProblem` created. + observe_noise_sd: Whether the standard deviation of the observation noise is + observed or not (in which case it must be inferred by the model). + This is separate from whether synthetic noise is added to the + problem, which is controlled by the `noise_std` of the test problem. + """ + # pyre-fixme [45]: Invalid class instantiation + test_problem = test_problem_class(**test_problem_kwargs) + is_constrained = isinstance(test_problem, ConstrainedBaseTestProblem) + + search_space = SearchSpace( + parameters=[ + RangeParameter( + name=f"x{i}", + parameter_type=ParameterType.FLOAT, + lower=lower, + upper=upper, + ) + for i, (lower, upper) in enumerate(test_problem._bounds) + ] + ) - objective = Objective( - metric=BenchmarkMetric( - name=name, - lower_is_better=lower_is_better, - observe_noise_sd=observe_noise_sd, - outcome_index=0, - ), - minimize=lower_is_better, - ) + dim = test_problem_kwargs.get("dim", None) + name = _get_name( + test_problem=test_problem, observe_noise_sd=observe_noise_sd, dim=dim + ) - outcome_names = [name] - outcome_constraints = [] - - # NOTE: Currently we don't support the case where only some of the - # outcomes have noise levels observed. 
- - if is_constrained: - for i in range(test_problem.num_constraints): - outcome_name = f"constraint_slack_{i}" - outcome_constraints.append( - OutcomeConstraint( - metric=BenchmarkMetric( - name=outcome_name, - lower_is_better=False, # positive slack = feasible - observe_noise_sd=observe_noise_sd, - outcome_index=i, - ), - op=ComparisonOp.GEQ, - bound=0.0, - relative=False, - ) - ) - outcome_names.append(outcome_name) + # TODO: Support constrained MOO problems. - optimization_config = OptimizationConfig( - objective=objective, - outcome_constraints=outcome_constraints, - ) - optimal_value = ( - test_problem.max_hv - if isinstance(test_problem, MultiObjectiveTestProblem) - else test_problem.optimal_value - ) - return cls( + objective = Objective( + metric=BenchmarkMetric( name=name, - search_space=search_space, - optimization_config=optimization_config, - runner=BotorchTestProblemRunner( - test_problem_class=test_problem_class, - test_problem_kwargs=test_problem_kwargs, - outcome_names=outcome_names, - ), - num_trials=num_trials, - observe_noise_stds=observe_noise_sd, - is_noiseless=test_problem.noise_std in (None, 0.0), - has_ground_truth=True, # all synthetic problems have ground truth - optimal_value=optimal_value, - ) + lower_is_better=lower_is_better, + observe_noise_sd=observe_noise_sd, + outcome_index=0, + ), + minimize=lower_is_better, + ) + + outcome_names = [name] + outcome_constraints = [] + + # NOTE: Currently we don't support the case where only some of the + # outcomes have noise levels observed. + + if is_constrained: + for i in range(test_problem.num_constraints): + outcome_name = f"constraint_slack_{i}" + outcome_constraints.append( + OutcomeConstraint( + metric=BenchmarkMetric( + name=outcome_name, + lower_is_better=False, # positive slack = feasible + observe_noise_sd=observe_noise_sd, + outcome_index=i, + ), + op=ComparisonOp.GEQ, + bound=0.0, + relative=False, + ) + ) + outcome_names.append(outcome_name) + + optimization_config = OptimizationConfig( + objective=objective, + outcome_constraints=outcome_constraints, + ) + optimal_value = ( + test_problem.max_hv + if isinstance(test_problem, MultiObjectiveTestProblem) + else test_problem.optimal_value + ) + return SingleObjectiveBenchmarkProblem( + name=name, + search_space=search_space, + optimization_config=optimization_config, + runner=BotorchTestProblemRunner( + test_problem_class=test_problem_class, + test_problem_kwargs=test_problem_kwargs, + outcome_names=outcome_names, + ), + num_trials=num_trials, + observe_noise_stds=observe_noise_sd, + is_noiseless=test_problem.noise_std in (None, 0.0), + has_ground_truth=True, # all synthetic problems have ground truth + optimal_value=optimal_value, + ) @dataclass(kw_only=True, repr=True) @@ -239,82 +220,79 @@ class MultiObjectiveBenchmarkProblem(BenchmarkProblem): reference_point: List[float] optimization_config: MultiObjectiveOptimizationConfig - @classmethod - def from_botorch_multi_objective( - cls: Type[TMultiObjectiveBenchmarkProblem], - test_problem_class: Type[MultiObjectiveTestProblem], - test_problem_kwargs: Dict[str, Any], - # TODO: Figure out whether we should use `lower_is_better` here. - num_trials: int, - observe_noise_sd: bool = False, - ) -> TMultiObjectiveBenchmarkProblem: - """Create a BenchmarkProblem from a BoTorch BaseTestProblem using specialized - Metrics and Runners. The test problem's result will be computed on the Runner - once per trial and each Metric will retrieve its own result by index. 
- """ - if issubclass(test_problem_class, ConstrainedBaseTestProblem): - raise NotImplementedError( - "Constrained multi-objective problems are not supported." - ) - # pyre-fixme [45]: Invalid class instantiation - test_problem = test_problem_class(**test_problem_kwargs) +def create_multi_objective_problem_from_botorch( + test_problem_class: Type[MultiObjectiveTestProblem], + test_problem_kwargs: Dict[str, Any], + # TODO: Figure out whether we should use `lower_is_better` here. + num_trials: int, + observe_noise_sd: bool = False, +) -> MultiObjectiveBenchmarkProblem: + """Create a BenchmarkProblem from a BoTorch BaseTestProblem using specialized + Metrics and Runners. The test problem's result will be computed on the Runner + once per trial and each Metric will retrieve its own result by index. + """ + if issubclass(test_problem_class, ConstrainedBaseTestProblem): + raise NotImplementedError( + "Constrained multi-objective problems are not supported." + ) - problem = SingleObjectiveBenchmarkProblem.from_botorch_synthetic( - # pyre-fixme [6]: Passing a multi-objective problem where a - # single-objective problem is expected. - test_problem_class=test_problem_class, - test_problem_kwargs=test_problem_kwargs, - lower_is_better=True, # Seems like we always assume minimization for MOO? - num_trials=num_trials, + # pyre-fixme [45]: Invalid class instantiation + test_problem = test_problem_class(**test_problem_kwargs) + + problem = create_single_objective_problem_from_botorch( + # pyre-fixme [6]: Passing a multi-objective problem where a + # single-objective problem is expected. + test_problem_class=test_problem_class, + test_problem_kwargs=test_problem_kwargs, + lower_is_better=True, # Seems like we always assume minimization for MOO? + num_trials=num_trials, + observe_noise_sd=observe_noise_sd, + ) + + name = problem.name + + n_obj = test_problem.num_objectives + if not observe_noise_sd: + noise_sds = [None] * n_obj + elif isinstance(test_problem.noise_std, list): + noise_sds = test_problem.noise_std + else: + noise_sds = [checked_cast(float, test_problem.noise_std or 0.0)] * n_obj + + metrics = [ + BenchmarkMetric( + name=f"{name}_{i}", + lower_is_better=True, observe_noise_sd=observe_noise_sd, + outcome_index=i, ) - - name = problem.name - - n_obj = test_problem.num_objectives - if not observe_noise_sd: - noise_sds = [None] * n_obj - elif isinstance(test_problem.noise_std, list): - noise_sds = test_problem.noise_std - else: - noise_sds = [checked_cast(float, test_problem.noise_std or 0.0)] * n_obj - - metrics = [ - BenchmarkMetric( - name=f"{name}_{i}", - lower_is_better=True, - observe_noise_sd=observe_noise_sd, - outcome_index=i, + for i, noise_sd in enumerate(noise_sds) + ] + optimization_config = MultiObjectiveOptimizationConfig( + objective=MultiObjective( + objectives=[Objective(metric=metric, minimize=True) for metric in metrics] + ), + objective_thresholds=[ + ObjectiveThreshold( + metric=metric, + bound=test_problem.ref_point[i].item(), + relative=False, + op=ComparisonOp.LEQ, ) - for i, noise_sd in enumerate(noise_sds) - ] - optimization_config = MultiObjectiveOptimizationConfig( - objective=MultiObjective( - objectives=[ - Objective(metric=metric, minimize=True) for metric in metrics - ] - ), - objective_thresholds=[ - ObjectiveThreshold( - metric=metric, - bound=test_problem.ref_point[i].item(), - relative=False, - op=ComparisonOp.LEQ, - ) - for i, metric in enumerate(metrics) - ], - ) - - return cls( - name=name, - search_space=problem.search_space, - 
optimization_config=optimization_config, - runner=problem.runner, - num_trials=num_trials, - is_noiseless=problem.is_noiseless, - observe_noise_stds=observe_noise_sd, - has_ground_truth=problem.has_ground_truth, - optimal_value=test_problem.max_hv, - reference_point=test_problem._ref_point, - ) + for i, metric in enumerate(metrics) + ], + ) + + return MultiObjectiveBenchmarkProblem( + name=name, + search_space=problem.search_space, + optimization_config=optimization_config, + runner=problem.runner, + num_trials=num_trials, + is_noiseless=problem.is_noiseless, + observe_noise_stds=observe_noise_sd, + has_ground_truth=problem.has_ground_truth, + optimal_value=test_problem.max_hv, + reference_point=test_problem._ref_point, + ) diff --git a/ax/benchmark/problems/registry.py b/ax/benchmark/problems/registry.py index 4c6521af1a6..a7bc652a7b9 100644 --- a/ax/benchmark/problems/registry.py +++ b/ax/benchmark/problems/registry.py @@ -11,8 +11,8 @@ from ax.benchmark.benchmark_problem import ( BenchmarkProblem, - MultiObjectiveBenchmarkProblem, - SingleObjectiveBenchmarkProblem, + create_multi_objective_problem_from_botorch, + create_single_objective_problem_from_botorch, ) from ax.benchmark.problems.hd_embedding import embed_higher_dimension from ax.benchmark.problems.hpo.torchvision import PyTorchCNNTorchvisionBenchmarkProblem @@ -29,7 +29,7 @@ class BenchmarkProblemRegistryEntry: BENCHMARK_PROBLEM_REGISTRY = { "ackley4": BenchmarkProblemRegistryEntry( - factory_fn=SingleObjectiveBenchmarkProblem.from_botorch_synthetic, + factory_fn=create_single_objective_problem_from_botorch, factory_kwargs={ "test_problem_class": synthetic.Ackley, "test_problem_kwargs": {"dim": 4}, @@ -39,7 +39,7 @@ class BenchmarkProblemRegistryEntry: }, ), "branin": BenchmarkProblemRegistryEntry( - factory_fn=SingleObjectiveBenchmarkProblem.from_botorch_synthetic, + factory_fn=create_single_objective_problem_from_botorch, factory_kwargs={ "test_problem_class": synthetic.Branin, "test_problem_kwargs": {}, @@ -49,7 +49,7 @@ class BenchmarkProblemRegistryEntry: }, ), "branin_currin": BenchmarkProblemRegistryEntry( - factory_fn=MultiObjectiveBenchmarkProblem.from_botorch_multi_objective, + factory_fn=create_multi_objective_problem_from_botorch, factory_kwargs={ "test_problem_class": BraninCurrin, "test_problem_kwargs": {}, @@ -59,7 +59,7 @@ class BenchmarkProblemRegistryEntry: ), "branin_currin30": BenchmarkProblemRegistryEntry( factory_fn=lambda n, num_trials: embed_higher_dimension( - problem=MultiObjectiveBenchmarkProblem.from_botorch_multi_objective( + problem=create_multi_objective_problem_from_botorch( test_problem_class=BraninCurrin, test_problem_kwargs={}, num_trials=num_trials, @@ -70,7 +70,7 @@ class BenchmarkProblemRegistryEntry: factory_kwargs={"n": 30, "num_trials": 30}, ), "griewank4": BenchmarkProblemRegistryEntry( - factory_fn=SingleObjectiveBenchmarkProblem.from_botorch_synthetic, + factory_fn=create_single_objective_problem_from_botorch, factory_kwargs={ "test_problem_class": synthetic.Griewank, "test_problem_kwargs": {"dim": 4}, @@ -80,7 +80,7 @@ class BenchmarkProblemRegistryEntry: }, ), "hartmann3": BenchmarkProblemRegistryEntry( - factory_fn=SingleObjectiveBenchmarkProblem.from_botorch_synthetic, + factory_fn=create_single_objective_problem_from_botorch, factory_kwargs={ "test_problem_class": synthetic.Hartmann, "test_problem_kwargs": {"dim": 3}, @@ -90,7 +90,7 @@ class BenchmarkProblemRegistryEntry: }, ), "hartmann6": BenchmarkProblemRegistryEntry( - 
factory_fn=SingleObjectiveBenchmarkProblem.from_botorch_synthetic, + factory_fn=create_single_objective_problem_from_botorch, factory_kwargs={ "test_problem_class": synthetic.Hartmann, "test_problem_kwargs": {"dim": 6}, @@ -101,7 +101,7 @@ class BenchmarkProblemRegistryEntry: ), "hartmann30": BenchmarkProblemRegistryEntry( factory_fn=lambda n, num_trials: embed_higher_dimension( - problem=SingleObjectiveBenchmarkProblem.from_botorch_synthetic( + problem=create_single_objective_problem_from_botorch( test_problem_class=synthetic.Hartmann, test_problem_kwargs={"dim": 6}, lower_is_better=True, @@ -131,7 +131,7 @@ class BenchmarkProblemRegistryEntry: factory_kwargs={"num_trials": 50, "observe_noise_sd": False}, ), "levy4": BenchmarkProblemRegistryEntry( - factory_fn=SingleObjectiveBenchmarkProblem.from_botorch_synthetic, + factory_fn=create_single_objective_problem_from_botorch, factory_kwargs={ "test_problem_class": synthetic.Levy, "test_problem_kwargs": {"dim": 4}, @@ -141,7 +141,7 @@ class BenchmarkProblemRegistryEntry: }, ), "powell4": BenchmarkProblemRegistryEntry( - factory_fn=SingleObjectiveBenchmarkProblem.from_botorch_synthetic, + factory_fn=create_single_objective_problem_from_botorch, factory_kwargs={ "test_problem_class": synthetic.Powell, "test_problem_kwargs": {"dim": 4}, @@ -151,7 +151,7 @@ class BenchmarkProblemRegistryEntry: }, ), "rosenbrock4": BenchmarkProblemRegistryEntry( - factory_fn=SingleObjectiveBenchmarkProblem.from_botorch_synthetic, + factory_fn=create_single_objective_problem_from_botorch, factory_kwargs={ "test_problem_class": synthetic.Rosenbrock, "test_problem_kwargs": {"dim": 4}, @@ -161,7 +161,7 @@ class BenchmarkProblemRegistryEntry: }, ), "six_hump_camel": BenchmarkProblemRegistryEntry( - factory_fn=SingleObjectiveBenchmarkProblem.from_botorch_synthetic, + factory_fn=create_single_objective_problem_from_botorch, factory_kwargs={ "test_problem_class": synthetic.SixHumpCamel, "test_problem_kwargs": {}, @@ -171,7 +171,7 @@ class BenchmarkProblemRegistryEntry: }, ), "three_hump_camel": BenchmarkProblemRegistryEntry( - factory_fn=SingleObjectiveBenchmarkProblem.from_botorch_synthetic, + factory_fn=create_single_objective_problem_from_botorch, factory_kwargs={ "test_problem_class": synthetic.ThreeHumpCamel, "test_problem_kwargs": {}, @@ -182,7 +182,7 @@ class BenchmarkProblemRegistryEntry: ), # Problems where we observe the noise level "branin_observed_noise": BenchmarkProblemRegistryEntry( - factory_fn=SingleObjectiveBenchmarkProblem.from_botorch_synthetic, + factory_fn=create_single_objective_problem_from_botorch, factory_kwargs={ "test_problem_class": synthetic.Branin, "test_problem_kwargs": {}, @@ -192,7 +192,7 @@ class BenchmarkProblemRegistryEntry: }, ), "branin_currin_observed_noise": BenchmarkProblemRegistryEntry( - factory_fn=MultiObjectiveBenchmarkProblem.from_botorch_multi_objective, + factory_fn=create_multi_objective_problem_from_botorch, factory_kwargs={ "test_problem_class": BraninCurrin, "test_problem_kwargs": {}, @@ -202,7 +202,7 @@ class BenchmarkProblemRegistryEntry: ), "branin_currin30_observed_noise": BenchmarkProblemRegistryEntry( factory_fn=lambda n, num_trials: embed_higher_dimension( - problem=MultiObjectiveBenchmarkProblem.from_botorch_multi_objective( + problem=create_multi_objective_problem_from_botorch( test_problem_class=BraninCurrin, test_problem_kwargs={}, num_trials=num_trials, @@ -213,7 +213,7 @@ class BenchmarkProblemRegistryEntry: factory_kwargs={"n": 30, "num_trials": 30}, ), "hartmann6_observed_noise": 
BenchmarkProblemRegistryEntry( - factory_fn=SingleObjectiveBenchmarkProblem.from_botorch_synthetic, + factory_fn=create_single_objective_problem_from_botorch, factory_kwargs={ "test_problem_class": synthetic.Hartmann, "test_problem_kwargs": {"dim": 6}, @@ -224,7 +224,7 @@ class BenchmarkProblemRegistryEntry: ), "hartmann30_observed_noise": BenchmarkProblemRegistryEntry( factory_fn=lambda n, num_trials: embed_higher_dimension( - problem=SingleObjectiveBenchmarkProblem.from_botorch_synthetic( + problem=create_single_objective_problem_from_botorch( test_problem_class=synthetic.Hartmann, test_problem_kwargs={"dim": 6}, lower_is_better=True, @@ -240,7 +240,7 @@ class BenchmarkProblemRegistryEntry: factory_kwargs={"num_trials": 25, "observe_noise_sd": True}, ), "constrained_gramacy_observed_noise": BenchmarkProblemRegistryEntry( - factory_fn=SingleObjectiveBenchmarkProblem.from_botorch_synthetic, + factory_fn=create_single_objective_problem_from_botorch, factory_kwargs={ "test_problem_class": synthetic.ConstrainedGramacy, "test_problem_kwargs": {}, diff --git a/ax/benchmark/tests/test_benchmark.py b/ax/benchmark/tests/test_benchmark.py index e5a184e9494..3d0ae2eeda3 100644 --- a/ax/benchmark/tests/test_benchmark.py +++ b/ax/benchmark/tests/test_benchmark.py @@ -21,7 +21,7 @@ BenchmarkMethod, get_benchmark_scheduler_options, ) -from ax.benchmark.benchmark_problem import SingleObjectiveBenchmarkProblem +from ax.benchmark.benchmark_problem import create_single_objective_problem_from_botorch from ax.benchmark.benchmark_result import BenchmarkResult from ax.benchmark.methods.modular_botorch import get_sobol_botorch_modular_acquisition from ax.benchmark.metrics.base import GroundTruthMetricMixin @@ -439,7 +439,7 @@ def test_benchmark_multiple_problems_methods(self) -> None: self.assertTrue((agg.score_trace[col] <= 100).all()) def test_timeout(self) -> None: - problem = SingleObjectiveBenchmarkProblem.from_botorch_synthetic( + problem = create_single_objective_problem_from_botorch( test_problem_class=Branin, test_problem_kwargs={}, lower_is_better=True, diff --git a/ax/benchmark/tests/test_benchmark_problem.py b/ax/benchmark/tests/test_benchmark_problem.py index 4df3f267202..640e5f721be 100644 --- a/ax/benchmark/tests/test_benchmark_problem.py +++ b/ax/benchmark/tests/test_benchmark_problem.py @@ -8,15 +8,18 @@ from typing import List, Optional, Union from ax.benchmark.benchmark_problem import ( - MultiObjectiveBenchmarkProblem, - SingleObjectiveBenchmarkProblem, + create_multi_objective_problem_from_botorch, + create_single_objective_problem_from_botorch, ) from ax.benchmark.metrics.benchmark import BenchmarkMetric from ax.benchmark.runners.botorch_test import BotorchTestProblemRunner from ax.core.types import ComparisonOp from ax.utils.common.testutils import TestCase from ax.utils.common.typeutils import checked_cast -from botorch.test_functions.multi_objective import BraninCurrin, ConstrainedBraninCurrin +from ax.utils.testing.benchmark_stubs import ( + get_constrained_multi_objective_benchmark_problem, +) +from botorch.test_functions.multi_objective import BraninCurrin from botorch.test_functions.synthetic import ( Ackley, ConstrainedGramacy, @@ -34,7 +37,7 @@ def setUp(self) -> None: def test_single_objective_from_botorch(self) -> None: for botorch_test_problem in [Ackley(), ConstrainedHartmann(dim=6)]: - test_problem = SingleObjectiveBenchmarkProblem.from_botorch_synthetic( + test_problem = create_single_objective_problem_from_botorch( test_problem_class=botorch_test_problem.__class__, 
test_problem_kwargs={}, lower_is_better=True, @@ -131,7 +134,7 @@ def test_constrained_from_botorch( objective_noise_std: Optional[float], constraint_noise_std: Optional[Union[float, List[float]]], ) -> None: - ax_problem = SingleObjectiveBenchmarkProblem.from_botorch_synthetic( + ax_problem = create_single_objective_problem_from_botorch( test_problem_class=ConstrainedGramacy, test_problem_kwargs={ "noise_std": objective_noise_std, @@ -167,12 +170,10 @@ def test_constrained_from_botorch( def test_moo_from_botorch(self) -> None: test_problem = BraninCurrin() - branin_currin_problem = ( - MultiObjectiveBenchmarkProblem.from_botorch_multi_objective( - test_problem_class=test_problem.__class__, - test_problem_kwargs={}, - num_trials=1, - ) + branin_currin_problem = create_multi_objective_problem_from_botorch( + test_problem_class=test_problem.__class__, + test_problem_kwargs={}, + num_trials=1, ) # Test search space @@ -209,14 +210,10 @@ def test_moo_from_botorch_constrained(self) -> None: NotImplementedError, "Constrained multi-objective problems are not supported.", ): - MultiObjectiveBenchmarkProblem.from_botorch_multi_objective( - test_problem_class=ConstrainedBraninCurrin, - test_problem_kwargs={}, - num_trials=1, - ) + get_constrained_multi_objective_benchmark_problem() def test_maximization_problem(self) -> None: - test_problem = SingleObjectiveBenchmarkProblem.from_botorch_synthetic( + test_problem = create_single_objective_problem_from_botorch( test_problem_class=Cosine8, lower_is_better=False, num_trials=1, diff --git a/ax/utils/testing/benchmark_stubs.py b/ax/utils/testing/benchmark_stubs.py index d092963c3a1..663b25dcb13 100644 --- a/ax/utils/testing/benchmark_stubs.py +++ b/ax/utils/testing/benchmark_stubs.py @@ -11,6 +11,8 @@ import numpy as np from ax.benchmark.benchmark_method import BenchmarkMethod from ax.benchmark.benchmark_problem import ( + create_multi_objective_problem_from_botorch, + create_single_objective_problem_from_botorch, MultiObjectiveBenchmarkProblem, SingleObjectiveBenchmarkProblem, ) @@ -44,7 +46,7 @@ def get_benchmark_problem() -> SingleObjectiveBenchmarkProblem: - return SingleObjectiveBenchmarkProblem.from_botorch_synthetic( + return create_single_objective_problem_from_botorch( test_problem_class=Branin, test_problem_kwargs={}, lower_is_better=True, @@ -57,7 +59,7 @@ def get_single_objective_benchmark_problem( num_trials: int = 4, test_problem_kwargs: Optional[Dict[str, Any]] = None, ) -> SingleObjectiveBenchmarkProblem: - return SingleObjectiveBenchmarkProblem.from_botorch_synthetic( + return create_single_objective_problem_from_botorch( test_problem_class=Branin, test_problem_kwargs=test_problem_kwargs or {}, lower_is_better=True, @@ -69,7 +71,7 @@ def get_single_objective_benchmark_problem( def get_multi_objective_benchmark_problem( observe_noise_sd: bool = False, num_trials: int = 4 ) -> MultiObjectiveBenchmarkProblem: - return MultiObjectiveBenchmarkProblem.from_botorch_multi_objective( + return create_multi_objective_problem_from_botorch( test_problem_class=BraninCurrin, test_problem_kwargs={}, num_trials=num_trials, @@ -80,7 +82,7 @@ def get_multi_objective_benchmark_problem( def get_constrained_multi_objective_benchmark_problem( observe_noise_sd: bool = False, num_trials: int = 4 ) -> MultiObjectiveBenchmarkProblem: - return MultiObjectiveBenchmarkProblem.from_botorch_multi_objective( + return create_multi_objective_problem_from_botorch( test_problem_class=ConstrainedBraninCurrin, test_problem_kwargs={}, num_trials=num_trials,
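
Editor's note (not part of the patch): a minimal usage sketch of the two factory functions introduced in benchmark_problem.py above, assuming the imports already used in this diff (`Branin` from botorch.test_functions.synthetic, `BraninCurrin` from botorch.test_functions.multi_objective). The argument values mirror the "branin" and "branin_currin" registry entries in this diff and are illustrative only.

    # Illustrative sketch -- based on the signatures added in benchmark_problem.py above.
    from ax.benchmark.benchmark_problem import (
        create_multi_objective_problem_from_botorch,
        create_single_objective_problem_from_botorch,
    )
    from botorch.test_functions.multi_objective import BraninCurrin
    from botorch.test_functions.synthetic import Branin

    # Replaces SingleObjectiveBenchmarkProblem.from_botorch_synthetic(...)
    single_objective_problem = create_single_objective_problem_from_botorch(
        test_problem_class=Branin,
        test_problem_kwargs={},
        lower_is_better=True,
        num_trials=30,
        observe_noise_sd=False,
    )

    # Replaces MultiObjectiveBenchmarkProblem.from_botorch_multi_objective(...)
    multi_objective_problem = create_multi_objective_problem_from_botorch(
        test_problem_class=BraninCurrin,
        test_problem_kwargs={},
        num_trials=30,
        observe_noise_sd=False,
    )

Both calls return the same dataclasses as before (SingleObjectiveBenchmarkProblem / MultiObjectiveBenchmarkProblem); only the construction entry points change from classmethods to module-level functions.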