Remove BenchmarkProblemWithKnownOptimum type annotation (#2602)
Summary:
Pull Request resolved: #2602

The `BenchmarkProblemWithKnownOptimum` protocol and the type annotations referencing it are no longer necessary; see the previous PR for context. This change removes them and updates the remaining type annotations.

Reviewed By: saitcakmak

Differential Revision: D60146081
esantorella authored and facebook-github-bot committed Jul 25, 2024
1 parent cb8cde1 commit a5b4a64
Showing 2 changed files with 5 additions and 17 deletions.
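The net effect is that `BenchmarkProblemProtocol` itself now declares both `runner` and `optimal_value`, so the separate `BenchmarkProblemWithKnownOptimum` protocol, and the `isinstance` guard built on it, can go away. As a reading aid, here is an abridged sketch of the consolidated protocol reconstructed from the diffs below; all other fields are elided, and the `Runner` import path is an assumption:

from typing import Protocol

from ax.core.runner import Runner  # assumed import path


class BenchmarkProblemProtocol(Protocol):
    # ...other attributes (e.g. has_ground_truth) elided; see the diff below...
    runner: Runner  # plain annotation; was an abc.abstractproperty before
    optimal_value: float  # previously on BenchmarkProblemWithKnownOptimum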
9 changes: 2 additions & 7 deletions ax/benchmark/benchmark.py

@@ -27,10 +27,7 @@
 import numpy as np
 
 from ax.benchmark.benchmark_method import BenchmarkMethod
-from ax.benchmark.benchmark_problem import (
-    BenchmarkProblemProtocol,
-    BenchmarkProblemWithKnownOptimum,
-)
+from ax.benchmark.benchmark_problem import BenchmarkProblemProtocol
 from ax.benchmark.benchmark_result import AggregatedBenchmarkResult, BenchmarkResult
 from ax.benchmark.metrics.base import BenchmarkMetricBase, GroundTruthMetricMixin
 from ax.core.experiment import Experiment
@@ -60,9 +57,7 @@ def compute_score_trace(
     # Use the first GenerationStep's best found point as baseline. Sometimes (ex. in
     # a timeout) the first GenerationStep will not have not completed and we will not
     # have enough trials; in this case we do not score.
-    if (len(optimization_trace) <= num_baseline_trials) or not isinstance(
-        problem, BenchmarkProblemWithKnownOptimum
-    ):
+    if len(optimization_trace) <= num_baseline_trials:
         return np.full(len(optimization_trace), np.nan)
     optimum = problem.optimal_value
     baseline = optimization_trace[num_baseline_trials - 1]
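Because every `BenchmarkProblemProtocol` now carries `optimal_value`, the `isinstance` guard against `BenchmarkProblemWithKnownOptimum` is redundant, and only the trial-count check remains. For intuition, a minimal sketch of the kind of normalization `compute_score_trace` performs with `optimum` and `baseline`; the exact scaling here is an assumption for illustration, not Ax's verbatim code:

import numpy as np


def score_trace_sketch(
    optimization_trace: np.ndarray, optimum: float, baseline: float
) -> np.ndarray:
    # Rescale so that the baseline scores 0 and the known optimum scores 100.
    return 100 * (1 - (optimization_trace - optimum) / (baseline - optimum))


# Minimizing toward an optimum of 0.0 from a baseline of 10.0:
trace = np.array([10.0, 5.0, 1.0, 0.5])
print(score_trace_sketch(trace, optimum=0.0, baseline=10.0))  # [ 0. 50. 90. 95.]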
13 changes: 3 additions & 10 deletions ax/benchmark/benchmark_problem.py

@@ -10,7 +10,6 @@
 # `BenchmarkProblem` as return type annotation, used for serialization and rendering
 # in the UI.
 
-import abc
 from dataclasses import dataclass, field
 from typing import (
     Any,
@@ -92,14 +91,7 @@ class BenchmarkProblemProtocol(Protocol):
         bool, Dict[str, bool]
     ]  # Whether we observe the observation noise level
     has_ground_truth: bool  # if True, evals (w/o synthetic noise) are determinstic
-
-    @abc.abstractproperty
-    def runner(self) -> Runner:
-        pass  # pragma: no cover
-
-
-@runtime_checkable
-class BenchmarkProblemWithKnownOptimum(Protocol):
+    runner: Runner
     optimal_value: float
 
 
@@ -109,7 +101,8 @@ class BenchmarkProblem(Base):
     Problem against which diffrent methods can be benchmarked.
 
     Defines how data is generated, the objective (via the OptimizationConfig),
-    and the SearchSpace.
+    and the SearchSpace. Does not define the runner, which must be handled by
+    subclasses.
 
     Args:
         name: Can be generated programmatically with `_get_name`.
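Replacing `@abc.abstractproperty` with a plain attribute annotation works because `typing.Protocol` members are matched structurally: a class satisfies the protocol simply by having the attributes, without inheriting from it or defining properties. A self-contained sketch of the pattern (the class names here are illustrative, not Ax's):

from dataclasses import dataclass
from typing import Any, Protocol, runtime_checkable


@runtime_checkable
class ProblemLike(Protocol):
    # Hypothetical, trimmed stand-in for BenchmarkProblemProtocol.
    runner: Any  # Any stands in for ax's Runner type
    optimal_value: float


@dataclass
class ToyProblem:
    # No inheritance from ProblemLike: the match is purely structural.
    runner: Any = None
    optimal_value: float = 0.0


problem = ToyProblem()
print(isinstance(problem, ProblemLike))  # True: runtime_checkable checks attributes
print(problem.optimal_value)  # 0.0, available with no extra isinstance guard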
