Fix: keyword arguments to submit (#384)
* Fix: keyword arguments to submit

* Fix: Missing param for implementing AbstractTA

* Fix: Typing of multi_objectives

* Add: multi_objectives to each ExecuteTaFuncWithQueue
eddiebergman authored Feb 18, 2022
1 parent bf264d6 commit b5c1757
Showing 4 changed files with 32 additions and 0 deletions.
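
The underlying problem, fixed in autoPyTorch/utils/single_thread_client.py below, is that a drop-in replacement for dask.distributed.Client must declare the keyword arguments dask's submit normally consumes (priority, pure, resources, and so on); otherwise they fall through to **kwargs and are forwarded to the target function. A minimal sketch of that failure mode, using hypothetical NaiveClient and train names:

from typing import Any, Callable

class NaiveClient:
    """A single-threaded stand-in for dask's Client that forwards every kwarg."""

    def submit(self, func: Callable, *args: Any, **kwargs: Any) -> Any:
        # Scheduler options such as 'priority' or 'pure' are meant for dask,
        # but without explicit parameters they end up in kwargs here.
        return func(*args, **kwargs)

def train(budget: float) -> float:
    return budget * 2.0

client = NaiveClient()
client.submit(train, budget=1.0)                  # fine
# client.submit(train, budget=1.0, priority=10)   # TypeError: unexpected keyword argument 'priority'
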
3 changes: 3 additions & 0 deletions autoPyTorch/api/base_task.py
@@ -690,6 +690,7 @@ def _do_dummy_prediction(self) -> None:
backend=self._backend,
seed=self.seed,
metric=self._metric,
multi_objectives=["cost"],
logger_port=self._logger_port,
cost_for_crash=get_cost_of_crash(self._metric),
abort_on_first_run_crash=False,
@@ -773,6 +774,7 @@ def _do_traditional_prediction(self, time_left: int, func_eval_time_limit_secs:
pynisher_context=self._multiprocessing_context,
backend=self._backend,
seed=self.seed,
multi_objectives=["cost"],
metric=self._metric,
logger_port=self._logger_port,
cost_for_crash=get_cost_of_crash(self._metric),
@@ -1575,6 +1577,7 @@ def fit_pipeline(
backend=self._backend,
seed=self.seed,
metric=metric,
multi_objectives=["cost"],
logger_port=self._logger_port,
cost_for_crash=get_cost_of_crash(metric),
abort_on_first_run_crash=False,
1 change: 1 addition & 0 deletions autoPyTorch/evaluation/tae.py
@@ -111,6 +111,7 @@ def __init__(
cost_for_crash: float,
abort_on_first_run_crash: bool,
pynisher_context: str,
multi_objectives: List[str],
pipeline_config: Optional[Dict[str, Any]] = None,
initial_num_run: int = 1,
stats: Optional[Stats] = None,
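
Because multi_objectives is now part of the ExecuteTaFuncWithQueue signature (the commit message notes it was a missing parameter for implementing the AbstractTA interface), every construction site has to supply it. A hedged sketch of the pattern used throughout this commit; the argument values are illustrative, and accuracy, get_cost_of_crash, backend, stats and logger_port are placeholders for objects provided by the surrounding code (see the call sites above and the tests below):

from autoPyTorch.evaluation.tae import ExecuteTaFuncWithQueue

ta = ExecuteTaFuncWithQueue(
    backend=backend,                          # autoPyTorch Backend instance
    seed=1,
    metric=accuracy,
    multi_objectives=["cost"],                # the newly required objective list
    cost_for_crash=get_cost_of_crash(accuracy),
    abort_on_first_run_crash=False,
    logger_port=logger_port,
    pynisher_context="fork",                  # illustrative; base_task passes its multiprocessing context
    stats=stats,
    memory_limit=3072,
)
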
16 changes: 16 additions & 0 deletions autoPyTorch/utils/single_thread_client.py
@@ -61,8 +61,24 @@ def submit(
func: Callable,
*args: List,
priority: int = 0,
key: Any = None,
workers: Any = None,
resources: Any = None,
retries: Any = None,
fifo_timeout: Any = "100 ms",
allow_other_workers: Any = False,
actor: Any = False,
actors: Any = False,
pure: Any = None,
**kwargs: Any,
) -> Any:
"""
Note
----
The keyword arguments caught by ``dask.distributed.Client`` need to
be specified here so they don't get passed in as ``**kwargs`` to
``func``.
"""
return DummyFuture(func(*args, **kwargs))

def close(self) -> None:
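
With the explicit parameters in place, dask-style scheduler keywords are consumed by submit itself and only genuine keyword arguments reach the target function. A small usage sketch; the client class name SingleThreadedClient is assumed (it is not visible in the hunk above), and DummyFuture is assumed to expose a dask-like result() accessor:

from autoPyTorch.utils.single_thread_client import SingleThreadedClient  # class name assumed

def evaluate(config_id: int, cutoff: float = 10.0) -> float:
    # Hypothetical target function.
    return float(config_id) * cutoff

client = SingleThreadedClient()
future = client.submit(evaluate, 5, cutoff=2.0, priority=30, pure=False)
# 'priority' and 'pure' are caught by submit's explicit parameters,
# so evaluate() only receives config_id=5 and cutoff=2.0.
result = future.result()  # DummyFuture wraps the already-computed value
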
12 changes: 12 additions & 0 deletions test/test_evaluation/test_evaluation.py
@@ -99,6 +99,7 @@ def test_eval_with_limits_holdout(self, pynisher_mock):
config.config_id = 198
ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
stats=self.stats,
multi_objectives=["cost"],
memory_limit=3072,
metric=accuracy,
cost_for_crash=get_cost_of_crash(accuracy),
@@ -120,6 +121,7 @@ def test_cutoff_lower_than_remaining_time(self, pynisher_mock):
ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
stats=self.stats,
memory_limit=3072,
multi_objectives=["cost"],
metric=accuracy,
cost_for_crash=get_cost_of_crash(accuracy),
abort_on_first_run_crash=False,
@@ -146,6 +148,7 @@ def test_eval_with_limits_holdout_fail_timeout(self, pynisher_mock):
ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
stats=self.stats,
memory_limit=3072,
multi_objectives=["cost"],
metric=accuracy,
cost_for_crash=get_cost_of_crash(accuracy),
abort_on_first_run_crash=False,
@@ -166,6 +169,7 @@ def test_zero_or_negative_cutoff(self, pynisher_mock):
ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
stats=self.stats,
memory_limit=3072,
multi_objectives=["cost"],
metric=accuracy,
cost_for_crash=get_cost_of_crash(accuracy),
abort_on_first_run_crash=False,
@@ -187,6 +191,7 @@ def test_eval_with_limits_holdout_fail_silent(self, pynisher_mock):
ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
stats=self.stats,
memory_limit=3072,
multi_objectives=["cost"],
metric=accuracy,
cost_for_crash=get_cost_of_crash(accuracy),
abort_on_first_run_crash=False,
@@ -228,6 +233,7 @@ def test_eval_with_limits_holdout_fail_memory_error(self, pynisher_mock):
ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
stats=self.stats,
memory_limit=3072,
multi_objectives=["cost"],
metric=accuracy,
cost_for_crash=get_cost_of_crash(accuracy),
abort_on_first_run_crash=False,
@@ -266,6 +272,7 @@ def side_effect(**kwargs):
ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
stats=self.stats,
memory_limit=3072,
multi_objectives=["cost"],
metric=accuracy,
cost_for_crash=get_cost_of_crash(accuracy),
abort_on_first_run_crash=False,
@@ -289,6 +296,7 @@ def side_effect(**kwargs):
ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
stats=self.stats,
memory_limit=3072,
multi_objectives=["cost"],
metric=accuracy,
cost_for_crash=get_cost_of_crash(accuracy),
abort_on_first_run_crash=False,
@@ -316,6 +324,7 @@ def side_effect(*args, **kwargs):
ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
stats=self.stats,
memory_limit=3072,
multi_objectives=["cost"],
metric=accuracy,
cost_for_crash=get_cost_of_crash(accuracy),
abort_on_first_run_crash=False,
@@ -340,6 +349,7 @@ def test_exception_in_target_function(self, eval_holdout_mock):
ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
stats=self.stats,
memory_limit=3072,
multi_objectives=["cost"],
metric=accuracy,
cost_for_crash=get_cost_of_crash(accuracy),
abort_on_first_run_crash=False,
@@ -363,6 +373,7 @@ def test_silent_exception_in_target_function(self):
ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
stats=self.stats,
memory_limit=3072,
multi_objectives=["cost"],
metric=accuracy,
cost_for_crash=get_cost_of_crash(accuracy),
abort_on_first_run_crash=False,
@@ -401,6 +412,7 @@ def test_eval_with_simple_intensification(self):
ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
stats=self.stats,
memory_limit=3072,
multi_objectives=["cost"],
metric=accuracy,
cost_for_crash=get_cost_of_crash(accuracy),
abort_on_first_run_crash=False,
