From 3808a39bf78212d92eb3d5edc645459a9aa1c685 Mon Sep 17 00:00:00 2001
From: eddiebergman
Date: Fri, 18 Feb 2022 12:45:40 +0100
Subject: [PATCH 1/4] Fix: keyword arguments to submit

---
 autoPyTorch/utils/single_thread_client.py | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/autoPyTorch/utils/single_thread_client.py b/autoPyTorch/utils/single_thread_client.py
index 9bb0fe3eb..30fd05b94 100644
--- a/autoPyTorch/utils/single_thread_client.py
+++ b/autoPyTorch/utils/single_thread_client.py
@@ -61,8 +61,24 @@ def submit(
         func: Callable,
         *args: List,
         priority: int = 0,
+        key: Any = None,
+        workers: Any = None,
+        resources: Any = None,
+        retries: Any = None,
+        fifo_timeout: Any = "100 ms",
+        allow_other_workers: Any = False,
+        actor: Any = False,
+        actors: Any = False,
+        pure: Any = None,
         **kwargs: Any,
     ) -> Any:
+        """
+        Note
+        ----
+        The keyword arguments consumed by `dask.distributed.Client.submit`
+        need to be specified here so that they are not passed on to
+        ``func`` through ``**kwargs``.
+        """
         return DummyFuture(func(*args, **kwargs))
 
     def close(self) -> None:

From 24c24ebfb1a6537502d9b04fb719864e2316087c Mon Sep 17 00:00:00 2001
From: eddiebergman
Date: Fri, 18 Feb 2022 13:40:34 +0100
Subject: [PATCH 2/4] Fix: Missing param for implementing AbstractTA

---
 autoPyTorch/evaluation/tae.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/autoPyTorch/evaluation/tae.py b/autoPyTorch/evaluation/tae.py
index 7ca895304..df3fb60f0 100644
--- a/autoPyTorch/evaluation/tae.py
+++ b/autoPyTorch/evaluation/tae.py
@@ -116,6 +116,7 @@ def __init__(
         stats: Optional[Stats] = None,
         run_obj: str = 'quality',
         par_factor: int = 1,
+        multi_objectives: List[str] = None,
         output_y_hat_optimization: bool = True,
         include: Optional[Dict[str, Any]] = None,
         exclude: Optional[Dict[str, Any]] = None,

From c44b40a31c36571363234c2eea267333436862ba Mon Sep 17 00:00:00 2001
From: eddiebergman
Date: Fri, 18 Feb 2022 14:48:48 +0100
Subject: [PATCH 3/4] Fix: Typing of multi_objectives

---
 autoPyTorch/evaluation/tae.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/autoPyTorch/evaluation/tae.py b/autoPyTorch/evaluation/tae.py
index df3fb60f0..b109dbb1a 100644
--- a/autoPyTorch/evaluation/tae.py
+++ b/autoPyTorch/evaluation/tae.py
@@ -111,12 +111,12 @@ def __init__(
         cost_for_crash: float,
         abort_on_first_run_crash: bool,
         pynisher_context: str,
+        multi_objectives: List[str],
         pipeline_config: Optional[Dict[str, Any]] = None,
         initial_num_run: int = 1,
         stats: Optional[Stats] = None,
         run_obj: str = 'quality',
         par_factor: int = 1,
-        multi_objectives: List[str] = None,
         output_y_hat_optimization: bool = True,
         include: Optional[Dict[str, Any]] = None,
         exclude: Optional[Dict[str, Any]] = None,

From 39351419c7b6c3362e18d60103a1bd84dbeef0a6 Mon Sep 17 00:00:00 2001
From: eddiebergman
Date: Fri, 18 Feb 2022 15:47:11 +0100
Subject: [PATCH 4/4] Add: multi_objectives to each ExecuteTaFuncWithQueue

---
 autoPyTorch/api/base_task.py            |  3 +++
 test/test_evaluation/test_evaluation.py | 12 ++++++++++++
 2 files changed, 15 insertions(+)

diff --git a/autoPyTorch/api/base_task.py b/autoPyTorch/api/base_task.py
index 80d8bd51e..905d795fd 100644
--- a/autoPyTorch/api/base_task.py
+++ b/autoPyTorch/api/base_task.py
@@ -690,6 +690,7 @@ def _do_dummy_prediction(self) -> None:
             backend=self._backend,
             seed=self.seed,
             metric=self._metric,
+            multi_objectives=["cost"],
             logger_port=self._logger_port,
             cost_for_crash=get_cost_of_crash(self._metric),
             abort_on_first_run_crash=False,
@@ -773,6 +774,7 @@ def _do_traditional_prediction(self, time_left: int, func_eval_time_limit_secs:
             pynisher_context=self._multiprocessing_context,
             backend=self._backend,
             seed=self.seed,
+            multi_objectives=["cost"],
             metric=self._metric,
             logger_port=self._logger_port,
             cost_for_crash=get_cost_of_crash(self._metric),
@@ -1575,6 +1577,7 @@ def fit_pipeline(
             backend=self._backend,
             seed=self.seed,
             metric=metric,
+            multi_objectives=["cost"],
             logger_port=self._logger_port,
             cost_for_crash=get_cost_of_crash(metric),
             abort_on_first_run_crash=False,
diff --git a/test/test_evaluation/test_evaluation.py b/test/test_evaluation/test_evaluation.py
index 051a1c174..2cabb6a73 100644
--- a/test/test_evaluation/test_evaluation.py
+++ b/test/test_evaluation/test_evaluation.py
@@ -99,6 +99,7 @@ def test_eval_with_limits_holdout(self, pynisher_mock):
         config.config_id = 198
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
                                     stats=self.stats,
+                                    multi_objectives=["cost"],
                                     memory_limit=3072,
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
@@ -120,6 +121,7 @@ def test_cutoff_lower_than_remaining_time(self, pynisher_mock):
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
                                     stats=self.stats,
                                     memory_limit=3072,
+                                    multi_objectives=["cost"],
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
                                     abort_on_first_run_crash=False,
@@ -146,6 +148,7 @@ def test_eval_with_limits_holdout_fail_timeout(self, pynisher_mock):
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
                                     stats=self.stats,
                                     memory_limit=3072,
+                                    multi_objectives=["cost"],
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
                                     abort_on_first_run_crash=False,
@@ -166,6 +169,7 @@ def test_zero_or_negative_cutoff(self, pynisher_mock):
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
                                     stats=self.stats,
                                     memory_limit=3072,
+                                    multi_objectives=["cost"],
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
                                     abort_on_first_run_crash=False,
@@ -187,6 +191,7 @@ def test_eval_with_limits_holdout_fail_silent(self, pynisher_mock):
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
                                     stats=self.stats,
                                     memory_limit=3072,
+                                    multi_objectives=["cost"],
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
                                     abort_on_first_run_crash=False,
@@ -228,6 +233,7 @@ def test_eval_with_limits_holdout_fail_memory_error(self, pynisher_mock):
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
                                     stats=self.stats,
                                     memory_limit=3072,
+                                    multi_objectives=["cost"],
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
                                     abort_on_first_run_crash=False,
@@ -266,6 +272,7 @@ def side_effect(**kwargs):
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
                                     stats=self.stats,
                                     memory_limit=3072,
+                                    multi_objectives=["cost"],
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
                                     abort_on_first_run_crash=False,
@@ -289,6 +296,7 @@ def side_effect(**kwargs):
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
                                     stats=self.stats,
                                     memory_limit=3072,
+                                    multi_objectives=["cost"],
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
                                     abort_on_first_run_crash=False,
@@ -316,6 +324,7 @@ def side_effect(*args, **kwargs):
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
                                     stats=self.stats,
                                     memory_limit=3072,
+                                    multi_objectives=["cost"],
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
                                     abort_on_first_run_crash=False,
@@ -340,6 +349,7 @@ def test_exception_in_target_function(self, eval_holdout_mock):
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
                                     stats=self.stats,
                                     memory_limit=3072,
+                                    multi_objectives=["cost"],
                                     metric=accuracy,
                                    cost_for_crash=get_cost_of_crash(accuracy),
                                    abort_on_first_run_crash=False,
@@ -363,6 +373,7 @@ def test_silent_exception_in_target_function(self):
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
                                     stats=self.stats,
                                     memory_limit=3072,
+                                    multi_objectives=["cost"],
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
                                     abort_on_first_run_crash=False,
@@ -401,6 +412,7 @@ def test_eval_with_simple_intensification(self):
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
                                     stats=self.stats,
                                     memory_limit=3072,
+                                    multi_objectives=["cost"],
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
                                     abort_on_first_run_crash=False,
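
Note on PATCH 1/4: the sketch below is a minimal, self-contained reproduction of
the failure mode it fixes; the LeakyClient/FixedClient names and the objective
function are hypothetical and not part of autoPyTorch. When a single-threaded
stand-in for dask.distributed.Client declares submit with only *args/**kwargs,
scheduler-only keywords such as priority, which call sites pass for dask's
benefit, leak through to the wrapped function.

    from typing import Any, Callable

    def objective(x: int) -> int:
        # The evaluated function accepts no scheduler keywords.
        return x * 2

    class LeakyClient:
        # Pre-patch shape: every keyword is forwarded to func.
        def submit(self, func: Callable, *args: Any, **kwargs: Any) -> Any:
            return func(*args, **kwargs)

    class FixedClient:
        # Post-patch shape: scheduler keywords are named parameters,
        # so they are absorbed here and never reach func.
        def submit(
            self,
            func: Callable,
            *args: Any,
            priority: int = 0,
            key: Any = None,
            pure: Any = None,
            **kwargs: Any,
        ) -> Any:
            return func(*args, **kwargs)

    print(FixedClient().submit(objective, 21, priority=10))  # prints 42
    try:
        LeakyClient().submit(objective, 21, priority=10)
    except TypeError as err:
        # objective() got an unexpected keyword argument 'priority'
        print(err)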
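
Note on PATCH 2/4 and 3/4: a short sketch (hypothetical run_* names) of why the
series replaces the interim default rather than keeping it. `List[str] = None`
is rejected by strict mypy because None is not a List[str]; the clean options
are an Optional wrapper or a required parameter, and PATCH 3/4 takes the
required form, presumably to match the AbstractTA interface named in
PATCH 2/4's subject.

    from typing import List, Optional

    # PATCH 2/4's interim signature: runs, but strict mypy flags the
    # default ("Incompatible default for argument").
    def run_a(multi_objectives: List[str] = None) -> None:  # type: ignore[assignment]
        pass

    # Two type-clean alternatives; PATCH 3/4 chooses the required form.
    def run_b(multi_objectives: Optional[List[str]] = None) -> None:
        pass

    def run_c(multi_objectives: List[str]) -> None:
        pass

    run_c(["cost"])  # call sites must now name the objectives explicitly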