Fix: keyword arguments to submit #384

Merged: 4 commits, Feb 18, 2022
3 changes: 3 additions & 0 deletions autoPyTorch/api/base_task.py
@@ -690,6 +690,7 @@ def _do_dummy_prediction(self) -> None:
backend=self._backend,
seed=self.seed,
metric=self._metric,
+ multi_objectives=["cost"],
logger_port=self._logger_port,
cost_for_crash=get_cost_of_crash(self._metric),
abort_on_first_run_crash=False,
@@ -773,6 +774,7 @@ def _do_traditional_prediction(self, time_left: int, func_eval_time_limit_secs:
pynisher_context=self._multiprocessing_context,
backend=self._backend,
seed=self.seed,
+ multi_objectives=["cost"],
metric=self._metric,
logger_port=self._logger_port,
cost_for_crash=get_cost_of_crash(self._metric),
@@ -1575,6 +1577,7 @@ def fit_pipeline(
backend=self._backend,
seed=self.seed,
metric=metric,
+ multi_objectives=["cost"],
logger_port=self._logger_port,
cost_for_crash=get_cost_of_crash(metric),
abort_on_first_run_crash=False,
1 change: 1 addition & 0 deletions autoPyTorch/evaluation/tae.py
@@ -111,6 +111,7 @@ def __init__(
cost_for_crash: float,
abort_on_first_run_crash: bool,
pynisher_context: str,
+ multi_objectives: List[str],
pipeline_config: Optional[Dict[str, Any]] = None,
initial_num_run: int = 1,
stats: Optional[Stats] = None,
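For context on the review thread further down: the CI failure it quotes is `E TypeError: __init__() got an unexpected keyword argument 'multi_objectives'`, which is exactly what the one-line change above addresses. Here is a minimal, self-contained sketch of that failure mode; `Base` is a hypothetical stand-in for the SMAC parent runner, not the actual SMAC API:

```python
from typing import List


class Base:
    # Hypothetical stand-in for the SMAC parent runner, which (per the CI
    # error quoted in the review thread below) is handed `multi_objectives`.
    def __init__(self, cost_for_crash: float, multi_objectives: List[str]):
        self.cost_for_crash = cost_for_crash
        self.multi_objectives = multi_objectives


class BeforeFix(Base):
    # Old signature: callers supplying multi_objectives=["cost"] blow up
    # before __init__ even runs.
    def __init__(self, cost_for_crash: float):
        super().__init__(cost_for_crash, multi_objectives=["cost"])


class AfterFix(Base):
    # As in the tae.py diff above: accept the argument and thread it through.
    def __init__(self, cost_for_crash: float, multi_objectives: List[str]):
        super().__init__(cost_for_crash, multi_objectives)


try:
    BeforeFix(cost_for_crash=1.0, multi_objectives=["cost"])  # type: ignore[call-arg]
except TypeError as err:
    print(err)  # ... got an unexpected keyword argument 'multi_objectives'

AfterFix(cost_for_crash=1.0, multi_objectives=["cost"])  # fine after the fix
```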
16 changes: 16 additions & 0 deletions autoPyTorch/utils/single_thread_client.py
@@ -61,8 +61,24 @@ def submit(
func: Callable,
*args: List,
priority: int = 0,
+ key: Any = None,
+ workers: Any = None,
+ resources: Any = None,
+ retries: Any = None,
+ fifo_timeout: Any = "100 ms",
+ allow_other_workers: Any = False,
+ actor: Any = False,
+ actors: Any = False,
+ pure: Any = None,
[Review thread on the lines above]

Contributor: I think we also need to add multi_objectives to this list, as the GitHub Actions runs raise `E TypeError: __init__() got an unexpected keyword argument 'multi_objectives'`.

Contributor (author): It's unfortunately a separate problem -_-

Contributor: Okay, thanks for this fix. Let's merge this when we have resolved that issue as well. Btw, let me know if you need help with this error.

Contributor (author): It likely affects autosklearn as well, so I'll update this PR when I find it.

Contributor: Okay, thanks.

Contributor (author): I think I fixed it; there are now timeouts occurring, though, which seem to be another separate issue.
**kwargs: Any,
) -> Any:
"""
Note
----
The keyword arguments caught in `dask.distributed.Client` need to
be specified here so they don't get passed in as ``**kwargs`` to the
``func``.
"""
return DummyFuture(func(*args, **kwargs))

def close(self) -> None:
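The explicit keyword list above is the heart of this PR. Below is a minimal sketch of the bug it fixes, using simplified stand-ins (`BrokenClient`, `FixedClient`, and a toy `DummyFuture`) rather than the real classes in `autoPyTorch/utils/single_thread_client.py`: with a bare `**kwargs` signature, scheduling hints intended for `dask.distributed.Client.submit` are forwarded straight into the user's function.

```python
from typing import Any, Callable


class DummyFuture:
    # Wraps an eagerly computed result to mimic a dask future.
    def __init__(self, result: Any) -> None:
        self._result = result

    def result(self, timeout: Any = None) -> Any:
        return self._result


class BrokenClient:
    # Before the fix: every keyword lands in **kwargs and reaches func.
    def submit(self, func: Callable, *args: Any, **kwargs: Any) -> DummyFuture:
        return DummyFuture(func(*args, **kwargs))


class FixedClient:
    # After the fix: keywords understood by dask.distributed.Client.submit
    # are absorbed by the signature and never reach func.
    def submit(self, func: Callable, *args: Any, priority: int = 0,
               key: Any = None, workers: Any = None, resources: Any = None,
               retries: Any = None, fifo_timeout: Any = "100 ms",
               allow_other_workers: Any = False, actor: Any = False,
               actors: Any = False, pure: Any = None,
               **kwargs: Any) -> DummyFuture:
        return DummyFuture(func(*args, **kwargs))


def double(x: int) -> int:
    return 2 * x


print(FixedClient().submit(double, 3, priority=10, pure=False).result())  # 6

try:
    BrokenClient().submit(double, 3, priority=10, pure=False)
except TypeError as err:
    print(err)  # double() got an unexpected keyword argument 'priority'
```

Because the signature mirrors `dask.distributed.Client.submit`, the single-threaded client stays a drop-in replacement for a real dask client: callers can pass the same scheduling keywords either way.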
12 changes: 12 additions & 0 deletions test/test_evaluation/test_evaluation.py
@@ -99,6 +99,7 @@ def test_eval_with_limits_holdout(self, pynisher_mock):
config.config_id = 198
ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
stats=self.stats,
+ multi_objectives=["cost"],
memory_limit=3072,
metric=accuracy,
cost_for_crash=get_cost_of_crash(accuracy),
@@ -120,6 +121,7 @@ def test_cutoff_lower_than_remaining_time(self, pynisher_mock):
ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
stats=self.stats,
memory_limit=3072,
+ multi_objectives=["cost"],
metric=accuracy,
cost_for_crash=get_cost_of_crash(accuracy),
abort_on_first_run_crash=False,
@@ -146,6 +148,7 @@ def test_eval_with_limits_holdout_fail_timeout(self, pynisher_mock):
ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
stats=self.stats,
memory_limit=3072,
+ multi_objectives=["cost"],
metric=accuracy,
cost_for_crash=get_cost_of_crash(accuracy),
abort_on_first_run_crash=False,
@@ -166,6 +169,7 @@ def test_zero_or_negative_cutoff(self, pynisher_mock):
ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
stats=self.stats,
memory_limit=3072,
+ multi_objectives=["cost"],
metric=accuracy,
cost_for_crash=get_cost_of_crash(accuracy),
abort_on_first_run_crash=False,
@@ -187,6 +191,7 @@ def test_eval_with_limits_holdout_fail_silent(self, pynisher_mock):
ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
stats=self.stats,
memory_limit=3072,
+ multi_objectives=["cost"],
metric=accuracy,
cost_for_crash=get_cost_of_crash(accuracy),
abort_on_first_run_crash=False,
@@ -228,6 +233,7 @@ def test_eval_with_limits_holdout_fail_memory_error(self, pynisher_mock):
ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
stats=self.stats,
memory_limit=3072,
+ multi_objectives=["cost"],
metric=accuracy,
cost_for_crash=get_cost_of_crash(accuracy),
abort_on_first_run_crash=False,
@@ -266,6 +272,7 @@ def side_effect(**kwargs):
ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
stats=self.stats,
memory_limit=3072,
+ multi_objectives=["cost"],
metric=accuracy,
cost_for_crash=get_cost_of_crash(accuracy),
abort_on_first_run_crash=False,
@@ -289,6 +296,7 @@ def side_effect(**kwargs):
ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
stats=self.stats,
memory_limit=3072,
+ multi_objectives=["cost"],
metric=accuracy,
cost_for_crash=get_cost_of_crash(accuracy),
abort_on_first_run_crash=False,
@@ -316,6 +324,7 @@ def side_effect(*args, **kwargs):
ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
stats=self.stats,
memory_limit=3072,
+ multi_objectives=["cost"],
metric=accuracy,
cost_for_crash=get_cost_of_crash(accuracy),
abort_on_first_run_crash=False,
@@ -340,6 +349,7 @@ def test_exception_in_target_function(self, eval_holdout_mock):
ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
stats=self.stats,
memory_limit=3072,
+ multi_objectives=["cost"],
metric=accuracy,
cost_for_crash=get_cost_of_crash(accuracy),
abort_on_first_run_crash=False,
@@ -363,6 +373,7 @@ def test_silent_exception_in_target_function(self):
ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
stats=self.stats,
memory_limit=3072,
+ multi_objectives=["cost"],
metric=accuracy,
cost_for_crash=get_cost_of_crash(accuracy),
abort_on_first_run_crash=False,
@@ -401,6 +412,7 @@ def test_eval_with_simple_intensification(self):
ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
stats=self.stats,
memory_limit=3072,
+ multi_objectives=["cost"],
metric=accuracy,
cost_for_crash=get_cost_of_crash(accuracy),
abort_on_first_run_crash=False,