Fix: Implement hotfix from #1407, aimed at master (#1408)
* Fix: Implement hotfix from #1407, aimed at master

* Add: Argument to ExecuteTaFuncWithQueue

* Add: multi_objectives arg to tests

* Fix: Two more locations of `multi_objectives`
eddiebergman authored Feb 21, 2022
1 parent 45d3ff8 commit 00b8e6e
Showing 6 changed files with 53 additions and 3 deletions.
2 changes: 1 addition & 1 deletion autosklearn/__version__.py
@@ -1,4 +1,4 @@
"""Version information."""

# The following line *must* be the last in the module, exactly as formatted:
__version__ = "0.14.3"
__version__ = "0.14.6"
2 changes: 2 additions & 0 deletions autosklearn/automl.py
@@ -487,6 +487,7 @@ def _do_dummy_prediction(self, datamanager: XYDataManager, num_run: int) -> int:
ta = ExecuteTaFuncWithQueue(
backend=self._backend,
autosklearn_seed=self._seed,
multi_objectives=["cost"],
resampling_strategy=self._resampling_strategy,
initial_num_run=num_run,
stats=stats,
@@ -1347,6 +1348,7 @@ def fit_pipeline(
backend=self._backend,
autosklearn_seed=self._seed,
abort_on_first_run_crash=False,
multi_objectives=["cost"],
cost_for_crash=get_cost_of_crash(kwargs["metric"]),
port=self._logger_port,
**kwargs,
2 changes: 1 addition & 1 deletion autosklearn/evaluation/__init__.py
@@ -131,6 +131,7 @@ def __init__(
abort_on_first_run_crash: bool,
port: int,
pynisher_context: str,
multi_objectives: List[str],
initial_num_run: int = 1,
stats: Optional[Stats] = None,
run_obj: str = "quality",
@@ -146,7 +147,6 @@ def __init__(
ta: Optional[Callable] = None,
**resampling_strategy_args: Any,
):

if resampling_strategy == "holdout":
eval_function = autosklearn.evaluation.train_evaluator.eval_holdout
elif resampling_strategy == "holdout-iterative-fit":
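
For context: `multi_objectives` is now a required parameter of `ExecuteTaFuncWithQueue.__init__` with no default, which is why every call site touched in this commit (the dummy-prediction path, `fit_pipeline`, and all the tests below) now passes `multi_objectives=["cost"]`. The following is a minimal sketch of that calling convention, using a hypothetical stand-in class and made-up argument values rather than the real evaluator:

from typing import Any, List


class TaFuncRunnerSketch:
    """Hypothetical stand-in that mirrors only the new signature shape."""

    def __init__(
        self,
        backend: Any,
        autosklearn_seed: int,
        resampling_strategy: str,
        abort_on_first_run_crash: bool,
        port: int,
        pynisher_context: str,
        multi_objectives: List[str],  # new in this hotfix; no default value
        initial_num_run: int = 1,
    ) -> None:
        self.multi_objectives = multi_objectives


# Omitting multi_objectives now raises a TypeError; single-objective callers
# pass ["cost"], exactly as the patched call sites in this commit do.
runner = TaFuncRunnerSketch(
    backend=object(),
    autosklearn_seed=1,
    resampling_strategy="holdout",
    abort_on_first_run_crash=False,
    port=9020,
    pynisher_context="fork",
    multi_objectives=["cost"],
)
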
19 changes: 18 additions & 1 deletion autosklearn/util/single_thread_client.py
@@ -1,4 +1,5 @@
import typing
from typing import Any

from pathlib import Path

@@ -67,8 +68,24 @@ def submit(
func: typing.Callable,
*args: typing.List,
priority: int = 0,
**kwargs: typing.Dict,
key: Any = None,
workers: Any = None,
resources: Any = None,
retries: Any = None,
fifo_timeout: Any = "100 ms",
allow_other_workers: Any = False,
actor: Any = False,
actors: Any = False,
pure: Any = None,
**kwargs: Any,
) -> typing.Any:
"""
Note
----
The keyword arguments caught in `dask.distributed.Client` need to
be specified here so they don't get passed in as ``**kwargs`` to the
``func``.
"""
return DummyFuture(func(*args, **kwargs))

def close(self) -> None:
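
The heart of the hotfix is visible above: `submit` now names dask's scheduling keywords (`priority`, `key`, `workers`, `retries`, `pure`, and so on) explicitly, so they are consumed by the signature instead of falling into `**kwargs` and being forwarded to `func`. A minimal, self-contained sketch of that pattern (illustrative only, not the library class itself):

from typing import Any, Callable


class DummyFutureSketch:
    """Wraps an already-computed result so it can be read back like a future."""

    def __init__(self, result: Any) -> None:
        self._result = result

    def result(self, timeout: Any = None) -> Any:
        return self._result


class SingleThreadClientSketch:
    """Illustrative only: naming dask's scheduling keywords keeps them out of
    **kwargs, so they never reach the target function."""

    def submit(
        self,
        func: Callable,
        *args: Any,
        priority: int = 0,
        key: Any = None,
        workers: Any = None,
        resources: Any = None,
        retries: Any = None,
        fifo_timeout: Any = "100 ms",
        allow_other_workers: Any = False,
        actor: Any = False,
        actors: Any = False,
        pure: Any = None,
        **kwargs: Any,
    ) -> DummyFutureSketch:
        # Only keyword arguments intended for func survive in **kwargs here.
        return DummyFutureSketch(func(*args, **kwargs))


def add(x: int, y: int) -> int:
    return x + y


# priority is swallowed by submit's signature; add only ever sees x and y.
# Before this fix, priority=10 would have been forwarded to add and raised
# a TypeError for an unexpected keyword argument.
assert SingleThreadClientSketch().submit(add, 1, y=2, priority=10).result() == 3
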
20 changes: 20 additions & 0 deletions doc/releases.rst
@@ -9,6 +9,26 @@
Releases
========

Version 0.14.6
==============

* HOTFIX #1407: Catches keyword arguments in `SingleThreadedClient` so they don't get passed to its executing `func`.

Contributors v0.14.6
********************
* Eddie Bergman


Version 0.14.5
==============

* HOTFIX: Release PyPi package with ``automl_common`` included

Contributors v0.14.5
********************
* Eddie Bergman


Version 0.14.3
==============

11 changes: 11 additions & 0 deletions test/test_evaluation/test_evaluation.py
@@ -91,6 +91,7 @@ def test_eval_with_limits_holdout(self, pynisher_mock):
autosklearn_seed=1,
port=self.logger_port,
resampling_strategy="holdout",
multi_objectives=["cost"],
stats=self.stats,
memory_limit=3072,
metric=accuracy,
@@ -121,6 +122,7 @@ def test_zero_or_negative_cutoff(self, pynisher_mock):
backend=self.backend,
autosklearn_seed=1,
port=self.logger_port,
multi_objectives=["cost"],
resampling_strategy="holdout",
stats=self.stats,
metric=accuracy,
@@ -150,6 +152,7 @@ def test_cutoff_lower_than_remaining_time(self, pynisher_mock):
backend=self.backend,
autosklearn_seed=1,
port=self.logger_port,
multi_objectives=["cost"],
resampling_strategy="holdout",
stats=self.stats,
metric=accuracy,
@@ -181,6 +184,7 @@ def test_eval_with_limits_holdout_fail_silent(self, pynisher_mock):
backend=self.backend,
autosklearn_seed=1,
port=self.logger_port,
multi_objectives=["cost"],
resampling_strategy="holdout",
stats=self.stats,
memory_limit=3072,
@@ -251,6 +255,7 @@ def test_eval_with_limits_holdout_fail_memory_error(self, pynisher_mock):
backend=self.backend,
autosklearn_seed=1,
port=self.logger_port,
multi_objectives=["cost"],
resampling_strategy="holdout",
stats=self.stats,
memory_limit=3072,
@@ -292,6 +297,7 @@ def test_eval_with_limits_holdout_fail_timeout(self, pynisher_mock):
backend=self.backend,
autosklearn_seed=1,
port=self.logger_port,
multi_objectives=["cost"],
resampling_strategy="holdout",
stats=self.stats,
memory_limit=3072,
@@ -341,6 +347,7 @@ def side_effect(**kwargs):
backend=self.backend,
autosklearn_seed=1,
port=self.logger_port,
multi_objectives=["cost"],
resampling_strategy="holdout",
stats=self.stats,
memory_limit=3072,
@@ -376,6 +383,7 @@ def side_effect(**kwargs):
backend=self.backend,
autosklearn_seed=1,
port=self.logger_port,
multi_objectives=["cost"],
resampling_strategy="holdout",
stats=self.stats,
memory_limit=3072,
@@ -419,6 +427,7 @@ def side_effect(*args, **kwargs):
backend=self.backend,
autosklearn_seed=1,
port=self.logger_port,
multi_objectives=["cost"],
resampling_strategy="holdout",
stats=self.stats,
memory_limit=3072,
@@ -454,6 +463,7 @@ def test_exception_in_target_function(self, eval_holdout_mock):
backend=self.backend,
autosklearn_seed=1,
port=self.logger_port,
multi_objectives=["cost"],
resampling_strategy="holdout",
stats=self.stats,
memory_limit=3072,
@@ -490,6 +500,7 @@ def test_silent_exception_in_target_function(self):
port=self.logger_port,
autosklearn_seed=1,
resampling_strategy="holdout",
multi_objectives=["cost"],
stats=self.stats,
memory_limit=3072,
metric=accuracy,
