Skip to content

Commit

Permalink
[temporal] [cont] Fix errors
Browse files Browse the repository at this point in the history
[test] Add the tests for the instantiation of abstract evaluator 1 -- 3
[test] Add the tests for util 1 -- 2
[test] Add the tests for train_evaluator 1 -- 2
[refactor] [test] Clean up the pipeline classes and add tests for it 1 -- 2
[test] Add the tests for tae 1 -- 4
[fix] Fix an error due to the change in extract learning curve
[experimental] Increase the coverage

[test] Add tests for pipeline repr

Since the modifications in tests removed the coverage on pipeline repr,
I added tests to restore coverage of those parts.
Basically, the decrease in coverage happened because the tests now use
dummy pipelines.
  • Loading branch information
nabenabe0928 committed Jan 10, 2022
1 parent a50d254 commit ffb4dae
Show file tree
Hide file tree
Showing 17 changed files with 792 additions and 285 deletions.
2 changes: 1 addition & 1 deletion autoPyTorch/api/base_task.py
Original file line number Diff line number Diff line change
Expand Up @@ -1028,7 +1028,7 @@ def _search(
DisableFileOutputParameters.y_opt in self._disable_file_output
and self.ensemble_size > 1
):
self._logger.warning(f"No ensemble will be created when {DisableFileOutputParameters.y_optimization}"
self._logger.warning(f"No ensemble will be created when {DisableFileOutputParameters.y_opt}"
f" is in disable_file_output")

self._memory_limit = memory_limit
Expand Down
23 changes: 17 additions & 6 deletions autoPyTorch/evaluation/abstract_evaluator.py
Original file line number Diff line number Diff line change
Expand Up @@ -193,10 +193,16 @@ def with_default_pipeline_config(
f'{cls.__name__}.with_default_pipeline_config() got multiple values for argument `budget_type`'
)

budget_type_choices = ('epochs', 'runtime')
if pipeline_config is None:
pipeline_config = get_default_pipeline_config(choice=choice)
if 'budget_type' not in pipeline_config:
raise ValueError('pipeline_config must have `budget_type`')

budget_type = pipeline_config['budget_type']
if pipeline_config['budget_type'] not in budget_type_choices:
raise ValueError(f"budget_type must be in {budget_type_choices}, but got {budget_type}")

kwargs.update(pipeline_config=pipeline_config, budget_type=budget_type)
return cls(**kwargs)

Expand Down Expand Up @@ -307,6 +313,9 @@ def _init_dataset_properties(self) -> None:
))

self.X_train, self.y_train = datamanager.train_tensors
self.unique_train_labels = [
list(np.unique(self.y_train[train_indices])) for train_indices, _ in self.splits
]
self.X_valid, self.y_valid, self.X_test, self.y_test = None, None, None, None
if datamanager.val_tensors is not None:
self.X_valid, self.y_valid = datamanager.val_tensors
Expand Down Expand Up @@ -377,7 +386,7 @@ def predict(
self,
X: Optional[np.ndarray],
pipeline: BaseEstimator,
label_examples: Optional[np.ndarray] = None
unique_train_labels: Optional[List[int]] = None
) -> Optional[np.ndarray]:
"""
A wrapper function to handle the prediction of regression or classification tasks.
Expand All @@ -387,7 +396,8 @@ def predict(
A set of features to feed to the pipeline
pipeline (BaseEstimator):
A model that will take the features X return a prediction y
label_examples (Optional[np.ndarray]):
unique_train_labels (Optional[List[int]]):
The unique labels included in the train split.
Returns:
(np.ndarray):
Expand All @@ -411,7 +421,7 @@ def predict(
prediction=pred,
num_classes=self.num_classes,
output_type=self.output_type,
label_examples=label_examples
unique_train_labels=unique_train_labels
)

return pred
Expand All @@ -435,6 +445,10 @@ def _get_pipeline(self) -> BaseEstimator:
A scikit-learn compliant pipeline which is not yet fit to the data.
"""
config = self.evaluator_params.configuration
if not isinstance(config, (int, str, Configuration)):
raise TypeError("The type of configuration must be either (int, str, Configuration), "
f"but got type {type(config)}")

kwargs = dict(
config=config,
random_state=np.random.RandomState(self.fixed_pipeline_params.seed),
Expand All @@ -452,9 +466,6 @@ def _get_pipeline(self) -> BaseEstimator:
exclude=self.fixed_pipeline_params.exclude,
search_space_updates=self.fixed_pipeline_params.search_space_updates,
**kwargs)
else:
raise ValueError("The type of configuration must be either (int, str, Configuration), "
f"but got type {type(config)}")

def _loss(self, labels: np.ndarray, preds: np.ndarray) -> Dict[str, float]:
"""SMAC follows a minimization goal, so the make_scorer
Expand Down
Loading

0 comments on commit ffb4dae

Please sign in to comment.