
Commit

style: apply automated linter fixes
megalinter-bot committed Aug 19, 2024
1 parent 7b66cdd · commit 2bd526e
Showing 4 changed files with 85 additions and 30 deletions.
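All four files receive the same mechanical rewrite: wherever a call or signature ends in a trailing comma and had its arguments packed onto one over-long line, the formatter splits them so each argument sits on its own line. A minimal sketch of the pattern on a hypothetical helper (the exact MegaLinter/formatter configuration is not part of this commit, so treat the "magic trailing comma" rule name as an assumption):

# Hypothetical helper, used only to illustrate the formatting rule applied in
# this commit; it is not part of the Safe-DS codebase.

# Before: arguments packed onto one line inside the parentheses, with a
# trailing comma (mirrors the removed lines below).
def accuracy_before(
    predicted: list[int], expected: list[int],
) -> float:
    return sum(p == e for p, e in zip(predicted, expected)) / len(expected)


# After: the formatter gives each argument its own line (mirrors the added lines).
def accuracy_after(
    predicted: list[int],
    expected: list[int],
) -> float:
    return sum(p == e for p, e in zip(predicted, expected)) / len(expected)


print(accuracy_before([0, 1, 1], [0, 1, 0]))  # 0.666...
print(accuracy_after([0, 1, 1], [0, 1, 0]))   # same result; only the layout changed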
3 changes: 2 additions & 1 deletion src/safeds/ml/metrics/_classification_metrics.py
@@ -55,7 +55,8 @@ def summarize(

@staticmethod
def accuracy(
- predicted: Column | TabularDataset | TimeSeriesDataset, expected: Column | TabularDataset | TimeSeriesDataset,
+ predicted: Column | TabularDataset | TimeSeriesDataset,
+ expected: Column | TabularDataset | TimeSeriesDataset,
) -> float:
"""
Compute the accuracy on the given data.
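For reference, a hedged usage sketch of the reformatted accuracy signature; the import paths safeds.ml.metrics and safeds.data.tabular.containers are assumptions, not shown in this diff:

# Usage sketch only; import paths are assumed, not taken from this commit.
from safeds.data.tabular.containers import Column
from safeds.ml.metrics import ClassificationMetrics

predicted = Column("predicted", [0, 1, 1, 0])
expected = Column("expected", [0, 1, 0, 0])

# Fraction of rows where predicted equals expected -> 0.75 for this data.
print(ClassificationMetrics.accuracy(predicted=predicted, expected=expected))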
30 changes: 20 additions & 10 deletions src/safeds/ml/metrics/_regression_metrics.py
@@ -15,7 +15,8 @@ def __init__(self) -> None: ...

@staticmethod
def summarize(
- predicted: Column | TabularDataset | TimeSeriesDataset, expected: Column | TabularDataset | TimeSeriesDataset,
+ predicted: Column | TabularDataset | TimeSeriesDataset,
+ expected: Column | TabularDataset | TimeSeriesDataset,
) -> Table:
"""
Summarize regression metrics on the given data.
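A similarly hedged sketch of the summarize call whose signature is reformatted above; the signature in this diff shows it returns a Table of regression metrics, though the exact layout of that Table is not visible here:

# Usage sketch only; import paths are assumed, not taken from this commit.
from safeds.data.tabular.containers import Column
from safeds.ml.metrics import RegressionMetrics

predicted = Column("predicted", [2.0, 3.5, 1.0])
expected = Column("expected", [1.5, 3.0, 2.0])

# Produces a Table collecting the individual metrics (R², MAE, MSE, ...).
print(RegressionMetrics.summarize(predicted, expected))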
@@ -60,7 +61,8 @@ def summarize(

@staticmethod
def coefficient_of_determination(
- predicted: Column | TabularDataset | TimeSeriesDataset, expected: Column | TabularDataset | TimeSeriesDataset,
+ predicted: Column | TabularDataset | TimeSeriesDataset,
+ expected: Column | TabularDataset | TimeSeriesDataset,
) -> float:
"""
Compute the coefficient of determination (R²) on the given data.
@@ -105,7 +107,8 @@ def coefficient_of_determination(
predicted_row_as_col: Column = Column("predicted", predicted[i])
expected_row_as_col = expected.get_value(i)
sum_of_coefficient_of_determination += RegressionMetrics.coefficient_of_determination(
- predicted_row_as_col, expected_row_as_col,
+ predicted_row_as_col,
+ expected_row_as_col,
)
return sum_of_coefficient_of_determination / expected.row_count

@@ -122,7 +125,8 @@ def coefficient_of_determination(

@staticmethod
def mean_absolute_error(
- predicted: Column | TabularDataset | TimeSeriesDataset, expected: Column | TabularDataset | TimeSeriesDataset,
+ predicted: Column | TabularDataset | TimeSeriesDataset,
+ expected: Column | TabularDataset | TimeSeriesDataset,
) -> float:
"""
Compute the mean absolute error (MAE) on the given data.
@@ -159,15 +163,17 @@ def mean_absolute_error(
predicted_row_as_col: Column = Column("predicted", predicted[i])
expected_row_as_col = expected.get_value(i)
sum_of_mean_absolute_errors += RegressionMetrics.mean_absolute_error(
- predicted_row_as_col, expected_row_as_col,
+ predicted_row_as_col,
+ expected_row_as_col,
)
return sum_of_mean_absolute_errors / expected.row_count

return (expected._series - predicted._series).abs().mean()

@staticmethod
def mean_directional_accuracy(
- predicted: Column | TabularDataset | TimeSeriesDataset, expected: Column | TabularDataset | TimeSeriesDataset,
+ predicted: Column | TabularDataset | TimeSeriesDataset,
+ expected: Column | TabularDataset | TimeSeriesDataset,
) -> float:
"""
Compute the mean directional accuracy (MDA) on the given data.
@@ -207,7 +213,8 @@ def mean_directional_accuracy(

@staticmethod
def mean_squared_error(
- predicted: Column | TabularDataset | TimeSeriesDataset, expected: Column | TabularDataset | TimeSeriesDataset,
+ predicted: Column | TabularDataset | TimeSeriesDataset,
+ expected: Column | TabularDataset | TimeSeriesDataset,
) -> float:
"""
Compute the mean squared error (MSE) on the given data.
@@ -246,15 +253,17 @@ def mean_squared_error(
predicted_row_as_col: Column = Column("predicted", predicted[i])
expected_row_as_col = expected.get_value(i)
sum_of_mean_squared_errors += RegressionMetrics.mean_squared_error(
- predicted_row_as_col, expected_row_as_col,
+ predicted_row_as_col,
+ expected_row_as_col,
)
return sum_of_mean_squared_errors / expected.row_count

return (expected._series - predicted._series).pow(2).mean()

@staticmethod
def median_absolute_deviation(
- predicted: Column | TabularDataset | TimeSeriesDataset, expected: Column | TabularDataset | TimeSeriesDataset,
+ predicted: Column | TabularDataset | TimeSeriesDataset,
+ expected: Column | TabularDataset | TimeSeriesDataset,
) -> float:
"""
Compute the median absolute deviation (MAD) on the given data.
@@ -291,7 +300,8 @@ def median_absolute_deviation(
predicted_row_as_col: Column = Column("predicted", predicted[i])
expected_row_as_col = expected.get_value(i)
sum_of_median_absolute_deviation += RegressionMetrics.median_absolute_deviation(
- predicted_row_as_col, expected_row_as_col,
+ predicted_row_as_col,
+ expected_row_as_col,
)
return sum_of_median_absolute_deviation / expected.row_count

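The two one-liners visible in this file, (expected._series - predicted._series).abs().mean() for MAE and .pow(2).mean() for MSE, reduce to the usual definitions; a plain-Python restatement for a quick sanity check:

# Plain-Python restatement of the MAE and MSE one-liners shown in this diff.
predicted = [2.0, 3.5, 1.0]
expected = [1.5, 3.0, 2.0]

mae = sum(abs(e - p) for p, e in zip(predicted, expected)) / len(expected)
mse = sum((e - p) ** 2 for p, e in zip(predicted, expected)) / len(expected)

print(mae)  # 0.666...
print(mse)  # 0.5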
59 changes: 45 additions & 14 deletions src/safeds/ml/nn/_model.py
@@ -335,7 +335,11 @@ def fit_by_exhaustive_search(
for model in list_of_models:
futures.append(
executor.submit(
- model.fit, train_set, epoch_size, batch_size, learning_rate, # type: ignore[arg-type]
+ model.fit,
+ train_set,
+ epoch_size,
+ batch_size,
+ learning_rate, # type: ignore[arg-type]
),
) # type: ignore[arg-type]
[done, _] = wait(futures, return_when=ALL_COMPLETED)
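The hunk above only re-wraps the executor.submit arguments; the surrounding logic is a fan-out over candidate models followed by a blocking wait. A generic concurrent.futures sketch of that pattern (the executor type and the fit stub are stand-ins, not the Safe-DS internals):

# Generic fan-out/wait sketch; ProcessPoolExecutor and fit() are stand-ins for
# whatever fit_by_exhaustive_search actually uses.
from concurrent.futures import ALL_COMPLETED, ProcessPoolExecutor, wait


def fit(model_id: int, epoch_size: int, batch_size: int) -> str:
    return f"model {model_id}: {epoch_size} epochs, batch size {batch_size}"


if __name__ == "__main__":
    with ProcessPoolExecutor() as executor:
        # One future per candidate model, mirroring the loop in the diff.
        futures = [executor.submit(fit, model_id, 2, 16) for model_id in range(3)]
        done, _ = wait(futures, return_when=ALL_COMPLETED)
        print([future.result() for future in done])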
@@ -347,7 +351,10 @@ def fit_by_exhaustive_search(
return self._get_best_fnn_model(list_of_fitted_models, test_set, optimization_metric)
else: # train_data is TimeSeriesDataset
return self._get_best_rnn_model(
- list_of_fitted_models, train_set, test_set, optimization_metric, # type: ignore[arg-type]
+ list_of_fitted_models,
+ train_set,
+ test_set,
+ optimization_metric, # type: ignore[arg-type]
) # type: ignore[arg-type]

def _data_split_table(self, data: TabularDataset) -> tuple[TabularDataset, TabularDataset]:
@@ -949,7 +956,11 @@ def fit_by_exhaustive_search(
for model in list_of_models:
futures.append(
executor.submit(
- model.fit, train_set, epoch_size, batch_size, learning_rate, # type: ignore[arg-type]
+ model.fit,
+ train_set,
+ epoch_size,
+ batch_size,
+ learning_rate, # type: ignore[arg-type]
),
) # type: ignore[arg-type]
[done, _] = wait(futures, return_when=ALL_COMPLETED)
@@ -963,17 +974,23 @@ def fit_by_exhaustive_search(
return self._get_best_rnn_model(
list_of_fitted_models,
train_set, # type: ignore[arg-type]
- test_set, # type: ignore[arg-type]
+ test_set,  # type: ignore[arg-type]
optimization_metric, # type: ignore[arg-type]
positive_class,
) # type: ignore[arg-type]
elif isinstance(self._input_conversion, InputConversionImageToColumn):
return self._get_best_cnn_model_column(
- list_of_fitted_models, train_set, optimization_metric, positive_class, # type: ignore[arg-type]
+ list_of_fitted_models,
+ train_set,
+ optimization_metric,
+ positive_class, # type: ignore[arg-type]
) # type: ignore[arg-type]
else: # ImageToTable
return self._get_best_cnn_model_table(
- list_of_fitted_models, train_set, optimization_metric, positive_class, # type: ignore[arg-type]
+ list_of_fitted_models,
+ train_set,
+ optimization_metric,
+ positive_class, # type: ignore[arg-type]
) # type: ignore[arg-type]

def _data_split_table(self, data: TabularDataset) -> tuple[TabularDataset, TabularDataset]:
@@ -1194,15 +1211,21 @@ def _get_best_cnn_model_column(
best_metric_value = ClassificationMetrics.accuracy(predicted=prediction, expected=expected)
case "precision":
best_metric_value = ClassificationMetrics.precision(
- predicted=prediction, expected=expected, positive_class=positive_class,
+ predicted=prediction,
+ expected=expected,
+ positive_class=positive_class,
)
case "recall":
best_metric_value = ClassificationMetrics.recall(
- predicted=prediction, expected=expected, positive_class=positive_class,
+ predicted=prediction,
+ expected=expected,
+ positive_class=positive_class,
)
case "f1_score":
best_metric_value = ClassificationMetrics.f1_score(
- predicted=prediction, expected=expected, positive_class=positive_class,
+ predicted=prediction,
+ expected=expected,
+ positive_class=positive_class,
)
else:
match optimization_metric:
@@ -1213,21 +1236,27 @@ def _get_best_cnn_model_column(
best_metric_value = error_of_fitted_model # pragma: no cover
case "precision":
error_of_fitted_model = ClassificationMetrics.precision(
- predicted=prediction, expected=expected, positive_class=positive_class,
+ predicted=prediction,
+ expected=expected,
+ positive_class=positive_class,
)
if error_of_fitted_model > best_metric_value:
best_model = fitted_model # pragma: no cover
best_metric_value = error_of_fitted_model # pragma: no cover
case "recall":
error_of_fitted_model = ClassificationMetrics.recall(
- predicted=prediction, expected=expected, positive_class=positive_class,
+ predicted=prediction,
+ expected=expected,
+ positive_class=positive_class,
)
if error_of_fitted_model > best_metric_value:
best_model = fitted_model # pragma: no cover
best_metric_value = error_of_fitted_model # pragma: no cover
case "f1_score":
error_of_fitted_model = ClassificationMetrics.f1_score(
- predicted=prediction, expected=expected, positive_class=positive_class,
+ predicted=prediction,
+ expected=expected,
+ positive_class=positive_class,
)
if error_of_fitted_model > best_metric_value:
best_model = fitted_model # pragma: no cover
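The match blocks above all follow the same shape: compute the chosen classification metric on each fitted model's predictions and keep the model with the highest value. A hedged sketch of that selection loop (import paths and the example data are assumptions; the positive_class keyword matches the signatures visible in this diff):

# Selection-loop sketch only; imports and the example data are assumptions.
from safeds.data.tabular.containers import Column
from safeds.ml.metrics import ClassificationMetrics

expected = Column("expected", [1, 0, 1, 1])
predictions = {
    "model_a": Column("predicted", [1, 0, 0, 1]),
    "model_b": Column("predicted", [1, 1, 1, 1]),
}

best_name, best_value = "", -1.0
for name, predicted in predictions.items():
    value = ClassificationMetrics.f1_score(predicted=predicted, expected=expected, positive_class=1)
    if value > best_value:
        best_name, best_value = name, value

print(best_name, best_value)  # the model with the higher F1 score wins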
@@ -1266,7 +1295,8 @@ def _get_best_cnn_model_table(
match optimization_metric:
case "accuracy":
best_metric_value = ClassificationMetrics.accuracy(
- predicted=prediction, expected=expected, # type: ignore[arg-type]
+ predicted=prediction,
+ expected=expected, # type: ignore[arg-type]
) # type: ignore[arg-type]
case "precision":
best_metric_value = ClassificationMetrics.precision(
@@ -1290,7 +1320,8 @@ def _get_best_cnn_model_table(
match optimization_metric:
case "accuracy":
error_of_fitted_model = ClassificationMetrics.accuracy(
- predicted=prediction, expected=expected, # type: ignore[arg-type]
+ predicted=prediction,
+ expected=expected, # type: ignore[arg-type]
) # type: ignore[arg-type]
if error_of_fitted_model > best_metric_value:
best_model = fitted_model # pragma: no cover
23 changes: 18 additions & 5 deletions tests/safeds/ml/nn/test_model.py
@@ -212,7 +212,9 @@ def test_should_catch_invalid_fit_data(self, device: Device, table: TabularDatas
def test_should_raise_when_time_series_classification_with_continuous_data(self, device: Device) -> None:
configure_test_with_device(device)
data = Table.from_dict({"a": [1, 2, 3], "b": [1, 2, 3], "c": [0, 1, 0]}).to_time_series_dataset(
"c", 1, continuous=True,
"c",
1,
continuous=True,
)
model = NeuralNetworkClassifier(
InputConversionTimeSeries(),
@@ -331,7 +333,9 @@ def test_should_raise_when_fitting_by_exhaustive_search_without_choice(self, dev
def test_should_raise_when_time_series_classification_with_continuous_data(self, device: Device) -> None:
configure_test_with_device(device)
data = Table.from_dict({"a": [1, 2, 3], "b": [1, 2, 3], "c": [0, 1, 0]}).to_time_series_dataset(
"c", 1, continuous=True,
"c",
1,
continuous=True,
)
model = NeuralNetworkClassifier(
InputConversionTimeSeries(),
@@ -432,7 +436,10 @@ def test_should_assert_that_is_fitted_is_set_correctly_and_check_return_type_for
assert not model.is_fitted

fitted_model = model.fit_by_exhaustive_search(
- train_table, optimization_metric=metric, positive_class=positive_class, epoch_size=2,
+ train_table,
+ optimization_metric=metric,
+ positive_class=positive_class,
+ epoch_size=2,
)

assert fitted_model.is_fitted
@@ -493,7 +500,10 @@ def test_should_assert_that_is_fitted_is_set_correctly_and_check_return_type_for
)
assert not model.is_fitted
fitted_model = model.fit_by_exhaustive_search(
- image_dataset, epoch_size=2, optimization_metric=metric, positive_class=positive_class,
+ image_dataset,
+ epoch_size=2,
+ optimization_metric=metric,
+ positive_class=positive_class,
)

assert fitted_model.is_fitted
@@ -554,7 +564,10 @@ def test_should_assert_that_is_fitted_is_set_correctly_and_check_return_type_for
)
assert not model.is_fitted
fitted_model = model.fit_by_exhaustive_search(
- image_dataset, epoch_size=2, optimization_metric=metric, positive_class=positive_class,
+ image_dataset,
+ epoch_size=2,
+ optimization_metric=metric,
+ positive_class=positive_class,
)

assert fitted_model.is_fitted