feat: move NN converters and layers to separate packages (#759)
### Summary of Changes

NN converters and layers now live in the separate packages `safeds.ml.nn.converters` and `safeds.ml.nn.layers`. We can
later re-export them from the `safeds.ml.nn` package based on user feedback.
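For downstream code, the change is mostly a matter of import paths. Below is a minimal sketch of the migration, assuming the class names shown in the diffs and that the model classes stay exported from `safeds.ml.nn` (its `_model.py` is only adjusted, not moved):

```python
# Before this commit, everything was exported from the flat package:
# from safeds.ml.nn import ForwardLayer, InputConversionTable, OutputConversionTable

# After this commit, layers and converters come from their own packages
# (a sketch based on the new __init__.py files shown below):
from safeds.ml.nn import NeuralNetworkRegressor
from safeds.ml.nn.converters import InputConversionTable, OutputConversionTable
from safeds.ml.nn.layers import ForwardLayer
```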
lars-reimann authored May 13, 2024
1 parent 9e40b65 commit c6a4073
Showing 35 changed files with 237 additions and 175 deletions.
56 changes: 1 addition & 55 deletions src/safeds/ml/nn/__init__.py
@@ -1,75 +1,21 @@
"""Classes for classification tasks."""
"""Neural networks for various tasks."""

from typing import TYPE_CHECKING

import apipkg

if TYPE_CHECKING:
from ._convolutional2d_layer import Convolutional2DLayer, ConvolutionalTranspose2DLayer
from ._flatten_layer import FlattenLayer
from ._forward_layer import ForwardLayer
from ._input_conversion import InputConversion
from ._input_conversion_image import InputConversionImage
from ._input_conversion_table import InputConversionTable
from ._input_conversion_time_series import InputConversionTimeSeries
from ._layer import Layer
from ._lstm_layer import LSTMLayer
from ._model import NeuralNetworkClassifier, NeuralNetworkRegressor
from ._output_conversion import OutputConversion
from ._output_conversion_image import (
OutputConversionImageToColumn,
OutputConversionImageToImage,
OutputConversionImageToTable,
)
from ._output_conversion_table import OutputConversionTable
from ._output_conversion_time_series import OutputConversionTimeSeries
from ._pooling2d_layer import AvgPooling2DLayer, MaxPooling2DLayer

apipkg.initpkg(
__name__,
{
"AvgPooling2DLayer": "._pooling2d_layer:AvgPooling2DLayer",
"Convolutional2DLayer": "._convolutional2d_layer:Convolutional2DLayer",
"ConvolutionalTranspose2DLayer": "._convolutional2d_layer:ConvolutionalTranspose2DLayer",
"FlattenLayer": "._flatten_layer:FlattenLayer",
"ForwardLayer": "._forward_layer:ForwardLayer",
"InputConversion": "._input_conversion:InputConversion",
"InputConversionImage": "._input_conversion_image:InputConversionImage",
"InputConversionTable": "._input_conversion_table:InputConversionTable",
"Layer": "._layer:Layer",
"OutputConversion": "._output_conversion:OutputConversion",
"InputConversionTimeSeries": "._input_conversion_time_series:InputConversionTimeSeries",
"LSTMLayer": "._lstm_layer:LSTMLayer",
"OutputConversionTable": "._output_conversion_table:OutputConversionTable",
"OutputConversionTimeSeries": "._output_conversion_time_series:OutputConversionTimeSeries",
"MaxPooling2DLayer": "._pooling2d_layer:MaxPooling2DLayer",
"NeuralNetworkClassifier": "._model:NeuralNetworkClassifier",
"NeuralNetworkRegressor": "._model:NeuralNetworkRegressor",
"OutputConversionImageToColumn": "._output_conversion_image:OutputConversionImageToColumn",
"OutputConversionImageToImage": "._output_conversion_image:OutputConversionImageToImage",
"OutputConversionImageToTable": "._output_conversion_image:OutputConversionImageToTable",
},
)

__all__ = [
"AvgPooling2DLayer",
"Convolutional2DLayer",
"ConvolutionalTranspose2DLayer",
"FlattenLayer",
"ForwardLayer",
"InputConversion",
"InputConversionImage",
"InputConversionTable",
"Layer",
"MaxPooling2DLayer",
"OutputConversion",
"InputConversionTimeSeries",
"LSTMLayer",
"OutputConversionTable",
"OutputConversionTimeSeries",
"NeuralNetworkClassifier",
"NeuralNetworkRegressor",
"OutputConversionImageToColumn",
"OutputConversionImageToImage",
"OutputConversionImageToTable",
]
17 changes: 10 additions & 7 deletions src/safeds/ml/nn/_model.py
@@ -14,25 +14,28 @@
InvalidModelStructureError,
ModelNotFittedError,
)
from safeds.ml.nn import (
Convolutional2DLayer,
FlattenLayer,
ForwardLayer,
from safeds.ml.nn.converters import (
InputConversionImage,
OutputConversionImageToColumn,
OutputConversionImageToImage,
OutputConversionImageToTable,
)
from safeds.ml.nn._output_conversion_image import _OutputConversionImage
from safeds.ml.nn._pooling2d_layer import _Pooling2DLayer
from safeds.ml.nn.converters._output_conversion_image import _OutputConversionImage
from safeds.ml.nn.layers import (
Convolutional2DLayer,
FlattenLayer,
ForwardLayer,
)
from safeds.ml.nn.layers._pooling2d_layer import _Pooling2DLayer

if TYPE_CHECKING:
from collections.abc import Callable

from torch import Tensor, nn

from safeds.data.image.typing import ImageSize
from safeds.ml.nn import InputConversion, Layer, OutputConversion
from safeds.ml.nn.converters import InputConversion, OutputConversion
from safeds.ml.nn.layers import Layer


IFT = TypeVar("IFT", TabularDataset, TimeSeriesDataset, ImageDataset) # InputFitType
48 changes: 48 additions & 0 deletions src/safeds/ml/nn/converters/__init__.py
@@ -0,0 +1,48 @@
"""Converters between our data contains and tensors."""

from typing import TYPE_CHECKING

import apipkg

if TYPE_CHECKING:
from ._input_conversion import InputConversion
from ._input_conversion_image import InputConversionImage
from ._input_conversion_table import InputConversionTable
from ._input_conversion_time_series import InputConversionTimeSeries
from ._output_conversion import OutputConversion
from ._output_conversion_image import (
OutputConversionImageToColumn,
OutputConversionImageToImage,
OutputConversionImageToTable,
)
from ._output_conversion_table import OutputConversionTable
from ._output_conversion_time_series import OutputConversionTimeSeries

apipkg.initpkg(
__name__,
{
"InputConversion": "._input_conversion:InputConversion",
"InputConversionImage": "._input_conversion_image:InputConversionImage",
"InputConversionTable": "._input_conversion_table:InputConversionTable",
"InputConversionTimeSeries": "._input_conversion_time_series:InputConversionTimeSeries",
"OutputConversion": "._output_conversion:OutputConversion",
"OutputConversionImageToColumn": "._output_conversion_image:OutputConversionImageToColumn",
"OutputConversionImageToImage": "._output_conversion_image:OutputConversionImageToImage",
"OutputConversionImageToTable": "._output_conversion_image:OutputConversionImageToTable",
"OutputConversionTable": "._output_conversion_table:OutputConversionTable",
"OutputConversionTimeSeries": "._output_conversion_time_series:OutputConversionTimeSeries",
},
)

__all__ = [
"InputConversion",
"InputConversionImage",
"InputConversionTable",
"InputConversionTimeSeries",
"OutputConversion",
"OutputConversionImageToColumn",
"OutputConversionImageToImage",
"OutputConversionImageToTable",
"OutputConversionTable",
"OutputConversionTimeSeries",
]
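Like the old `safeds.ml.nn/__init__.py`, the new package combines `apipkg.initpkg` with `TYPE_CHECKING`-guarded imports: type checkers see the real classes, while at runtime a submodule is only imported when one of its exported names is first accessed. A rough sketch of that observable behaviour, assuming safe-ds and apipkg are installed (the exact loading rules of apipkg are an assumption, not part of this commit):

```python
import sys

# Importing the package only sets up the lazy apipkg proxies.
import safeds.ml.nn.converters as converters

# The concrete submodule is typically not loaded yet at this point.
print("safeds.ml.nn.converters._input_conversion_table" in sys.modules)

# Accessing an exported name makes apipkg import the submodule behind it.
_ = converters.InputConversionTable
print("safeds.ml.nn.converters._input_conversion_table" in sys.modules)
```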
@@ -3,16 +3,16 @@
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Generic, TypeVar

if TYPE_CHECKING:
from torch.utils.data import DataLoader

from safeds.data.image.containers._single_size_image_list import _SingleSizeImageList
from safeds.data.image.typing import ImageSize

from safeds.data.image.containers import ImageList
from safeds.data.labeled.containers import ImageDataset, TabularDataset, TimeSeriesDataset
from safeds.data.tabular.containers import Table

if TYPE_CHECKING:
from torch.utils.data import DataLoader

from safeds.data.image.containers._single_size_image_list import _SingleSizeImageList
from safeds.data.image.typing import ImageSize

FT = TypeVar("FT", TabularDataset, TimeSeriesDataset, ImageDataset)
PT = TypeVar("PT", Table, TimeSeriesDataset, ImageList)

@@ -9,12 +9,12 @@
from safeds.data.labeled.containers import ImageDataset
from safeds.data.labeled.containers._image_dataset import _ColumnAsTensor, _TableAsTensor

from ._input_conversion import InputConversion

if TYPE_CHECKING:
from safeds.data.image.typing import ImageSize
from safeds.data.tabular.transformation import OneHotEncoder

from safeds.ml.nn import InputConversion


class InputConversionImage(InputConversion[ImageDataset, ImageList]):
"""The input conversion for a neural network, defines the input parameters for the neural network."""
@@ -2,12 +2,13 @@

from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
from torch.utils.data import DataLoader

from safeds.data.labeled.containers import TabularDataset
from safeds.data.tabular.containers import Table
from safeds.ml.nn import InputConversion

from ._input_conversion import InputConversion

if TYPE_CHECKING:
from torch.utils.data import DataLoader


class InputConversionTable(InputConversion[TabularDataset, Table]):
@@ -2,12 +2,13 @@

from typing import TYPE_CHECKING, Any

from safeds.data.labeled.containers import TimeSeriesDataset

from ._input_conversion import InputConversion

if TYPE_CHECKING:
from torch.utils.data import DataLoader

from safeds.data.labeled.containers import TimeSeriesDataset
from safeds.ml.nn._input_conversion import InputConversion


class InputConversionTimeSeries(InputConversion[TimeSeriesDataset, TimeSeriesDataset]):
"""The input conversion for a neural network, defines the input parameters for the neural network."""
@@ -4,14 +4,12 @@
from typing import TYPE_CHECKING, Any, Generic, TypeVar

from safeds.data.image.containers import ImageList
from safeds.data.labeled.containers import ImageDataset, TabularDataset
from safeds.data.labeled.containers import ImageDataset, TabularDataset, TimeSeriesDataset
from safeds.data.tabular.containers import Table

if TYPE_CHECKING:
from torch import Tensor

from safeds.data.labeled.containers import TimeSeriesDataset
from safeds.data.tabular.containers import Table

IT = TypeVar("IT", Table, TimeSeriesDataset, ImageList)
OT = TypeVar("OT", TabularDataset, TimeSeriesDataset, ImageDataset)

@@ -10,13 +10,13 @@
from safeds.data.labeled.containers import ImageDataset
from safeds.data.labeled.containers._image_dataset import _ColumnAsTensor, _TableAsTensor
from safeds.data.tabular.containers import Column, Table
from safeds.data.tabular.transformation import OneHotEncoder

from ._output_conversion import OutputConversion

if TYPE_CHECKING:
from torch import Tensor

from safeds.data.tabular.transformation import OneHotEncoder
from safeds.ml.nn import OutputConversion


class _OutputConversionImage(OutputConversion[ImageList, ImageDataset], ABC):

@@ -2,12 +2,13 @@

from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
from torch import Tensor

from safeds.data.labeled.containers import TabularDataset
from safeds.data.tabular.containers import Column, Table
from safeds.ml.nn import OutputConversion

from ._output_conversion import OutputConversion

if TYPE_CHECKING:
from torch import Tensor


class OutputConversionTable(OutputConversion[Table, TabularDataset]):
@@ -4,12 +4,13 @@
from typing import TYPE_CHECKING, Any

from safeds._utils import _structural_hash
from safeds.data.labeled.containers import TimeSeriesDataset
from safeds.data.tabular.containers import Column

from ._output_conversion import OutputConversion

if TYPE_CHECKING:
from torch import Tensor
from safeds.data.labeled.containers import TimeSeriesDataset
from safeds.data.tabular.containers import Column
from safeds.ml.nn._output_conversion import OutputConversion


class OutputConversionTimeSeries(OutputConversion[TimeSeriesDataset, TimeSeriesDataset]):
38 changes: 38 additions & 0 deletions src/safeds/ml/nn/layers/__init__.py
@@ -0,0 +1,38 @@
"""Layers of neural networks."""

from typing import TYPE_CHECKING

import apipkg

if TYPE_CHECKING:
from ._convolutional2d_layer import Convolutional2DLayer, ConvolutionalTranspose2DLayer
from ._flatten_layer import FlattenLayer
from ._forward_layer import ForwardLayer
from ._layer import Layer
from ._lstm_layer import LSTMLayer
from ._pooling2d_layer import AveragePooling2DLayer, MaxPooling2DLayer

apipkg.initpkg(
__name__,
{
"Convolutional2DLayer": "._convolutional2d_layer:Convolutional2DLayer",
"ConvolutionalTranspose2DLayer": "._convolutional2d_layer:ConvolutionalTranspose2DLayer",
"FlattenLayer": "._flatten_layer:FlattenLayer",
"ForwardLayer": "._forward_layer:ForwardLayer",
"Layer": "._layer:Layer",
"LSTMLayer": "._lstm_layer:LSTMLayer",
"AveragePooling2DLayer": "._pooling2d_layer:AveragePooling2DLayer",
"MaxPooling2DLayer": "._pooling2d_layer:MaxPooling2DLayer",
},
)

__all__ = [
"Convolutional2DLayer",
"ConvolutionalTranspose2DLayer",
"FlattenLayer",
"ForwardLayer",
"Layer",
"LSTMLayer",
"AveragePooling2DLayer",
"MaxPooling2DLayer",
]
@@ -8,11 +8,11 @@
from safeds._utils import _structural_hash
from safeds.data.image.typing import ImageSize

from ._layer import Layer

if TYPE_CHECKING:
from torch import Tensor, nn

from safeds.ml.nn import Layer


def _create_internal_model(
input_size: int,
@@ -6,13 +6,13 @@
from safeds._config import _init_default_device
from safeds._utils import _structural_hash

from ._layer import Layer

if TYPE_CHECKING:
from torch import Tensor, nn

from safeds.data.image.typing import ImageSize

from safeds.ml.nn import Layer


def _create_internal_model() -> nn.Module:
from torch import nn
@@ -3,15 +3,15 @@
from typing import TYPE_CHECKING, Any

from safeds._config import _init_default_device
from safeds._utils import _structural_hash
from safeds._validation import _check_bounds, _ClosedBound
from safeds.data.image.typing import ImageSize

from ._layer import Layer

if TYPE_CHECKING:
from torch import Tensor, nn

from safeds._utils import _structural_hash
from safeds.ml.nn import Layer


def _create_internal_model(input_size: int, output_size: int, activation_function: str) -> nn.Module:
from torch import nn
Expand Down
File renamed without changes.