diff --git a/.github/workflows/ci-build.yml b/.github/workflows/ci-build.yml index e0061aa..3d397dd 100644 --- a/.github/workflows/ci-build.yml +++ b/.github/workflows/ci-build.yml @@ -18,11 +18,9 @@ jobs: run: | python -m pip install --upgrade pip pip install pybuilder - - name: Run pybuilder w/o unit tests run: | pyb analyze - security-lint: name: Security Lint with bandit runs-on: ubuntu-latest @@ -39,17 +37,15 @@ jobs: run: | python -m pip install --upgrade pip pip install pybuilder - - name: Run pybuilder to lint security w/ bandit run: | pyb bandit - unit-tests: name: Run unit tests runs-on: ${{ matrix.operating-system }} strategy: matrix: - operating-system: [ubuntu-latest, windows-latest, macos-latest] + operating-system: [ubuntu-latest, macos-latest] steps: - uses: actions/checkout@v2 @@ -63,7 +59,7 @@ jobs: run: | python -m pip install --upgrade pip pip install -r requirements.txt - + pip install pybuilder - name: Run unit tests run: | - python -m unittest discover -s tests -p "test_*.py" -vvv + pyb -E unit diff --git a/.gitignore b/.gitignore index 7114a35..41e6331 100644 --- a/.gitignore +++ b/.gitignore @@ -113,6 +113,9 @@ venv.bak/ .spyderproject .spyproject +# vscode project settings +.vscode/ + # Rope project settings .ropeproject diff --git a/README.md b/README.md index a8d55f8..3d87523 100644 --- a/README.md +++ b/README.md @@ -41,29 +41,21 @@ Examples demonstrating basic DNF regimes and instabilities - Detection of input - Selection of input - Working memory of input -- Neural oscillator - -Infrastructure -- Sensor and data input/output -- Plotting ## Example ```python -from lava.lib.dnf.populations import Population -from lava.lib.dnf.kernels import SelectiveKernel -from lava.lib.dnf.connect import connect -from lava.lib.dnf.operations import Instar, OneToOne +from lava.proc.lif.process import LIF +from lava.lib.dnf.kernels.kernels import SelectiveKernel +from lava.lib.dnf.connect.connect import connect +from lava.lib.dnf.operations.operations import Convolution # create population of 20x20 spiking neurons -dnf = Population(shape=(20, 20)) +dnf = LIF(shape=(20, 20)) # create a selective kernel kernel = SelectiveKernel(amp_exc=18, width_exc=[4, 4], global_inh=-15) # apply the kernel to the population to create a DNF with a selective regime -connect(dnf, dnf, [Instar(kernel)]) +connect(dnf.s_out, dnf.a_in, [Convolution(kernel)]) ``` - - - diff --git a/build.py b/build.py index 6996141..3963dc2 100644 --- a/build.py +++ b/build.py @@ -7,22 +7,22 @@ # use_plugin("python.coverage") use_plugin("python.distutils") use_plugin("python.sphinx") -use_plugin('python.install_dependencies') +use_plugin("python.install_dependencies") use_plugin("python.pycharm") -use_plugin('pypi:pybuilder_bandit') +use_plugin("pypi:pybuilder_bandit") -name = "lava-nc/lava-dnf" +name = "lava-dnf" default_task = ["analyze", "publish"] version = "0.1.0" -summary = "A library that provides processes and other software infrastructure to build architectures composed of Dynamic Neural Fields (DNF). This library also provides tools to direct sensory input to neural architectures and to read output, for instance for motor control. Lava-DNF is part of Lava Framework " +summary = "A library that provides processes and other software infrastructure to build architectures composed of Dynamic Neural Fields (DNF)." 
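Note on the CI change above: the unit-test job now delegates to PyBuilder via `pyb -E unit`, where `-E` selects the `unit` environment so that the `@init(environments="unit")` initializer defined further down in this `build.py` runs in addition to the default `@init` one. A minimal, hypothetical sketch of that mechanism (property names are illustrative, not this project's actual build file):

```python
# Sketch of PyBuilder environment-scoped initializers (assumed minimal example).
from pybuilder.core import use_plugin, init

use_plugin("python.core")


@init
def set_properties(project):
    # always applied, for every invocation of `pyb`
    project.set_property("verbose", True)


@init(environments="unit")
def set_properties_unit(project):
    # applied on top of the default only when invoked as `pyb -E unit`
    project.set_property("coverage_break_build", False)
```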
url = "https://lava-nc.org" license = ["BSD-3-Clause"] @init def set_properties(project): - project.set_property("dir_source_main_python", "src/lava") - project.set_property("dir_source_unittest_python", "tests") + project.set_property("dir_source_main_python", "src") + project.set_property("dir_source_unittest_python", "tests/lava") project.set_property("dir_source_main_scripts", "scripts") project.set_property("dir_docs", "docs") @@ -53,20 +53,21 @@ def set_properties(project): "target/*,.svn,CVS,.bzr,.hg,.git,__pycache__,.pybuilder/*", ) - project.get_property('distutils_commands').append('build') - project.get_property('distutils_commands').append('sdist') - project.get_property('distutils_commands').append('bdist_dumb') + project.get_property("distutils_commands").append("build") + project.get_property("distutils_commands").append("sdist") + project.get_property("distutils_commands").append("bdist_dumb") - project.set_property('bandit_break_build', True) - project.set_property('bandit_include_testsources', False) + project.set_property("bandit_break_build", True) + project.set_property("bandit_include_testsources", False) @init(environments="unit") def set_properties_unit(project): - project.set_property("dir_source_main_python", "src/lava") - project.set_property("dir_source_unittest_python", "tests") + project.set_property("dir_source_main_python", "src") + project.set_property("dir_source_unittest_python", "tests/lava") project.set_property("dir_source_main_scripts", "scripts") project.set_property("dir_docs", "docs") + project.build_depends_on("lava", url="git+https://github.com/lava-nc/lava.git") project.set_property("sphinx_config_path", "docs") project.set_property("sphinx_source_dir", "docs") @@ -82,7 +83,7 @@ def set_properties_unit(project): project.plugin_depends_on("sphinx_tabs") project.set_property("verbose", True) - + project.set_property("coverage_threshold_warn", 0) project.set_property("coverage_break_build", False) @@ -96,5 +97,5 @@ def set_properties_unit(project): project.set_property("unittest_module_glob", "test_*") - project.set_property('bandit_break_build', True) - project.set_property('bandit_include_testsources', False) + project.set_property("bandit_break_build", True) + project.set_property("bandit_include_testsources", False) diff --git a/requirements.txt b/requirements.txt index e69de29..a51b61f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -0,0 +1,3 @@ +lava-nc@git+https://github.com/lava-nc/lava.git +numpy +scipy>=1.7.2 diff --git a/setup.py b/setup.py index aaff15c..392c0bf 100644 --- a/setup.py +++ b/setup.py @@ -33,6 +33,7 @@ import shutil from sys import version_info + py3 = version_info[0] == 3 py2 = not py3 if py2: @@ -41,7 +42,9 @@ def install_pyb(): try: - subprocess.check_call([sys.executable, "-m", "pip", "install", "pybuilder"]) + subprocess.check_call( + [sys.executable, "-m", "pip", "install", "pybuilder"] + ) except subprocess.CalledProcessError as e: sys.exit(e.returncode) @@ -64,11 +67,13 @@ def install_pyb(): try: from pybuilder.cli import main + # verbose, debug, skip all optional... 
if main("-v", "-X", "-o", "--reset-plugins", "clean", "package"): raise RuntimeError("PyBuilder build failed") from pybuilder.reactor import Reactor + reactor = Reactor.current_instance() project = reactor.project dist_dir = project.expand_path("$dir_dist") @@ -83,7 +88,9 @@ def install_pyb(): os.remove(target_file_name) shutil.move(src_file, script_dir) setup_args = sys.argv[1:] - subprocess.check_call([sys.executable, "setup.py"] + setup_args, cwd=script_dir) + subprocess.check_call( + [sys.executable, "setup.py"] + setup_args, cwd=script_dir + ) except subprocess.CalledProcessError as e: exit_code = e.returncode sys.exit(exit_code) diff --git a/src/lava/lib/dnf/README.md b/src/lava/lib/dnf/README.md deleted file mode 100644 index 8b13789..0000000 --- a/src/lava/lib/dnf/README.md +++ /dev/null @@ -1 +0,0 @@ - diff --git a/src/lava/lib/dnf/connect/connect.py b/src/lava/lib/dnf/connect/connect.py new file mode 100644 index 0000000..cb23af3 --- /dev/null +++ b/src/lava/lib/dnf/connect/connect.py @@ -0,0 +1,240 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import typing as ty +import numpy as np + +from lava.magma.core.process.process import AbstractProcess +from lava.proc.dense.models import Dense +from lava.magma.core.process.ports.ports import InPort, OutPort + +from lava.lib.dnf.operations.operations import AbstractOperation, Weights +from lava.lib.dnf.connect.exceptions import MisconfiguredConnectError +from lava.lib.dnf.connect.reshape_bool.process import ReshapeBool +from lava.lib.dnf.connect.reshape_int.process import ReshapeInt + + +def connect( + src_op: OutPort, + dst_ip: InPort, + ops: ty.Optional[ty.Union[ty.List[AbstractOperation], + AbstractOperation]] = None +) -> AbstractProcess: + """ + Creates and returns a Connections Process and connects the source + OutPort to the InPort of and the OutPort of to the + InPort of . + + The connectivity is generated from a list of operation objects . + Each operation generates a dense connectivity matrix based + on its parameters. These matrices are multiplied into a single + connectivity matrix, which is then used to generate a Connections Process + between source and destination. + + Parameters + ---------- + src_op : OutPort + OutPort of the source Process that will be connected + dst_ip : InPort + InPort of the destination Process that will be connected + ops : list(AbstractOperation), optional + list of operations that describes how the connection between + and will be created + + Returns + ------- + connections : AbstractProcess + process containing the connections between and + + """ + # validate the list of operations + ops = _validate_ops(ops, src_op.shape, dst_ip.shape) + + # configure all operations in the list with input and output shape + _configure_ops(ops, src_op.shape, dst_ip.shape) + + # compute the connectivity matrix of all operations and multiply them + # into a single matrix that will be used for the Process + weights = _compute_weights(ops) + + # create Connections process and connect it: + # source -> connections -> destination + connections = _make_connections(src_op, dst_ip, weights) + + return connections + + +def _configure_ops( + ops: ty.List[AbstractOperation], + src_shape: ty.Tuple[int, ...], + dst_shape: ty.Tuple[int, ...] +) -> None: + """ + Configure all operations by setting their input and output shape and + checking that the final output shape matches the shape of the destination + InPort. 
+
+    Parameters
+    ----------
+    ops : list(AbstractOperation)
+        list of operations to configure
+    src_shape : tuple(int)
+        shape of the OutPort of the source Process
+    dst_shape : tuple(int)
+        shape of the InPort of the destination Process
+
+    """
+    # We go from the source through all operations and memorize the output
+    # shape of the last operation (here, the source)
+    prev_output_shape = src_shape
+
+    # For every operation in the list of operations...
+    for op in ops:
+        # ...let the operation configure the output shape given the incoming
+        # input shape
+        input_shape = prev_output_shape
+        op.configure(input_shape)
+        # Memorize the computed output shape for the next iteration of the loop
+        prev_output_shape = op.output_shape
+
+    # Check that the output shape of the last operation matches the shape of
+    # the InPort of the destination Process
+    if prev_output_shape != dst_shape:
+        raise MisconfiguredConnectError(
+            "the output shape of the last operation does not match the shape "
+            "of the destination InPort; some operations may be misconfigured")
+
+
+def _validate_ops(
+    ops: ty.Union[AbstractOperation, ty.List[AbstractOperation]],
+    src_shape: ty.Tuple[int, ...],
+    dst_shape: ty.Tuple[int, ...]
+) -> ty.List[AbstractOperation]:
+    """
+    Validates the <ops> argument of the 'connect' function
+
+    Parameters:
+    -----------
+    ops : list or AbstractOperation
+        the list of operations to be validated
+    src_shape : tuple(int)
+        shape of the OutPort of the source Process
+    dst_shape : tuple(int)
+        shape of the InPort of the destination Process
+
+    Returns:
+    --------
+    ops : list
+        validated list of operations
+
+    """
+    # If no operations were specified...
+    if ops is None:
+        if src_shape != dst_shape:
+            raise MisconfiguredConnectError(
+                f"shape of source Port {src_shape} != {dst_shape} "
+                "shape of destination Port; when connecting differently "
+                "shaped Ports you have to specify operations with the "
+                "<ops> argument")
+
+        # ...create a default operation
+        ops = [Weights(1)]
+
+    # Make a list if it is not one already
+    if not isinstance(ops, list):
+        ops = [ops]
+
+    # Empty lists raise an error
+    if len(ops) == 0:
+        raise ValueError("list of operations is empty")
+
+    # Check whether each element in <ops> is of type
+    # AbstractOperation
+    for op in ops:
+        if not isinstance(op, AbstractOperation):
+            raise TypeError("elements in list of operations must be of "
+                            "type AbstractOperation, found type "
+                            f"{type(op)}")
+
+    return ops
+
+
+def _compute_weights(ops: ty.List[AbstractOperation]) -> np.ndarray:
+    """
+    Compute the overall connectivity matrix to be used for the Connections
+    Process from the individual connectivity matrices that each operation
+    produces.
+
+    Parameters
+    ----------
+    ops : list(AbstractOperation)
+        list of operations
+
+    Returns
+    -------
+    weights : np.ndarray
+
+    """
+    weights = None
+
+    # For every operation...
+    for op in ops:
+        # ...compute the weights of the current operation
+        op_weights = op.compute_weights()
+
+        # If it is the first operation in the list, initialize the overall
+        # weights
+        if weights is None:
+            weights = op_weights
+        # Otherwise, multiply the weights of the current operation with the
+        # combined weights of all previous operations in the list to create
+        # the overall weights matrix
+        else:
+            weights = np.matmul(op_weights, weights)
+
+    return weights
+
+
+def _make_connections(src_op: OutPort,
+                      dst_ip: InPort,
+                      weights: np.ndarray) -> AbstractProcess:
+    """
+    Creates a Connections Process with the given weights and connects its
+    ports such that:
+    source-OutPort -> connections-InPort and
+    connections-OutPort -> destination-InPort
+
+    Parameters
+    ----------
+    src_op : OutPort
+        OutPort of the source Process
+    dst_ip : InPort
+        InPort of the destination Process
+    weights : numpy.ndarray
+        connectivity weight matrix used for the Connections Process
+
+    Returns
+    -------
+    Connections Process : AbstractProcess
+
+    """
+
+    # Create the connections process
+    connections = Dense(shape=weights.shape,
+                        weights=weights)
+
+    # Make connections from the source port to the connections process
+    # TODO (MR) workaround in absence of ReshapePorts
+    con_ip = connections.s_in
+    rs1 = ReshapeBool(shape_in=src_op.shape, shape_out=con_ip.shape)
+    src_op.connect(rs1.s_in)
+    rs1.s_out.connect(con_ip)
+
+    # Make connections from the connections process to the destination port
+    # TODO (MR) workaround in absence of ReshapePorts
+    con_op = connections.a_out
+    rs2 = ReshapeInt(shape_in=con_op.shape, shape_out=dst_ip.shape)
+    con_op.connect(rs2.s_in)
+    rs2.s_out.connect(dst_ip)
+
+    return connections
diff --git a/src/lava/lib/dnf/connect/exceptions.py b/src/lava/lib/dnf/connect/exceptions.py
new file mode 100644
index 0000000..c1a101a
--- /dev/null
+++ b/src/lava/lib/dnf/connect/exceptions.py
@@ -0,0 +1,22 @@
+# Copyright (C) 2021 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause
+# See: https://spdx.org/licenses/
+
+import typing as ty
+
+
+class MisconfiguredConnectError(Exception):
+    """
+    Exception that is raised when the connect() function is misconfigured
+    with a wrong combination of operations.
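The loop above composes operation matrices with `np.matmul(op_weights, weights)`, so for an operation list `[A, B]` the overall matrix is `B @ A`: the first operation in the list is applied to the source first. A standalone sketch with made-up 2x2 matrices:

```python
# Sketch of the weight composition performed in _compute_weights; the
# matrices are arbitrary stand-ins for operation outputs.
import numpy as np

a = np.array([[1, 0], [0, 2]])  # hypothetical weights of the first op
b = np.array([[0, 1], [1, 0]])  # hypothetical weights of the second op

overall = None
for op_weights in [a, b]:
    # same update rule as in _compute_weights
    overall = op_weights if overall is None else np.matmul(op_weights, overall)

x = np.array([3, 4])
# applying the overall matrix equals applying a first, then b
assert np.array_equal(overall @ x, b @ (a @ x))
```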
+ + Parameters: + ----------- + msg : str (optional) + custom exception message that overwrites the default + """ + def __init__(self, msg: ty.Optional[str] = None) -> None: + if msg is None: + msg = "call to connection() misconfigured; check the choice and " \ + "parameterization of all operations" + super().__init__(msg) diff --git a/src/lava/lib/dnf/connect/reshape_bool/models.py b/src/lava/lib/dnf/connect/reshape_bool/models.py new file mode 100644 index 0000000..9903faa --- /dev/null +++ b/src/lava/lib/dnf/connect/reshape_bool/models.py @@ -0,0 +1,30 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import numpy as np + +from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.model.py.model import PyLoihiProcessModel +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from lava.magma.core.decorator import implements, requires +from lava.magma.core.resources import CPU + +from lava.lib.dnf.connect.reshape_bool.process import ReshapeBool + + +@implements(proc=ReshapeBool, protocol=LoihiProtocol) +@requires(CPU) +class ReshapeBoolProcessModel(PyLoihiProcessModel): + """ProcessModel for the Reshape Process""" + + shape_out: np.ndarray = LavaPyType(np.ndarray, int) + + s_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, bool) + s_out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, bool) + + def run_spk(self) -> None: + rec = self.s_in.recv() + reshaped_input = np.reshape(rec, tuple(self.shape_out)) + self.s_out.send(reshaped_input) diff --git a/src/lava/lib/dnf/connect/reshape_bool/process.py b/src/lava/lib/dnf/connect/reshape_bool/process.py new file mode 100644 index 0000000..25b181d --- /dev/null +++ b/src/lava/lib/dnf/connect/reshape_bool/process.py @@ -0,0 +1,38 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import numpy as np +import typing as ty + +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.ports.ports import InPort, OutPort +from lava.magma.core.process.variable import Var + +from lava.lib.dnf.utils.validation import validate_shape + + +class ReshapeBool(AbstractProcess): + """ + Reshapes the input to the output shape, keeping the number of elements + constant. + + TODO (MR): Workaround in the absence of Reshape ports. 
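Both reshape workarounds simply forward a reshaped copy of the payload, so the element count must match between input and output shape. A minimal numpy sketch of the `run_spk` logic (values are made up):

```python
# Standalone illustration of the reshape performed in ReshapeBool.run_spk:
# the payload is only reshaped, never resized.
import numpy as np

spikes = np.array([True, False, True, False, True, False], dtype=bool)
shape_out = (2, 3)
assert spikes.size == np.prod(shape_out)  # element count is preserved

reshaped = np.reshape(spikes, shape_out)
print(reshaped.shape)  # (2, 3)
```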
+ + Parameters: + ----------- + shape_in: tuple(int) or int + input shape + shape_out: tuple(int) or int + output shape + """ + def __init__(self, **kwargs: ty.Tuple[int, ...]) -> None: + super().__init__(**kwargs) + + shape_in = validate_shape(kwargs.pop("shape_in", (1,))) + shape_out = validate_shape(kwargs.pop("shape_out", (1,))) + shape_out_array = np.array(shape_out) + self.shape_out = Var(shape=shape_out_array.shape, init=shape_out_array) + + self.s_in = InPort(shape=shape_in) + self.s_out = OutPort(shape=shape_out) diff --git a/src/lava/lib/dnf/connect/reshape_int/models.py b/src/lava/lib/dnf/connect/reshape_int/models.py new file mode 100644 index 0000000..8541b65 --- /dev/null +++ b/src/lava/lib/dnf/connect/reshape_int/models.py @@ -0,0 +1,30 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import numpy as np + +from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.model.py.model import PyLoihiProcessModel +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from lava.magma.core.decorator import implements, requires +from lava.magma.core.resources import CPU + +from lava.lib.dnf.connect.reshape_int.process import ReshapeInt + + +@implements(proc=ReshapeInt, protocol=LoihiProtocol) +@requires(CPU) +class ReshapeIntProcessModel(PyLoihiProcessModel): + """ProcessModel for the Reshape Process""" + + shape_out: np.ndarray = LavaPyType(np.ndarray, int) + + s_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) + s_out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) + + def run_spk(self) -> None: + rec = self.s_in.recv() + reshaped_input = np.reshape(rec, tuple(self.shape_out)) + self.s_out.send(reshaped_input) diff --git a/src/lava/lib/dnf/connect/reshape_int/process.py b/src/lava/lib/dnf/connect/reshape_int/process.py new file mode 100644 index 0000000..cba4407 --- /dev/null +++ b/src/lava/lib/dnf/connect/reshape_int/process.py @@ -0,0 +1,38 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import numpy as np +import typing as ty + +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.ports.ports import InPort, OutPort +from lava.magma.core.process.variable import Var + +from lava.lib.dnf.utils.validation import validate_shape + + +class ReshapeInt(AbstractProcess): + """ + Reshapes the input to the output shape, keeping the number of elements + constant. + + TODO (MR): Workaround in the absence of Reshape ports. 
+ + Parameters: + ----------- + shape_in: tuple(int) or int + input shape + shape_out: tuple(int) or int + output shape + """ + def __init__(self, **kwargs: ty.Tuple[int, ...]) -> None: + super().__init__(**kwargs) + + shape_in = validate_shape(kwargs.pop("shape_in", (1,))) + shape_out = validate_shape(kwargs.pop("shape_out", (1,))) + shape_out_array = np.array(shape_out) + self.shape_out = Var(shape=shape_out_array.shape, init=shape_out_array) + + self.s_in = InPort(shape=shape_in) + self.s_out = OutPort(shape=shape_out) diff --git a/src/lava/lib/dnf/inputs/gauss_pattern/models.py b/src/lava/lib/dnf/inputs/gauss_pattern/models.py new file mode 100644 index 0000000..f0562ce --- /dev/null +++ b/src/lava/lib/dnf/inputs/gauss_pattern/models.py @@ -0,0 +1,57 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import numpy as np + +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from lava.magma.core.model.py.ports import PyOutPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.resources import CPU +from lava.magma.core.decorator import implements, requires +from lava.magma.core.model.py.model import PyLoihiProcessModel + +from lava.lib.dnf.inputs.gauss_pattern.process import GaussPattern +from lava.lib.dnf.utils.math import gauss + + +# TODO: (GK) Change protocol to AsyncProtocol when supported +# TODO: (GK) Change base class to (Sequential)PyProcessModel when supported +@implements(proc=GaussPattern, protocol=LoihiProtocol) +@requires(CPU) +class GaussPatternProcessModel(PyLoihiProcessModel): + """ + PyLoihiProcessModel for GaussPatternProcess. + + Implements the behavior of sending a gauss pattern asynchronously when + a change is triggered. + """ + _shape: np.ndarray = LavaPyType(np.ndarray, int) + + _amplitude: np.ndarray = LavaPyType(np.ndarray, float) + _mean: np.ndarray = LavaPyType(np.ndarray, float) + _stddev: np.ndarray = LavaPyType(np.ndarray, float) + + null_pattern: np.ndarray = LavaPyType(np.ndarray, float) + pattern: np.ndarray = LavaPyType(np.ndarray, float) + + changed: np.ndarray = LavaPyType(np.ndarray, bool) + + a_out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, float) + + def run_spk(self) -> None: + # When changed flag is set to True... + if self.changed[0]: + # ...compute new pattern based on updated parameters + self.pattern = gauss(shape=tuple(self._shape), + domain=None, + amplitude=self._amplitude[0], + mean=self._mean, + stddev=self._stddev) + # Reset the 'changed' flag + self.changed[0] = False + # Send new pattern through the PyOutPort + self.a_out.send(self.pattern) + else: + # Send the null pattern + self.a_out.send(self.null_pattern) diff --git a/src/lava/lib/dnf/inputs/gauss_pattern/process.py b/src/lava/lib/dnf/inputs/gauss_pattern/process.py new file mode 100644 index 0000000..f399d9d --- /dev/null +++ b/src/lava/lib/dnf/inputs/gauss_pattern/process.py @@ -0,0 +1,209 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import numpy as np +import typing as ty + +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.variable import Var +from lava.magma.core.process.ports.ports import OutPort + +from lava.lib.dnf.utils.validation import validate_shape + + +class GaussPattern(AbstractProcess): + """ + Gauss pattern generator Process. + + This process generates Gauss patterns and send them through + the OutPort a_out. 
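The `run_spk` above encodes "no change" as an all-NaN array (`null_pattern`), which downstream consumers detect with an `np.isnan` check (cf. `RateCodeSpikeGen`'s `run_spk`). A small numpy sketch of that convention:

```python
# Sketch of the null-pattern convention between GaussPattern and its
# consumers: an all-NaN array means "no change", anything else is new.
import numpy as np

shape = (3, 3)
null_pattern = np.full(shape, np.nan)
new_pattern = np.zeros(shape)

for received in (null_pattern, new_pattern):
    if not np.isnan(received).any():
        print("pattern changed; recompute spike timing state")
    else:
        print("no change; keep current state")
```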
+ It recomputes new patterns and sends them asynchronously only when one of + the parameters amplitude, mean or stddev changes. + Otherwise, sends an array full of numpy.nan. + + Parameters: + ----------- + shape: tuple(int) + number of neurons per dimension, e.g. shape=(30, 40) + amplitude: float + amplitude of the Gauss pattern + mean: list(float) or float + mean of the Gauss pattern + stddev: list(float) or float + standard deviation of the Gauss pattern + """ + + def __init__(self, **kwargs: ty.Union[ty.Tuple[int, ...], + ty.List[float], + float]) -> None: + super().__init__(**kwargs) + + shape = validate_shape(kwargs.pop("shape")) + amplitude = kwargs.pop("amplitude") + mean = self._validate_param(np.array(shape), + "mean", + kwargs.pop("mean")) + stddev = self._validate_param(np.array(shape), + "stddev", + kwargs.pop("stddev")) + + self._shape = Var(shape=(len(shape),), init=np.array(shape)) + self._amplitude = Var(shape=(1,), init=np.array([amplitude])) + self._mean = Var(shape=(len(shape),), init=mean) + self._stddev = Var(shape=(len(shape),), init=stddev) + + self.null_pattern = Var(shape=shape, init=np.full(shape, np.nan)) + self.pattern = Var(shape=shape, init=np.zeros(shape)) + self.changed = Var(shape=(1,), init=np.array([True])) + + self.a_out = OutPort(shape=shape) + + def _validate_param(self, + shape: np.ndarray, + param_name: str, + param: ty.Union[float, ty.List[float]]) -> np.ndarray: + """Validates that parameter param with name param_name is either + a float value or a list of floats of the same length as the + dimensionality of the given shape. + + Returns param as ndarray. + + Parameters + ---------- + shape : numpy.ndarray + shape of the pattern + param_name : str + name of the parameter (either mean or stddev) + param : list(float) or float + parameter of the pattern + + Returns + ------- + param : numpy.ndarray + + """ + if not isinstance(param, list): + param = float(param) + param = [param] + + # If param is of length 1, no validation against shape + if len(param) == 1: + param_val = param[0] + # Broadcast param value to a list of length equal to shape + # dimensionality + param = [param_val for _ in range(shape.shape[0])] + # Else, if param is of length > 1 + elif len(param) > 1: + # Validate that the length is equal to shape dimensionality + if len(param) != shape.shape[0]: + raise ValueError( + f"<{param_name}> parameter has length different " + "from shape dimensionality") + else: + raise ValueError(f"<{param_name}> parameter cannot be empty") + + return np.array(param) + + def _update(self) -> None: + """Set the value of the changed flag Var to True""" + self.changed.set(np.array([True])) + + # TODO: (GK) Remove when set() function blocks until it is complete + # To make sure parameter was set + self.changed.get() + + @property + def shape(self) -> ty.Union[np.ndarray, None]: + """Get value of the shape Var + + Returns + ------- + shape : numpy.ndarray + """ + try: + return self._shape.get() + # Catch AttributeError error raised at instantiation of the process + # since _shape Var has no runtime associated at that stage + except AttributeError: + return None + + @property + def amplitude(self) -> ty.Union[np.ndarray, None]: + """Get value of the amplitude Var + + Returns + ------- + amplitude : numpy.ndarray + """ + try: + return self._amplitude.get() + # Catch AttributeError error raised at instantiation of the process + # since _amplitude Var has no runtime associated at that stage + except AttributeError: + return None + + @amplitude.setter + def 
amplitude(self, amplitude: float) -> None: + """Set the value of the amplitude Var and updates the changed flag""" + self._amplitude.set(np.array([amplitude])) + + # TODO: (GK) Remove when set blocks until complete + # to make sure parameter was set + self._amplitude.get() + + self._update() + + @property + def mean(self) -> ty.Union[np.ndarray, None]: + """Get value of the mean Var + + Returns + ------- + mean : numpy.ndarray + """ + try: + return self._mean.get() + # Catch AttributeError error raised at instantiation of the process + # since _mean Var has no runtime associated at that stage + except AttributeError: + return None + + @mean.setter + def mean(self, mean: ty.Union[float, ty.List[float]]) -> None: + """Set the value of the mean Var and updates the changed flag""" + mean = self._validate_param(self.shape, "mean", mean) + self._mean.set(mean) + + # TODO: (GK) Remove when set blocks until complete + # to make sure parameter was set + self._mean.get() + + self._update() + + @property + def stddev(self) -> ty.Union[np.ndarray, None]: + """Get value of the stddev Var + + Returns + ------- + stddev : numpy.ndarray + """ + try: + return self._stddev.get() + # Catch AttributeError error raised at instantiation of the process + # since _stddev Var has no runtime associated at that stage + except AttributeError: + return None + + @stddev.setter + def stddev(self, stddev: ty.Union[float, ty.List[float]]) -> None: + """Set the value of the stddev Var and updates the changed flag""" + stddev = self._validate_param(self.shape, "stddev", stddev) + self._stddev.set(stddev) + + # TODO: (GK) Remove when set blocks until complete to make sure + # parameter was set + self._stddev.get() + + self._update() diff --git a/src/lava/lib/dnf/inputs/rate_code_spike_gen/models.py b/src/lava/lib/dnf/inputs/rate_code_spike_gen/models.py new file mode 100644 index 0000000..21a3243 --- /dev/null +++ b/src/lava/lib/dnf/inputs/rate_code_spike_gen/models.py @@ -0,0 +1,191 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import numpy as np + +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.resources import CPU +from lava.magma.core.decorator import implements, requires +from lava.magma.core.model.py.model import PyLoihiProcessModel + +from lava.lib.dnf.inputs.rate_code_spike_gen.process import \ + RateCodeSpikeGen + +# TODO: (GK) This has to be changed to depend on time step duration in Loihi +TIME_STEPS_PER_MINUTE = 6000.0 + + +# TODO: (GK) Change protocol to AsyncProtocol when supported +# TODO: (GK) Change base class to (Sequential)PyProcessModel when supported +@implements(proc=RateCodeSpikeGen, protocol=LoihiProtocol) +@requires(CPU) +class RateCodeSpikeGenProcessModel(PyLoihiProcessModel): + """ + PyLoihiProcessModel for SpikeGeneratorProcess. + + Implements the behavior of a rate-coded spike input generator. 
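A hypothetical usage sketch of `GaussPattern` (parameter values are made up): assigning to a property re-validates the value, writes the underlying `Var`, and raises the `changed` flag so the next `run_spk` recomputes the pattern. Note that the setters call `Var.set()`, which requires a runtime to be active.

```python
# Assumed usage sketch; values are illustrative only.
from lava.lib.dnf.inputs.gauss_pattern.process import GaussPattern

gauss_pattern = GaussPattern(shape=(30, 40), amplitude=200.0,
                             mean=[15.0, 20.0], stddev=[4.0, 4.0])

# later, while a runtime is active:
gauss_pattern.amplitude = 100.0    # marks the pattern as changed
gauss_pattern.mean = [10.0, 10.0]  # scalar or per-dimension list
```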
+ """ + min_spike_rate: np.ndarray = LavaPyType(np.ndarray, float) + seed: np.ndarray = LavaPyType(np.ndarray, int) + + inter_spike_distances: np.ndarray = LavaPyType(np.ndarray, int) + first_spike_times: np.ndarray = LavaPyType(np.ndarray, int) + last_spiked: np.ndarray = LavaPyType(np.ndarray, float) + + spikes: np.ndarray = LavaPyType(np.ndarray, bool) + + a_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, float) + s_out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, bool) + + ts_last_changed: int = 1 + + def _compute_distances(self, pattern: np.ndarray) -> np.ndarray: + """Converts pattern representing spike rates in Hz to + inter spike distances in timesteps. + + Assumes a minute contains TIME_STEPS_PER_MINUTE timesteps. + Assumes all spike rate values less than MIN_SPIKE_RATE are negligible. + + Parameters + ---------- + pattern : numpy.ndarray + pattern representing spike rates in (Hz) + + Returns + ------- + distances : numpy.ndarray + inter spike distances in (timesteps) + + """ + # Represent infinite inter spike distances (i.e negligible spike rates) + # as 0 + distances = np.zeros_like(pattern) + + idx_non_negligible = pattern > self.min_spike_rate[0] + + distances[idx_non_negligible] = \ + np.rint(TIME_STEPS_PER_MINUTE / pattern[idx_non_negligible])\ + .astype(int) + + return distances + + def _compute_first_spike_times(self, + distances: np.ndarray) -> np.ndarray: + """Randomly picks an array of first spike time given an array of + inter spike distances. + + Every first spike time must be less than the associated + inter spike distance. + + Parameters + ---------- + distances : numpy.ndarray + inter spike distances in (timesteps) + + Returns + ------- + first_spike_times : numpy.ndarray + first spike time for each "neuron" + + """ + rng = np.random.default_rng( + seed=None if self.seed[0] == -1 else self.seed[0]) + + first_spikes = np.zeros_like(distances) + + # Find indices where distance is 0 (where the should never be spikes) + idx_zeros = distances == 0 + + # Trick to yield a distances array in the right format for rng.integers + # Since rng.integers samples a random integer from [low, high[, + # we have to add 1 to distances so that high becomes actually + # distance + 1, that way, the outcome of sampling can be distance + distances[~idx_zeros] = distances[~idx_zeros] + 1 + + # For indices where there should be a first spike ... + # Pick a random number in [1, distance[ + # (distance here is actually the true value of distance, +1) + first_spikes[~idx_zeros] = rng.integers( + low=np.ones_like(distances[~idx_zeros]), high=distances[~idx_zeros]) + + # Reverting distances array to its right format + distances[~idx_zeros] = distances[~idx_zeros] - 1 + + return first_spikes + + def _generate_spikes(self, + time_step: int) -> np.ndarray: + """Generates an array of bool values where True represent a spike + and False represents no-spike. + + Uses internal state such as inter spike distances, first spike times, + last spike times and time step where last pattern change happened to + derive whether each "neuron" should fire. 
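A worked example of the rate-to-distance conversion in `_compute_distances`, assuming `TIME_STEPS_PER_MINUTE = 6000.0` as defined above: a rate of 100 maps to one spike every `round(6000 / 100) = 60` time steps, and rates at or below `min_spike_rate` are encoded as distance 0 ("never spikes").

```python
# Standalone replica of the _compute_distances logic; inputs are made up.
import numpy as np

TIME_STEPS_PER_MINUTE = 6000.0
MIN_SPIKE_RATE = 0.5

pattern = np.array([100.0, 60.0, 0.1])  # spike rates as read by the model
distances = np.zeros_like(pattern)      # 0 encodes "never spikes"
idx = pattern > MIN_SPIKE_RATE
distances[idx] = np.rint(TIME_STEPS_PER_MINUTE / pattern[idx]).astype(int)
print(distances)  # [60. 100. 0.]
```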
+
+        Parameters
+        ----------
+        time_step : int
+            current time step
+
+        Returns
+        -------
+        spikes : numpy.ndarray
+            spikes array
+
+        """
+        spikes = np.zeros(self.spikes.shape, dtype=bool)
+
+        # Get time step index since pattern last changed
+        current_ts_transformed = time_step - self.ts_last_changed + 1
+
+        # Get indices where there should never be spikes
+        idx_zeros = self.inter_spike_distances == 0
+        # Get indices where a spike never happened
+        idx_never_spiked = self.last_spiked == -np.inf
+        # Get indices where a first spike should be fired in this time step
+        idx_will_spike_first_time = \
+            self.first_spike_times == current_ts_transformed
+
+        # Compute distances from current time step to last spike times
+        distances_last_spiked = current_ts_transformed - self.last_spiked
+
+        # Spike at indices where there should be a first spike
+        spikes[idx_will_spike_first_time] = True
+        # Spike at indices where we already spiked before, and where the
+        # distance from the current ts to the last spike time is equal to
+        # the inter spike distance
+        spikes[~idx_never_spiked] = \
+            distances_last_spiked[~idx_never_spiked] == \
+            self.inter_spike_distances[~idx_never_spiked]
+        # Do not spike where there should never be spikes
+        spikes[idx_zeros] = False
+
+        # Update last spike times
+        self.last_spiked[spikes] = current_ts_transformed
+
+        return spikes
+
+    def run_spk(self) -> None:
+        # Receive pattern from PyInPort
+        pattern = self.a_in.recv()
+
+        # If the received pattern is not the null_pattern ...
+        if not np.isnan(pattern).any():
+            # Save the current time step
+            self.ts_last_changed = self.current_ts
+            # Reset last spike times
+            self.last_spiked = np.full_like(self.last_spiked, -np.inf)
+
+            # Update inter spike distances based on new pattern
+            self.inter_spike_distances = self._compute_distances(pattern)
+            # Compute first spike time for each "neuron"
+            self.first_spike_times = self._compute_first_spike_times(
+                self.inter_spike_distances)
+
+        # Generate spikes at every time step ...
+        self.spikes = self._generate_spikes(time_step=self.current_ts)
+        # ... and send them through the PyOutPort
+        self.s_out.send(self.spikes)
diff --git a/src/lava/lib/dnf/inputs/rate_code_spike_gen/process.py b/src/lava/lib/dnf/inputs/rate_code_spike_gen/process.py
new file mode 100644
index 0000000..fabb39a
--- /dev/null
+++ b/src/lava/lib/dnf/inputs/rate_code_spike_gen/process.py
@@ -0,0 +1,65 @@
+# Copyright (C) 2021 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause
+# See: https://spdx.org/licenses/
+
+import numpy as np
+import typing as ty
+
+from lava.magma.core.process.process import AbstractProcess
+from lava.magma.core.process.variable import Var
+from lava.magma.core.process.ports.ports import InPort, OutPort
+
+from lava.lib.dnf.utils.validation import validate_shape
+
+
+class RateCodeSpikeGen(AbstractProcess):
+    """
+    Spike generator Process for rate-coded input.
+
+    This process generates spike trains based on patterns it receives through
+    its InPort a_in.
+    It interprets these patterns as spiking rates (rate coding).
+
+    Receives a new pattern through a_in only once in a while and triggers a
+    state update upon receipt of a new pattern.
+    In other time steps, it receives null patterns (array full of numpy.nan).
+    Sends spike values through its OutPort s_out every time step.
+
+    Parameters:
+    -----------
+    shape: tuple(int)
+        number of neurons per dimension, e.g.
shape=(30, 40) + min_spike_rate: float + minimum spike rate + (neurons with rates below this value will never spike) + seed: int + seed used for computing first spike times everytime pattern changes + """ + + def __init__(self, **kwargs: ty.Union[ty.Tuple[int, ...], + float, + int]) -> None: + super().__init__(**kwargs) + + shape = validate_shape(kwargs.pop("shape")) + + min_spike_rate = kwargs.pop("min_spike_rate", 0.5) + if min_spike_rate < 0: + raise ValueError(" cannot be negative.") + + # seed -1 means: use random seed + seed = kwargs.pop("seed", -1) + if seed < -1: + raise ValueError(" cannot be negative.") + + self.min_spike_rate = Var(shape=(1,), init=np.array([min_spike_rate])) + self.seed = Var(shape=(1,), init=np.array([seed])) + + self.inter_spike_distances = Var(shape=shape, init=np.zeros(shape)) + self.first_spike_times = Var(shape=shape, init=np.zeros(shape)) + self.last_spiked = Var(shape=shape, init=np.full(shape, -np.inf)) + + self.spikes = Var(shape=shape, init=np.zeros(shape)) + + self.a_in = InPort(shape=shape) + self.s_out = OutPort(shape=shape) diff --git a/src/lava/lib/dnf/kernels/kernels.py b/src/lava/lib/dnf/kernels/kernels.py new file mode 100644 index 0000000..5b8fe05 --- /dev/null +++ b/src/lava/lib/dnf/kernels/kernels.py @@ -0,0 +1,276 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import numpy as np +import typing as ty +import warnings +from abc import ABC, abstractmethod + +from lava.lib.dnf.utils.convenience import to_ndarray +from lava.lib.dnf.utils.math import gauss + + +class Kernel: + """ + Represents a kernel that can be used in the Convolution operation. + + Parameters + ---------- + weights : numpy.ndarray + weight matrix of the kernel + padding_value : float, optional + value that is used to pad the kernel when the Convolution operation + uses BorderType.PADDED + """ + def __init__(self, + weights: np.ndarray, + padding_value: ty.Optional[float] = 0) -> None: + self._weights = weights + self._padding_value = padding_value + + @property + def weights(self) -> np.ndarray: + """Returns the weights""" + return self._weights + + @property + def padding_value(self) -> float: + """Returns the padding value""" + return self._padding_value + + +class GaussianMixin(ABC): + """ + Mixin for kernels that are generated with the gauss function. 
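A `Kernel` can also be constructed directly from any weight matrix; the `padding_value` only comes into play when a `Convolution` with `BorderType.PADDED` must enlarge the kernel. A short sketch with made-up weights:

```python
# Sketch: wrapping a custom weight matrix in a Kernel (values are made up).
import numpy as np
from lava.lib.dnf.kernels.kernels import Kernel

weights = np.array([[0.0, 1.0, 0.0],
                    [1.0, 2.0, 1.0],
                    [0.0, 1.0, 0.0]])
kernel = Kernel(weights=weights, padding_value=-0.5)
print(kernel.weights.shape, kernel.padding_value)  # (3, 3) -0.5
```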
+ + Parameters + ---------- + amp_exc : float + amplitude of the excitatory Gaussian of the kernel + width_exc : list(float) + widths of the excitatory Gaussian of the kernel + limit : float + determines the size/shape of the kernel such that the weight matrix + will have the size 2*limit*width_exc; defaults to 1 + shape : tuple(int), optional + will return the weight with this explicit shape; if used, the limit + argument will have no effect + + """ + def __init__(self, + amp_exc: float, + width_exc: ty.Union[float, ty.List[float]], + limit: ty.Optional[float] = 1.0, + shape: ty.Optional[ty.Tuple[int, ...]] = None, + dominant_width: np.ndarray = None) -> None: + + if amp_exc < 0: + raise ValueError(" must be positive") + self._amp_exc = amp_exc + + self._width_exc = to_ndarray(width_exc) + + if limit < 0: + raise ValueError(" must be positive") + self._limit = limit + + if dominant_width is None: + dominant_width = self._width_exc + + self._shape = self._compute_shape(dominant_width) \ + if shape is None else self._validate_shape(shape, dominant_width) + + @abstractmethod + def _compute_weights(self) -> np.ndarray: + """ + Computes the weights of the kernel + + Returns + ------- + weights : numpy.ndarray + computed weights of the kernel + + """ + pass + + def _compute_domain(self) -> np.ndarray: + """ + Computes the domain of a kernel for computing the Gaussian. + + Returns + ------- + domain : numpy.ndarray + domain used for the gauss() function + + """ + shape = np.array(self._shape) + domain = np.zeros((len(shape), 2)) + half_domain = shape / 2.0 + domain[:, 0] = -half_domain + domain[:, 1] = half_domain + + return domain + + @staticmethod + def _validate_shape(shape: ty.Tuple[int, ...], + width: np.ndarray) -> ty.Tuple[int, ...]: + """ + Validates the shape of the kernel against a width parameter + + Parameters + ---------- + shape : tuple(int) + shape to be validated + width : numpy.ndarray + width to validate the shape against + + Returns + ------- + shape : tuple(int) + validated shape + + """ + if np.size(width, axis=0) != len(shape): + raise ValueError(" and are incompatible; the" + "number of entries in must match the" + "number of dimensions in ") + + if np.any(np.array(shape)[:] % 2 == 0): + warnings.warn("kernel has an even size; this may introduce drift") + + return shape + + def _compute_shape(self, width: np.ndarray) -> ty.Tuple[int, ...]: + """ + Computes the shape of a kernel from the a width-parameter of the + kernel and a limit factor. + + Parameters + ---------- + width : numpy.ndarray + width parameter to determine the shape + + Returns + ------- + shape : tuple(int) + shape of the kernel + """ + # Compute shape from limit + shape = np.uint(np.ceil(2 * self._limit * width)) + + # Ensure that the kernel has an odd size + shape = np.where(shape % 2 == 0, shape + 1, shape) + + return tuple(shape) + + +class SelectiveKernel(GaussianMixin, Kernel): + """ + A kernel that enables creating a selective dynamic neural field + (local excitation, global inhibition). 
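A worked example of the `_compute_shape` logic above: with `limit=1.0` and a width of `[4.0]`, the raw size is `ceil(2 * 1.0 * 4.0) = 8`, which is even and therefore bumped to 9 so the kernel stays symmetric around its center.

```python
# Standalone replica of the _compute_shape arithmetic; inputs are made up.
import numpy as np

limit = 1.0
width = np.array([4.0])
shape = np.ceil(2 * limit * width).astype(int)        # -> [8]
shape = np.where(shape % 2 == 0, shape + 1, shape)    # ensure odd size -> [9]
print(tuple(int(s) for s in shape))  # (9,)
```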
+
+    Parameters
+    ----------
+    amp_exc : float
+        amplitude of the excitatory Gaussian of the kernel
+    width_exc : list(float)
+        widths of the excitatory Gaussian of the kernel
+    global_inh : float
+        global inhibition of the kernel; must be negative
+    limit : float
+        determines the size/shape of the kernel such that the weight matrix
+        will have the size 2*limit*width_exc; defaults to 1
+    shape : tuple(int), optional
+        will return the weight with this explicit shape; if used, the limit
+        argument will have no effect
+
+    """
+    def __init__(self,
+                 amp_exc: float,
+                 width_exc: ty.Union[float, ty.List[float]],
+                 global_inh: float,
+                 limit: ty.Optional[float] = 1.0,
+                 shape: ty.Optional[ty.Tuple[int, ...]] = None) -> None:
+
+        GaussianMixin.__init__(self, amp_exc, width_exc, limit, shape)
+
+        if global_inh > 0:
+            raise ValueError("<global_inh> must be negative")
+        self._global_inh = global_inh
+
+        weights = self._compute_weights()
+        Kernel.__init__(self, weights=weights, padding_value=self._global_inh)
+
+    def _compute_weights(self) -> np.ndarray:
+        local_excitation = gauss(self._shape,
+                                 domain=self._compute_domain(),
+                                 amplitude=self._amp_exc,
+                                 stddev=self._width_exc)
+
+        return local_excitation + self._global_inh
+
+
+class MultiPeakKernel(GaussianMixin, Kernel):
+    """
+    "Mexican hat" kernel (local excitation and mid-range inhibition) for a
+    DNF that enables it to create multiple peaks.
+
+    Parameters
+    ----------
+    amp_exc : float
+        amplitude of the excitatory Gaussian of the kernel
+    width_exc : list(float)
+        widths of the excitatory Gaussian of the kernel
+    amp_inh : float
+        amplitude of the inhibitory Gaussian of the kernel; must be negative
+    width_inh : list(float)
+        widths of the inhibitory Gaussian of the kernel
+
+    """
+    def __init__(self,
+                 amp_exc: float,
+                 width_exc: ty.Union[float, ty.List[float]],
+                 amp_inh: float,
+                 width_inh: ty.Union[float, ty.List[float]],
+                 limit: float = 1.0,
+                 shape: ty.Optional[ty.Tuple[int]] = None) -> None:
+
+        if amp_inh > 0:
+            raise ValueError("<amp_inh> must be negative")
+        self._amp_inh = amp_inh
+
+        self._width_inh = to_ndarray(width_inh)
+
+        GaussianMixin.__init__(self,
+                               amp_exc,
+                               width_exc,
+                               limit,
+                               shape,
+                               dominant_width=self._width_inh)
+
+        self._validate_widths(self._width_exc, self._width_inh)
+
+        weights = self._compute_weights()
+        Kernel.__init__(self, weights=weights)
+
+    @staticmethod
+    def _validate_widths(width_exc: np.ndarray,
+                         width_inh: np.ndarray) -> None:
+        """Validates the excitatory and inhibitory widths against each
+        other."""
+        if width_exc.shape != width_inh.shape:
+            raise ValueError("shape of <width_exc> "
+                             f"{width_exc.shape} != {width_inh.shape} "
+                             "shape of <width_inh>")
+
+    def _compute_weights(self) -> np.ndarray:
+        domain = self._compute_domain()
+
+        local_excitation = gauss(self._shape,
+                                 domain=domain,
+                                 amplitude=self._amp_exc,
+                                 stddev=self._width_exc)
+
+        mid_range_inhibition = gauss(self._shape,
+                                     domain=domain,
+                                     amplitude=self._amp_inh,
+                                     stddev=self._width_inh)
+
+        return local_excitation + mid_range_inhibition
diff --git a/src/lava/lib/dnf/operations/enums.py b/src/lava/lib/dnf/operations/enums.py
new file mode 100644
index 0000000..568c958
--- /dev/null
+++ b/src/lava/lib/dnf/operations/enums.py
@@ -0,0 +1,34 @@
+# Copyright (C) 2021 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause
+# See: https://spdx.org/licenses/
+
+from __future__ import annotations
+from typing import TypeVar, Type
+from enum import Enum, unique, auto
+
+_T = TypeVar("_T")
+
+
+@unique
+class ReduceMethod(Enum):
+    """Enum for reduce methods of ReduceDims operation"""
+    SUM = auto()  # ReduceDims will sum all synaptic weights of collapsed dim
+    MEAN = auto()  # ReduceDims will compute mean of weights of collapsed dim
+
+    @classmethod
+    def validate(cls: Type[_T], reduce_method: ReduceMethod) -> None:
+        """Validate type of <reduce_method>"""
+        if not isinstance(reduce_method, ReduceMethod):
+            raise TypeError("reduce_method must be of type ReduceMethod")
+
+
+@unique
+class BorderType(Enum):
+    PADDED = auto()
+    CIRCULAR = auto()
+
+    @classmethod
+    def validate(cls: Type[_T], border_type: BorderType) -> None:
+        """Validate type of <border_type>"""
+        if not isinstance(border_type, BorderType):
+            raise TypeError("border_type must be of type BorderType")
diff --git a/src/lava/lib/dnf/operations/exceptions.py b/src/lava/lib/dnf/operations/exceptions.py
new file mode 100644
index 0000000..8ad7da2
--- /dev/null
+++ b/src/lava/lib/dnf/operations/exceptions.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2021 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause
+# See: https://spdx.org/licenses/
+
+import typing as ty
+
+
+class MisconfiguredOpError(Exception):
+    """
+    Exception that is raised when an operation is misconfigured.
+
+    Parameters:
+    -----------
+    msg : str (optional)
+        custom exception message that overwrites the default
+    """
+    def __init__(self, msg: ty.Optional[str] = None) -> None:
+        if msg is None:
+            msg = "operation is misconfigured"
+        super().__init__(msg)
diff --git a/src/lava/lib/dnf/operations/operations.py b/src/lava/lib/dnf/operations/operations.py
new file mode 100644
index 0000000..04c6b2e
--- /dev/null
+++ b/src/lava/lib/dnf/operations/operations.py
@@ -0,0 +1,522 @@
+# Copyright (C) 2021 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause
+# See: https://spdx.org/licenses/
+
+from abc import ABC, abstractmethod
+import typing as ty
+import numpy as np
+
+from lava.lib.dnf.utils.convenience import num_neurons
+from lava.lib.dnf.operations.shape_handlers import (
+    AbstractShapeHandler,
+    KeepShapeHandler,
+    ReduceDimsHandler,
+    ExpandDimsHandler,
+    ReorderHandler)
+from lava.lib.dnf.operations.enums import ReduceMethod, BorderType
+from lava.lib.dnf.kernels.kernels import Kernel
+from lava.lib.dnf.utils.convenience import num_dims
+from lava.lib.dnf.utils.math import is_odd
+
+
+class AbstractOperation(ABC):
+    """
+    Abstract Operation, subclasses of which can be used to parameterize the
+    connect() function.
+
+    Parameters
+    ----------
+    shape_handler : AbstractShapeHandler
+        handles, configures, and validates the input and output shape of the
+        operation
+
+    """
+    def __init__(self, shape_handler: AbstractShapeHandler) -> None:
+        self._shape_handler = shape_handler
+
+    @property
+    def output_shape(self) -> ty.Tuple[int, ...]:
+        """Return the output shape of the operation"""
+        return self._shape_handler.output_shape
+
+    @property
+    def input_shape(self) -> ty.Tuple[int, ...]:
+        """Return the input shape of the operation"""
+        return self._shape_handler.input_shape
+
+    def compute_weights(self) -> np.ndarray:
+        """
+        Computes the connectivity weight matrix of the operation.
+        This public method only validates the configuration of the
+        operation. The actual weights are computed in the
+        abstract method _compute_weights().
+
+        Returns
+        -------
+        connectivity weight matrix : numpy.ndarray
+
+        """
+        # Assert that the input and output shape is configured
+        self._shape_handler.assert_configured()
+
+        return self._compute_weights()
+
+    def configure(self,
+                  input_shape: ty.Tuple[int, ...]) -> None:
+        """
+        Configures an operation by setting its input and output shape.
+ + Parameters + ---------- + input_shape : tuple(int) + input shape of the operation + + """ + self._validate_args_with_input_shape(input_shape) + self._shape_handler.configure(input_shape) + + @abstractmethod + def _compute_weights(self) -> np.ndarray: + """ + Does the actual work of computing the weights and returns them as a + numpy array. + + Returns + ------- + weights : numpy.ndarray + + """ + pass + + def _validate_args_with_input_shape( + self, + input_shape: ty.Tuple[int, ...] + ) -> None: + """Validates any input arguments that the operation may receive, and + that do not get passed on to the ShapeHandler, against the input + shape.""" + pass + + +class Weights(AbstractOperation): + """ + Operation that generates one-to-one connectivity with given weights for + every synapse. + + Parameters + ---------- + weight : float + weight used for every connection + + """ + def __init__(self, weight: float) -> None: + super().__init__(KeepShapeHandler()) + self.weight = weight + + def _compute_weights(self) -> np.ndarray: + return np.eye(num_neurons(self.output_shape), + num_neurons(self.input_shape), + dtype=np.int32) * self.weight + + +class ReduceDims(AbstractOperation): + """ + Operation that reduces the dimensionality of the input by projecting + a specified subset of dimensions onto the remaining dimensions. + + Parameters + ---------- + reduce_dims : int or tuple(int) + indices of dimension that will be reduced/removed + reduce_method : ReduceMethod + method by which the dimensions will be reduced (SUM or MEAN) + + """ + def __init__(self, + reduce_dims: ty.Union[int, ty.Tuple[int, ...]], + reduce_method: ty.Optional[ReduceMethod] = ReduceMethod.SUM + ) -> None: + super().__init__(ReduceDimsHandler(reduce_dims)) + ReduceMethod.validate(reduce_method) + self.reduce_method = reduce_method + + def _compute_weights(self) -> np.ndarray: + # Indices of the input dimensions in the weight matrix + # that will not be removed + in_axes_all = np.arange(num_dims(self.input_shape)) + + sh = ty.cast(ReduceDimsHandler, self._shape_handler) + in_axes_kept = tuple(np.delete(in_axes_all, sh.reduce_dims)) + + # Generate the weight matrix + weights = _project_dims(self.input_shape, + self.output_shape, + in_axes_kept=in_axes_kept) + + if self.reduce_method == ReduceMethod.MEAN: + # Set the weights such that they compute the mean + weights = weights / num_neurons(self.input_shape) + + return weights + + +class ExpandDims(AbstractOperation): + """ + Operation that expands the dimensionality of the input by projecting + the dimensions of the input to the newly added dimensions. + + """ + def __init__(self, + new_dims_shape: ty.Union[int, ty.Tuple[int, ...]]) -> None: + super().__init__(ExpandDimsHandler(new_dims_shape)) + + def _compute_weights(self) -> np.ndarray: + # Indices of the output dimensions in the weight matrix that will + # be kept from the input + out_axes_kept = tuple(np.arange(num_dims(self.input_shape))) + + # Generate the weight matrix + weights = _project_dims(self.input_shape, + self.output_shape, + out_axes_kept=out_axes_kept) + + return weights + + +class Reorder(AbstractOperation): + """ + Operation that reorders the dimensions in the input to a specified new + order. 
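A short sketch of what the `Weights` operation above produces once configured: a scaled identity over the flattened neuron index (assuming lava-dnf is installed; values are illustrative).

```python
# Sketch of Weights.compute_weights() for a 3-neuron population.
from lava.lib.dnf.operations.operations import Weights

op = Weights(weight=5.0)
op.configure(input_shape=(3,))  # input and output shape are identical
print(op.compute_weights())
# [[5. 0. 0.]
#  [0. 5. 0.]
#  [0. 0. 5.]]
```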
+ + Parameters + ---------- + order : tuple(int) + new order of the dimensions (see ReorderHandler) + + """ + def __init__(self, order: ty.Tuple[int, ...]) -> None: + super().__init__(ReorderHandler(order)) + + def _compute_weights(self) -> np.ndarray: + sh = ty.cast(ReorderHandler, + self._shape_handler) + weights = _project_dims(self.input_shape, + self.output_shape, + out_axes_kept=sh.order) + + return weights + + +def _project_dims( + input_shape: ty.Tuple[int, ...], + output_shape: ty.Tuple[int, ...], + out_axes_kept: ty.Optional[ty.Tuple[int, ...]] = None, + in_axes_kept: ty.Optional[ty.Tuple[int, ...]] = None +) -> np.ndarray: + """Projection function that is used both by the ReduceDims and ExpandDims + Operation + + Parameters + ---------- + input_shape : tuple(int) + input shape of the operation + output_shape : tuple(int) + output shape of the operation + out_axes_kept : tuple(int) + indices of the output dimensions in the weight matrix that will + be kept from the input + in_axes_kept : tuple(int) + indices of the input dimensions in the weight matrix that will + be kept for the output + + Returns + ------- + connectivity weight matrix : numpy.ndarray + + """ + num_neurons_in = num_neurons(input_shape) + num_neurons_out = num_neurons(output_shape) + num_dims_in = num_dims(input_shape) + num_dims_out = num_dims(output_shape) + smaller_num_dims = min(num_dims_in, num_dims_out) + + if smaller_num_dims == 0: + # If the target is a 0D population, the connectivity is from + # all neurons in the source population to that one neuron + weights = np.ones((num_neurons_out, num_neurons_in)) + else: + # Create a dense connectivity matrix, where dimensions of the + # source and target are not yet flattened + shape = output_shape + input_shape + weights = np.zeros(shape) + + ### + # The following lines create a view on the connectivity matrix, + # in which the axes are moved such that the first dimensions are all + # output dimensions that will be kept, followed by all input + # dimensions that will be kept, followed by all remaining dimensions. + if in_axes_kept is None: + in_axes_kept = np.arange(num_dims_in) + in_axes_kept = tuple(np.asarray(in_axes_kept) + num_dims_out) + + if out_axes_kept is None: + out_axes_kept = np.arange(num_dims_out) + out_axes_kept = tuple(out_axes_kept) + + # New indices of the kept output dimensions after moving the axes + new_axes_out = tuple(np.arange(len(out_axes_kept))) + # New indices of the kept input dimensions after moving the axes + new_axes_in = tuple(np.arange(len(in_axes_kept)) + len(new_axes_out)) + + # Create the view by moving the axes + conn = np.moveaxis(weights, + out_axes_kept + in_axes_kept, + new_axes_out + new_axes_in) + # + ### + + # For each source-target dimension pair, set connections to 1 for + # every pair of neurons along that dimension, as well as to all + # neurons in all remaining dimensions + if smaller_num_dims == 1: + for a in range(np.size(conn, axis=0)): + conn[a, a, ...] = 1 + elif smaller_num_dims == 2: + for a in range(np.size(conn, axis=0)): + for b in range(np.size(conn, axis=1)): + conn[a, b, a, b, ...] = 1 + elif smaller_num_dims == 3: + for a in range(np.size(conn, axis=0)): + for b in range(np.size(conn, axis=1)): + for c in range(np.size(conn, axis=2)): + conn[a, b, c, a, b, c, ...] 
= 1 + else: + raise NotImplementedError("projection is not implemented for " + "dimensionality > 3") + + # Flatten the source and target dimensions of the connectivity + # matrix to get a two-dimensional dense connectivity matrix + weights = weights.reshape((num_neurons_out, num_neurons_in)) + + return weights + + +class Convolution(AbstractOperation): + """ + Creates connectivity that resembles a convolution with a kernel. + Perhaps contrary to other implementations of the convolution, this + operation always leaves the shape of the input intact. That is, a + Convolution operation applied, for instance, to the output of a + population of neurons of shape (42, 42) will also yield an output of + shape (42, 42). + + Parameters + ---------- + kernel : Kernel + kernel of weights that the input will be convolved with; must be of the + same dimensionality as the input + border_types : BorderType or list(BorderType) + determines how the Convolution operation treats borders; valid values + are (1) PADDED, in which case the borders will be padded with a value + that can be specified in the Kernel or (2) CIRCULAR, in which case + the values from the other side of the input will be used as 'padding' + (this is sometimes also called "wrapped") + + """ + def __init__( + self, + kernel: ty.Union[Kernel, np.ndarray], + border_types: ty.Optional[ty.Union[BorderType, + ty.List[BorderType]]] + = BorderType.PADDED + ) -> None: + super().__init__(KeepShapeHandler()) + + self._kernel = self._validate_kernel(kernel) + self._border_types = self._validate_border_types(border_types) + + @property + def kernel(self) -> Kernel: + """Returns the kernel""" + return self._kernel + + @property + def border_types(self) -> ty.List[BorderType]: + """Returns the list of border types""" + return self._border_types + + @staticmethod + def _validate_kernel( + kernel: ty.Union[Kernel, np.ndarray] + ) -> Kernel: + """Validate the argument""" + if isinstance(kernel, np.ndarray): + kernel = Kernel(weights=kernel) + + return kernel + + @staticmethod + def _validate_border_types( + border_types: ty.Union[BorderType, ty.List[BorderType]] + ) -> ty.List[BorderType]: + """Validates the argument""" + + if isinstance(border_types, BorderType): + border_types = [border_types] + + if not isinstance(border_types, list): + raise TypeError(" must be of type BorderType or" + "list(BorderType)") + + for bt in border_types: + BorderType.validate(bt) + + return border_types + + def _validate_args_with_input_shape(self, + input_shape: ty.Tuple[int, ...] 
+                                        ) -> None:
+        # treating 0D cases like 1D cases here
+        input_dim = len(input_shape)
+
+        if len(self._border_types) == 1:
+            self._border_types *= input_dim
+        if len(self._border_types) != input_dim:
+            raise ValueError("number of entries in <border_types> does not "
+                             "match dimensionality of population")
+
+    def _compute_weights(self) -> np.ndarray:
+
+        # Input shape equals output shape
+        shape = self.input_shape
+        # Do not use num_dims() here to treat 0D like 1D
+        num_dims = len(shape)
+        _num_neurons = num_neurons(shape)
+
+        # Generate a dense connectivity matrix
+        connectivity_matrix = np.zeros((_num_neurons, _num_neurons))
+
+        # Copy the weights of the kernel
+        kernel_weights = np.copy(self.kernel.weights)
+
+        for i in range(num_dims):
+            # Compute the size difference between the population and the
+            # kernel in the current dimension
+            size_diff = shape[i] - np.size(kernel_weights, axis=i)
+
+            if size_diff != 0:
+                pad_width = np.zeros((num_dims, 2), dtype=int)
+                pad_width[i, :] = int(np.floor(np.abs(size_diff) / 2.0))
+                # If the padding cannot be distributed evenly...
+                if is_odd(size_diff):
+                    if is_odd(np.size(kernel_weights, axis=i)):
+                        # ...add one in front if the kernel size is odd...
+                        pad_width[i, 0] += 1
+                    else:
+                        # ...or add one in the back if the kernel size
+                        # is even
+                        pad_width[i, 1] += 1
+
+                if size_diff > 0:
+                    # Pad the kernel with its padding value
+                    kernel_weights = \
+                        np.pad(kernel_weights,
+                               pad_width=pad_width,
+                               constant_values=self.kernel.padding_value)
+                elif size_diff < 0 \
+                        and self.border_types[i] == BorderType.CIRCULAR:
+                    delete_front = pad_width[i, 1]
+                    delete_back = pad_width[i, 0]
+                    kernel_weights = np.delete(kernel_weights,
+                                               range(delete_front),
+                                               axis=i)
+                    kernel_weights = np.delete(kernel_weights,
+                                               range(-delete_back, 0),
+                                               axis=i)
+
+        # Compute the center of the kernel
+        kernel_center = np.floor(np.array(kernel_weights.shape) / 2.0)
+
+        # Iterate over the shape of the input population
+        for index, _ in np.ndenumerate(np.zeros(shape)):
+            # Compute how much the kernel must be shifted to bring its
+            # center to the correct position
+            shift = kernel_center.astype(int) - np.array(index,
+                                                         dtype=int)
+
+            conn_weights = kernel_weights
+
+            # Shift the weights depending on the border method
+            for i in range(num_dims):
+                if self.border_types[i] == BorderType.CIRCULAR:
+                    conn_weights = np.roll(conn_weights, -shift[i], axis=i)
+                elif self.border_types[i] == BorderType.PADDED:
+                    conn_weights = \
+                        self._shift_fill(conn_weights,
+                                         -shift[i],
+                                         axis=i,
+                                         fill_value=self.kernel.padding_value)
+
+                # If the connection weight matrix is too large for the
+                # population...
+                size_diff = shape[i] - np.size(conn_weights, axis=i)
+                if size_diff < 0:
+                    # ...delete the overflowing elements
+                    conn_weights = np.delete(conn_weights,
+                                             range(-np.abs(size_diff), 0),
+                                             axis=i)
+
+            # Flatten kernel matrix
+            if num_dims > 1:
+                conn_weights = np.ravel(conn_weights)
+
+            # Fill the connectivity matrix
+            flat_index = np.ravel_multi_index(index, shape)
+            connectivity_matrix[flat_index, :] = conn_weights
+
+        return connectivity_matrix
+
+    @staticmethod
+    def _shift_fill(array: np.ndarray,
+                    shift: int,
+                    axis: int = 0,
+                    fill_value: float = 0) -> np.ndarray:
+        """
+        Shift an array along a given axis, filling the empty elements.
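+
+        In contrast to numpy.roll, elements that are shifted beyond the
+        boundary of the array are discarded, and the vacated entries are
+        filled with fill_value.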
+ + Parameters + ---------- + array : numpy.ndarray + the array to be shifted + shift : int + number of elements to shift + axis : int + axis along which the array is shifted + fill_value: float + value that will fill up empty elements in the shifted array + + Returns + ------- + shifted array : numpy.ndarray + + """ + if shift != 0: + if axis > array.ndim - 1: + raise IndexError(f"axis {axis} does not exist for array of " + f"shape {array.shape}") + + array = np.swapaxes(array, 0, axis) + shifted_array = np.empty_like(array) + + if shift < 0: + shifted_array[shift:, ...] = fill_value + shifted_array[:shift, ...] = array[-shift:, ...] + elif shift > 0: + shifted_array[:shift, ...] = fill_value + shifted_array[shift:, ...] = array[:-shift, ...] + + shifted_array = np.swapaxes(shifted_array, axis, 0) + + return shifted_array + else: + return array diff --git a/src/lava/lib/dnf/operations/shape_handlers.py b/src/lava/lib/dnf/operations/shape_handlers.py new file mode 100644 index 0000000..a36db93 --- /dev/null +++ b/src/lava/lib/dnf/operations/shape_handlers.py @@ -0,0 +1,264 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +from abc import ABC, abstractmethod +import typing as ty +import numpy as np + +from lava.lib.dnf.utils.convenience import num_neurons +from lava.lib.dnf.operations.exceptions import MisconfiguredOpError + +from lava.lib.dnf.utils.convenience import num_dims + + +class AbstractShapeHandler(ABC): + """ + Abstract class for handling input and output shape of the + AbstractOperation class. + + """ + def __init__(self) -> None: + self._input_shape = None + self._output_shape = None + + def configure(self, + input_shape: ty.Tuple[int, ...]) -> None: + """ + Configures the input and output shape of an operation given an input + shape. + + Parameters + ---------- + input_shape : tuple(int) + input shape of an operation + + """ + self._validate_input_shape(input_shape) + self._input_shape = input_shape + # Validate any arguments that subclass shape handlers may receive + self._validate_args() + self._compute_output_shape() + + def assert_configured(self) -> None: + """Assert that input and output shape is configured.""" + if self._input_shape is None or self._output_shape is None: + raise AssertionError("_input_shape and _output_shape " + "should not be None") + + @property + def output_shape(self) -> ty.Tuple[int, ...]: + """Return the output shape of the handler""" + return self._output_shape + + @property + def input_shape(self) -> ty.Tuple[int, ...]: + """Return the input shape of the handler""" + return self._input_shape + + @abstractmethod + def _compute_output_shape(self) -> None: + pass + + @abstractmethod + def _validate_args(self) -> None: + pass + + @abstractmethod + def _validate_input_shape(self, + input_shape: ty.Tuple[int, ...]) -> None: + pass + + +class KeepShapeHandler(AbstractShapeHandler): + """Shape handler for operations that do not change the shape of the + input.""" + def _compute_output_shape(self) -> None: + self._output_shape = self._input_shape + + def _validate_args(self) -> None: + pass + + def _validate_input_shape(self, + input_shape: ty.Tuple[int, ...]) -> None: + pass + + +class ReduceDimsHandler(AbstractShapeHandler): + """ + Shape handler for operations that reduce the dimensionality of the + input. 
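+
+    For example, removing dimension 1 from an input of shape (5, 3)
+    yields the output shape (5,).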
+
+    Parameters
+    ----------
+    reduce_dims : int or tuple(int)
+        indices of the dimensions to remove
+    """
+    def __init__(self,
+                 reduce_dims: ty.Union[int, ty.Tuple[int, ...]]) -> None:
+        super().__init__()
+        if isinstance(reduce_dims, int):
+            reduce_dims = (reduce_dims,)
+        self._reduce_dims = reduce_dims
+
+    @property
+    def reduce_dims(self) -> ty.Tuple[int, ...]:
+        """Return the indices of the dimensions to remove"""
+        return self._reduce_dims
+
+    def _compute_output_shape(self) -> None:
+        self._output_shape = tuple(np.delete(np.asarray(self._input_shape),
+                                             self.reduce_dims))
+        if self._output_shape == ():
+            self._output_shape = (1,)
+
+    def _validate_input_shape(self, input_shape: ty.Tuple[int, ...]) -> None:
+        if num_dims(input_shape) == 0:
+            raise MisconfiguredOpError("ReduceDims shape handler is "
+                                       "configured with an input shape that "
+                                       "is already zero-dimensional")
+
+    def _validate_args(self) -> None:
+        """Validate the <reduce_dims> argument"""
+        if len(self.reduce_dims) == 0:
+            raise ValueError("<reduce_dims> may not be empty")
+
+        if len(self.reduce_dims) > len(self._input_shape):
+            raise ValueError(f"given <reduce_dims> {self.reduce_dims} has "
+                             f"more entries than the shape of the input "
+                             f"{self._input_shape}")
+
+        for idx in self.reduce_dims:
+            # Compute the positive index irrespective of the sign of 'idx'
+            idx_positive = len(self._input_shape) + idx if idx < 0 else idx
+            # Make sure the positive index is not out of bounds
+            if idx_positive < 0 or idx_positive >= len(self._input_shape):
+                raise IndexError(f"<reduce_dims> value {idx} is out of bounds "
+                                 f"for array of size {len(self._input_shape)}")
+
+
+class ExpandDimsHandler(AbstractShapeHandler):
+    """Shape handler for operations that expand the dimensionality. New
+    dimensions (axes) will be appended to the already existing ones of the
+    input. Their sizes must be specified using the <new_dims_shape> argument.
+
+    Parameters
+    ----------
+    new_dims_shape : int or tuple(int)
+        shape of the added dimensions; they will be appended to the shape
+        of the input, for instance an input shape (2,) and
+        new_dims_shape=(6, 8) will produce an output shape (2, 6, 8)
+    """
+    def __init__(self,
+                 new_dims_shape: ty.Union[int, ty.Tuple[int, ...]]) -> None:
+        super().__init__()
+        if isinstance(new_dims_shape, int):
+            new_dims_shape = (new_dims_shape,)
+        self._new_dims_shape = new_dims_shape
+
+    @property
+    def new_dims_shape(self) -> ty.Tuple[int, ...]:
+        """Return the <new_dims_shape> attribute"""
+        return self._new_dims_shape
+
+    def _compute_output_shape(self) -> None:
+        if num_dims(self.input_shape) == 0:
+            self._output_shape = self.new_dims_shape
+        else:
+            self._output_shape = self.input_shape + self.new_dims_shape
+
+        if len(self._output_shape) > 3:
+            raise NotImplementedError("ExpandDims operation is configured to "
+                                      "produce an output shape with "
+                                      "dimensionality larger than 3; higher "
+                                      "dimensionality is currently not "
+                                      "supported")
+
+    def _validate_args(self) -> None:
+        """Validate the <new_dims_shape> argument"""
+        if len(self.new_dims_shape) == 0:
+            raise ValueError("<new_dims_shape> may not be empty")
+
+        if any(s < 1 for s in self.new_dims_shape):
+            raise ValueError("values in <new_dims_shape> may not be smaller "
+                             "than 1")
+
+    def _validate_input_shape(self, input_shape: ty.Tuple[int, ...]) -> None:
+        pass
+
+
+class ReshapeHandler(AbstractShapeHandler):
+    """Shape handler for operations that reshape the input, changing
+    the shape but keeping the number of elements constant.
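+
+    For example, an input of shape (5, 3) can be reshaped to (15,) or
+    (3, 5), but not to (4, 4).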
+
+    Parameters
+    ----------
+    output_shape : tuple(int)
+        output shape of an operation
+
+    """
+    def __init__(self, output_shape: ty.Tuple[int, ...]) -> None:
+        super().__init__()
+        self._output_shape = output_shape
+
+    def _validate_args(self) -> None:
+        if num_neurons(self._input_shape) != num_neurons(self._output_shape):
+            raise MisconfiguredOpError("input and output shape must have the "
+                                       "same number of elements")
+
+    def _compute_output_shape(self) -> None:
+        pass
+
+    def _validate_input_shape(self, input_shape: ty.Tuple[int, ...]) -> None:
+        pass
+
+
+class ReorderHandler(AbstractShapeHandler):
+    """Shape handler for operations that reorder the input shape.
+
+    Parameters
+    ----------
+    order : tuple(int)
+        order of the dimensions of the output; for instance if the input shape
+        is (1, 2, 3) and order=(0, 2, 1), the output shape will be (1, 3, 2);
+        must have the same number of elements as the input and output shape
+
+    """
+    def __init__(self, order: ty.Tuple[int, ...]) -> None:
+        super().__init__()
+        self._order = order
+
+    @property
+    def order(self) -> ty.Tuple[int, ...]:
+        """Return the order of the handler"""
+        return self._order
+
+    def _compute_output_shape(self) -> None:
+        input_shape = np.array(self._input_shape)
+        self._output_shape = tuple(input_shape[list(self._order)])
+
+    def _validate_args(self) -> None:
+        """Validate the <order> argument"""
+        num_dims_in = num_dims(self._input_shape)
+
+        if len(self._order) != num_dims_in:
+            raise MisconfiguredOpError("<order> must have the same number of "
+                                       "entries as the input shape: "
+                                       f"len({self._order}) != len("
+                                       f"{self._input_shape})")
+
+        for idx in self._order:
+            # Compute the positive index irrespective of the sign of 'idx'
+            idx_positive = len(self._input_shape) + idx if idx < 0 else idx
+            # Make sure the positive index is not out of bounds
+            if idx_positive < 0 or idx_positive >= len(self._input_shape):
+                raise IndexError(f"<order> value {idx} is out of bounds "
+                                 f"for array of size {len(self._input_shape)}")
+
+    def _validate_input_shape(self, input_shape: ty.Tuple[int, ...]) -> None:
+        num_dims_in = num_dims(input_shape)
+
+        if num_dims_in < 2:
+            raise MisconfiguredOpError("the input dimensionality "
+                                       "is smaller than 2; there are no "
+                                       "dimensions to reorder")
diff --git a/src/lava/lib/dnf/utils/convenience.py b/src/lava/lib/dnf/utils/convenience.py
new file mode 100644
index 0000000..b42b3fe
--- /dev/null
+++ b/src/lava/lib/dnf/utils/convenience.py
@@ -0,0 +1,72 @@
+# Copyright (C) 2021 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause
+# See: https://spdx.org/licenses/
+
+import numpy as np
+import typing as ty
+
+
+def num_neurons(shape: ty.Tuple[int, ...]) -> int:
+    """
+    Computes the number of neurons from a shape.
+
+    Parameters
+    ----------
+    shape : tuple(int)
+        shape of a neural population (or input)
+
+    Returns
+    -------
+    num_neurons : int
+        number of neurons
+    """
+    return int(np.prod(shape))
+
+
+def num_dims(shape: ty.Tuple[int, ...]) -> int:
+    """
+    Computes the dimensionality of a shape, assuming that (1,) represents
+    a zero-dimensional shape.
+
+    Parameters
+    ----------
+    shape : tuple(int)
+        shape of a population of neurons
+
+    Returns
+    -------
+    number of dimensions : int
+    """
+    # Assume dimensionality 0 if there is only a single neuron
+    dims = 0 if num_neurons(shape) == 1 else len(shape)
+
+    return dims
+
+
+def to_ndarray(
+        x: ty.Union[float, ty.Tuple, ty.List, np.ndarray]
+) -> np.ndarray:
+    """
+    Converts float, tuple, or list variables to numpy.ndarray.
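+
+    Scalar values are wrapped into a single-element float array; tuples
+    and lists are passed to numpy.array unchanged.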
+
+    Parameters
+    ----------
+    x : float, tuple, list, numpy.ndarray
+        variable to convert
+
+    Returns
+    -------
+    return : numpy.ndarray
+        input converted to numpy.ndarray
+
+    """
+    if not isinstance(x, np.ndarray):
+        if np.isscalar(x):
+            return np.array([x], np.float32)
+        if isinstance(x, tuple) or isinstance(x, list):
+            return np.array(x)
+        else:
+            raise TypeError("variable must be one of the following types: "
+                            "float, tuple, list or numpy.ndarray")
+    else:
+        return x
diff --git a/src/lava/lib/dnf/utils/math.py b/src/lava/lib/dnf/utils/math.py
new file mode 100644
index 0000000..aaeb21c
--- /dev/null
+++ b/src/lava/lib/dnf/utils/math.py
@@ -0,0 +1,113 @@
+# Copyright (C) 2021 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause
+# See: https://spdx.org/licenses/
+
+import numpy as np
+import typing as ty
+import scipy.stats
+
+
+def gauss(shape: ty.Tuple[int, ...],
+          domain: ty.Optional[np.ndarray] = None,
+          amplitude: ty.Optional[float] = 1.0,
+          mean: ty.Optional[ty.Union[float, np.ndarray]] = None,
+          stddev: ty.Optional[ty.Union[float, np.ndarray]] = None
+          ) -> np.ndarray:
+    """
+    Evaluates the Gaussian function over a specified domain in multiple
+    dimensions.
+
+    If a domain is specified, the function will evaluate the Gaussian at
+    linearly interpolated values between the lower and upper bounds of the
+    domain. The number of samples is determined by the shape parameter. For
+    example, for given parameters shape=(5,) and domain=[[-5, -1]], it will
+    evaluate the function at positions -5,-4,-3,-2,-1.
+
+    If no domain is specified, the function will evaluate the Gaussian at the
+    indices of the sampling points. For instance, for a given shape of
+    shape=(5,), it will evaluate the function at positions 0,1,2,3,4.
+
+    Parameters
+    ----------
+    shape : tuple(int)
+        number of sampling points along each dimension
+    domain : numpy.ndarray, optional
+        lower and upper bound of input values for each dimension at which
+        the Gaussian function is evaluated
+    amplitude : float, optional
+        amplitude of the Gaussian, defaults to 1
+    mean : numpy.ndarray, optional
+        mean of the Gaussian, defaults to 0
+    stddev : numpy.ndarray, optional
+        standard deviation of the Gaussian, defaults to 1
+
+    Returns
+    -------
+    gaussian : numpy.ndarray
+        multi-dimensional array with samples of the Gaussian
+    """
+    # Dimensionality of the Gaussian
+    dimensionality = len(shape)
+
+    # Domain defaults to the indices of the sampling points
+    if domain is None:
+        domain = np.zeros((dimensionality, 2))
+        domain[:, 1] = np.array(shape[:]) - 1
+    else:
+        if isinstance(domain, np.ndarray) and domain.shape != (len(shape), 2):
+            raise ValueError("the shape of <domain> is incompatible with "
+                             "the specified <shape>; <domain> should be of "
+                             f"shape ({len(shape)}, 2) but is {domain.shape}")
+
+    # Mean defaults to 0
+    if mean is None:
+        mean = np.zeros((dimensionality,))
+    else:
+        if isinstance(mean, np.ndarray) and mean.shape != (len(shape),):
+            raise ValueError("the shape of <mean> is incompatible with "
+                             "the specified <shape>; <mean> should be of "
+                             f"shape ({len(shape)},) but is {mean.shape}")
+
+    # Standard deviation defaults to 1
+    if stddev is None:
+        stddev = np.ones((dimensionality,))
+    else:
+        if isinstance(stddev, np.ndarray) and stddev.shape != (len(shape),):
+            raise ValueError("the shape of <stddev> is incompatible with "
+                             "the specified <shape>; <stddev> should be of "
+                             f"shape ({len(shape)},) but is {stddev.shape}")
+
+    # Create linear spaces for each dimension
+    linspaces = []
+    for i in range(dimensionality):
+        linspaces.append(np.linspace(domain[i, 0], domain[i, 1],
+                                     shape[i]))
+
+    # Arrange linear spaces into a meshgrid
+    linspaces = np.array(linspaces, dtype=object)
+    grid = np.meshgrid(*linspaces, copy=False)
+    grid = np.array(grid)
+
+    # Swap axes to get around a peculiarity of meshgrid
+    if dimensionality > 1:
+        grid = np.swapaxes(grid, 1, 2)
+
+    # Reshape meshgrid to fit multi-variate normal
+    grid = np.moveaxis(grid, 0, -1)
+
+    # Compute normal probability density function
+    mv_normal_pdf = scipy.stats.multivariate_normal.pdf(grid,
+                                                        mean=mean,
+                                                        cov=stddev)
+    # Normalize probability density function and apply amplitude
+    gaussian = amplitude * mv_normal_pdf / np.max(mv_normal_pdf)
+
+    return gaussian
+
+
+def is_odd(n: int) -> bool:
+    """
+    Checks whether n is an odd number.
+
+    :param int n: number to check
+    :returns bool: True if <n> is an odd number"""
+    return bool(n & 1)
diff --git a/src/lava/lib/dnf/utils/plotting.py b/src/lava/lib/dnf/utils/plotting.py
new file mode 100644
index 0000000..ecb3522
--- /dev/null
+++ b/src/lava/lib/dnf/utils/plotting.py
@@ -0,0 +1,123 @@
+# Copyright (C) 2021 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause
+# See: https://spdx.org/licenses/
+
+import scipy.ndimage
+import numpy as np
+import typing as ty
+import matplotlib.pyplot as plt
+import matplotlib as mpl
+
+
+def compute_spike_rates(spike_data: np.ndarray,
+                        window_size: ty.Optional[int] = 11) -> np.ndarray:
+    """
+    Computes instantaneous spike rates for all neurons over time.
+
+    This method uses the window_size parameter to derive a kernel with which
+    it convolves the spike_data.
+    It yields an array of the same shape, with each value representing the
+    spike rate of a neuron over the specified time window.
+
+    Parameters
+    ----------
+    spike_data : numpy.ndarray
+        spike data of dtype=bool (spike: 1, no-spike: 0) and
+        shape (num_time_steps, num_neurons)
+    window_size : int, optional
+        size of the time window in number of time steps
+
+    Returns
+    -------
+    spike_rates : numpy.ndarray
+        array of same shape as spike_data which represents the instantaneous
+        spike rate of every neuron at every time step
+    """
+    # Compute spike rates for each time window
+    kernel = np.ones((window_size, 1)) / window_size
+    spike_rates = scipy.ndimage.convolve(spike_data, kernel)
+
+    return spike_rates
+
+
+def _compute_colored_spike_coordinates(spike_data: np.ndarray,
+                                       spike_rates: np.ndarray) -> \
+        ty.Tuple[ty.List[int], ty.List[int], ty.List[float]]:
+    """
+    Computes coordinates of each spike to be shown in a raster plot, along
+    with a color translating the instantaneous spike rate of the neuron
+    at the time where it spiked.
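+
+    Each spike becomes one point in the raster: its time step determines
+    the x coordinate and its neuron index the y coordinate.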
+
+    Parameters
+    ----------
+    spike_data : numpy.ndarray
+        spike data of dtype=bool (spike: 1, no-spike: 0) and
+        shape (num_time_steps, num_neurons)
+    spike_rates : numpy.ndarray
+        array of same shape as spike_data which represents the instantaneous
+        spike rate of every neuron at every time step
+
+    Returns
+    -------
+    x : list(int)
+        list of x coordinates of all spikes in the to-be shown plot
+    y : list(int)
+        list of y coordinates of all spikes in the to-be shown plot
+    colors : list(float)
+        list of colors (based on instantaneous spike rates) of all spikes in
+        the to-be shown plot
+    """
+    num_time_steps = spike_data.shape[0]
+    # Generate a representation of spike times
+    time_array = np.arange(1, num_time_steps + 1)
+
+    # Lists to hold the x and y values of the plot
+    x = []
+    y = []
+    colors = []
+
+    for time_idx, time_step in enumerate(time_array):
+        # Determine all neurons that spiked in this time step
+        spiking_neuron_idx, = np.where(spike_data[time_idx, :] == 1.0)
+
+        # Add the spike rate values of the spiking neurons at the current
+        # time step to colors
+        colors.extend(spike_rates[time_idx, spiking_neuron_idx])
+
+        spiking_neuron_idx = spiking_neuron_idx.tolist()
+        # If any neurons spiked...
+        if len(spiking_neuron_idx) > 0:
+            # ...add the index of all spiking neurons to y
+            y.extend(spiking_neuron_idx)
+            # ...add the current time step to x (as many times as there are
+            # spiking neurons)
+            x.extend(len(spiking_neuron_idx) * [time_step])
+
+    return x, y, colors
+
+
+def raster_plot(spike_data: np.ndarray,
+                window_size: ty.Optional[int] = 11) -> None:
+    """
+    Creates a raster plot, showing the spikes of all neurons over time.
+
+    The plot will use color to express the spike rate within a time window
+    determined by window_size (specified in number of time steps).
+
+    Parameters
+    ----------
+    spike_data : numpy.ndarray
+        spike data of dtype=bool (spike: 1, no-spike: 0) and
+        shape (num_time_steps, num_neurons)
+    window_size : int, optional
+        size of the time window in number of time steps
+    """
+    spike_rates = compute_spike_rates(spike_data, window_size)
+
+    x, y, colors = _compute_colored_spike_coordinates(spike_data, spike_rates)
+
+    # Generate the plot
+    mpl.rcParams['lines.linewidth'] = 0.5
+    mpl.rcParams['lines.antialiased'] = False
+
+    plt.scatter(x, y, c=colors, marker='|', s=5)
diff --git a/src/lava/lib/dnf/utils/validation.py b/src/lava/lib/dnf/utils/validation.py
new file mode 100644
index 0000000..80e7809
--- /dev/null
+++ b/src/lava/lib/dnf/utils/validation.py
@@ -0,0 +1,49 @@
+# Copyright (C) 2021 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause
+# See: https://spdx.org/licenses/
+
+import numpy as np
+import typing as ty
+
+
+def validate_shape(shape: ty.Any) -> ty.Tuple[int, ...]:
+    """
+    Validate and potentially convert shape parameter.
+
+    The shape of different elements of the DNF library can be passed in as
+    type tuple(int) or list(int) for multiple dimensions, or type int for a
+    single dimension. In all cases, it is converted to tuple(int).
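+
+    For example, shape=5 is converted to (5,), and shape=[5, 3] is
+    converted to (5, 3).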
+
+    Parameters
+    ----------
+    shape : tuple(int) or list(int)
+        shape parameter to be validated
+
+    Returns
+    -------
+    shape : tuple(int)
+        validated and converted shape parameter
+    """
+    if shape is None:
+        raise AssertionError("<shape> may not be None")
+
+    # Convert single int values to a tuple
+    if isinstance(shape, int):
+        shape = (shape,)
+    # Check whether all elements in the tuple (or list) are of type int
+    # and positive
+    if isinstance(shape, tuple) or isinstance(shape, list):
+        for s in shape:
+            if not isinstance(s, (int, np.integer)):
+                raise TypeError("all elements of <shape> must be of type int")
+            if s < 0:
+                raise ValueError("all elements of <shape> must be greater "
+                                 "than zero")
+    # Convert list to tuple
+    if isinstance(shape, list):
+        shape = tuple(shape)
+    # If <shape> is not yet a tuple, raise a TypeError
+    if not isinstance(shape, tuple):
+        raise TypeError("<shape> must be of type int or tuple(int)")
+
+    return shape
diff --git a/src/lava/__init__.py b/tests/__init__.py
similarity index 100%
rename from src/lava/__init__.py
rename to tests/__init__.py
diff --git a/tests/lava/lib/__init__.py b/tests/lava/lib/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/lava/lib/dnf/__init__.py b/tests/lava/lib/dnf/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/lava/lib/dnf/acceptance/__init__.py b/tests/lava/lib/dnf/acceptance/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/lava/lib/dnf/acceptance/test_connecting_with_ops.py b/tests/lava/lib/dnf/acceptance/test_connecting_with_ops.py
new file mode 100644
index 0000000..c43a908
--- /dev/null
+++ b/tests/lava/lib/dnf/acceptance/test_connecting_with_ops.py
@@ -0,0 +1,267 @@
+# Copyright (C) 2021 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause
+# See: https://spdx.org/licenses/
+
+import unittest
+import numpy as np
+
+from lava.magma.core.run_conditions import RunSteps
+from lava.magma.core.run_configs import Loihi1SimCfg
+from lava.proc.lif.process import LIF
+
+from lava.lib.dnf.connect.connect import connect
+from lava.lib.dnf.kernels.kernels import SelectiveKernel, MultiPeakKernel
+from lava.lib.dnf.operations.operations import Weights, ReduceDims, Reorder, \
+    ExpandDims, Convolution
+
+
+class TestConnectingWithOperations(unittest.TestCase):
+    def test_running_reorder(self) -> None:
+        """Tests executing an architecture with multi-dimensional input that
+        gets reshaped (here, reordered)."""
+        num_steps = 10
+        shape_src = (5, 3)
+        shape_dst = (3, 5)
+
+        bias = np.zeros(shape_src)
+        bias[:, 0] = 5000
+        src = LIF(shape=shape_src, bias=bias, bias_exp=np.ones(shape_src))
+        dst = LIF(shape=shape_dst)
+
+        weight = 20
+        connect(src.s_out, dst.a_in, ops=[Weights(weight),
+                                          Reorder(order=(1, 0))])
+        src.run(condition=RunSteps(num_steps=num_steps),
+                run_cfg=Loihi1SimCfg(select_tag='floating_pt'))
+
+        computed_dst_u = dst.vars.u.get()
+        expected_dst_u = np.zeros(shape_dst)
+        expected_dst_u[0, :] = 180.
+ + src.stop() + + self.assertEqual(src.runtime.current_ts, num_steps) + self.assertTrue(np.array_equal(computed_dst_u, expected_dst_u)) + + def test_connect_population_with_weights_op(self) -> None: + """Tests whether populations can be connected using the Weights + operation.""" + for shape in [(1,), (5,), (5, 5), (5, 5, 5)]: + source = LIF(shape=shape) + destination = LIF(shape=shape) + weights = Weights(5.0) + connect(source.s_out, destination.a_in, ops=[weights]) + + def test_connect_population_3d_to_2d_with_reduce_dims_and_reorder(self)\ + -> None: + """Tests whether reducing dimensions together with reordering works + when going from 3D to 2D.""" + reduce_dims = [(2,), (1,), (2,), (1,), (0,), (0,)] + orders = [(0, 1), (0, 1), (1, 0), (1, 0), (0, 1), (1, 0)] + + matrices = [np.array([[1, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 1]]), + np.array([[1, 0, 1, 0, 0, 0, 0, 0], + [0, 1, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 1, 0], + [0, 0, 0, 0, 0, 1, 0, 1]]), + np.array([[1, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 1]]), + np.array([[1, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 1]]), + np.array([[1, 0, 0, 0, 1, 0, 0, 0], + [0, 1, 0, 0, 0, 1, 0, 0], + [0, 0, 1, 0, 0, 0, 1, 0], + [0, 0, 0, 1, 0, 0, 0, 1]]), + np.array([[1, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 1]])] + + for dims, order, expected in zip(reduce_dims, orders, matrices): + source = LIF(shape=(2, 2, 2)) + destination = LIF(shape=(2, 2)) + reorder_op = Reorder(order=order) + reduce_op = ReduceDims(reduce_dims=dims) + computed = connect(source.s_out, + destination.a_in, + ops=[reduce_op, reorder_op]) + + self.assertTrue(np.array_equal(computed.weights.get(), expected)) + + def test_connect_population_2d_to_3d_with_expand_dims_and_reorder(self)\ + -> None: + """Tests whether expanding dimensions together with reordering works + when going from 2D to 3D.""" + orders = [(0, 1, 2), + (0, 2, 1), + (1, 0, 2), + (1, 2, 0), + (2, 0, 1), + (2, 1, 0)] + + matrices = [np.array([[1, 0, 0, 0], + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 1]]), + np.array([[1, 0, 0, 0], + [0, 1, 0, 0], + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 1, 0], + [0, 0, 0, 1]]), + np.array([[1, 0, 0, 0], + [1, 0, 0, 0], + [0, 0, 1, 0], + [0, 0, 1, 0], + [0, 1, 0, 0], + [0, 1, 0, 0], + [0, 0, 0, 1], + [0, 0, 0, 1]]), + np.array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]]), + np.array([[1, 0, 0, 0], + [0, 0, 1, 0], + [1, 0, 0, 0], + [0, 0, 1, 0], + [0, 1, 0, 0], + [0, 0, 0, 1], + [0, 1, 0, 0], + [0, 0, 0, 1]]), + np.array([[1, 0, 0, 0], + [0, 0, 1, 0], + [0, 1, 0, 0], + [0, 0, 0, 1], + [1, 0, 0, 0], + [0, 0, 1, 0], + [0, 1, 0, 0], + [0, 0, 0, 1]])] + + for order, expected in zip(orders, matrices): + source = LIF(shape=(2, 2)) + destination = LIF(shape=(2, 2, 2)) + reorder_op = Reorder(order=order) + expand_op = ExpandDims(new_dims_shape=(2,)) + computed = connect(source.s_out, + destination.a_in, + ops=[expand_op, reorder_op]) + + self.assertTrue(np.array_equal(computed.weights.get(), expected)) + + def test_connect_population_1d_to_3d_with_expand_dims_and_reorder(self) \ + -> None: + """Tests whether expanding dimensions together with reordering works + 
when going from 1D to 3D.""" + orders = [(0, 1, 2), + (1, 0, 2), + (2, 1, 0), + (0, 2, 1), + (2, 0, 1), + (1, 2, 0)] + + matrices = [np.array([[1, 0], + [1, 0], + [1, 0], + [1, 0], + [0, 1], + [0, 1], + [0, 1], + [0, 1]]), + np.array([[1, 0], + [1, 0], + [0, 1], + [0, 1], + [1, 0], + [1, 0], + [0, 1], + [0, 1]]), + np.array([[1, 0], + [0, 1], + [1, 0], + [0, 1], + [1, 0], + [0, 1], + [1, 0], + [0, 1]]), + np.array([[1, 0], + [1, 0], + [1, 0], + [1, 0], + [0, 1], + [0, 1], + [0, 1], + [0, 1]]), + np.array([[1, 0], + [0, 1], + [1, 0], + [0, 1], + [1, 0], + [0, 1], + [1, 0], + [0, 1]]), + np.array([[1, 0], + [1, 0], + [0, 1], + [0, 1], + [1, 0], + [1, 0], + [0, 1], + [0, 1]])] + + for order, expected in zip(orders, matrices): + source = LIF(shape=(2,)) + destination = LIF(shape=(2, 2, 2)) + reorder_op = Reorder(order=order) + expand_op = ExpandDims(new_dims_shape=(2, 2)) + computed = connect(source.s_out, + destination.a_in, + ops=[expand_op, reorder_op]) + + self.assertTrue(np.array_equal(computed.weights.get(), expected)) + + def test_connect_population_with_selective_kernel(self) -> None: + """Tests whether populations can be connected to themselves using the + Convolution operation and a SelectiveKernel.""" + for shape in [(1,), (5,), (5, 5), (5, 5, 5)]: + population = LIF(shape=shape) + kernel = SelectiveKernel(amp_exc=1.0, + width_exc=[2] * len(shape), + global_inh=-0.1) + connect(population.s_out, + population.a_in, + ops=[Convolution(kernel)]) + + def test_connect_population_with_multi_peak_kernel(self) -> None: + """Tests whether populations can be connected to themselves using the + Convolution operation and a MultiPeakKernel.""" + for shape in [(1,), (5,), (5, 5), (5, 5, 5)]: + population = LIF(shape=shape) + kernel = MultiPeakKernel(amp_exc=1.0, + width_exc=[2] * len(shape), + amp_inh=-0.5, + width_inh=[4] * len(shape)) + connect(population.s_out, + population.a_in, + ops=[Convolution(kernel)]) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/lava/lib/dnf/acceptance/test_gauss_spike_generator.py b/tests/lava/lib/dnf/acceptance/test_gauss_spike_generator.py new file mode 100644 index 0000000..c69a448 --- /dev/null +++ b/tests/lava/lib/dnf/acceptance/test_gauss_spike_generator.py @@ -0,0 +1,97 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import unittest +import numpy as np +import typing as ty + +from lava.magma.core.decorator import implements, requires, tag +from lava.magma.core.model.py.model import PyLoihiProcessModel +from lava.magma.core.model.py.ports import PyInPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.process.ports.ports import InPort +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.variable import Var +from lava.magma.core.resources import CPU +from lava.magma.core.run_configs import Loihi1SimCfg +from lava.magma.core.run_conditions import RunSteps +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol + +from lava.lib.dnf.inputs.gauss_pattern.process import GaussPattern +from lava.lib.dnf.inputs.rate_code_spike_gen.process import RateCodeSpikeGen + + +class SinkProcess(AbstractProcess): + """ + Process that receives arbitrary vectors + + Parameters + ---------- + shape: tuple, shape of the process + """ + + def __init__(self, **kwargs: ty.Tuple[int, ...]) -> None: + super().__init__(**kwargs) + shape = kwargs.get("shape") + + self.data = Var(shape=shape, init=np.nan) + + 
self.s_in = InPort(shape=(shape[0],)) + + +@implements(proc=SinkProcess, protocol=LoihiProtocol) +@requires(CPU) +@tag('floating_pt') +class SinkProcessModel(PyLoihiProcessModel): + data: np.ndarray = LavaPyType(np.ndarray, float) + + s_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, bool) + + def run_spk(self) -> None: + """Receive data and store in an internal variable""" + s_in = self.s_in.recv() + self.data[:, self.current_ts - 1] = s_in + + +class TestGaussRateCodeSpikeGen(unittest.TestCase): + def test_rate_code_spike_gen_receiving_gauss_pattern(self) -> None: + """Tests whether the SpikeGenerator Process works as expected in + combination with the GaussPattern Process, producing spikes that are + centered around neuron 1 for 10 time steps, and then switching to + spikes centered around neuron 3 for subsequent 10 time steps.""" + num_steps_per_pattern = 10 + shape = (5,) + expected_spikes = np.array( + [[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] + ) + + gauss_pattern = GaussPattern(shape=shape, amplitude=1500.0, mean=1, + stddev=0.2) + spike_gen = RateCodeSpikeGen(shape=shape, seed=42) + sink_process = SinkProcess(shape=(shape[0], num_steps_per_pattern * 2)) + + gauss_pattern.out_ports.a_out.connect(spike_gen.in_ports.a_in) + spike_gen.out_ports.s_out.connect(sink_process.in_ports.s_in) + + run_condition = RunSteps(num_steps=num_steps_per_pattern) + run_cfg = Loihi1SimCfg() + + try: + spike_gen.run(condition=run_condition, run_cfg=run_cfg) + gauss_pattern.mean = 3 + spike_gen.run(condition=run_condition, run_cfg=run_cfg) + + received_spikes = sink_process.data.get() + np.testing.assert_array_equal(received_spikes, + expected_spikes) + finally: + spike_gen.stop() + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/lava/lib/dnf/connect/__init__.py b/tests/lava/lib/dnf/connect/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/lava/lib/dnf/connect/test_connect.py b/tests/lava/lib/dnf/connect/test_connect.py new file mode 100644 index 0000000..1c0302e --- /dev/null +++ b/tests/lava/lib/dnf/connect/test_connect.py @@ -0,0 +1,242 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import unittest +import numpy as np +import typing as ty + +from lava.magma.core.process.ports.ports import InPort, OutPort +from lava.magma.core.process.process import AbstractProcess + +from lava.lib.dnf.connect.connect import connect +from lava.lib.dnf.connect.exceptions import MisconfiguredConnectError +from lava.lib.dnf.operations.operations import AbstractOperation +from lava.lib.dnf.operations.shape_handlers import AbstractShapeHandler,\ + KeepShapeHandler +from lava.lib.dnf.operations.exceptions import MisconfiguredOpError +from lava.lib.dnf.utils.convenience import num_neurons + + +class MockProcess(AbstractProcess): + """Mock Process with an InPort and OutPort""" + def __init__(self, shape: ty.Tuple[int, ...] 
= (1,)) -> None: + super().__init__() + self.a_in = InPort(shape) + self.s_out = OutPort(shape) + + +class MockNoChangeOperation(AbstractOperation): + """Mock Operation that does not change shape""" + def __init__(self) -> None: + super().__init__(shape_handler=KeepShapeHandler()) + + def _compute_weights(self) -> np.ndarray: + return np.eye(num_neurons(self.output_shape), + num_neurons(self.input_shape), + dtype=np.int32) + + +class MockShapeHandler(AbstractShapeHandler): + """Mock ShapeHandler for an operation that changes the shape.""" + def __init__(self, output_shape: ty.Tuple[int, ...]) -> None: + super().__init__() + self._output_shape = output_shape + + def _validate_args(self) -> None: + if self.input_shape == self.output_shape: + raise MisconfiguredOpError("operation is intended to change shape") + + def _compute_output_shape(self) -> None: + pass + + def _validate_input_shape(self, input_shape: ty.Tuple[int, ...]) -> None: + pass + + +class MockChangeOperation(AbstractOperation): + """Mock Operation that changes shape""" + def __init__(self, output_shape: ty.Tuple[int, ...]) -> None: + super().__init__(MockShapeHandler(output_shape)) + + def _compute_weights(self) -> np.ndarray: + return np.eye(num_neurons(self.output_shape), + num_neurons(self.input_shape), + dtype=np.int32) + + +class TestConnect(unittest.TestCase): + def test_connect_function_exists_and_is_callable(self) -> None: + """Tests whether the connect function exists and is callable.""" + import lava + self.assertTrue(callable(getattr(lava.lib.dnf.connect.connect, + 'connect'))) + + def _test_connections(self, + source: MockProcess, + destination: MockProcess, + connections: AbstractProcess) -> None: + """For a given source, destination, and connections Processes, + tests whether they have been connected.""" + + # check whether the connect function returns a process + self.assertIsInstance(connections, AbstractProcess) + + # check whether 'source' is connected to 'connections' + src_op = source.out_ports.s_out + con_ip = connections.in_ports.s_in + # TODO (MR): remove this after switching to Reshape ports + rs1_op = src_op.get_dst_ports()[0].process.out_ports.s_out + self.assertEqual(rs1_op.get_dst_ports(), [con_ip]) + self.assertEqual(con_ip.get_src_ports(), [rs1_op]) + + # check whether 'connections' is connected to 'target' + con_op = connections.out_ports.a_out + dst_op = destination.in_ports.a_in + # TODO (MR): remove this after switching to Reshape ports + rs2_op = con_op.get_dst_ports()[0].process.out_ports.s_out + self.assertEqual(rs2_op.get_dst_ports(), [dst_op]) + self.assertEqual(dst_op.get_src_ports(), [rs2_op]) + + def test_connecting_with_op_that_does_not_change_shape(self) -> None: + """Tests connecting a source Process to a destination Process.""" + # create mock processes and an operation to connect + source = MockProcess(shape=(1, 2, 3)) + destination = MockProcess(shape=(1, 2, 3)) + op = MockNoChangeOperation() + + # connect source to target + connections = connect(source.s_out, destination.a_in, ops=[op]) + + self._test_connections(source, destination, connections) + + def test_connecting_without_ops(self) -> None: + """Tests connecting a source Process to a destination Process + without specifying any operations.""" + # create mock processes of the same shape + shape = (1, 2, 3) + source = MockProcess(shape=shape) + destination = MockProcess(shape=shape) + + # connect source to target + connections = connect(source.s_out, destination.a_in) + + # default connection weights should be the identity 
matrix + np.testing.assert_array_equal(connections.weights.get(), + np.eye(int(np.prod(shape)))) + + self._test_connections(source, destination, connections) + + def test_connecting_different_shapes_without_ops_raises_error(self) -> None: + """Tests whether an exception is raised when trying to connect two + Processes that have different shapes while not specifying any + operations.""" + # create mock processes of different shapes + source = MockProcess(shape=(1, 2, 3)) + destination = MockProcess(shape=(3, 2, 1)) + + with self.assertRaises(MisconfiguredConnectError): + connect(source.s_out, destination.a_in) + + def test_empty_operations_list_raises_value_error(self) -> None: + """Tests whether an empty argument raises a value error.""" + with self.assertRaises(ValueError): + connect(MockProcess().s_out, MockProcess().a_in, ops=[]) + + def test_ops_list_containing_invalid_type_raises_type_error(self) -> None: + """Tests whether the type of all elements in is validated.""" + class NotAnOperation: + pass + + with self.assertRaises(TypeError): + connect(MockProcess().s_out, + MockProcess().a_in, + ops=[MockNoChangeOperation(), NotAnOperation()]) + + def test_single_operation_is_automatically_wrapped_into_list(self) -> None: + """Tests whether a single operation is wrapped into a list.""" + connect(MockProcess().s_out, + MockProcess().a_in, + ops=MockNoChangeOperation()) + + def test_operation_that_changes_the_shape(self) -> None: + """Tests whether an operation that changes shape can be used to + connect two Processes.""" + output_shape = (3,) + connect(MockProcess((5, 3)).s_out, + MockProcess(output_shape).a_in, + ops=MockChangeOperation(output_shape=output_shape)) + + def test_mismatching_op_output_shape_and_dest_shape_raises_error(self)\ + -> None: + """Tests whether an error is raised when the output shape of the + last operation does not match the destination shape.""" + with self.assertRaises(MisconfiguredConnectError): + connect(MockProcess((5, 3)).s_out, + MockProcess((5,)).a_in, + ops=[MockNoChangeOperation()]) + + def test_combining_multiple_ops_that_do_not_change_shape(self) -> None: + """Tests whether multiple operations can be specified that do not + change the shape.""" + connect(MockProcess((5, 3)).s_out, + MockProcess((5, 3)).a_in, + ops=[MockNoChangeOperation(), + MockNoChangeOperation()]) + + def test_multiple_non_changing_ops_and_one_that_changes_shape(self) -> None: + """Tests whether an operation that changes the shape + can be combined with multiple operations that do not change the + shape.""" + output_shape = (5,) + connect(MockProcess((5, 3)).s_out, + MockProcess(output_shape).a_in, + ops=[MockNoChangeOperation(), + MockChangeOperation(output_shape), + MockNoChangeOperation()]) + + def test_multiple_ops_that_change_shape(self) -> None: + """Tests whether multiple operations that change shape can be + combined with one that does not change shape.""" + connect(MockProcess((5, 3)).s_out, + MockProcess((2,)).a_in, + ops=[MockNoChangeOperation(), + MockChangeOperation(output_shape=(5, 2)), + MockChangeOperation(output_shape=(2,)), + MockNoChangeOperation()]) + + def test_weights_from_multiple_ops_get_multiplied(self) -> None: + """Tests whether compute_weights() multiplies the weights that are + produced by all specified operations.""" + + class MockNoChangeOpWeights(MockNoChangeOperation): + """Mock Operation that generates an identity matrix with a given + weight.""" + def __init__(self, weight: float) -> None: + super().__init__() + self.weight = weight + + def 
_compute_weights(self) -> np.ndarray: + return np.eye(num_neurons(self.output_shape), + num_neurons(self.input_shape), + dtype=np.int32) * self.weight + + shape = (5, 3) + w1 = 2 + w2 = 4 + + conn = connect(MockProcess(shape).s_out, + MockProcess(shape).a_in, + ops=[MockNoChangeOpWeights(weight=w1), + MockNoChangeOpWeights(weight=w2)]) + + computed_weights = conn.weights.get() + expected_weights = np.eye(num_neurons(shape), + num_neurons(shape), + dtype=np.int32) * w1 * w2 + + self.assertTrue(np.array_equal(computed_weights, expected_weights)) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/lava/lib/dnf/connect/test_exceptions.py b/tests/lava/lib/dnf/connect/test_exceptions.py new file mode 100644 index 0000000..4dc3fab --- /dev/null +++ b/tests/lava/lib/dnf/connect/test_exceptions.py @@ -0,0 +1,22 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import unittest + +from lava.lib.dnf.connect.exceptions import MisconfiguredConnectError + + +class TestMisconfiguredConnectError(unittest.TestCase): + def test_raising_misconfigured_connect_error(self) -> None: + """Tests whether the MisconfiguredConnectError can be raised.""" + msg = "test message" + with self.assertRaises(MisconfiguredConnectError) as context: + raise MisconfiguredConnectError(msg) + + # check whether the message is set + self.assertEqual(context.exception.args[0], msg) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/lava/lib/dnf/inputs/__init__.py b/tests/lava/lib/dnf/inputs/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/lava/lib/dnf/inputs/gauss_pattern/__init__.py b/tests/lava/lib/dnf/inputs/gauss_pattern/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/lava/lib/dnf/inputs/gauss_pattern/test_models.py b/tests/lava/lib/dnf/inputs/gauss_pattern/test_models.py new file mode 100644 index 0000000..d0fdad5 --- /dev/null +++ b/tests/lava/lib/dnf/inputs/gauss_pattern/test_models.py @@ -0,0 +1,122 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import unittest +import numpy as np +import typing as ty + +from lava.magma.core.decorator import implements, requires, tag +from lava.magma.core.model.py.model import PyLoihiProcessModel +from lava.magma.core.model.py.ports import PyInPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.process.ports.ports import InPort +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.variable import Var +from lava.magma.core.resources import CPU +from lava.magma.core.run_configs import Loihi1SimCfg +from lava.magma.core.run_conditions import RunSteps +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol + +from lava.lib.dnf.inputs.gauss_pattern.process import GaussPattern +from lava.lib.dnf.utils.math import gauss + + +class SinkProcess(AbstractProcess): + """ + Process that receives arbitrary vectors + + Parameters + ---------- + shape: tuple, shape of the process + """ + + def __init__(self, **kwargs: ty.Tuple[int, ...]) -> None: + super().__init__(**kwargs) + shape = kwargs.get("shape") + + self.data = Var(shape=shape, init=0) + + self.a_in = InPort(shape=shape) + + +@implements(proc=SinkProcess, protocol=LoihiProtocol) +@requires(CPU) +@tag('floating_pt') +class SinkProcessModel(PyLoihiProcessModel): + data: np.ndarray = LavaPyType(np.ndarray, float) + + a_in: PyInPort = 
LavaPyType(PyInPort.VEC_DENSE, float) + + def run_spk(self) -> None: + """Receive data and store in an internal variable""" + # Receive data from PyInPort + data = self.a_in.recv() + + # If the received pattern is not the null_pattern ... + if not np.isnan(data).any(): + self.data = data + + +class TestGaussPatternProcessModel(unittest.TestCase): + def test_gauss_pattern(self) -> None: + """Tests whether GaussPatternProcessModel computes and sends a gauss + pattern given its parameters.""" + gauss_pattern = GaussPattern(shape=(30, 30), + amplitude=200., + mean=[15., 15.], + stddev=[5., 5.]) + sink_process = SinkProcess(shape=(30, 30)) + gauss_pattern.out_ports.a_out.connect(sink_process.in_ports.a_in) + + gauss_generated_pattern = gauss(shape=(30, 30), + domain=None, + amplitude=200., + mean=np.array([15., 15.]), + stddev=np.array([5., 5.])) + + try: + gauss_pattern.run(condition=RunSteps(num_steps=3), + run_cfg=Loihi1SimCfg()) + + np.testing.assert_array_equal(sink_process.data.get(), + gauss_generated_pattern) + finally: + gauss_pattern.stop() + + def test_change_pattern_triggers_computation_and_send(self) -> None: + """Tests whether GaussPatternProcessModel recomputes a new pattern and + sends it when its parameters are changed. If that's the case, it will + be received by the SinkProcess one timestep later""" + gauss_pattern = GaussPattern(shape=(30, 30), + amplitude=200., + mean=[15., 15.], + stddev=[5., 5.]) + sink_process = SinkProcess(shape=(30, 30)) + gauss_pattern.out_ports.a_out.connect(sink_process.in_ports.a_in) + + gauss_generated_pattern = gauss(shape=(30, 30), + domain=None, + amplitude=100., + mean=np.array([10., 10.]), + stddev=np.array([3., 3.])) + + try: + gauss_pattern.run(condition=RunSteps(num_steps=3), + run_cfg=Loihi1SimCfg()) + + gauss_pattern.amplitude = 100. + gauss_pattern.mean = [10., 10.] + gauss_pattern.stddev = [3., 3.] 
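+            # The three setters above mark the pattern as changed; the
+            # recomputed pattern reaches the sink one time step after the
+            # run resumes.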
+ + gauss_pattern.run(condition=RunSteps(num_steps=5), + run_cfg=Loihi1SimCfg()) + + np.testing.assert_array_equal(sink_process.data.get(), + gauss_generated_pattern) + finally: + gauss_pattern.stop() + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/lava/lib/dnf/inputs/gauss_pattern/test_process.py b/tests/lava/lib/dnf/inputs/gauss_pattern/test_process.py new file mode 100644 index 0000000..ff18963 --- /dev/null +++ b/tests/lava/lib/dnf/inputs/gauss_pattern/test_process.py @@ -0,0 +1,176 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import unittest +import numpy as np + +from lava.magma.core.run_conditions import RunSteps +from lava.magma.core.run_configs import Loihi1SimCfg + +from lava.lib.dnf.inputs.gauss_pattern.process import GaussPattern + + +class TestGaussPattern(unittest.TestCase): + def test_init(self) -> None: + """Tests whether a GaussPattern process can be initiated.""" + gauss_pattern = GaussPattern(shape=(30, 30), + amplitude=200., + mean=[15., 15.], + stddev=[5., 5.]) + + np.testing.assert_array_equal(gauss_pattern.shape, + np.array((30, 30))) + np.testing.assert_array_equal(gauss_pattern.amplitude, + np.array([200.])) + np.testing.assert_array_equal(gauss_pattern.mean, + np.array([15., 15.])) + np.testing.assert_array_equal(gauss_pattern.stddev, + np.array([5., 5.])) + np.testing.assert_array_equal(gauss_pattern.null_pattern.get(), + np.full((30, 30), np.nan)) + np.testing.assert_array_equal(gauss_pattern.pattern.get(), + np.zeros((30, 30))) + np.testing.assert_array_equal(gauss_pattern.changed.get(), + np.array([True])) + + def test_init_float_parameters(self) -> None: + """Tests whether a GaussPattern process can be initiated with float + mean and stddev.""" + gauss_pattern = GaussPattern(shape=(30, 30), + amplitude=200., + mean=15., + stddev=5.) + + np.testing.assert_array_equal(gauss_pattern.shape, + np.array((30, 30))) + np.testing.assert_array_equal(gauss_pattern.amplitude, + np.array([200.])) + np.testing.assert_array_equal(gauss_pattern.mean, + np.array([15., 15.])) + np.testing.assert_array_equal(gauss_pattern.stddev, + np.array([5., 5.])) + np.testing.assert_array_equal(gauss_pattern.null_pattern.get(), + np.full((30, 30), np.nan)) + np.testing.assert_array_equal(gauss_pattern.pattern.get(), + np.zeros((30, 30))) + np.testing.assert_array_equal(gauss_pattern.changed.get(), + np.array([True])) + + def test_init_validation(self) -> None: + """Tests whether a GaussPattern process instantiation with mean or + stddev length not matching shape dimensionality raises a ValueError.""" + with self.assertRaises(ValueError): + GaussPattern(shape=(30, 30), + amplitude=200., + mean=[15., 15., 15.], + stddev=5.) 
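+        # a stddev whose length does not match the shape dimensionality
+        # must be rejected as well (tested below)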
+ + with self.assertRaises(ValueError): + GaussPattern(shape=(30, 30), + amplitude=200., + mean=15., + stddev=[5., 5., 5.]) + + def test_running(self) -> None: + """Tests whether a GaussPattern process can be run.""" + num_steps = 10 + + gauss_pattern = GaussPattern(shape=(30, 30), + amplitude=200., + mean=[15., 15.], + stddev=[5., 5.]) + + try: + gauss_pattern.run(condition=RunSteps(num_steps=num_steps), + run_cfg=Loihi1SimCfg()) + finally: + gauss_pattern.stop() + + self.assertEqual(gauss_pattern.runtime.current_ts, num_steps) + + def test_set_parameters(self) -> None: + """Tests whether setters for amplitude, mean and stddev actually + set values for the corresponding Vars and whether the changed Var + gets set to True.""" + gauss_pattern = GaussPattern(shape=(30, 30), + amplitude=200., + mean=[15., 15.], + stddev=[5., 5.]) + + try: + gauss_pattern.run(condition=RunSteps(num_steps=1), + run_cfg=Loihi1SimCfg()) + + gauss_pattern.amplitude = 250. + + np.testing.assert_array_equal(gauss_pattern.amplitude, + np.array([250.])) + np.testing.assert_array_equal(gauss_pattern.changed.get(), + np.array([True])) + + gauss_pattern.run(condition=RunSteps(num_steps=1), + run_cfg=Loihi1SimCfg()) + + gauss_pattern.mean = 20. + + np.testing.assert_array_equal(gauss_pattern.mean, + np.array([20., 20.])) + np.testing.assert_array_equal(gauss_pattern.changed.get(), + np.array([True])) + + gauss_pattern.run(condition=RunSteps(num_steps=1), + run_cfg=Loihi1SimCfg()) + + gauss_pattern.mean = [22., 22.] + + np.testing.assert_array_equal(gauss_pattern.mean, + np.array([22., 22.])) + np.testing.assert_array_equal(gauss_pattern.changed.get(), + np.array([True])) + + gauss_pattern.run(condition=RunSteps(num_steps=1), + run_cfg=Loihi1SimCfg()) + + gauss_pattern.stddev = 10. + + np.testing.assert_array_equal(gauss_pattern.stddev, + np.array([10., 10.])) + np.testing.assert_array_equal(gauss_pattern.changed.get(), + np.array([True])) + + gauss_pattern.run(condition=RunSteps(num_steps=1), + run_cfg=Loihi1SimCfg()) + + gauss_pattern.stddev = [13., 13.] + + np.testing.assert_array_equal(gauss_pattern.stddev, + np.array([13., 13.])) + np.testing.assert_array_equal(gauss_pattern.changed.get(), + np.array([True])) + finally: + gauss_pattern.stop() + + def test_set_parameters_validation(self) -> None: + """Tests whether setters for mean and stddev raise a ValueError for + lengths not matching shape dimensionality.""" + gauss_pattern = GaussPattern(shape=(30, 30), + amplitude=200., + mean=[15., 15.], + stddev=[5., 5.]) + + try: + gauss_pattern.run(condition=RunSteps(num_steps=1), + run_cfg=Loihi1SimCfg()) + + with self.assertRaises(ValueError): + gauss_pattern.mean = [10., 10., 10.] + + with self.assertRaises(ValueError): + gauss_pattern.stddev = [10., 10., 10.] 
+ finally: + gauss_pattern.stop() + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/lava/lib/dnf/inputs/rate_code_spike_gen/__init__.py b/tests/lava/lib/dnf/inputs/rate_code_spike_gen/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/lava/lib/dnf/inputs/rate_code_spike_gen/test_models.py b/tests/lava/lib/dnf/inputs/rate_code_spike_gen/test_models.py new file mode 100644 index 0000000..495f7fd --- /dev/null +++ b/tests/lava/lib/dnf/inputs/rate_code_spike_gen/test_models.py @@ -0,0 +1,261 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import unittest +import numpy as np +import typing as ty + +from lava.magma.core.decorator import implements, requires, tag +from lava.magma.core.model.py.model import PyLoihiProcessModel +from lava.magma.core.model.py.ports import PyOutPort, PyInPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.process.ports.ports import OutPort, InPort +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.variable import Var +from lava.magma.core.resources import CPU +from lava.magma.core.run_configs import Loihi1SimCfg +from lava.magma.core.run_conditions import RunSteps +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol + +from lava.lib.dnf.inputs.rate_code_spike_gen.process import RateCodeSpikeGen + + +class SinkProcess(AbstractProcess): + """ + Process that receives spike (bool) vectors + + Parameters + ---------- + shape: tuple, shape of the process + """ + + def __init__(self, **kwargs: ty.Tuple[int, ...]) -> None: + super().__init__(**kwargs) + shape = kwargs.get("shape") + + self.data = Var(shape=shape, init=np.nan) + self.s_in = InPort(shape=(shape[0],)) + + +@implements(proc=SinkProcess, protocol=LoihiProtocol) +@requires(CPU) +@tag('floating_pt') +class SinkProcessModel(PyLoihiProcessModel): + data: np.ndarray = LavaPyType(np.ndarray, float) + s_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, bool) + + def run_spk(self) -> None: + """Receive data and store in an internal variable""" + s_in = self.s_in.recv() + self.data[:, self.current_ts - 1] = s_in + + +class SourceProcess(AbstractProcess): + """ + Process that sends arbitrary vectors + + Parameters + ---------- + shape: tuple, shape of the process + """ + + def __init__(self, + **kwargs: ty.Union[ty.Tuple, np.ndarray]) -> None: + super().__init__(**kwargs) + shape = kwargs.get("shape") + data = kwargs.get("data") + + self.null_data = Var(shape=shape, init=np.full(shape, np.nan)) + self._data = Var(shape=shape, init=data) + self.changed = Var(shape=(1,), init=True) + self.a_out = OutPort(shape=shape) + + def _update(self) -> None: + self.changed.set(np.array([True])) + self.changed.get() + + @property + def data(self) -> ty.Union[np.ndarray, None]: + try: + return self._data.get() + except AttributeError: + return None + + @data.setter + def data(self, data: np.ndarray) -> None: + self._data.set(data) + self._data.get() + self._update() + + +@implements(proc=SourceProcess, protocol=LoihiProtocol) +@requires(CPU) +@tag('floating_pt') +class SourceProcessModel(PyLoihiProcessModel): + null_data: np.ndarray = LavaPyType(np.ndarray, float) + _data: np.ndarray = LavaPyType(np.ndarray, float) + changed: np.ndarray = LavaPyType(np.ndarray, bool) + a_out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, float) + + def run_spk(self) -> None: + """Send data when change is triggered, null_data otherwise""" + if 
self.changed[0]:
+            self.a_out.send(self._data)
+            self.changed[0] = False
+        else:
+            self.a_out.send(self.null_data)
+
+
+class TestRateCodeSpikeGenProcessModel(unittest.TestCase):
+    def test_recv_null_pattern(self) -> None:
+        """Tests that last_spiked, inter_spike_distances, first_spike_times
+        Vars are not updated upon receipt of a null pattern."""
+        pattern = np.zeros((30,))
+        pattern[9:20] = 100.
+
+        source = SourceProcess(shape=(30,), data=pattern)
+        spike_generator = RateCodeSpikeGen(shape=(30,))
+
+        source.out_ports.a_out.connect(spike_generator.in_ports.a_in)
+
+        try:
+            source.run(condition=RunSteps(num_steps=1), run_cfg=Loihi1SimCfg())
+
+            inter_spike_distances = spike_generator.inter_spike_distances.get()
+            first_spike_times = spike_generator.first_spike_times.get()
+
+            source.run(condition=RunSteps(num_steps=5), run_cfg=Loihi1SimCfg())
+
+            np.testing.assert_array_equal(
+                spike_generator.inter_spike_distances.get(),
+                inter_spike_distances)
+
+            np.testing.assert_array_equal(
+                spike_generator.first_spike_times.get(),
+                first_spike_times)
+        finally:
+            source.stop()
+
+    def test_recv_non_null_pattern(self) -> None:
+        """Tests whether last_spiked, inter_spike_distances,
+        first_spike_times Vars are updated upon receipt of a new pattern."""
+        pattern_1 = np.zeros((30,))
+        pattern_1[9:20] = 100.
+
+        pattern_2 = np.zeros((30,))
+        pattern_2[15:25] = 150.
+
+        source = SourceProcess(shape=(30,), data=pattern_1)
+        spike_generator = RateCodeSpikeGen(shape=(30,), seed=42)
+
+        source.out_ports.a_out.connect(spike_generator.in_ports.a_in)
+
+        try:
+            source.run(condition=RunSteps(num_steps=3), run_cfg=Loihi1SimCfg())
+
+            old_inter_spike_distances = \
+                spike_generator.inter_spike_distances.get()
+            old_first_spike_times = \
+                spike_generator.first_spike_times.get()
+
+            source.data = pattern_2
+
+            source.run(condition=RunSteps(num_steps=1), run_cfg=Loihi1SimCfg())
+
+            with self.assertRaises(AssertionError):
+                np.testing.assert_array_equal(
+                    spike_generator.inter_spike_distances.get(),
+                    old_inter_spike_distances)
+
+            with self.assertRaises(AssertionError):
+                np.testing.assert_array_equal(
+                    spike_generator.first_spike_times.get(),
+                    old_first_spike_times)
+        finally:
+            source.stop()
+
+    def test_compute_distances(self) -> None:
+        """Tests whether inter-spike distances are computed correctly for
+        a given pattern."""
+        shape = (3,)
+        pattern = np.zeros(shape)
+        pattern[1] = 100.
+
+        source = SourceProcess(shape=shape, data=pattern)
+        spike_gen = RateCodeSpikeGen(shape=shape)
+        source.out_ports.a_out.connect(spike_gen.in_ports.a_in)
+
+        expected_spike_distances = np.array([0.0, 60.0, 0.0])
+
+        try:
+            source.run(condition=RunSteps(num_steps=2),
+                       run_cfg=Loihi1SimCfg())
+
+            received_spike_distances = spike_gen.inter_spike_distances.get()
+
+            np.testing.assert_array_equal(received_spike_distances,
+                                          expected_spike_distances)
+        finally:
+            source.stop()
+
+    def test_send(self) -> None:
+        """Tests whether RateCodeSpikeGenProcessModel sends data through its
+        OutPort every time step, regardless of whether its internal state
+        (inter_spike_distances ...) changed or not."""
+        num_steps = 10
+
+        pattern = np.zeros((30,))
+        pattern[9:20] = 100.
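+        # only indices 9..19 of the pattern carry a positive rate; the
+        # generator must nevertheless emit a spike vector on every time step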
+
+        source = SourceProcess(shape=(30,), data=pattern)
+        spike_generator = RateCodeSpikeGen(shape=(30,))
+        sink = SinkProcess(shape=(30, num_steps))
+
+        source.out_ports.a_out.connect(spike_generator.in_ports.a_in)
+        spike_generator.out_ports.s_out.connect(sink.in_ports.s_in)
+
+        try:
+            source.run(condition=RunSteps(num_steps=num_steps),
+                       run_cfg=Loihi1SimCfg())
+
+            self.assertFalse(np.isnan(sink.data.get()).any())
+        finally:
+            source.stop()
+
+    def test_generate_spikes(self) -> None:
+        """Tests whether the spike trains are computed correctly."""
+        num_steps = 10
+        shape = (5,)
+
+        pattern = np.zeros(shape)
+        pattern[2:3] = 1500.
+
+        expected_spikes = np.array(
+            [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+             [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+             [1, 0, 0, 0, 1, 0, 0, 0, 1, 0],
+             [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+             [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
+        )
+
+        source = SourceProcess(shape=shape, data=pattern)
+        spike_gen = RateCodeSpikeGen(shape=shape, seed=42)
+        sink = SinkProcess(shape=(shape[0], num_steps))
+
+        source.out_ports.a_out.connect(spike_gen.in_ports.a_in)
+        spike_gen.out_ports.s_out.connect(sink.in_ports.s_in)
+
+        try:
+            source.run(condition=RunSteps(num_steps=num_steps),
+                       run_cfg=Loihi1SimCfg())
+
+            received_spikes = sink.data.get()
+
+            np.testing.assert_array_equal(received_spikes, expected_spikes)
+        finally:
+            source.stop()
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/lava/lib/dnf/inputs/rate_code_spike_gen/test_process.py b/tests/lava/lib/dnf/inputs/rate_code_spike_gen/test_process.py
new file mode 100644
index 0000000..dddf9d9
--- /dev/null
+++ b/tests/lava/lib/dnf/inputs/rate_code_spike_gen/test_process.py
@@ -0,0 +1,53 @@
+# Copyright (C) 2021 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause
+# See: https://spdx.org/licenses/
+
+import unittest
+import numpy as np
+
+from lava.magma.core.run_conditions import RunSteps
+from lava.magma.core.run_configs import Loihi1SimCfg
+
+from lava.lib.dnf.inputs.rate_code_spike_gen.process import RateCodeSpikeGen
+
+
+class TestRateCodeSpikeGen(unittest.TestCase):
+    def test_init(self) -> None:
+        """Tests whether a RateCodeSpikeGen process can be initiated."""
+        spike_generator = RateCodeSpikeGen(shape=(30, 30))
+
+        np.testing.assert_array_equal(
+            spike_generator.inter_spike_distances.get(), np.zeros((30, 30)))
+        np.testing.assert_array_equal(
+            spike_generator.first_spike_times.get(), np.zeros((30, 30)))
+        np.testing.assert_array_equal(
+            spike_generator.last_spiked.get(), np.full((30, 30), -np.inf))
+        np.testing.assert_array_equal(
+            spike_generator.spikes.get(), np.zeros((30, 30)))
+
+    def test_init_validation(self) -> None:
+        """Tests whether a RateCodeSpikeGen process instantiation with
+        non-valid min_spike_rate or seed raises a ValueError."""
+        with self.assertRaises(ValueError):
+            RateCodeSpikeGen(shape=(30, 30), min_spike_rate=-5)
+
+        with self.assertRaises(ValueError):
+            RateCodeSpikeGen(shape=(30, 30), seed=-5)  # assuming a negative
+            # seed is rejected as invalid
+
+    def test_running(self) -> None:
+        """Tests whether a RateCodeSpikeGen process can be run."""
+        num_steps = 10
+
+        spike_generator = RateCodeSpikeGen(shape=(30, 30))
+
+        try:
+            spike_generator.run(condition=RunSteps(num_steps=num_steps),
+                                run_cfg=Loihi1SimCfg())
+        finally:
+            spike_generator.stop()
+
+        self.assertEqual(spike_generator.runtime.current_ts, num_steps)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/lava/lib/dnf/kernels/__init__.py b/tests/lava/lib/dnf/kernels/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git
a/tests/lava/lib/dnf/kernels/test_kernels.py b/tests/lava/lib/dnf/kernels/test_kernels.py new file mode 100644 index 0000000..2d1a388 --- /dev/null +++ b/tests/lava/lib/dnf/kernels/test_kernels.py @@ -0,0 +1,262 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import unittest +import numpy as np +import typing as ty + +from lava.lib.dnf.kernels.kernels import Kernel, MultiPeakKernel, \ + SelectiveKernel, GaussianMixin + + +class TestKernel(unittest.TestCase): + def setUp(self) -> None: + """Instantiates a Kernel object.""" + self.kernel = Kernel(weights=np.zeros(1, )) + + def test_init(self) -> None: + """Tests whether a Kernel can be instantiated.""" + self.assertIsInstance(self.kernel, Kernel) + + def test_weights_property(self) -> None: + """Tests whether the weights can be accessed via a public property + method.""" + self.assertTrue(np.array_equal(self.kernel.weights, np.zeros((1,)))) + + def test_padding_value_property_and_default_value(self) -> None: + """Tests whether the padding value can be accessed via a public + property method.""" + self.assertEqual(self.kernel.padding_value, 0) + + +class TestGaussianMixin(unittest.TestCase): + class MockKernel(GaussianMixin, Kernel): + """Mock kernel to test the GaussianMixin""" + def __init__(self, + amp_exc: float = 1.0, + width_exc: ty.Union[float, ty.List[float]] = 2.0, + limit: ty.Optional[float] = 1.0, + shape: ty.Optional[ty.Tuple[int, ...]] = None) -> None: + GaussianMixin.__init__(self, amp_exc, width_exc, limit, shape) + + def _compute_weights(self) -> None: + pass + + def test_negative_excitatory_amplitude_raises_error(self) -> None: + """Tests whether a negative excitatory amplitude raises an error.""" + with self.assertRaises(ValueError): + TestGaussianMixin.MockKernel(amp_exc=-5.0) + + def test_negative_limit_raises_error(self) -> None: + """Tests whether a negative limit raises an error.""" + with self.assertRaises(ValueError): + TestGaussianMixin.MockKernel(limit=-10) + + def test_computed_shape_is_always_odd(self) -> None: + """Tests whether the computed shape always has an odd number of + elements along each dimension.""" + for width in [2, 3, [2, 2], [3, 3], [2, 3]]: + kernel = TestGaussianMixin.MockKernel(width_exc=width) + self.assertFalse(np.any(np.array(kernel._shape) % 2 == 0)) + + def test_explicitly_specifying_odd_shape(self) -> None: + """Tests whether specifying the shape of the kernel works.""" + shape = (5,) + kernel = TestGaussianMixin.MockKernel(shape=shape) + self.assertEqual(kernel._shape, shape) + + def test_explicitly_specified_shape_mismatching_width_raises_error(self) \ + -> None: + """Tests whether an error is raised when a shape is specified that + is incompatible with the argument.""" + with self.assertRaises(ValueError): + TestGaussianMixin.MockKernel(width_exc=[2, 2, 2], + shape=(5, 3)) + + def test_explicitly_specifying_even_shape_prints_warning(self) -> None: + """Checks whether a warning is issued if the specified size of + the kernel is even along one dimension.""" + shape = (4,) + with self.assertWarns(Warning): + kernel = TestGaussianMixin.MockKernel(shape=shape) + self.assertTrue(kernel._shape, shape) + + +class TestMultiPeakKernel(unittest.TestCase): + def test_init(self) -> None: + """Tests whether a MultiPeakKernel can be instantiated and arguments + are set correctly.""" + amp_exc = 5.0 + width_exc = [2, 2] + amp_inh = -2.0 + width_inh = [4, 4] + kernel = MultiPeakKernel(amp_exc=amp_exc, + width_exc=width_exc, + 
amp_inh=amp_inh,
+                                 width_inh=width_inh)
+        self.assertIsInstance(kernel, MultiPeakKernel)
+        self.assertEqual(kernel._amp_exc, amp_exc)
+        self.assertTrue(np.array_equal(kernel._width_exc,
+                                       np.array(width_exc)))
+        self.assertEqual(kernel._amp_inh, amp_inh)
+        self.assertTrue(np.array_equal(kernel._width_inh,
+                                       np.array(width_inh)))
+        self.assertEqual(kernel._limit, 1.0)
+        self.assertEqual(kernel.padding_value, 0)
+
+    def test_positive_inhibitory_amplitude_raises_error(self) -> None:
+        """Tests whether a positive inhibitory amplitude raises an error."""
+        with self.assertRaises(ValueError):
+            MultiPeakKernel(amp_exc=5.0,
+                            width_exc=[2, 2],
+                            amp_inh=5.0,
+                            width_inh=[4, 4])
+
+    def test_widths_of_different_shape_raise_error(self) -> None:
+        """Tests whether an error is raised when width_exc and width_inh
+        have a different shape."""
+        with self.assertRaises(ValueError):
+            MultiPeakKernel(amp_exc=5.0,
+                            width_exc=[2, 2],
+                            amp_inh=-5.0,
+                            width_inh=[4, 4, 4])
+
+    def test_shape_is_computed_from_width_inh_and_limit(self) -> None:
+        """Tests whether the shape of the kernel is computed correctly."""
+        width_inh = 4
+        limit = 2
+        kernel = MultiPeakKernel(amp_exc=10,
+                                 width_exc=2,
+                                 amp_inh=-5,
+                                 width_inh=width_inh,
+                                 limit=limit)
+
+        self.assertEqual(kernel._shape[0], (2 * width_inh * limit) + 1)
+
+    def test_maximum_is_computed_correctly(self) -> None:
+        """Checks whether the maximum of the kernel is computed correctly."""
+        amp_exc = 10
+        amp_inh = -2
+        size = 5
+        kernel = MultiPeakKernel(amp_exc=amp_exc,
+                                 width_exc=4,
+                                 amp_inh=amp_inh,
+                                 width_inh=8,
+                                 shape=(size,))
+        # weight at the center of the kernel should be amp_exc + amp_inh
+        center = int(size / 2)
+        self.assertEqual(kernel.weights[center], amp_exc + amp_inh)
+
+    def test_computed_weights_when_dimensions_have_same_width(self) -> None:
+        """Checks whether the weight matrix has the same size in both
+        dimensions if the inhibitory (!) width is specified to be equal."""
+        kernel = MultiPeakKernel(amp_exc=25,
+                                 width_exc=[1, 2],
+                                 amp_inh=-10,
+                                 width_inh=[3, 3])
+
+        self.assertEqual(kernel.weights.shape[0], kernel.weights.shape[1])
+
+    def test_computed_weights_when_dimensions_have_different_width_2d(self)\
+            -> None:
+        """Checks whether the weight matrix has a larger size when the
+        inhibitory (!)
width is specified as larger.""" + kernel = MultiPeakKernel(amp_exc=25, + width_exc=[2, 2], + amp_inh=-10, + width_inh=[4, 2]) + + self.assertTrue(kernel.weights.shape[0] > kernel.weights.shape[1]) + + def test_weight_symmetry(self) -> None: + """Checks whether the computed kernel is symmetrical for different + dimensionalities and widths.""" + + for width in [2, 3, [2, 3], [2, 3, 4]]: + kernel = MultiPeakKernel(amp_exc=25, + width_exc=width, + amp_inh=-10, + width_inh=width) + + self.assertTrue(np.allclose(kernel.weights, + np.flip(kernel.weights))) + + +class TestSelectiveKernel(unittest.TestCase): + def test_init(self) -> None: + """Tests whether a SelectiveKernel can be instantiated and arguments + are set correctly.""" + amp_exc = 5.0 + width_exc = [2, 2] + global_inh = -0.1 + kernel = SelectiveKernel(amp_exc=amp_exc, + width_exc=width_exc, + global_inh=global_inh) + self.assertIsInstance(kernel, SelectiveKernel) + self.assertEqual(kernel._amp_exc, amp_exc) + self.assertEqual(kernel._global_inh, global_inh) + self.assertTrue(np.array_equal(kernel._width_exc, + np.array(width_exc))) + self.assertEqual(kernel._limit, 1.0) + self.assertEqual(kernel.padding_value, global_inh) + + def test_positive_global_inhibition_raises_error(self) -> None: + """Tests whether a positive global inhibition raises an error.""" + with self.assertRaises(ValueError): + SelectiveKernel(amp_exc=5.0, width_exc=[2, 2], global_inh=10.0) + + def test_shape_is_computed_from_width_exc_and_limit(self) -> None: + """Tests whether the shape of the kernel is computed correctly.""" + width = 4 + limit = 2 + kernel = SelectiveKernel(amp_exc=10, + width_exc=width, + global_inh=-5, + limit=limit) + + self.assertEqual(kernel._shape[0], (2 * width * limit) + 1) + + def test_maximum_is_computed_correctly(self) -> None: + """Checks whether the maximum of the kernel is computed correctly.""" + amp_exc = 10 + global_inh = -2 + size = 5 + kernel = SelectiveKernel(amp_exc=amp_exc, + width_exc=4, + global_inh=global_inh, + shape=(size,)) + # weight at the center of the kernel should be amp_exc + global_inh + center = int(size / 2) + self.assertEqual(kernel.weights[center], amp_exc + global_inh) + + def test_computed_weights_when_dimensions_have_same_width(self) -> None: + """Checks whether the weight matrix has the same size in both + dimensions if the width is specified to be equal.""" + kernel = SelectiveKernel(amp_exc=25, + width_exc=[1.5, 1.5], + global_inh=-1) + + self.assertEqual(kernel.weights.shape[0], kernel.weights.shape[1]) + + def test_computed_weights_when_dimensions_have_different_width(self)\ + -> None: + """Checks whether the weight matrix has a larger size when the width + is specified as larger.""" + kernel = SelectiveKernel(amp_exc=25, + width_exc=[4.0, 2.0], + global_inh=-1) + + self.assertTrue(kernel.weights.shape[0] > kernel.weights.shape[1]) + + def test_weight_symmetry(self) -> None: + """Checks whether the computed kernel is symmetrical for different + dimensionalities and widths.""" + + for width in [2, 3, [2, 3], [2, 3, 4]]: + kernel = SelectiveKernel(amp_exc=25, + width_exc=width, + global_inh=-0.1) + + self.assertTrue(np.allclose(kernel.weights, + np.flip(kernel.weights))) diff --git a/tests/lava/lib/dnf/operations/__init__.py b/tests/lava/lib/dnf/operations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/lava/lib/dnf/operations/test_enums.py b/tests/lava/lib/dnf/operations/test_enums.py new file mode 100644 index 0000000..4f8f02d --- /dev/null +++ 
b/tests/lava/lib/dnf/operations/test_enums.py
@@ -0,0 +1,47 @@
+# Copyright (C) 2021 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause
+# See: https://spdx.org/licenses/
+
+import unittest
+
+from lava.lib.dnf.operations.enums import ReduceMethod, BorderType
+
+
+class TestReduceMethod(unittest.TestCase):
+    def test_validate_sum(self) -> None:
+        """Tests whether SUM is a valid type of the ReduceMethod enum."""
+        ReduceMethod.validate(ReduceMethod.SUM)
+
+    def test_validate_mean(self) -> None:
+        """Tests whether MEAN is a valid type of the ReduceMethod enum."""
+        ReduceMethod.validate(ReduceMethod.MEAN)
+
+    def test_invalid_type_raises_type_error(self) -> None:
+        """Tests whether int is an invalid type of the ReduceMethod enum."""
+        with self.assertRaises(TypeError):
+            ReduceMethod.validate(int)
+
+    def test_invalid_value_raises_attribute_error(self) -> None:
+        """Tests whether FOO is an invalid value of the ReduceMethod enum."""
+        with self.assertRaises(AttributeError):
+            _ = ReduceMethod.FOO
+
+
+class TestBorderType(unittest.TestCase):
+    def test_validate_padded(self) -> None:
+        """Tests whether PADDED is a valid type of the BorderType enum."""
+        BorderType.validate(BorderType.PADDED)
+
+    def test_validate_circular(self) -> None:
+        """Tests whether CIRCULAR is a valid type of the BorderType enum."""
+        BorderType.validate(BorderType.CIRCULAR)
+
+    def test_invalid_type_raises_type_error(self) -> None:
+        """Tests whether int is an invalid type of the BorderType enum."""
+        with self.assertRaises(TypeError):
+            BorderType.validate(int)
+
+    def test_invalid_value_raises_attribute_error(self) -> None:
+        """Tests whether FOO is an invalid value of the BorderType enum."""
+        with self.assertRaises(AttributeError):
+            _ = BorderType.FOO
diff --git a/tests/lava/lib/dnf/operations/test_exceptions.py b/tests/lava/lib/dnf/operations/test_exceptions.py
new file mode 100644
index 0000000..2edb95f
--- /dev/null
+++ b/tests/lava/lib/dnf/operations/test_exceptions.py
@@ -0,0 +1,18 @@
+# Copyright (C) 2021 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause
+# See: https://spdx.org/licenses/
+
+import unittest
+
+from lava.lib.dnf.operations.exceptions import MisconfiguredOpError
+
+
+class TestMisconfiguredOpError(unittest.TestCase):
+    def test_raising_misconfigured_op_error(self) -> None:
+        """Tests whether the MisconfiguredOpError can be raised."""
+        msg = "test message"
+        with self.assertRaises(MisconfiguredOpError) as context:
+            raise MisconfiguredOpError(msg)
+
+        # check whether the message is set
+        self.assertEqual(context.exception.args[0], msg)
diff --git a/tests/lava/lib/dnf/operations/test_operations.py b/tests/lava/lib/dnf/operations/test_operations.py
new file mode 100644
index 0000000..8953ae4
--- /dev/null
+++ b/tests/lava/lib/dnf/operations/test_operations.py
@@ -0,0 +1,1143 @@
+# Copyright (C) 2021 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause
+# See: https://spdx.org/licenses/
+
+import unittest
+import numpy as np
+import typing as ty
+
+from lava.lib.dnf.operations.operations import (
+    AbstractOperation,
+    Weights,
+    ReduceDims,
+    ReduceMethod,
+    ExpandDims,
+    Reorder,
+    Convolution)
+from lava.lib.dnf.operations.enums import BorderType
+from lava.lib.dnf.operations.shape_handlers import KeepShapeHandler
+from lava.lib.dnf.kernels.kernels import Kernel
+from lava.lib.dnf.utils.convenience import num_neurons
+
+
+class MockOperation(AbstractOperation):
+    """Generic mock Operation"""
+    def __init__(self) -> None:
+        super().__init__(shape_handler=KeepShapeHandler())
+
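+    # a fixed 1x1 weight matrix is enough to exercise the configuration
+    # plumbing tested below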
+    def _compute_weights(self) -> np.ndarray:
+        return np.ones((1, 1), dtype=np.int32)
+
+
+class TestAbstractOperation(unittest.TestCase):
+    def test_computing_conn_without_prior_configuration_raises_error(self)\
+            -> None:
+        """Tests whether an error is raised when compute_weights() is called
+        before an operation has been configured."""
+        op = MockOperation()
+        with self.assertRaises(AssertionError):
+            op.compute_weights()
+
+    def test_output_shape_getter(self) -> None:
+        """Tests whether the output shape property works."""
+        op = MockOperation()
+        shape = (2, 4)
+        op._shape_handler._output_shape = shape
+        self.assertEqual(op.output_shape, shape)
+
+    def test_input_shape_getter(self) -> None:
+        """Tests whether the input shape property works."""
+        op = MockOperation()
+        shape = (2, 4)
+        op._shape_handler._input_shape = shape
+        self.assertEqual(op.input_shape, shape)
+
+    def test_computing_conn_with_prior_configuration_works(self) -> None:
+        """Tests whether compute_weights() works and can be called once
+        configuration is complete."""
+        op = MockOperation()
+        op.configure(input_shape=(1,))
+        computed_weights = op.compute_weights()
+        expected_weights = np.ones((1, 1), dtype=np.int32)
+
+        self.assertEqual(computed_weights, expected_weights)
+
+    def test_configure_sets_input_and_output_shape(self) -> None:
+        """Tests whether the configure() method sets the input and
+        output shape."""
+        input_shape = (2, 4)
+        op = MockOperation()
+        op.configure(input_shape=input_shape)
+        self.assertEqual(op.input_shape, input_shape)
+        self.assertEqual(op.output_shape, input_shape)
+
+
+class TestWeights(unittest.TestCase):
+    def test_init(self) -> None:
+        """Tests whether a Weights operation can be instantiated."""
+        weights_op = Weights(weight=5.0)
+        self.assertIsInstance(weights_op, Weights)
+
+    def test_weight_is_set_correctly(self) -> None:
+        """Tests whether the weight is set correctly."""
+        weight = 5.0
+        op = Weights(weight=weight)
+        self.assertEqual(op.weight, weight)
+
+    def test_compute_weights(self) -> None:
+        """Tests whether computing weights produces the expected result for
+        shapes of different dimensionality."""
+        for shape in [(1,), (5,), (5, 3), (5, 3, 2)]:
+            w = 5.0
+            weights_op = Weights(weight=w)
+            weights_op.configure(shape)
+
+            computed_weights = weights_op.compute_weights()
+            expected_weights = np.eye(num_neurons(shape),
+                                      num_neurons(shape),
+                                      dtype=np.int32) * w
+
+            self.assertTrue(np.array_equal(computed_weights,
+                                           expected_weights))
+
+
+class TestReduceDims(unittest.TestCase):
+    def test_init(self) -> None:
+        """Tests whether a ReduceDims operation can be instantiated."""
+        reduce_method = ReduceMethod.SUM
+        op = ReduceDims(reduce_dims=0,
+                        reduce_method=reduce_method)
+
+        self.assertIsInstance(op, ReduceDims)
+        self.assertEqual(op.reduce_method, reduce_method)
+
+    def test_compute_weights_2d_to_0d_sum(self) -> None:
+        """Tests reducing dimensionality from 2D to 0D using SUM."""
+        op = ReduceDims(reduce_dims=(0, 1),
+                        reduce_method=ReduceMethod.SUM)
+        op.configure(input_shape=(3, 3))
+        computed_weights = op.compute_weights()
+        expected_weights = np.ones((1, 9))
+
+        self.assertTrue(np.array_equal(computed_weights, expected_weights))
+
+    def test_compute_weights_2d_to_0d_mean(self) -> None:
+        """Tests reducing dimensionality from 2D to 0D using MEAN."""
+        op = ReduceDims(reduce_dims=(0, 1),
+                        reduce_method=ReduceMethod.MEAN)
+        op.configure(input_shape=(3, 3))
+        computed_weights = op.compute_weights()
+        expected_weights = np.ones((1, 9)) / 9.0
+
+        self.assertTrue(np.array_equal(computed_weights, expected_weights))
+
+    def test_reduce_method_mean(self) -> None:
+        """Tests whether MEAN produces the same results as SUM divided by the
+        number of elements in the reduced dimension."""
+        reduce_dims = (1,)
+        input_shape = (3, 3)
+        op_sum = ReduceDims(reduce_dims=reduce_dims,
+                            reduce_method=ReduceMethod.SUM)
+        op_sum.configure(input_shape=input_shape)
+        computed_weights_sum = op_sum.compute_weights()
+
+        op_mean = ReduceDims(reduce_dims=reduce_dims,
+                             reduce_method=ReduceMethod.MEAN)
+        op_mean.configure(input_shape=input_shape)
+        computed_weights_mean = op_mean.compute_weights()
+
+        self.assertTrue(np.array_equal(computed_weights_mean,
+                                       computed_weights_sum / 9.0))
+
+    def test_compute_weights_2d_to_1d_reduce_axis_0_sum(self) -> None:
+        """Tests reducing dimension 0 from 2D to 1D using SUM."""
+        op = ReduceDims(reduce_dims=(0,),
+                        reduce_method=ReduceMethod.SUM)
+        op.configure(input_shape=(3, 3))
+        computed_weights = op.compute_weights()
+        expected_weights = np.array([[1, 0, 0, 1, 0, 0, 1, 0, 0],
+                                     [0, 1, 0, 0, 1, 0, 0, 1, 0],
+                                     [0, 0, 1, 0, 0, 1, 0, 0, 1]])
+
+        self.assertTrue(np.array_equal(computed_weights, expected_weights))
+
+    def test_compute_weights_2d_to_1d_axis_reduce_axis_1_sum(self) -> None:
+        """Tests reducing dimension 1 from 2D to 1D using SUM."""
+        op = ReduceDims(reduce_dims=(1,),
+                        reduce_method=ReduceMethod.SUM)
+        op.configure(input_shape=(3, 3))
+        computed_weights = op.compute_weights()
+        expected_weights = np.array([[1, 1, 1, 0, 0, 0, 0, 0, 0],
+                                     [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                                     [0, 0, 0, 0, 0, 0, 1, 1, 1]])
+
+        self.assertTrue(np.array_equal(computed_weights, expected_weights))
+
+    def test_compute_weights_3d_to_1d_axis_keep_axis_0_sum(self) -> None:
+        """Tests reducing dimensions 1 and 2 from 3D to 1D using SUM."""
+        op = ReduceDims(reduce_dims=(1, 2),
+                        reduce_method=ReduceMethod.SUM)
+        op.configure(input_shape=(2, 2, 2))
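+        # with row-major flattening of the (2, 2, 2) input, the first four
+        # flattened indices share index 0 along the kept dimension 0, so
+        # they all project onto the first output neuron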
computed_weights = op.compute_weights() + expected_weights = np.array([[1, 1, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 1, 1, 1]]) + + self.assertTrue(np.array_equal(computed_weights, expected_weights)) + + def test_compute_weights_3d_to_1d_axis_keep_axis_1_sum(self) -> None: + """Tests reducing dimensions 0 and 2 from 3D to 1D using SUM.""" + op = ReduceDims(reduce_dims=(0, 2), + reduce_method=ReduceMethod.SUM) + op.configure(input_shape=(2, 2, 2)) + computed_weights = op.compute_weights() + expected_weights = np.array([[1, 1, 0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 0, 0, 1, 1]]) + + self.assertTrue(np.array_equal(computed_weights, expected_weights)) + + def test_compute_weights_3d_to_1d_axis_keep_axis_2_sum(self) -> None: + """Tests reducing dimensions 0 and 1 from 3D to 1D using SUM.""" + op = ReduceDims(reduce_dims=(0, 1), + reduce_method=ReduceMethod.SUM) + op.configure(input_shape=(2, 2, 2)) + computed_weights = op.compute_weights() + expected_weights = np.array([[1, 0, 1, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 1, 0, 1]]) + + self.assertTrue(np.array_equal(computed_weights, expected_weights)) + + def test_compute_weights_3d_to_2d_axis_reduce_axis_0_sum(self) -> None: + """Tests reducing dimension 0 from 3D to 2D using SUM.""" + op = ReduceDims(reduce_dims=(0,), + reduce_method=ReduceMethod.SUM) + op.configure(input_shape=(2, 2, 2)) + computed_weights = op.compute_weights() + expected_weights = np.array([[1, 0, 0, 0, 1, 0, 0, 0], + [0, 1, 0, 0, 0, 1, 0, 0], + [0, 0, 1, 0, 0, 0, 1, 0], + [0, 0, 0, 1, 0, 0, 0, 1]]) + + self.assertTrue(np.array_equal(computed_weights, expected_weights)) + + def test_compute_weights_3d_to_2d_axis_reduce_axis_1_sum(self) -> None: + """Tests reducing dimension 1 from 3D to 2D using SUM.""" + op = ReduceDims(reduce_dims=(1,), + reduce_method=ReduceMethod.SUM) + op.configure(input_shape=(2, 2, 2)) + computed_weights = op.compute_weights() + expected_weights = np.array([[1, 0, 1, 0, 0, 0, 0, 0], + [0, 1, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 1, 0], + [0, 0, 0, 0, 0, 1, 0, 1]]) + + self.assertTrue(np.array_equal(computed_weights, expected_weights)) + + def test_compute_weights_3d_to_2d_axis_reduce_axis_2_sum(self) -> None: + """Tests reducing dimension 2 from 3D to 2D using SUM.""" + op = ReduceDims(reduce_dims=(2,), + reduce_method=ReduceMethod.SUM) + op.configure(input_shape=(2, 2, 2)) + computed_weights = op.compute_weights() + expected_weights = np.array([[1, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 1]]) + + self.assertTrue(np.array_equal(computed_weights, expected_weights)) + + +class TestExpandDims(unittest.TestCase): + def test_init(self) -> None: + """Tests whether an ExpandDims operation can be instantiated.""" + op = ExpandDims(new_dims_shape=(5,)) + self.assertIsInstance(op, ExpandDims) + + def test_compute_weights_0d_to_1d(self) -> None: + """Tests expanding dimensionality from 0D to 1D.""" + op = ExpandDims(new_dims_shape=(3,)) + op.configure(input_shape=(1,)) + computed_weights = op.compute_weights() + expected_weights = np.ones((3, 1)) + + self.assertTrue(np.array_equal(computed_weights, expected_weights)) + + def test_compute_weights_0d_to_2d(self) -> None: + """Tests expanding dimensionality from 0D to 2D.""" + op = ExpandDims(new_dims_shape=(3, 3)) + op.configure(input_shape=(1,)) + computed_weights = op.compute_weights() + expected_weights = np.ones((9, 1)) + + self.assertTrue(np.array_equal(computed_weights, expected_weights)) + + def test_compute_weights_0d_to_3d(self) -> None: + """Tests 
expanding dimensionality from 0D to 3D.""" + op = ExpandDims(new_dims_shape=(3, 3, 3)) + op.configure(input_shape=(1,)) + computed_weights = op.compute_weights() + expected_weights = np.ones((27, 1)) + + self.assertTrue(np.array_equal(computed_weights, expected_weights)) + + def test_compute_weights_1d_to_2d(self) -> None: + """Tests expanding dimensionality from 1D to 2D.""" + op = ExpandDims(new_dims_shape=(3,)) + op.configure(input_shape=(3,)) + computed_weights = op.compute_weights() + expected_weights = np.array([[1, 0, 0], + [1, 0, 0], + [1, 0, 0], + + [0, 1, 0], + [0, 1, 0], + [0, 1, 0], + + [0, 0, 1], + [0, 0, 1], + [0, 0, 1]]) + + self.assertTrue(np.array_equal(computed_weights, expected_weights)) + + def test_compute_weights_1d_to_3d(self) -> None: + """Tests expanding dimensionality from 1D to 3D.""" + op = ExpandDims(new_dims_shape=(2, 2)) + op.configure(input_shape=(2,)) + computed_weights = op.compute_weights() + expected_weights = np.array([[1, 0], + [1, 0], + [1, 0], + [1, 0], + [0, 1], + [0, 1], + [0, 1], + [0, 1]]) + + self.assertTrue(np.array_equal(computed_weights, expected_weights)) + + def test_compute_weights_2d_to_3d(self) -> None: + """Tests expanding dimensionality from 2D to 3D.""" + op = ExpandDims(new_dims_shape=(2,)) + op.configure(input_shape=(2, 2)) + computed_weights = op.compute_weights() + expected_weights = np.array([[1, 0, 0, 0], + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 1]]) + + self.assertTrue(np.array_equal(computed_weights, expected_weights)) + + +class TestReorder(unittest.TestCase): + def test_init(self) -> None: + """Tests whether a Reorder operation can be instantiated.""" + op = Reorder(order=(1, 0, 2)) + self.assertIsInstance(op, Reorder) + + def test_compute_weights_no_change_2d(self) -> None: + """Tests 'reordering' a 2D input to the same order.""" + op = Reorder(order=(0, 1)) + op.configure(input_shape=(3, 3)) + computed_weights = op.compute_weights() + expected_weights = np.eye(9) + + self.assertTrue(np.array_equal(computed_weights, expected_weights)) + + def test_compute_weights_reordered_2d(self) -> None: + """Tests reordering a 2D input by switching the dimensions.""" + op = Reorder(order=(1, 0)) + op.configure(input_shape=(3, 3)) + computed_weights = op.compute_weights() + expected_weights = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 1]]) + + self.assertTrue(np.array_equal(computed_weights, expected_weights)) + + def test_compute_weights_reordered_3d(self) -> None: + """Tests reordering a 3D input by switching the dimensions in all + possible combinations.""" + orders = [(0, 1, 2), + (0, 2, 1), + (1, 0, 2), + (1, 2, 0), + (2, 0, 1), + (2, 1, 0)] + + expected_weights = [ + np.array([[1, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 1]]), + np.array([[1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1]]), + np.array([[1, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 
0, 0, 0, 1, 0, 0],
+                      [0, 0, 1, 0, 0, 0, 0, 0],
+                      [0, 0, 0, 1, 0, 0, 0, 0],
+                      [0, 0, 0, 0, 0, 0, 1, 0],
+                      [0, 0, 0, 0, 0, 0, 0, 1]]),
+            np.array([[1, 0, 0, 0, 0, 0, 0, 0],
+                      [0, 0, 1, 0, 0, 0, 0, 0],
+                      [0, 0, 0, 0, 1, 0, 0, 0],
+                      [0, 0, 0, 0, 0, 0, 1, 0],
+                      [0, 1, 0, 0, 0, 0, 0, 0],
+                      [0, 0, 0, 1, 0, 0, 0, 0],
+                      [0, 0, 0, 0, 0, 1, 0, 0],
+                      [0, 0, 0, 0, 0, 0, 0, 1]]),
+            np.array([[1, 0, 0, 0, 0, 0, 0, 0],
+                      [0, 0, 0, 0, 1, 0, 0, 0],
+                      [0, 1, 0, 0, 0, 0, 0, 0],
+                      [0, 0, 0, 0, 0, 1, 0, 0],
+                      [0, 0, 1, 0, 0, 0, 0, 0],
+                      [0, 0, 0, 0, 0, 0, 1, 0],
+                      [0, 0, 0, 1, 0, 0, 0, 0],
+                      [0, 0, 0, 0, 0, 0, 0, 1]]),
+            np.array([[1, 0, 0, 0, 0, 0, 0, 0],
+                      [0, 0, 0, 0, 1, 0, 0, 0],
+                      [0, 0, 1, 0, 0, 0, 0, 0],
+                      [0, 0, 0, 0, 0, 0, 1, 0],
+                      [0, 1, 0, 0, 0, 0, 0, 0],
+                      [0, 0, 0, 0, 0, 1, 0, 0],
+                      [0, 0, 0, 1, 0, 0, 0, 0],
+                      [0, 0, 0, 0, 0, 0, 0, 1]])]
+
+        for order, expected in zip(orders, expected_weights):
+            op = Reorder(order=order)
+            op.configure(input_shape=(2, 2, 2))
+            computed = op.compute_weights()
+
+            self.assertTrue(np.array_equal(computed, expected))
+
+
+class TestConvolution(unittest.TestCase):
+    class MockKernel(Kernel):
+        def __init__(self, weights: np.ndarray = None) -> None:
+            if weights is None:
+                weights = np.zeros((1,))
+            super().__init__(weights=weights)
+
+    def test_init(self) -> None:
+        """Tests whether a Convolution operation can be instantiated while
+        passing in a kernel."""
+        kernel = TestConvolution.MockKernel()
+        op = Convolution(kernel)
+        # convolution is instantiated
+        self.assertIsInstance(op, Convolution)
+        # kernel is set
+        self.assertEqual(op.kernel, kernel)
+        # border type defaults to PADDED
+        self.assertEqual(op.border_types[0], BorderType.PADDED)
+
+    def test_kernel_of_type_numpy_ndarray(self) -> None:
+        """Tests whether a kernel argument of type numpy.ndarray is
+        converted internally into an AbstractKernel instance."""
+        numpy_kernel = np.zeros((1,))
+        op = Convolution(numpy_kernel)
+        self.assertIsInstance(op.kernel, Kernel)
+
+    def test_setting_valid_border_type(self) -> None:
+        """Tests whether a valid border type is set correctly."""
+        kernel = TestConvolution.MockKernel()
+        border_type = BorderType.CIRCULAR
+        op = Convolution(kernel, border_type)
+        self.assertEqual(op.border_types[0], border_type)
+
+    def test_invalid_type_of_border_type(self) -> None:
+        """Checks whether a border type with an invalid type throws an
+        exception."""
+        with self.assertRaises(Exception) as context:
+            Convolution(TestConvolution.MockKernel(),
+                        border_types=["padded"])
+        self.assertIsInstance(context.exception, TypeError)
+
+    def test_invalid_border_type_list(self) -> None:
+        """Checks whether a list containing an invalid border type raises
+        an error."""
+        border_types = [BorderType.PADDED, "circular"]
+        with self.assertRaises(TypeError):
+            Convolution(TestConvolution.MockKernel(),
+                        border_types=border_types)
+
+    def test_specifying_single_border_type_for_all_input_dimensions(self)\
+            -> None:
+        """Checks whether you can specify a single border type for all
+        dimensions of the input."""
+        kernel = TestConvolution.MockKernel()
+        border_type_in = BorderType.CIRCULAR
+        op = Convolution(kernel, border_type_in)
+        input_shape = (2, 2)
+        op.configure(input_shape=input_shape)
+
+        self.assertEqual(len(op.border_types), len(input_shape))
+
+        for border_type_op in op.border_types:
+            self.assertEqual(border_type_op, border_type_in)
+
+    def test_specifying_more_border_types_than_input_dims_raises_error(self)\
+            -> None:
+        """Checks whether specifying too many border types raises an
+        exception."""
+        kernel
= TestConvolution.MockKernel() + input_shape = (2, 2) + border_types = [BorderType.CIRCULAR] * (len(input_shape) + 1) + op = Convolution(kernel, border_types) + + with self.assertRaises(ValueError): + op.configure(input_shape=input_shape) + + def _test_compute_weights( + self, + kernel_weights: np.ndarray, + border_types: ty.Union[BorderType, ty.List[BorderType]], + input_shapes: ty.List[ty.Tuple[int, ...]], + expected_weights: ty.List[np.ndarray] + ) -> None: + """Helper method to test compute_weights() method""" + kernel = TestConvolution.MockKernel(kernel_weights) + + for input_shape, expected in zip(input_shapes, expected_weights): + with self.subTest(msg=f"input shape: {input_shape}"): + op = Convolution(kernel, border_types=border_types) + op.configure(input_shape) + computed = op.compute_weights() + + self.assertTrue(np.array_equal(computed, expected)) + + def test_compute_weights_0d_padded(self) -> None: + """Tests whether the Convolution operation can be applied to 0D + inputs with PADDED border type. It may not make sense to do this but + it is possible.""" + self._test_compute_weights( + kernel_weights=np.array([2]), + border_types=BorderType.PADDED, + input_shapes=[(1,)], + expected_weights=[np.array([[2]])] + ) + + def test_compute_weights_0d_circular(self) -> None: + """Tests whether the Convolution operation can be applied to 0D + inputs with CIRCULAR border type. It may not make sense to do this but + it is possible.""" + self._test_compute_weights( + kernel_weights=np.array([2]), + border_types=BorderType.CIRCULAR, + input_shapes=[(1,)], + expected_weights=[np.array([[2]])] + ) + + def test_connectivity_matrix_1d_odd_kernel_padded(self) -> None: + """Tests whether computing weights works for 1D inputs with an odd sized + kernel and PADDED border type. 
The input sizes cover all relevant + cases, where the input size is smaller, equal to, and larger than the + kernel size.""" + expected_weights = [ + np.array([[2, 3], + [1, 2]]), + np.array([[2, 3, 0], + [1, 2, 3], + [0, 1, 2]]), + np.array([[2, 3, 0, 0], + [1, 2, 3, 0], + [0, 1, 2, 3], + [0, 0, 1, 2]]), + np.array([[2, 3, 0, 0, 0], + [1, 2, 3, 0, 0], + [0, 1, 2, 3, 0], + [0, 0, 1, 2, 3], + [0, 0, 0, 1, 2]])] + + self._test_compute_weights( + kernel_weights=np.array([1, 2, 3]), + border_types=BorderType.PADDED, + input_shapes=[(2,), (3,), (4,), (5,)], + expected_weights=expected_weights + ) + + def test_connectivity_matrix_1d_odd_kernel_circular(self) -> None: + """Tests whether computing weights works for 1D inputs with an odd sized + kernel and CIRCULAR border type.""" + expected_weights = [ + np.array([[2, 1], + [1, 2]]), + np.array([[2, 3, 1], + [1, 2, 3], + [3, 1, 2]]), + np.array([[2, 3, 0, 1], + [1, 2, 3, 0], + [0, 1, 2, 3], + [3, 0, 1, 2]]), + np.array([[2, 3, 0, 0, 1], + [1, 2, 3, 0, 0], + [0, 1, 2, 3, 0], + [0, 0, 1, 2, 3], + [3, 0, 0, 1, 2]]) + ] + + self._test_compute_weights( + kernel_weights=np.array([1, 2, 3]), + border_types=BorderType.CIRCULAR, + input_shapes=[(2,), (3,), (4,), (5,)], + expected_weights=expected_weights + ) + + def test_connectivity_matrix_1d_even_kernel_padded(self) -> None: + """Tests whether computing weights works for 1D inputs with an even + sized kernel and PADDED border type.""" + expected_weights = [ + np.array([[3, 4], + [2, 3]]), + np.array([[3, 4, 0], + [2, 3, 4], + [1, 2, 3]]), + np.array([[3, 4, 0, 0], + [2, 3, 4, 0], + [1, 2, 3, 4], + [0, 1, 2, 3]]), + np.array([[3, 4, 0, 0, 0], + [2, 3, 4, 0, 0], + [1, 2, 3, 4, 0], + [0, 1, 2, 3, 4], + [0, 0, 1, 2, 3]]), + np.array([[3, 4, 0, 0, 0, 0], + [2, 3, 4, 0, 0, 0], + [1, 2, 3, 4, 0, 0], + [0, 1, 2, 3, 4, 0], + [0, 0, 1, 2, 3, 4], + [0, 0, 0, 1, 2, 3]]) + ] + + self._test_compute_weights( + kernel_weights=np.array([1, 2, 3, 4]), + border_types=BorderType.PADDED, + input_shapes=[(2,), (3,), (4,), (5,), (6,)], + expected_weights=expected_weights + ) + + def test_connectivity_matrix_1d_even_kernel_circular(self) -> None: + """Tests whether computing weights works for 1D inputs with an even + sized kernel and CIRCULAR border type.""" + expected_weights = [ + np.array([[3, 2], + [2, 3]]), + np.array([[3, 4, 2], + [2, 3, 4], + [4, 2, 3]]), + np.array([[3, 4, 1, 2], + [2, 3, 4, 1], + [1, 2, 3, 4], + [4, 1, 2, 3]]), + np.array([[3, 4, 0, 1, 2], + [2, 3, 4, 0, 1], + [1, 2, 3, 4, 0], + [0, 1, 2, 3, 4], + [4, 0, 1, 2, 3]]), + np.array([[3, 4, 0, 0, 1, 2], + [2, 3, 4, 0, 0, 1], + [1, 2, 3, 4, 0, 0], + [0, 1, 2, 3, 4, 0], + [0, 0, 1, 2, 3, 4], + [4, 0, 0, 1, 2, 3]]) + ] + + self._test_compute_weights( + kernel_weights=np.array([1, 2, 3, 4]), + border_types=BorderType.CIRCULAR, + input_shapes=[(2,), (3,), (4,), (5,), (6,)], + expected_weights=expected_weights + ) + + def test_connectivity_matrix_2d_odd_kernel_padded(self) -> None: + """Tests whether computing weights works for 2D inputs with an odd + sized kernel and PADDED border type.""" + expected_weights = [ + np.array([[5]]), + + np.array([[5, 6, 8, 9], + [4, 5, 7, 8], + + [2, 3, 5, 6], + [1, 2, 4, 5]]), + + np.array([[5, 6, 0, 8, 9, 0, 0, 0, 0], + [4, 5, 6, 7, 8, 9, 0, 0, 0], + [0, 4, 5, 0, 7, 8, 0, 0, 0], + + [2, 3, 0, 5, 6, 0, 8, 9, 0], + [1, 2, 3, 4, 5, 6, 7, 8, 9], + [0, 1, 2, 0, 4, 5, 0, 7, 8], + + [0, 0, 0, 2, 3, 0, 5, 6, 0], + [0, 0, 0, 1, 2, 3, 4, 5, 6], + [0, 0, 0, 0, 1, 2, 0, 4, 5]]), + + np.array( + [[5, 6, 0, 0, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0], + [4, 5, 6, 0, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 4, 5, 6, 0, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 4, 5, 0, 0, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0], + + [2, 3, 0, 0, 5, 6, 0, 0, 8, 9, 0, 0, 0, 0, 0, 0], + [1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0, 0, 0, 0, 0], + [0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0, 0, 0, 0], + [0, 0, 1, 2, 0, 0, 4, 5, 0, 0, 7, 8, 0, 0, 0, 0], + + [0, 0, 0, 0, 2, 3, 0, 0, 5, 6, 0, 0, 8, 9, 0, 0], + [0, 0, 0, 0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0], + [0, 0, 0, 0, 0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9], + [0, 0, 0, 0, 0, 0, 1, 2, 0, 0, 4, 5, 0, 0, 7, 8], + + [0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 0, 0, 5, 6, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 0, 4, 5, 6, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 0, 4, 5, 6], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 0, 4, 5]]), + + np.array([[5, 6, 0, 0, 0, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [4, 5, 6, 0, 0, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 4, 5, 6, 0, 0, 7, 8, 9, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 4, 5, 6, 0, 0, 7, 8, 9, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 4, 5, 0, 0, 0, 7, 8, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + + [2, 3, 0, 0, 0, 5, 6, 0, 0, 0, 8, 9, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 2, 3, 0, 0, 4, 5, 6, 0, 0, 7, 8, 9, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 2, 3, 0, 0, 4, 5, 6, 0, 0, 7, 8, 9, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 2, 3, 0, 0, 4, 5, 6, 0, 0, 7, 8, 9, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 2, 0, 0, 0, 4, 5, 0, 0, 0, 7, 8, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + + [0, 0, 0, 0, 0, 2, 3, 0, 0, 0, 5, 6, 0, 0, 0, + 8, 9, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 2, 3, 0, 0, 4, 5, 6, 0, 0, + 7, 8, 9, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 2, 3, 0, 0, 4, 5, 6, 0, + 0, 7, 8, 9, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 0, 0, 4, 5, 6, + 0, 0, 7, 8, 9, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 4, 5, + 0, 0, 0, 7, 8, 0, 0, 0, 0, 0], + + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 0, 0, 0, + 5, 6, 0, 0, 0, 8, 9, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 0, 0, + 4, 5, 6, 0, 0, 7, 8, 9, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 0, + 0, 4, 5, 6, 0, 0, 7, 8, 9, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, + 0, 0, 4, 5, 6, 0, 0, 7, 8, 9], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, + 0, 0, 0, 4, 5, 0, 0, 0, 7, 8], + + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 2, 3, 0, 0, 0, 5, 6, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 2, 3, 0, 0, 4, 5, 6, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 2, 3, 0, 0, 4, 5, 6, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 2, 3, 0, 0, 4, 5, 6], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 2, 0, 0, 0, 4, 5]]) + ] + + self._test_compute_weights( + kernel_weights=np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]), + border_types=BorderType.PADDED, + input_shapes=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], + expected_weights=expected_weights + ) + + def test_connectivity_matrix_2d_odd_kernel_circular(self) -> None: + """Tests whether computing weights works for 2D inputs with an odd + sized kernel and CIRCULAR border type.""" + expected_weights = [ + np.array([[5]]), + + np.array([[5, 4, 2, 1], + [4, 5, 1, 2], + + [2, 1, 5, 4], + [1, 2, 4, 5]]), + + np.array([[5, 6, 4, 8, 9, 7, 2, 3, 1], + [4, 5, 6, 7, 8, 9, 1, 2, 3], + [6, 4, 5, 9, 7, 8, 3, 1, 2], + + [2, 3, 1, 5, 6, 4, 8, 9, 7], + [1, 2, 3, 4, 5, 6, 7, 8, 9], + [3, 1, 
2, 6, 4, 5, 9, 7, 8], + + [8, 9, 7, 2, 3, 1, 5, 6, 4], + [7, 8, 9, 1, 2, 3, 4, 5, 6], + [9, 7, 8, 3, 1, 2, 6, 4, 5]]), + np.array( + [[5, 6, 0, 4, 8, 9, 0, 7, 0, 0, 0, 0, 2, 3, 0, 1], + [4, 5, 6, 0, 7, 8, 9, 0, 0, 0, 0, 0, 1, 2, 3, 0], + [0, 4, 5, 6, 0, 7, 8, 9, 0, 0, 0, 0, 0, 1, 2, 3], + [6, 0, 4, 5, 9, 0, 7, 8, 0, 0, 0, 0, 3, 0, 1, 2], + + [2, 3, 0, 1, 5, 6, 0, 4, 8, 9, 0, 7, 0, 0, 0, 0], + [1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0, 0, 0, 0, 0], + [0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0, 0, 0, 0], + [3, 0, 1, 2, 6, 0, 4, 5, 9, 0, 7, 8, 0, 0, 0, 0], + + [0, 0, 0, 0, 2, 3, 0, 1, 5, 6, 0, 4, 8, 9, 0, 7], + [0, 0, 0, 0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0], + [0, 0, 0, 0, 0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9], + [0, 0, 0, 0, 3, 0, 1, 2, 6, 0, 4, 5, 9, 0, 7, 8], + + [8, 9, 0, 7, 0, 0, 0, 0, 2, 3, 0, 1, 5, 6, 0, 4], + [7, 8, 9, 0, 0, 0, 0, 0, 1, 2, 3, 0, 4, 5, 6, 0], + [0, 7, 8, 9, 0, 0, 0, 0, 0, 1, 2, 3, 0, 4, 5, 6], + [9, 0, 7, 8, 0, 0, 0, 0, 3, 0, 1, 2, 6, 0, 4, 5]]), + + np.array([[5, 6, 0, 0, 4, 8, 9, 0, 0, 7, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 2, 3, 0, 0, 1], + [4, 5, 6, 0, 0, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 2, 3, 0, 0], + [0, 4, 5, 6, 0, 0, 7, 8, 9, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 2, 3, 0], + [0, 0, 4, 5, 6, 0, 0, 7, 8, 9, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 2, 3], + [6, 0, 0, 4, 5, 9, 0, 0, 7, 8, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 3, 0, 0, 1, 2], + + [2, 3, 0, 0, 1, 5, 6, 0, 0, 4, 8, 9, 0, 0, 7, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 2, 3, 0, 0, 4, 5, 6, 0, 0, 7, 8, 9, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 2, 3, 0, 0, 4, 5, 6, 0, 0, 7, 8, 9, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 2, 3, 0, 0, 4, 5, 6, 0, 0, 7, 8, 9, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [3, 0, 0, 1, 2, 6, 0, 0, 4, 5, 9, 0, 0, 7, 8, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + + [0, 0, 0, 0, 0, 2, 3, 0, 0, 1, 5, 6, 0, 0, 4, + 8, 9, 0, 0, 7, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 2, 3, 0, 0, 4, 5, 6, 0, 0, + 7, 8, 9, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 2, 3, 0, 0, 4, 5, 6, 0, + 0, 7, 8, 9, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 0, 0, 4, 5, 6, + 0, 0, 7, 8, 9, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 3, 0, 0, 1, 2, 6, 0, 0, 4, 5, + 9, 0, 0, 7, 8, 0, 0, 0, 0, 0], + + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 0, 0, 1, + 5, 6, 0, 0, 4, 8, 9, 0, 0, 7], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 0, 0, + 4, 5, 6, 0, 0, 7, 8, 9, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 0, + 0, 4, 5, 6, 0, 0, 7, 8, 9, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, + 0, 0, 4, 5, 6, 0, 0, 7, 8, 9], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 2, + 6, 0, 0, 4, 5, 9, 0, 0, 7, 8], + + [8, 9, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 2, 3, 0, 0, 1, 5, 6, 0, 0, 4], + [7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 2, 3, 0, 0, 4, 5, 6, 0, 0], + [0, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 2, 3, 0, 0, 4, 5, 6, 0], + [0, 0, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 2, 3, 0, 0, 4, 5, 6], + [9, 0, 0, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 3, 0, 0, 1, 2, 6, 0, 0, 4, 5]]) + ] + + self._test_compute_weights( + kernel_weights=np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]), + border_types=BorderType.CIRCULAR, + input_shapes=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], + expected_weights=expected_weights + ) + + def test_connectivity_matrix_2d_even_kernel_padded(self) -> None: + """Tests whether computing weights works for 2D inputs with an even + sized kernel and PADDED border type.""" + expected_weights = [ + np.array([[11]]), + + np.array([[11, 12, 15, 16], + [10, 11, 14, 15], + + [7, 8, 11, 12], + 
[6, 7, 10, 11]]), + + np.array([[11, 12, 0, 15, 16, 0, 0, 0, 0], + [10, 11, 12, 14, 15, 16, 0, 0, 0], + [9, 10, 11, 13, 14, 15, 0, 0, 0], + + [7, 8, 0, 11, 12, 0, 15, 16, 0], + [6, 7, 8, 10, 11, 12, 14, 15, 16], + [5, 6, 7, 9, 10, 11, 13, 14, 15], + + [3, 4, 0, 7, 8, 0, 11, 12, 0], + [2, 3, 4, 6, 7, 8, 10, 11, 12], + [1, 2, 3, 5, 6, 7, 9, 10, 11]]), + + np.array([[11, 12, 0, 0, 15, 16, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0], + [10, 11, 12, 0, 14, 15, 16, 0, 0, 0, 0, 0, 0, + 0, 0, 0], + [9, 10, 11, 12, 13, 14, 15, 16, 0, 0, 0, 0, + 0, 0, 0, 0], + [0, 9, 10, 11, 0, 13, 14, 15, 0, 0, 0, 0, 0, + 0, 0, 0], + + [7, 8, 0, 0, 11, 12, 0, 0, 15, 16, 0, 0, 0, + 0, 0, 0], + [6, 7, 8, 0, 10, 11, 12, 0, 14, 15, 16, 0, 0, + 0, 0, 0], + [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 0, 0, 0, 0], + [0, 5, 6, 7, 0, 9, 10, 11, 0, 13, 14, 15, 0, + 0, 0, 0], + + [3, 4, 0, 0, 7, 8, 0, 0, 11, 12, 0, 0, 15, + 16, 0, 0], + [2, 3, 4, 0, 6, 7, 8, 0, 10, 11, 12, 0, 14, + 15, 16, 0], + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16], + [0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 0, 13, + 14, 15], + + [0, 0, 0, 0, 3, 4, 0, 0, 7, 8, 0, 0, 11, 12, + 0, 0], + [0, 0, 0, 0, 2, 3, 4, 0, 6, 7, 8, 0, 10, 11, + 12, 0], + [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12], + [0, 0, 0, 0, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, + 10, 11]]), + + np.array([[11, 12, 0, 0, 0, 15, 16, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [10, 11, 12, 0, 0, 14, 15, 16, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [9, 10, 11, 12, 0, 13, 14, 15, 16, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 9, 10, 11, 12, 0, 13, 14, 15, 16, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 9, 10, 11, 0, 0, 13, 14, 15, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + + [7, 8, 0, 0, 0, 11, 12, 0, 0, 0, 15, 16, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [6, 7, 8, 0, 0, 10, 11, 12, 0, 0, 14, 15, 16, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [5, 6, 7, 8, 0, 9, 10, 11, 12, 0, 13, 14, 15, + 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 5, 6, 7, 8, 0, 9, 10, 11, 12, 0, 13, 14, + 15, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 5, 6, 7, 0, 0, 9, 10, 11, 0, 0, 13, + 14, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + + [3, 4, 0, 0, 0, 7, 8, 0, 0, 0, 11, 12, 0, 0, + 0, 15, 16, 0, 0, 0, 0, 0, 0, 0, 0], + [2, 3, 4, 0, 0, 6, 7, 8, 0, 0, 10, 11, 12, 0, + 0, 14, 15, 16, 0, 0, 0, 0, 0, 0, 0], + [1, 2, 3, 4, 0, 5, 6, 7, 8, 0, 9, 10, 11, 12, + 0, 13, 14, 15, 16, 0, 0, 0, 0, 0, 0], + [0, 1, 2, 3, 4, 0, 5, 6, 7, 8, 0, 9, 10, 11, + 12, 0, 13, 14, 15, 16, 0, 0, 0, 0, 0], + [0, 0, 1, 2, 3, 0, 0, 5, 6, 7, 0, 0, 9, 10, + 11, 0, 0, 13, 14, 15, 0, 0, 0, 0, 0], + + [0, 0, 0, 0, 0, 3, 4, 0, 0, 0, 7, 8, 0, 0, 0, + 11, 12, 0, 0, 0, 15, 16, 0, 0, 0], + [0, 0, 0, 0, 0, 2, 3, 4, 0, 0, 6, 7, 8, 0, 0, + 10, 11, 12, 0, 0, 14, 15, 16, 0, 0], + [0, 0, 0, 0, 0, 1, 2, 3, 4, 0, 5, 6, 7, 8, 0, + 9, 10, 11, 12, 0, 13, 14, 15, 16, 0], + [0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 0, 5, 6, 7, 8, + 0, 9, 10, 11, 12, 0, 13, 14, 15, 16], + [0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 0, 0, 5, 6, 7, + 0, 0, 9, 10, 11, 0, 0, 13, 14, 15], + + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 0, + 7, 8, 0, 0, 0, 11, 12, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 4, 0, 0, + 6, 7, 8, 0, 0, 10, 11, 12, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 0, + 5, 6, 7, 8, 0, 9, 10, 11, 12, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, + 0, 5, 6, 7, 8, 0, 9, 10, 11, 12], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, + 0, 0, 5, 6, 7, 0, 0, 9, 10, 11]]) + ] + + self._test_compute_weights( + kernel_weights=np.array([[1, 
2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]), + border_types=BorderType.PADDED, + input_shapes=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], + expected_weights=expected_weights + ) + + def test_connectivity_matrix_2d_even_kernel_circular(self) -> None: + """Tests whether computing weights works for 2D inputs with an even + sized kernel and CIRCULAR border type.""" + expected_weights = [ + np.array([[11]]), + + np.array([[11, 10, 7, 6], + [10, 11, 6, 7], + [7, 6, 11, 10], + [6, 7, 10, 11]]), + + np.array([[11, 12, 10, 15, 16, 14, 7, 8, 6], + [10, 11, 12, 14, 15, 16, 6, 7, 8], + [12, 10, 11, 16, 14, 15, 8, 6, 7], + + [7, 8, 6, 11, 12, 10, 15, 16, 14], + [6, 7, 8, 10, 11, 12, 14, 15, 16], + [8, 6, 7, 12, 10, 11, 16, 14, 15], + + [15, 16, 14, 7, 8, 6, 11, 12, 10], + [14, 15, 16, 6, 7, 8, 10, 11, 12], + [16, 14, 15, 8, 6, 7, 12, 10, 11]]), + + np.array([[11, 12, 9, 10, 15, 16, 13, 14, 3, 4, 1, 2, + 7, 8, 5, 6], + [10, 11, 12, 9, 14, 15, 16, 13, 2, 3, 4, 1, + 6, 7, 8, 5], + [9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 3, 4, + 5, 6, 7, 8], + [12, 9, 10, 11, 16, 13, 14, 15, 4, 1, 2, 3, + 8, 5, 6, 7], + + [7, 8, 5, 6, 11, 12, 9, 10, 15, 16, 13, 14, + 3, 4, 1, 2], + [6, 7, 8, 5, 10, 11, 12, 9, 14, 15, 16, 13, + 2, 3, 4, 1], + [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 1, 2, 3, 4], + [8, 5, 6, 7, 12, 9, 10, 11, 16, 13, 14, 15, + 4, 1, 2, 3], + + [3, 4, 1, 2, 7, 8, 5, 6, 11, 12, 9, 10, 15, + 16, 13, 14], + [2, 3, 4, 1, 6, 7, 8, 5, 10, 11, 12, 9, 14, + 15, 16, 13], + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16], + [4, 1, 2, 3, 8, 5, 6, 7, 12, 9, 10, 11, 16, + 13, 14, 15], + + [15, 16, 13, 14, 3, 4, 1, 2, 7, 8, 5, 6, 11, + 12, 9, 10], + [14, 15, 16, 13, 2, 3, 4, 1, 6, 7, 8, 5, 10, + 11, 12, 9], + [13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12], + [16, 13, 14, 15, 4, 1, 2, 3, 8, 5, 6, 7, 12, + 9, 10, 11]]), + + np.array([[11, 12, 0, 9, 10, 15, 16, 0, 13, 14, 0, 0, + 0, 0, 0, 3, 4, 0, 1, 2, 7, 8, 0, 5, 6], + [10, 11, 12, 0, 9, 14, 15, 16, 0, 13, 0, 0, + 0, 0, 0, 2, 3, 4, 0, 1, 6, 7, 8, 0, 5], + [9, 10, 11, 12, 0, 13, 14, 15, 16, 0, 0, 0, + 0, 0, 0, 1, 2, 3, 4, 0, 5, 6, 7, 8, 0], + [0, 9, 10, 11, 12, 0, 13, 14, 15, 16, 0, 0, + 0, 0, 0, 0, 1, 2, 3, 4, 0, 5, 6, 7, 8], + [12, 0, 9, 10, 11, 16, 0, 13, 14, 15, 0, 0, + 0, 0, 0, 4, 0, 1, 2, 3, 8, 0, 5, 6, 7], + + [7, 8, 0, 5, 6, 11, 12, 0, 9, 10, 15, 16, 0, + 13, 14, 0, 0, 0, 0, 0, 3, 4, 0, 1, 2], + [6, 7, 8, 0, 5, 10, 11, 12, 0, 9, 14, 15, 16, + 0, 13, 0, 0, 0, 0, 0, 2, 3, 4, 0, 1], + [5, 6, 7, 8, 0, 9, 10, 11, 12, 0, 13, 14, 15, + 16, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 0], + [0, 5, 6, 7, 8, 0, 9, 10, 11, 12, 0, 13, 14, + 15, 16, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4], + [8, 0, 5, 6, 7, 12, 0, 9, 10, 11, 16, 0, 13, + 14, 15, 0, 0, 0, 0, 0, 4, 0, 1, 2, 3], + + [3, 4, 0, 1, 2, 7, 8, 0, 5, 6, 11, 12, 0, 9, + 10, 15, 16, 0, 13, 14, 0, 0, 0, 0, 0], + [2, 3, 4, 0, 1, 6, 7, 8, 0, 5, 10, 11, 12, 0, + 9, 14, 15, 16, 0, 13, 0, 0, 0, 0, 0], + [1, 2, 3, 4, 0, 5, 6, 7, 8, 0, 9, 10, 11, 12, + 0, 13, 14, 15, 16, 0, 0, 0, 0, 0, 0], + [0, 1, 2, 3, 4, 0, 5, 6, 7, 8, 0, 9, 10, 11, + 12, 0, 13, 14, 15, 16, 0, 0, 0, 0, 0], + [4, 0, 1, 2, 3, 8, 0, 5, 6, 7, 12, 0, 9, 10, + 11, 16, 0, 13, 14, 15, 0, 0, 0, 0, 0], + + [0, 0, 0, 0, 0, 3, 4, 0, 1, 2, 7, 8, 0, 5, 6, + 11, 12, 0, 9, 10, 15, 16, 0, 13, 14], + [0, 0, 0, 0, 0, 2, 3, 4, 0, 1, 6, 7, 8, 0, 5, + 10, 11, 12, 0, 9, 14, 15, 16, 0, 13], + [0, 0, 0, 0, 0, 1, 2, 3, 4, 0, 5, 6, 7, 8, 0, + 9, 10, 11, 12, 0, 13, 14, 15, 16, 0], + [0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 0, 5, 6, 7, 8, + 0, 9, 10, 11, 12, 0, 13, 14, 15, 16], + 
[0, 0, 0, 0, 0, 4, 0, 1, 2, 3, 8, 0, 5, 6, 7, + 12, 0, 9, 10, 11, 16, 0, 13, 14, 15], + + [15, 16, 0, 13, 14, 0, 0, 0, 0, 0, 3, 4, 0, + 1, 2, 7, 8, 0, 5, 6, 11, 12, 0, 9, 10], + [14, 15, 16, 0, 13, 0, 0, 0, 0, 0, 2, 3, 4, + 0, 1, 6, 7, 8, 0, 5, 10, 11, 12, 0, 9], + [13, 14, 15, 16, 0, 0, 0, 0, 0, 0, 1, 2, 3, + 4, 0, 5, 6, 7, 8, 0, 9, 10, 11, 12, 0], + [0, 13, 14, 15, 16, 0, 0, 0, 0, 0, 0, 1, 2, + 3, 4, 0, 5, 6, 7, 8, 0, 9, 10, 11, 12], + [16, 0, 13, 14, 15, 0, 0, 0, 0, 0, 4, 0, 1, + 2, 3, 8, 0, 5, 6, 7, 12, 0, 9, 10, 11]]) + ] + + self._test_compute_weights( + kernel_weights=np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]), + border_types=BorderType.CIRCULAR, + input_shapes=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], + expected_weights=expected_weights + ) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/lava/lib/dnf/operations/test_shape_handlers.py b/tests/lava/lib/dnf/operations/test_shape_handlers.py new file mode 100644 index 0000000..ae0bbd0 --- /dev/null +++ b/tests/lava/lib/dnf/operations/test_shape_handlers.py @@ -0,0 +1,315 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import unittest +import numpy as np +import typing as ty + +from lava.lib.dnf.operations.shape_handlers import ( + AbstractShapeHandler, + KeepShapeHandler, + ReduceDimsHandler, + ReshapeHandler, + ExpandDimsHandler, + ReorderHandler) +from lava.lib.dnf.operations.exceptions import MisconfiguredOpError + + +class MockShapeHandler(AbstractShapeHandler): + """Mock shape handler for testing""" + def __init__(self) -> None: + super().__init__() + self.args_validated = False + self.input_shape_validated = False + + def _compute_output_shape(self) -> None: + self._output_shape = self._input_shape + + def _validate_args(self) -> None: + self.args_validated = True + + def _validate_input_shape(self, input_shape: ty.Tuple[int, ...]) -> None: + self.input_shape_validated = True + + +class TestAbstractShapeHandler(unittest.TestCase): + def test_init(self) -> None: + """Tests whether a MockShapeHandler can be instantiated.""" + sh = MockShapeHandler() + self.assertIsInstance(sh, MockShapeHandler) + + def test_assert_configured_raises_error_when_not_configured(self) -> None: + """Tests whether assert_configured() raises an assertion error when + the ShapeHandler is not yet configured.""" + sh = MockShapeHandler() + self.assertRaises(AssertionError, sh.assert_configured) + + def test_configure_works(self) -> None: + """Tests whether configure() validates and sets the input shape, + validates any args, and computes the output shape.""" + sh = MockShapeHandler() + input_shape = (2, 4) + sh.configure(input_shape) + + self.assertTrue(sh.input_shape_validated) + self.assertEqual(sh._input_shape, input_shape) + self.assertTrue(sh.args_validated) + self.assertEqual(sh._output_shape, input_shape) + + def test_output_shape_getter(self) -> None: + """Tests the getter for output shape""" + sh = MockShapeHandler() + output_shape = (2, 4) + sh._output_shape = output_shape + self.assertEqual(sh.output_shape, output_shape) + + def test_input_shape_getter(self) -> None: + """Tests the getter for input shape""" + sh = MockShapeHandler() + input_shape = (2, 4) + sh._input_shape = input_shape + self.assertEqual(sh.input_shape, input_shape) + + +class TestKeepShapeHandler(unittest.TestCase): + def test_init(self) -> None: + """Tests whether a KeepShapeHandler can be instantiated.""" + sh = KeepShapeHandler() + 
self.assertIsInstance(sh, KeepShapeHandler)
+
+    def test_compute_output_shape(self) -> None:
+        """Tests whether the output shape is set correctly."""
+        sh = KeepShapeHandler()
+        input_shape = (2, 4)
+        sh.configure(input_shape=input_shape)
+        self.assertEqual(sh.output_shape, input_shape)
+
+
+class TestExpandDimsHandler(unittest.TestCase):
+    def test_init(self) -> None:
+        """Tests whether an ExpandDimsHandler can be instantiated."""
+        sh = ExpandDimsHandler(new_dims_shape=6)
+        self.assertIsInstance(sh, ExpandDimsHandler)
+
+    def test_compute_output_shape_expand_one_dim_with_int_argument(self)\
+            -> None:
+        """Tests whether the output shape is set correctly when a single
+        dimension is added, using an integer to specify the shape."""
+        sh = ExpandDimsHandler(new_dims_shape=6)
+        sh.configure(input_shape=(2, 4))
+        self.assertEqual(sh.output_shape, (2, 4, 6))
+
+    def test_compute_output_shape_expand_multiple_dims(self) -> None:
+        """Tests whether the output shape is set correctly when multiple
+        dimensions are added."""
+        sh = ExpandDimsHandler(new_dims_shape=(6, 8))
+        sh.configure(input_shape=(2,))
+        self.assertEqual(sh.output_shape, (2, 6, 8))
+
+    def test_compute_output_shape_expand_from_0d(self) -> None:
+        """Tests whether the output shape is set correctly when expanding
+        from 0D to 1D."""
+        sh = ExpandDimsHandler(new_dims_shape=(10,))
+        sh.configure(input_shape=(1,))
+        self.assertEqual(sh.output_shape, (10,))
+
+    def test_negative_shape_values_raise_error(self) -> None:
+        """Tests whether an error is raised when new_dims_shape contains a
+        negative value."""
+        sh = ExpandDimsHandler(new_dims_shape=(-6,))
+        with self.assertRaises(ValueError):
+            sh.configure(input_shape=(2, 4))
+
+    def test_zero_shape_values_raise_error(self) -> None:
+        """Tests whether an error is raised when new_dims_shape contains a
+        zero."""
+        sh = ExpandDimsHandler(new_dims_shape=(0,))
+        with self.assertRaises(ValueError):
+            sh.configure(input_shape=(2, 4))
+
+    def test_empty_new_dims_shape_raises_error(self) -> None:
+        """Tests whether an error is raised when new_dims_shape is empty."""
+        sh = ExpandDimsHandler(new_dims_shape=())
+        with self.assertRaises(ValueError):
+            sh.configure(input_shape=(2, 4))
+
+    def test_output_shape_larger_than_dim_3_raises_error(self) -> None:
+        """Tests whether an error is raised when the computed output shape
+        has a dimensionality larger than 3."""
+        sh = ExpandDimsHandler(new_dims_shape=(6, 8))
+        with self.assertRaises(NotImplementedError):
+            sh.configure(input_shape=(2, 4))
+
+
+class TestReduceDimsHandler(unittest.TestCase):
+    def test_init(self) -> None:
+        """Tests whether a ReduceDimsHandler can be instantiated."""
+        sh = ReduceDimsHandler(reduce_dims=1)
+        self.assertIsInstance(sh, ReduceDimsHandler)
+
+    def test_compute_output_shape_remove_one_dim(self) -> None:
+        """Tests whether the output shape is set correctly when a single
+        dimension is removed."""
+        sh = ReduceDimsHandler(reduce_dims=1)
+        sh.configure(input_shape=(2, 4))
+        self.assertEqual(sh.output_shape, (2,))
+
+    def test_compute_output_shape_negative_index(self) -> None:
+        """Tests whether the output shape is set correctly when a single
+        dimension is removed, using a negative index."""
+        sh = ReduceDimsHandler(reduce_dims=-1)
+        sh.configure(input_shape=(2, 4))
+        self.assertEqual(sh.output_shape, (2,))
+
+    def test_compute_output_shape_remove_multiple_dims(self) -> None:
+        """Tests whether the output shape is set correctly when multiple
+        dimensions are removed."""
+        sh = ReduceDimsHandler(reduce_dims=(0, -1))
+        sh.configure(input_shape=(2, 3, 4, 5))
+        self.assertEqual(sh.output_shape, (3, 4))
+
+    def test_compute_output_shape_remove_all(self) -> None:
+        """Tests whether the output shape is set correctly when all
+        dimensions are removed."""
+        sh = ReduceDimsHandler(reduce_dims=(0, 1, 2))
+        sh.configure(input_shape=(2, 3, 4))
+        self.assertEqual(sh.output_shape, (1,))
+
+    def test_order_of_reduce_dims_does_not_impact_result(self) -> None:
+        """Tests whether the order of reduce_dims does not matter."""
+        input_shape = (3, 4, 5)
+        sh1 = ReduceDimsHandler(reduce_dims=(1, 2))
+        sh1.configure(input_shape=input_shape)
+
+        sh2 = ReduceDimsHandler(reduce_dims=(2, 1))
+        sh2.configure(input_shape=input_shape)
+
+        self.assertTrue(np.array_equal(sh1.output_shape,
+                                       sh2.output_shape))
+
+    def test_reduce_dims_with_out_of_bounds_index_raises_error(self) -> None:
+        """Tests whether an error is raised when reduce_dims contains an
+        index that is out of bounds for the input shape."""
+        with self.assertRaises(IndexError):
+            sh = ReduceDimsHandler(reduce_dims=2)
+            sh.configure(input_shape=(2, 4))
+
+    def test_reduce_dims_with_negative_out_of_bounds_index_raises_error(self)\
+            -> None:
+        """Tests whether an error is raised when reduce_dims contains a
+        negative index that is out of bounds for the input shape."""
+        with self.assertRaises(IndexError):
+            sh = ReduceDimsHandler(reduce_dims=-3)
+            sh.configure(input_shape=(2, 4))
+
+    def test_empty_reduce_dims_raises_error(self) -> None:
+        """Tests whether an error is raised when reduce_dims is an empty
+        tuple."""
+        with self.assertRaises(ValueError):
+            sh = ReduceDimsHandler(reduce_dims=())
+            sh.configure(input_shape=(2, 4))
+
+    def test_reduce_dims_with_too_many_entries_raises_error(self) -> None:
+        """Tests whether an error is raised when reduce_dims has more
+        elements than the dimensionality of the input."""
+        with self.assertRaises(ValueError):
+            sh = ReduceDimsHandler(reduce_dims=(0, 0))
+            sh.configure(input_shape=(4,))
+
+    def test_zero_dimensional_input_shape_raises_error(self) -> None:
+        """Tests whether an error is raised when the input shape is already
+        zero-dimensional."""
+        with self.assertRaises(MisconfiguredOpError):
+            sh = ReduceDimsHandler(reduce_dims=0)
+            sh.configure(input_shape=(1,))
+
+
+class TestReshapeHandler(unittest.TestCase):
+    def test_init(self) -> None:
+        """Tests whether a ReshapeHandler can be instantiated."""
+        sh = ReshapeHandler(output_shape=(8,))
+        self.assertIsInstance(sh, ReshapeHandler)
+
+    def test_compute_output_shape(self) -> None:
+        """Tests whether the output shape is set correctly."""
+        output_shape = (8,)
+        sh = ReshapeHandler(output_shape=output_shape)
+        sh.configure(input_shape=(2, 4))
+        self.assertEqual(sh.output_shape, output_shape)
+
+    def test_compute_output_shape_with_incorrect_num_neurons_raises_error(self)\
+            -> None:
+        """Tests whether an error is raised when the number of neurons in
+        the input and output shape does not match."""
+        with self.assertRaises(MisconfiguredOpError):
+            output_shape = (9,)
+            sh = ReshapeHandler(output_shape=output_shape)
+            sh.configure(input_shape=(2, 4))
+
+
+class TestReorderHandler(unittest.TestCase):
+    def test_init(self) -> None:
+        """Tests whether a ReorderHandler can be instantiated."""
+        sh = ReorderHandler(order=(1, 0))
+        self.assertIsInstance(sh, ReorderHandler)
+
+    def test_order_with_more_elements_than_input_raises_error(self) -> None:
+        """Tests whether an error is raised when the specified new order
+        has more elements than the number of input dimensions."""
+        sh = ReorderHandler(order=(1, 0, 2))
+        with self.assertRaises(MisconfiguredOpError):
+            sh.configure(input_shape=(2, 2))
+
+    def test_order_with_less_elements_than_input_raises_error(self) -> None:
+        """Tests whether an error is raised when the specified new order
+        has fewer elements than the number of input dimensions."""
+        sh = ReorderHandler(order=(1,))
+        with self.assertRaises(MisconfiguredOpError):
+            sh.configure(input_shape=(2, 2))
+
+    def test_negative_order_index_within_bounds_works(self) -> None:
+        """Tests whether indices in order can be specified with negative
+        numbers."""
+        sh = ReorderHandler(order=(0, -2))
+        sh.configure(input_shape=(2, 2))
+
+    def test_order_index_out_of_bounds_raises_error(self) -> None:
+        """Tests whether an error is raised when an index in order is
+        larger than the dimensionality of the input."""
+        sh = ReorderHandler(order=(0, 2))
+        with self.assertRaises(IndexError):
+            sh.configure(input_shape=(2, 2))
+
+    def test_negative_order_index_out_of_bounds_raises_error(self) -> None:
+        """Tests whether an error is raised when an index in order is
+        negative and out of bounds."""
+        sh = ReorderHandler(order=(0, -3))
+        with self.assertRaises(IndexError):
+            sh.configure(input_shape=(2, 2))
+
+    def test_input_dimensionality_0_raises_error(self) -> None:
+        """Tests whether an error is raised when the input dimensionality is
+        0, in which case reordering does not make sense."""
+        sh = ReorderHandler(order=(0,))
+        with self.assertRaises(MisconfiguredOpError):
+            sh.configure(input_shape=(1,))
+
+    def test_input_dimensionality_1_raises_error(self) -> None:
+        """Tests whether an error is raised when the input dimensionality is
+        1, in which case reordering does not make sense."""
+        sh = ReorderHandler(order=(0,))
+        with self.assertRaises(MisconfiguredOpError):
+            sh.configure(input_shape=(5,))
+
+    def test_reordering_2d(self) -> None:
+        """Tests whether reordering a two-dimensional input works."""
+        sh = ReorderHandler(order=(1, 0))
+        sh.configure(input_shape=(0, 1))
+        self.assertEqual(sh.output_shape, (1, 0))
+
+    def test_reordering_3d(self) -> None:
+        """Tests whether reordering a three-dimensional input works."""
+        sh = ReorderHandler(order=(1, 2, 0))
+        sh.configure(input_shape=(0, 1, 2))
+        self.assertEqual(sh.output_shape, (1, 2, 0))
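The handlers above are designed to compose: the output shape of one can serve as the input shape of the next, which is how `connect()` chains several operations together. A minimal sketch of that chaining, assuming only the constructor signatures and the `configure()` behavior exercised in the tests above:

```python
from lava.lib.dnf.operations.shape_handlers import (ExpandDimsHandler,
                                                    ReorderHandler)

# expand a 1D input (10,) by one new dimension of size 20 -> (10, 20)
expand = ExpandDimsHandler(new_dims_shape=20)
expand.configure(input_shape=(10,))
assert expand.output_shape == (10, 20)

# feed that result into a reorder handler to arrive at (20, 10)
reorder = ReorderHandler(order=(1, 0))
reorder.configure(input_shape=expand.output_shape)
assert reorder.output_shape == (20, 10)
```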
diff --git a/tests/lava/lib/dnf/utils/__init__.py b/tests/lava/lib/dnf/utils/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/lava/lib/dnf/utils/test_convenience.py b/tests/lava/lib/dnf/utils/test_convenience.py
new file mode 100644
index 0000000..e92f8ca
--- /dev/null
+++ b/tests/lava/lib/dnf/utils/test_convenience.py
@@ -0,0 +1,74 @@
+# Copyright (C) 2021 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause
+# See: https://spdx.org/licenses/
+
+import unittest
+import numpy as np
+
+from lava.lib.dnf.utils.convenience import num_neurons, num_dims, to_ndarray
+
+
+class TestNumNeurons(unittest.TestCase):
+    def test_num_neurons_1d(self) -> None:
+        """Tests whether the number of neurons is computed correctly for
+        one-dimensional shapes."""
+        num = num_neurons(shape=(15,))
+        self.assertEqual(num, 15)
+
+    def test_num_neurons_2d(self) -> None:
+        """Tests whether the number of neurons is computed correctly for
+        two-dimensional shapes."""
+        num = num_neurons(shape=(5, 3))
+        self.assertEqual(num, 15)
+
+
+class TestNumDims(unittest.TestCase):
+    def test_num_dims_0d(self) -> None:
+        """Tests whether dimensionality is computed correctly for
+        zero-dimensional shapes."""
+        dims = num_dims(shape=(1,))
+        self.assertEqual(dims, 0)
+
+    def test_num_dims_1d(self) -> None:
+        """Tests whether dimensionality is computed correctly for
+        one-dimensional shapes."""
+        dims = num_dims(shape=(10,))
+        self.assertEqual(dims, 1)
+
+    def test_num_dims_2d(self) -> None:
+        """Tests whether dimensionality is computed correctly for
+        two-dimensional shapes."""
+        dims = num_dims(shape=(10, 10))
+        self.assertEqual(dims, 2)
+
+
+class TestToNdarray(unittest.TestCase):
+    def test_converting_float(self) -> None:
+        """Tests whether floats can be converted to an ndarray."""
+        a_float = 5.0
+        ndarray = to_ndarray(a_float)
+        self.assertIsInstance(ndarray, np.ndarray)
+        self.assertTrue(np.array_equal(np.array([a_float]), ndarray))
+
+    def test_converting_list(self) -> None:
+        """Tests whether a list can be converted to an ndarray."""
+        a_list = [1, 2, 3]
+        ndarray = to_ndarray(a_list)
+        self.assertIsInstance(ndarray, np.ndarray)
+        self.assertTrue(np.array_equal(np.array(a_list), ndarray))
+
+    def test_converting_tuple(self) -> None:
+        """Tests whether a tuple can be converted to an ndarray."""
+        a_tuple = (1, 2, 3)
+        ndarray = to_ndarray(a_tuple)
+        self.assertIsInstance(ndarray, np.ndarray)
+        self.assertTrue(np.array_equal(np.array(a_tuple), ndarray))
+
+    def test_ndarray_not_converted(self) -> None:
+        """Tests whether an ndarray is simply returned."""
+        ndarray = np.array([1, 2, 3])
+        self.assertTrue(np.array_equal(ndarray, to_ndarray(ndarray)))
+
+
+if __name__ == '__main__':
+    unittest.main()
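One convention pinned down by these tests is easy to miss: a shape of `(1,)` counts as zero-dimensional. A quick interactive check of the three helpers, assuming just the signatures exercised above:

```python
import numpy as np

from lava.lib.dnf.utils.convenience import num_neurons, num_dims, to_ndarray

assert num_neurons(shape=(5, 3)) == 15  # product over all dimensions
assert num_dims(shape=(1,)) == 0        # a single neuron counts as 0D
assert num_dims(shape=(10, 10)) == 2
assert isinstance(to_ndarray(5.0), np.ndarray)  # scalars become 1-element arrays
```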
diff --git a/tests/lava/lib/dnf/utils/test_math.py b/tests/lava/lib/dnf/utils/test_math.py
new file mode 100644
index 0000000..ea9e67e
--- /dev/null
+++ b/tests/lava/lib/dnf/utils/test_math.py
@@ -0,0 +1,96 @@
+# Copyright (C) 2021 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause
+# See: https://spdx.org/licenses/
+
+import unittest
+import numpy as np
+
+from lava.lib.dnf.utils.math import is_odd, gauss
+
+
+class TestGauss(unittest.TestCase):
+    def test_shape(self) -> None:
+        """Tests whether the returned Gaussian has the specified shape."""
+        shape = (5, 3)
+        gaussian = gauss(shape)
+        self.assertEqual(gaussian.shape, shape)
+
+    def test_default_values(self) -> None:
+        """Tests whether the default values for domain, amplitude, mean, and
+        stddev are used."""
+        shape = (3, 3)
+        gaussian = gauss(shape)
+
+        # amplitude should be 1, domain should be such that
+        # the maximum is at position (0, 0), and mean should be 0
+        self.assertEqual(gaussian[0, 0], 1)
+        # stddev should be symmetrical
+        self.assertEqual(gaussian[0, 1], gaussian[1, 0])
+        self.assertEqual(gaussian[0, 2], gaussian[2, 0])
+        self.assertEqual(gaussian[1, 2], gaussian[2, 1])
+
+    def test_setting_amplitude(self) -> None:
+        """Tests whether the amplitude is set to the correct value."""
+        shape = (3, 3)
+        amplitude = 42
+        gaussian = gauss(shape, amplitude=amplitude)
+        self.assertEqual(gaussian[0, 0], amplitude)
+
+    def test_setting_domain(self) -> None:
+        """Tests whether the domain is set correctly."""
+        shape = (5,)
+        domain = np.array([[-2.5, 2.5]])
+        gaussian = gauss(shape, domain=domain)
+        # with the specified domain, the maximum (1) should be in the center
+        self.assertEqual(gaussian[2], 1)
+        # and the gaussian should be symmetrical
+        self.assertTrue(np.array_equal(gaussian, np.flip(gaussian)))
+
+    def test_setting_mean(self) -> None:
+        """Tests whether the mean is set correctly."""
+        shape = (5,)
+        mean = 1
+        gaussian = gauss(shape, mean=mean)
+        self.assertEqual(gaussian[mean], 1)
+
+    def test_setting_stddev(self) -> None:
+        """Tests whether the stddev can be set."""
+        shape = (3,)
+        gaussian_narrow = gauss(shape, stddev=1)
+        gaussian_broad = gauss(shape, stddev=2)
+        self.assertTrue(gaussian_narrow[1] < gaussian_broad[1])
+
+    def test_domain_shape_mismatch_raises_error(self) -> None:
+        """Tests whether an error is raised when the shape of the domain
+        argument does not match shape."""
+        shape = (5, 3)
+        domain = np.zeros((1, 2))  # should be of shape (len(shape), 2)
+        with self.assertRaises(ValueError):
+            gauss(shape, domain=domain)
+
+    def test_mean_shape_mismatch_raises_error(self) -> None:
+        """Tests whether an error is raised when the shape of the mean
+        argument does not match shape."""
+        shape = (5, 3)
+        mean = np.zeros((3,))  # should be of shape (2,)
+        with self.assertRaises(ValueError):
+            gauss(shape, mean=mean)
+
+    def test_stddev_shape_mismatch_raises_error(self) -> None:
+        """Tests whether an error is raised when the shape of the stddev
+        argument does not match shape."""
+        shape = (5, 3)
+        stddev = np.ones((3,))  # should be of shape (2,)
+        with self.assertRaises(ValueError):
+            gauss(shape, stddev=stddev)
+
+
+class TestIsOdd(unittest.TestCase):
+    def test_is_odd(self) -> None:
+        """Tests the is_odd() helper function."""
+        self.assertFalse(is_odd(0))
+        self.assertTrue(is_odd(1))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/lava/lib/dnf/utils/test_plotting.py b/tests/lava/lib/dnf/utils/test_plotting.py
new file mode 100644
index 0000000..1c98964
--- /dev/null
+++ b/tests/lava/lib/dnf/utils/test_plotting.py
@@ -0,0 +1,81 @@
+# Copyright (C) 2021 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause
+# See: https://spdx.org/licenses/
+
+import unittest
+from unittest.mock import patch, MagicMock
+import numpy as np
+
+from lava.lib.dnf.utils.plotting import raster_plot, \
+    compute_spike_rates, _compute_colored_spike_coordinates
+
+
+class TestRasterPlot(unittest.TestCase):
+    def test_compute_spike_rates(self) -> None:
+        """Tests whether the instantaneous spike rates are computed as
+        expected by the function used within raster_plot()."""
+        spike_data = [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 0.0],
+                      [1.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 0.0],
+                      [0.0, 1.0, 0.0], [0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]
+
+        expected_spike_rates = [[0.25, 0.5, 0.0], [0.5, 0.5, 0.0],
+                                [0.75, 0.25, 0.0], [0.5, 0.25, 0.0],
+                                [0.5, 0.5, 0.0], [0.25, 0.25, 0.0],
+                                [0.25, 0.25, 0.0], [0.5, 0.25, 0.0],
+                                [0.5, 0.0, 0.0]]
+
+        spike_rates = compute_spike_rates(spike_data=np.array(spike_data),
+                                          window_size=4)
+
+        self.assertEqual(expected_spike_rates, spike_rates.tolist())
+
+    def test_compute_colored_spike_coordinates(self) -> None:
+        """Tests whether color information and coordinates are computed as
+        expected by the function used within raster_plot()."""
+        spike_data = [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 0.0],
+                      [1.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 0.0],
+                      [0.0, 1.0, 0.0], [0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]
+
+        spike_rates = [[0.25, 0.5, 0.0], [0.5, 0.5, 0.0],
+                       [0.75, 0.25, 0.0], [0.5, 0.25, 0.0],
+                       [0.5, 0.5, 0.0], [0.25, 0.25, 0.0],
+                       [0.25, 0.25, 0.0], [0.5, 0.25, 0.0],
+                       [0.5, 0.0, 0.0]]
+
+        expected_x = [1, 2, 4, 4, 5, 7, 9]
+
+        expected_y = [1, 0, 0, 1, 0, 1, 0]
+
+        expected_colors = [0.5, 0.5, 0.5, 0.25, 0.5, 0.25, 0.5]
+
+        x, y, colors = \
+            _compute_colored_spike_coordinates(spike_data=np.array(spike_data),
+                                               spike_rates=np.array(
+                                                   spike_rates))
+
+        self.assertEqual(expected_x, x)
+        self.assertEqual(expected_y, y)
+        self.assertEqual(expected_colors, colors)
+
+    @patch("matplotlib.pyplot.show")
+    def test_raster_plot_with_default_args(self,
+                                           mock_show: MagicMock) -> None:
+        """Tests whether the raster_plot function can
be called with only
+        spike_data as argument."""
+        mock_show.return_value = None
+
+        raster_plot(spike_data=np.zeros((10, 20)))
+
+    @patch("matplotlib.pyplot.show")
+    def test_raster_plot_with_non_default_args(self,
+                                               mock_show: MagicMock) -> None:
+        """Tests whether the raster_plot function can be called by also
+        specifying the window_size argument."""
+        mock_show.return_value = None
+
+        raster_plot(spike_data=np.zeros((10, 20)),
+                    window_size=20)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/lava/lib/dnf/utils/test_validation.py b/tests/lava/lib/dnf/utils/test_validation.py
new file mode 100644
index 0000000..53aca5c
--- /dev/null
+++ b/tests/lava/lib/dnf/utils/test_validation.py
@@ -0,0 +1,38 @@
+# Copyright (C) 2021 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause
+# See: https://spdx.org/licenses/
+
+import unittest
+
+from lava.lib.dnf.utils.validation import validate_shape
+
+
+class TestValidateShape(unittest.TestCase):
+    def test_shape_int(self) -> None:
+        """Tests whether the shape argument is converted to a tuple."""
+        shape = validate_shape(shape=5)
+        self.assertEqual(shape, (5,))
+
+    def test_shape_tuple(self) -> None:
+        """Tests whether a tuple shape argument remains a tuple."""
+        shape = validate_shape(shape=(5, 3))
+        self.assertTrue(shape == (5, 3))
+
+    def test_shape_list(self) -> None:
+        """Tests whether a list shape argument is converted to a tuple."""
+        shape = validate_shape(shape=[5, 3])
+        self.assertTrue(shape == (5, 3))
+
+    def test_negative_values(self) -> None:
+        """Tests whether negative shape values raise a ValueError."""
+        with self.assertRaises(ValueError):
+            validate_shape(shape=(5, -3))
+
+    def test_invalid_type(self) -> None:
+        """Tests whether an invalid type raises a TypeError."""
+        with self.assertRaises(TypeError):
+            validate_shape(shape=5.3)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tutorials/lava/lib/dnf/dnf_101/tutorial_dnf_101.ipynb b/tutorials/lava/lib/dnf/dnf_101/tutorial_dnf_101.ipynb
new file mode 100644
index 0000000..5faf750
--- /dev/null
+++ b/tutorials/lava/lib/dnf/dnf_101/tutorial_dnf_101.ipynb
@@ -0,0 +1,510 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "*Copyright (C) 2021 Intel Corporation*
\n", + "*SPDX-License-Identifier: BSD-3-Clause*
\n", + "*See: https://spdx.org/licenses/*\n", + "\n", + "---\n", + "\n", + "# lava-dnf 101: Overview of features\n", + "\n", + "## Populations and connections\n", + "\n", + "Create populations of leaky integrate-and-fire (LIF) neurons.\n", + "The `shape` argument determines the number of neurons (and their layout; see\n", + "further below)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "from lava.proc.lif.process import LIF\n", + "\n", + "\n", + "# a one-dimensional LIF population\n", + "population = LIF(shape=(20,))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Create connections between populations using the `connect()` function.\n", + "The connectivity can be specified using a sequence of _Operations_. Here,\n", + "every neuron from `population1` is connected to the\n", + "corresponding neuron from `population2` with a synaptic weight of 20.\n", + "Operations are explained in more detail below." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "from lava.lib.dnf.connect.connect import connect\n", + "from lava.lib.dnf.operations.operations import Weights\n", + "\n", + "\n", + "population1 = LIF(shape=(20,))\n", + "population2 = LIF(shape=(20,))\n", + "connect(population1.s_out, population2.a_in, ops=[Weights(20)])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Dynamic neural fields (DNF)\n", + "\n", + "### Multi-peak DNF\n", + "\n", + "Create dynamic neural fields (DNFs) that support multiple peaks by using the\n", + "`MultiPeakKernel` with local excitation and mid-range inhibition. Use the\n", + "`Convolution` operation to apply the kernel." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "from lava.lib.dnf.kernels.kernels import MultiPeakKernel\n", + "from lava.lib.dnf.operations.operations import Convolution\n", + "\n", + "\n", + "dnf = LIF(shape=(20,))\n", + "\n", + "kernel = MultiPeakKernel(amp_exc=25,\n", + " width_exc=3,\n", + " amp_inh=-15,\n", + " width_inh=6)\n", + "connect(dnf.s_out, dnf.a_in, ops=[Convolution(kernel)])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Selective DNF\n", + "\n", + "Create DNFs that are selective and only create a single peak by using the\n", + "`SelectiveKernel` with local excitation and global inhibition." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "from lava.lib.dnf.kernels.kernels import SelectiveKernel\n", + "\n", + "\n", + "dnf = LIF(shape=(20,))\n", + "\n", + "kernel = SelectiveKernel(amp_exc=18,\n", + " width_exc=3,\n", + " global_inh=-15)\n", + "connect(dnf.s_out, dnf.a_in, ops=[Convolution(kernel)])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Input\n", + "\n", + "### Spike generators\n", + "\n", + "To simulate spike input to a DNF, use a `RateCodeSpikeGen` Process. It\n", + "generates spikes with a spike rate pattern that can be specified, for\n", + "instance by using the `GaussPattern` Process. Connect the `RateCodeSpikeGen` to\n", + " a DNF with the `connect()` function. You may change parameters of the\n", + " `GaussPattern` during runtime." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "from lava.magma.core.run_configs import Loihi1SimCfg\n", + "from lava.magma.core.run_conditions import RunSteps\n", + "from lava.lib.dnf.inputs.gauss_pattern.process import GaussPattern\n", + "from lava.lib.dnf.inputs.rate_code_spike_gen.process import RateCodeSpikeGen\n", + "\n", + "\n", + "shape = (15,)\n", + "\n", + "# GaussPattern produces a pattern of spike rates\n", + "gauss_pattern = GaussPattern(shape=shape, amplitude=100, mean=5, stddev=5)\n", + "\n", + "# The spike generator produces spikes based on the spike rates given\n", + "# by the Gaussian pattern\n", + "spike_generator = RateCodeSpikeGen(shape=shape)\n", + "gauss_pattern.a_out.connect(spike_generator.a_in)\n", + "\n", + "# Connect the spike generator to a population\n", + "population = LIF(shape=shape)\n", + "connect(spike_generator.s_out, population.a_in, ops=[Weights(20)])\n", + "\n", + "# Start running the network (explained below)\n", + "population.run(condition=RunSteps(num_steps=10),\n", + " run_cfg=Loihi1SimCfg(select_tag='floating_pt'))\n", + "\n", + "# You may change parameters of the Gaussian pattern during runtime\n", + "gauss_pattern.amplitude = 50\n", + "\n", + "# Continue the run\n", + "# ...\n", + "\n", + "# Stop the run to free resources\n", + "population.stop()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Higher dimensions\n", + "\n", + "Define DNFs and inputs over higher dimensionalities by specifying a `shape` with\n", + "multiple entries." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "shape = (15, 15)\n", + "dnf = LIF(shape=shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Inputs and kernels must match the dimensionality of the DNF; specify\n", + "parameters that can be multi-dimensional, for example `mean` and `stddev` in\n", + "`GaussPattern`, as vectors rather than scalars." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "# Make sure to specify 'mean' and 'stddev' as 2D vectors\n", + "gauss_pattern = GaussPattern(shape=shape,\n", + " amplitude=100,\n", + " mean=[5, 5],\n", + " stddev=[4, 4])\n", + "spike_generator = RateCodeSpikeGen(shape=shape)\n", + "gauss_pattern.a_out.connect(spike_generator.a_in)\n", + "\n", + "# Make sure to specify 'width_exc' and 'width_inh'\n", + "# as 2D vectors\n", + "kernel = MultiPeakKernel(amp_exc=58,\n", + " width_exc=[3.8, 3.8],\n", + " amp_inh=-50,\n", + " width_inh=[7.5, 7.5])\n", + "connect(dnf.s_out, dnf.a_in, ops=[Convolution(kernel)])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Operations and larger architectures\n", + "\n", + "### One-to-one connections\n", + "When connecting two DNFs that have the same shape (in terms of neurons and\n", + "dimensions), you can connect them without specifying any operations." 
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "dnf1 = LIF(shape=(10,))\n",
+    "dnf2 = LIF(shape=(10,))\n",
+    "\n",
+    "connect(dnf1.s_out, dnf2.a_in)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "pycharm": {
+     "name": "#%% md\n"
+    }
+   },
+   "source": [
+    "In that case the synaptic weight defaults to 1. If you want to set a\n",
+    "homogeneous weight for all neurons, use the operation `Weights`.\n",
+    "It connects each neuron in the first DNF to its (single) respective neuron\n",
+    "in the second DNF with the specified weight value."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "dnf1 = LIF(shape=(10,))\n",
+    "dnf2 = LIF(shape=(10,))\n",
+    "\n",
+    "connect(dnf1.s_out, dnf2.a_in, ops=[Weights(40)])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Reducing dimensions\n",
+    "When the dimensionality of the source DNF is larger than that of\n",
+    "the target DNF, use the `ReduceDims` operation, specifying the indices of the\n",
+    " dimensions that should be removed and how to remove them (here, by summing\n",
+    " over dimension 1)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "from lava.lib.dnf.operations.operations import ReduceDims\n",
+    "from lava.lib.dnf.operations.enums import ReduceMethod\n",
+    "\n",
+    "\n",
+    "dnf_2d = LIF(shape=(20, 10))\n",
+    "dnf_1d = LIF(shape=(20,))\n",
+    "\n",
+    "connect(dnf_2d.s_out,\n",
+    "        dnf_1d.a_in,\n",
+    "        ops=[ReduceDims(reduce_dims=1, reduce_method=ReduceMethod.SUM)])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Expanding dimensions\n",
+    "When the dimensionality of the source DNF is smaller than that of the target\n",
+    "DNF, use the `ExpandDims` operation, specifying the number of neurons of the\n",
+    "dimensions that will be added."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "from lava.lib.dnf.operations.operations import ExpandDims\n",
+    "\n",
+    "\n",
+    "dnf_1d = LIF(shape=(20,))\n",
+    "dnf_2d = LIF(shape=(20, 10))\n",
+    "\n",
+    "connect(dnf_1d.s_out, dnf_2d.a_in, ops=[ExpandDims(new_dims_shape=10)])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Reordering dimensions\n",
+    "To reorder dimensions, use the `Reorder` operation, specifying the indices of the dimensions in their new order."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "from lava.lib.dnf.operations.operations import Reorder\n",
+    "\n",
+    "\n",
+    "dnf_1 = LIF(shape=(10, 20))\n",
+    "dnf_2 = LIF(shape=(20, 10))\n",
+    "\n",
+    "# map dimensions (0, 1) of dnf_1 to dimensions (1, 0) of dnf_2\n",
+    "connect(dnf_1.s_out, dnf_2.a_in, ops=[Reorder(order=(1, 0))])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Combining operations\n",
+    "\n",
+    "All operations can be combined with each other to produce more complex\n",
+    "connectivity. For instance, reordering can be combined with the `ReduceDims` or\n",
+    "`ExpandDims` operation, which can again be combined with a `Weights` operation,\n",
+    "as shown below."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "dnf_1d = LIF(shape=(10,))\n", + "dnf_2d = LIF(shape=(20, 10))\n", + "\n", + "connect(dnf_1d.s_out, dnf_2d.a_in, ops=[ExpandDims(new_dims_shape=20),\n", + " Reorder(order=(1, 0)),\n", + " Weights(20)])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "## Running and plotting networks\n", + "\n", + "Call the `run()` method on any Process in the network.\n", + "To inspect Vars and Ports in Processes, create Monitors. Use them to probe Vars\n", + "and Ports before running. Create plots with the probed data after running." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "from lava.proc.monitor.process import Monitor\n", + "\n", + "\n", + "shape = (15,)\n", + "time_steps = 10\n", + "\n", + "# Set up a DNF\n", + "dnf = LIF(shape=shape)\n", + "kernel = MultiPeakKernel(amp_exc=17,\n", + " width_exc=3,\n", + " amp_inh=-15,\n", + " width_inh=6)\n", + "connect(dnf.s_out, dnf.a_in, ops=[Convolution(kernel)])\n", + "\n", + "# Set up a monitor and probe the spike output of the DNF\n", + "monitor = Monitor()\n", + "monitor.probe(dnf.s_out, time_steps)\n", + "\n", + "# Run the DNF\n", + "dnf.run(condition=RunSteps(num_steps=time_steps),\n", + " run_cfg=Loihi1SimCfg(select_tag='floating_pt'))\n", + "\n", + "# Get probed data from monitor\n", + "probed_data = monitor.get_data()\n", + "\n", + "# Stop the execution after getting the monitor's data\n", + "dnf.stop()\n", + "\n", + "# Now you can plot the data." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.8" + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} \ No newline at end of file diff --git a/tutorials/lava/lib/dnf/dnf_regimes/tutorial_dnf_regimes.ipynb b/tutorials/lava/lib/dnf/dnf_regimes/tutorial_dnf_regimes.ipynb new file mode 100644 index 0000000..9aeb271 --- /dev/null +++ b/tutorials/lava/lib/dnf/dnf_regimes/tutorial_dnf_regimes.ipynb @@ -0,0 +1,266 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "*Copyright (C) 2021 Intel Corporation*
\n", + "*SPDX-License-Identifier: BSD-3-Clause*
\n", + "*See: https://spdx.org/licenses/*\n", + "\n", + "---\n", + "\n", + "# DNF regimes\n", + "\n", + "The following examples demonstrate the fundamental regimes of\n", + "DNFs and their associated instabilities that are the basis for all\n", + "macroscopic behavior of architectures.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from lava.proc.lif.process import LIF\n", + "from lava.lib.dnf.connect.connect import connect\n", + "from lava.lib.dnf.operations.operations import Weights\n", + "from lava.lib.dnf.operations.operations import Convolution\n", + "from lava.lib.dnf.inputs.gauss_pattern.process import GaussPattern\n", + "from lava.lib.dnf.inputs.rate_code_spike_gen.process import RateCodeSpikeGen\n", + "\n", + "from lava.magma.core.run_configs import Loihi1SimCfg\n", + "from lava.magma.core.run_conditions import RunSteps\n", + "from lava.proc.monitor.process import Monitor\n", + "from lava.proc.monitor.models import PyMonitorModel\n", + "\n", + "from utils import plot_1d, animated_1d_plot\n", + "\n", + "\n", + "class Architecture:\n", + " \"\"\"This class structure is not required and is only used here to reduce\n", + " code duplication for different examples.\"\"\"\n", + " def __init__(self, kernel):\n", + " shape = (15,)\n", + " time_steps = 700\n", + "\n", + " # Set up spike generator 1\n", + " self.gauss_pattern_1 = GaussPattern(shape=shape,\n", + " amplitude=0,\n", + " mean=11.25,\n", + " stddev=2.25)\n", + " self.spike_generator_1 = RateCodeSpikeGen(shape=shape)\n", + " self.gauss_pattern_1.a_out.connect(self.spike_generator_1.a_in)\n", + "\n", + " # Set up spike generator 2\n", + " self.gauss_pattern_2 = GaussPattern(shape=shape,\n", + " amplitude=0,\n", + " mean=3.75,\n", + " stddev=2.25)\n", + " self.spike_generator_2 = RateCodeSpikeGen(shape=shape)\n", + " self.gauss_pattern_2.a_out.connect(self.spike_generator_2.a_in)\n", + "\n", + " # DNF with specified kernel\n", + " self.dnf = LIF(shape=shape, du=409, dv=2047, vth=200)\n", + " connect(self.dnf.s_out, self.dnf.a_in, [Convolution(kernel)])\n", + "\n", + " # Connect spike input to DNF\n", + " connect(self.spike_generator_1.s_out, self.dnf.a_in, [Weights(25)])\n", + " connect(self.spike_generator_2.s_out, self.dnf.a_in, [Weights(25)])\n", + "\n", + " # Set up monitors\n", + " self.monitor_dnf = Monitor()\n", + " self.monitor_dnf.probe(target=self.dnf.s_out, num_steps=time_steps)\n", + " self.monitor_input_1 = Monitor()\n", + " self.monitor_input_1.probe(self.spike_generator_1.s_out, time_steps)\n", + " self.monitor_input_2 = Monitor()\n", + " self.monitor_input_2.probe(self.spike_generator_2.s_out, time_steps)\n", + "\n", + " # Set up a run configuration\n", + " self.run_cfg = Loihi1SimCfg(select_tag=\"bit_accurate_loihi\")\n", + "\n", + " def run(self):\n", + " # Run the network and make changes to spike inputs over time\n", + " condition = RunSteps(num_steps=100)\n", + " self.gauss_pattern_1.run(condition=condition, run_cfg=self.run_cfg)\n", + " self.gauss_pattern_1.amplitude = 2300\n", + " self.gauss_pattern_2.amplitude = 2300\n", + " self.gauss_pattern_1.run(condition=condition, run_cfg=self.run_cfg)\n", + " self.gauss_pattern_1.amplitude = 11200\n", + " self.gauss_pattern_2.amplitude = 11200\n", + " self.gauss_pattern_1.run(condition=condition, run_cfg=self.run_cfg)\n", + " self.gauss_pattern_1.amplitude = 2300\n", + " self.gauss_pattern_2.amplitude = 2300\n", + " self.gauss_pattern_1.run(condition=RunSteps(num_steps=200),\n", + " 
run_cfg=self.run_cfg)\n",
+    "        self.gauss_pattern_1.amplitude = 0\n",
+    "        self.gauss_pattern_2.amplitude = 0\n",
+    "        self.gauss_pattern_1.run(condition=RunSteps(num_steps=200),\n",
+    "                                  run_cfg=self.run_cfg)\n",
+    "\n",
+    "    def plot(self):\n",
+    "        # Get probed data from monitors\n",
+    "        data_dnf = self.monitor_dnf.get_data()\\\n",
+    "            [self.dnf.name][self.dnf.s_out.name]\n",
+    "        data_input1 = self.monitor_input_1.get_data()\\\n",
+    "            [self.spike_generator_1.name][self.spike_generator_1.s_out.name]\n",
+    "        data_input2 = self.monitor_input_2.get_data()\\\n",
+    "            [self.spike_generator_2.name][self.spike_generator_2.s_out.name]\n",
+    "\n",
+    "        # Stop the execution of the network\n",
+    "        self.spike_generator_1.stop()\n",
+    "\n",
+    "        # Generate a raster plot from the probed data\n",
+    "        plot_1d(data_dnf,\n",
+    "                data_input1,\n",
+    "                data_input2)\n",
+    "        \n",
+    "        # Generate an animated plot from the probed data\n",
+    "        animated_1d_plot(data_dnf,\n",
+    "                         data_input1,\n",
+    "                         data_input2)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Detection\n",
+    "\n",
+    "This example demonstrates the _detection instability_ of DNFs. The DNF receives spike input at two locations, whose spike rate changes over time. Between time steps 100 and 200, the spike rate is not high enough for the DNF to form a peak and it does not spike. When the input spike rate is increased after time step 200, the DNF goes through the _detection instability_, forming a peak and producing spike output. When the input spike rate is lowered back to its previous value, the DNF keeps spiking and its peak remains stable. This demonstrates hysteresis of detection - the DNF stabilizes its decision about detecting an input. Only when the input is completely removed at time step 500 does the peak disappear in the _reverse detection instability_.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "from lava.lib.dnf.kernels.kernels import MultiPeakKernel\n",
+    "\n",
+    "detection_kernel = MultiPeakKernel(amp_exc=83, \n",
+    "                                   width_exc=3.75, \n",
+    "                                   amp_inh=-70,\n",
+    "                                   width_inh=7.5)\n",
+    "\n",
+    "architecture = Architecture(detection_kernel)\n",
+    "architecture.run()\n",
+    "architecture.plot()\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Memory\n",
+    "\n",
+    "This example demonstrates the memory property of DNFs. The input and connectivity are structured as in the 'detection' example above, but here the local excitation is stronger. This leads the two peaks in the DNF to remain stable even after the input is deactivated at time step 500. The DNF has thus formed a memory of the inputs being present at those locations. Even though such self-sustained peaks are stable without input, they may track a present input that changes its location in a graded manner."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "memory_kernel = MultiPeakKernel(amp_exc=32, \n",
+    "                                width_exc=2.5, \n",
+    "                                amp_inh=-18, \n",
+    "                                width_inh=4.5)\n",
+    "\n",
+    "architecture = Architecture(memory_kernel)\n",
+    "architecture.run()\n",
+    "architecture.plot()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "pycharm": {
+     "name": "#%% md\n"
+    }
+   },
+   "source": [
+    "## Selection\n",
+    "\n",
+    "This example demonstrates the _selection instability_ of a DNF. 
With spike input at two locations, the DNF makes a selection decision at time step 100, creating a peak at only one of the input locations. That decision remains stable even though two inputs are present simultaneously for the subsequent 100 time steps. The selection switches only when the first input is deactivated at time step 200. It then remains stable on the second input location even as the first input is activated again at time step 300. This demonstrates hysteresis of selection - the DNF stabilizes its selection decision. The same behavior is demonstrated again between time steps 400 and 600 for the first input.\n",
+    "\n",
+    "Note: The selection is biased/forced here to always be on the input centered on neuron 11 to fit the structure of the input. To achieve this, this input is activated one time step before the other input. If both were activated at the same time step, the selection decision would be random. The DNF may then select the \"wrong\" input and the example would not work."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "from lava.lib.dnf.kernels.kernels import SelectiveKernel\n",
+    "\n",
+    "class SelectiveArchitecture(Architecture):\n",
+    "    \"\"\"Class that overrides the run function to specify a different input\n",
+    "    structure.\"\"\"\n",
+    "    def run(self):\n",
+    "        # Run the network and make changes to spike inputs over time\n",
+    "        self.dnf.run(condition=RunSteps(num_steps=99), run_cfg=self.run_cfg)\n",
+    "        self.gauss_pattern_1.amplitude = 10000\n",
+    "        self.dnf.run(condition=RunSteps(num_steps=1), run_cfg=self.run_cfg)\n",
+    "        self.gauss_pattern_2.amplitude = 10000\n",
+    "        self.dnf.run(condition=RunSteps(num_steps=100), run_cfg=self.run_cfg)\n",
+    "        self.gauss_pattern_1.amplitude = 0\n",
+    "        self.dnf.run(condition=RunSteps(num_steps=100), run_cfg=self.run_cfg)\n",
+    "        self.gauss_pattern_1.amplitude = 10000\n",
+    "        self.dnf.run(condition=RunSteps(num_steps=100), run_cfg=self.run_cfg)\n",
+    "        self.gauss_pattern_2.amplitude = 0\n",
+    "        self.dnf.run(condition=RunSteps(num_steps=100), run_cfg=self.run_cfg)\n",
+    "        self.gauss_pattern_2.amplitude = 10000\n",
+    "        self.dnf.run(condition=RunSteps(num_steps=100), run_cfg=self.run_cfg)\n",
+    "        self.gauss_pattern_1.amplitude = 0\n",
+    "        self.gauss_pattern_2.amplitude = 0\n",
+    "        self.dnf.run(condition=RunSteps(num_steps=100), run_cfg=self.run_cfg)\n",
+    "\n",
+    "\n",
+    "selection_kernel = SelectiveKernel(amp_exc=20, \n",
+    "                                   width_exc=2.25, \n",
+    "                                   global_inh=-15)\n",
+    "\n",
+    "architecture = SelectiveArchitecture(selection_kernel)\n",
+    "architecture.run()\n",
+    "architecture.plot()"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.8"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
diff --git a/tutorials/lava/lib/dnf/dnf_regimes/utils.py b/tutorials/lava/lib/dnf/dnf_regimes/utils.py
new file mode 100644
index 0000000..7521ae4
--- /dev/null
+++ b/tutorials/lava/lib/dnf/dnf_regimes/utils.py
@@ -0,0 +1,135 @@
+# Copyright (C) 2021 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause
+# See: https://spdx.org/licenses/
+
+import numpy as np
+import matplotlib.pyplot as plt
+from matplotlib import animation
+from IPython import display
+import typing as ty
+
+from lava.lib.dnf.utils.plotting import raster_plot, compute_spike_rates
+
+
+def plot_1d(probe_data_dnf: np.ndarray,
+            probe_data_input1: np.ndarray,
+            probe_data_input2: np.ndarray) -> None:
+    """Generates an architecture raster plot for the examples in the DNF
+    regimes tutorial.
+
+    Parameters
+    ----------
+    probe_data_dnf : numpy.ndarray
+        probe data of the DNF
+    probe_data_input1 : numpy.ndarray
+        probe data of the first spiking input
+    probe_data_input2 : numpy.ndarray
+        probe data of the second spiking input
+    """
+    probe_data_input = probe_data_input1 + probe_data_input2
+    probe_data_input = probe_data_input.astype(float)
+    probe_data_input = np.transpose(probe_data_input)
+    probe_data_dnf = np.transpose(probe_data_dnf.astype(float))
+
+    num_neurons = np.size(probe_data_input, axis=1)
+    num_time_steps = np.size(probe_data_input, axis=0)
+
+    plt.figure(figsize=(10, 5))
+    ax0 = plt.subplot(2, 1, 1)
+    raster_plot(probe_data_input)
+    ax0.set_xlabel(None)
+    ax0.set_ylabel('Input\nNeuron idx')
+    ax0.set_xticklabels([])
+    ax0.set_yticks([0, num_neurons - 1])
+    ax0.set_xlim(0, num_time_steps)
+    ax0.set_ylim(-1, num_neurons)
+
+    ax1 = plt.subplot(2, 1, 2)
+    raster_plot(probe_data_dnf)
+    ax1.set_xlabel('Time steps')
+    ax1.set_ylabel('DNF\nNeuron idx')
+    ax1.set_yticks([0, num_neurons - 1])
+    ax1.set_xlim(0, num_time_steps)
+    ax1.set_ylim(-1, num_neurons)
+
+    plt.subplots_adjust(bottom=0.1, right=0.8, top=0.9)
+    cax = plt.axes([0.85, 0.1, 0.035, 0.8])
+    plt.colorbar(cax=cax, label="Spike rate")
+
+    plt.show()
+
+
+def animated_1d_plot(probe_data_dnf: np.ndarray,
+                     probe_data_input1: np.ndarray,
+                     probe_data_input2: np.ndarray,
+                     interval: ty.Optional[int] = 30) -> None:
+    """Generates an animated plot for examples in the DNF regimes tutorial.
+
+    Parameters
+    ----------
+    probe_data_dnf : numpy.ndarray
+        probe data of the DNF
+    probe_data_input1 : numpy.ndarray
+        probe data of the first spiking input
+    probe_data_input2 : numpy.ndarray
+        probe data of the second spiking input
+    interval : int
+        interval to use in matplotlib.animation.FuncAnimation
+    """
+    probe_data_input = probe_data_input1 + probe_data_input2
+    probe_data_input = probe_data_input.astype(float)
+    probe_data_dnf = probe_data_dnf.astype(float)
+    probe_data_input = np.transpose(probe_data_input)
+    probe_data_dnf = np.transpose(probe_data_dnf)
+
+    num_neurons = np.size(probe_data_input, axis=1)
+    num_time_steps = np.size(probe_data_dnf, axis=0)
+
+    input_spike_rates = compute_spike_rates(probe_data_input)
+    dnf_spike_rates = compute_spike_rates(probe_data_dnf)
+
+    fig, ax = plt.subplots(2, 1, figsize=(10, 5))
+    line0, = ax[0].plot(np.zeros((num_neurons,)), 'bo-')
+    line1, = ax[1].plot(np.zeros((num_neurons,)), 'ro-')
+
+    im = [line0, line1]
+
+    ax[0].set_xlabel("")
+    ax[1].set_xlabel("Input neuron idx")
+
+    ax[0].set_ylabel("Input spike rate")
+    ax[1].set_ylabel("DNF spike rate")
+
+    ax[0].set_xticks([])
+    ax[1].set_xticks([0, num_neurons - 1])
+
+    ax[0].set_yticks([0, 1])
+    ax[1].set_yticks([0, 1])
+
+    ax[0].set_xlim(-1, num_neurons)
+    ax[1].set_xlim(-1, num_neurons)
+
+    offset = 0.1
+    ax[0].set_ylim(np.min(input_spike_rates) - offset,
+                   np.max(input_spike_rates) + offset)
+    ax[1].set_ylim(np.min(dnf_spike_rates) - offset,
+                   np.max(dnf_spike_rates) + offset)
+
+    plt.tight_layout()
+
+    def animate(i: int) -> ty.List:
+        x = range(num_neurons)
+        im[0].set_data(x, input_spike_rates[i, :])
+        im[1].set_data(x, dnf_spike_rates[i, :])
+        return im
+
+    anim = animation.FuncAnimation(fig,
+                                   animate,
+                                   frames=num_time_steps,
+                                   interval=interval,
+                                   blit=True)
+
+    html = display.HTML(anim.to_jshtml())
+
+    display.display(html)
+    plt.close()
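For reference, a minimal way to exercise these plotting helpers outside the tutorial notebook, using synthetic spike data in place of monitor probes. This sketch assumes the module above is importable as `utils` and, for the animation, an IPython display environment:

```python
import numpy as np

# hypothetical import path; the module above lives at
# tutorials/lava/lib/dnf/dnf_regimes/utils.py
from utils import plot_1d, animated_1d_plot

# synthetic probe data: 700 time steps x 15 neurons, as in the notebook
rng = np.random.default_rng(seed=42)
spikes_dnf = rng.integers(0, 2, size=(700, 15)).astype(float)
spikes_in1 = rng.integers(0, 2, size=(700, 15)).astype(float)
spikes_in2 = rng.integers(0, 2, size=(700, 15)).astype(float)

plot_1d(spikes_dnf, spikes_in1, spikes_in2)           # static raster plot
animated_1d_plot(spikes_dnf, spikes_in1, spikes_in2)  # animation via to_jshtml()
```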