diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 4dbe39eb2..d96b923fc 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -17,19 +17,19 @@ Objective of issue: **Lava version:** -- [ ] **0.6.0** (feature release) -- [ ] **0.5.1** (bug fixes) -- [x] **0.5.0** (current version) -- [ ] **0.4.1** -- [ ] **0.4.0** -- [ ] **0.3.0** -- [ ] **0.1.2** +- [ ] **0.6.0** (feature release) +- [ ] **0.5.1** (bug fixes) +- [x] **0.5.0** (current version) +- [ ] **0.4.1** +- [ ] **0.4.0** +- [ ] **0.3.0** +- [ ] **0.1.2** **I'm submitting a ...** -- [ ] bug report -- [ ] feature request -- [ ] documentation request +- [ ] bug report +- [ ] feature request +- [ ] documentation request diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index ff3259a2e..49f097a3a 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -22,12 +22,12 @@ A clear and concise description of what the bug is. **To reproduce current behavior** Steps to reproduce the behavior: -1. When I run this code (add code or minimum test case) ... +1. When I run this code (add code or minimum test case) ... ```python def my_code(): pass ``` -2. I get this error ... +2. I get this error ... ``` Error... ``` @@ -39,9 +39,9 @@ A clear and concise description of what you expected to happen. If applicable, add screenshots to help explain your problem. Remove section otherwise. **Environment (please complete the following information):** - - Device: [e.g. Laptop, Intel cloud] - - OS: [e.g. Linux] - - Lava version [e.g. 0.6.1] +- Device: [e.g. Laptop, Intel cloud] +- OS: [e.g. Linux] +- Lava version [e.g. 0.6.1] **Additional context** Add any other context about the problem here. Remove section otherwise. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 083147669..a0b34ba9d 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -11,8 +11,8 @@ assignees: '' As a user, I want to [capability] to [benefit]. ### Conditions of satisfaction -- An optional list of conditions that have to be fulfilled for this feature to be complete. -- For example: "Users can add both individual items and lists as parameters." +- An optional list of conditions that have to be fulfilled for this feature to be complete. +- For example: "Users can add both individual items and lists as parameters." ### Acceptance tests -- An optional list of tests that should be written to automatically test the new feature. +- An optional list of tests that should be written to automatically test the new feature. 
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index bc820529f..4eb543f72 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -12,13 +12,13 @@ Objective of pull request: ## Pull request checklist Your PR fulfills the following requirements: -- [ ] [Issue](https://github.com/lava-nc/lava/issues) created that explains the change and why it's needed -- [ ] Tests are part of the PR (for bug fixes / features) -- [ ] [Docs](https://github.com/lava-nc/docs) reviewed and added / updated if needed (for bug fixes / features) -- [ ] PR conforms to [Coding Conventions](https://lava-nc.org/developer_guide.html#coding-conventions) -- [ ] [PR applys BSD 3-clause or LGPL2.1+ Licenses](https://lava-nc.org/developer_guide.html#add-a-license) to all code files -- [ ] Lint (`flakeheaven lint src/lava tests/`) and (`bandit -r src/lava/.`) pass locally -- [ ] Build tests (`pytest`) passes locally +- [ ] [Issue](https://github.com/lava-nc/lava/issues) created that explains the change and why it's needed +- [ ] Tests are part of the PR (for bug fixes / features) +- [ ] [Docs](https://github.com/lava-nc/docs) reviewed and added / updated if needed (for bug fixes / features) +- [ ] PR conforms to [Coding Conventions](https://lava-nc.org/developer_guide.html#coding-conventions) +- [ ] [PR applies BSD 3-clause or LGPL2.1+ Licenses](https://lava-nc.org/developer_guide.html#add-a-license) to all code files +- [ ] Lint (`flakeheaven lint src/lava tests/`) and (`bandit -r src/lava/.`) pass locally +- [ ] Build tests (`pytest`) pass locally ## Pull request type @@ -30,13 +30,13 @@ Your PR fulfills the following requirements: Please check your PR type: -- [ ] Bugfix -- [ ] Feature -- [ ] Code style update (formatting, renaming) -- [ ] Refactoring (no functional changes, no api changes) -- [ ] Build related changes -- [ ] Documentation changes -- [ ] Other (please describe): +- [ ] Bugfix +- [ ] Feature +- [ ] Code style update (formatting, renaming) +- [ ] Refactoring (no functional changes, no api changes) +- [ ] Build related changes +- [ ] Documentation changes +- [ ] Other (please describe): ## What is the current behavior? @@ -50,8 +50,8 @@ Please check your PR type: ## Does this introduce a breaking change? -- [ ] Yes -- [ ] No +- [ ] Yes +- [ ] No diff --git a/README.md b/README.md index b1b1ef9c5..d3df5e71c 100644 --- a/README.md +++ b/README.md @@ -35,22 +35,13 @@ supporting architectures like Intel Loihi may remain proprietary to Intel and will be shared as extensions to eligible users. >### Lava extension for Intel's Loihi ->The Lava extension for Loihi is available for members of the Intel -Neuromorphic Research Community (INRC). The extension enables execution of -Lava on Intel's Loihi hardware platform. +>The Lava extension for Loihi is available for members of the Intel Neuromorphic Research Community (INRC). The extension enables execution of Lava on Intel's Loihi hardware platform. > ->Developers interested in using Lava with Loihi systems need to join the -INRC. Loihi 1 and 2 research systems are currently not available -commercially. Once a member of the INRC, developers will gain access to -cloud-hosted Loihi systems or may be able to obtain physical Loihi systems on a -loan basis. +>Developers interested in using Lava with Loihi systems need to join the INRC. Loihi 1 and 2 research systems are currently not available commercially.
Once a member of the INRC, developers will gain access to cloud-hosted Loihi systems or may be able to obtain physical Loihi systems on a loan basis. > ->To join the INRC, visit [http://neuromorphic.intel.com](http://neuromorphic.intel.com) -or email at [inrc_interest@intel.com](mailto:inrc_interest@intel.com). +>To join the INRC, visit [http://neuromorphic.intel.com](http://neuromorphic.intel.com) or email at [inrc_interest@intel.com](mailto:inrc_interest@intel.com). > -> If you are already a member of the INRC, please read how to [get - started with the Lava extension for Loihi.](https://intel-ncl.atlassian.net/wiki/spaces/NAP/pages/1785856001/Get+started+with+the+Lava+extension+for+Loihi) -> This page is **only** accessible to members of the INRC. +> If you are already a member of the INRC, please read how to [get started with the Lava extension for Loihi](https://intel-ncl.atlassian.net/wiki/spaces/NAP/pages/1785856001/Get+started+with+the+Lava+extension+for+Loihi). This page is **only** accessible to members of the INRC. # Getting started @@ -70,7 +61,7 @@ will need to install the `poetry` Python package. Open a **python 3** terminal and run based on the OS you are on: -### [Linux/MacOS] +### Linux/MacOS ```bash cd $HOME @@ -87,7 +78,7 @@ pytest ## See FAQ for more info: https://github.com/lava-nc/lava/wiki/Frequently-Asked-Questions-(FAQ)#install ``` -### [Windows] +### Windows ```powershell # Commands using PowerShell @@ -140,7 +131,7 @@ Required test coverage of 85.0% reached. Total coverage: 88.81% ``` -## [Alternative] Installing Lava via Conda +## Alternative: Installing Lava via Conda If you use the Conda package manager, you can simply install the Lava package via: @@ -158,7 +149,7 @@ conda install -n lava -c intel numpy scipy conda install -n lava -c conda-forge lava --freeze-installed ``` -## [Alternative] Installing Lava from binaries +## Alternative: Installing Lava from binaries If you only need to install Lava as a user in your python environment, we will publish Lava releases via @@ -167,7 +158,7 @@ the package and install it. Open a Python terminal and run: -### [Windows/MacOS/Linux] +### Windows/MacOS/Linux ```bash python -m venv .venv @@ -203,7 +194,7 @@ bandit -r src/lava/. bandit -r src/lava/. --format custom --msg-template '{abspath}:{line}: {test_id}[bandit]: {severity}: {msg}' ``` ## -> Refer to the tutorials directory for in-depth as well as end-to-end tutorials on how to write Lava Processes, connect them, and execute the code. +>Refer to the tutorials directory for in-depth as well as end-to-end tutorials on how to write Lava Processes, connect them, and execute the code. # Stay in touch diff --git a/RELEASE.md b/RELEASE.md index 0493bdc4e..1852313be 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -4,79 +4,79 @@ While this release offers few high-level application examples, Lava v0.4.0 provi ## New Features and Improvements Features marked with * are available as part of the Loihi 2 extension. -- *Extended Process library including new ProcessModels and additional improvements: - - LIF, Sigma-Delta, and Dense Processes execute on Loihi NeuroCores. - - Prototype Convolutional Process added. - - Sending and receiving spikes to NeuroCores via embedded processes that can be programmed in C with examples included. - - All Lava Processes now list all constructor arguments explicitly with type annotations. 
-- *Added high-level API to develop custom ProcessModels that use Loihi 2 features: - - Loihi NeuroCores can be programmed in Python by allocating neural network resources like Axons, Synapses or Neurons. In particular, Loihi 2 NeuroCore Neurons can be configured by writing highly flexible assembly programs. - - Loihi embedded processors can be programmed in C. But unlike the prior NxSDK, no knowledge of low-level registers details is required anymore. Instead, the C API mirrors the high-level Python API to interact with other processes via channels. -- Compiler and Runtime support for Loihi 2: - - General redesign of Compiler and Runtime architecture to support compilation of Processes that execute across a heterogenous backend of different compute resources. CPU and Loihi are supported via separate sub compilers. - - *The Loihi NeuroCore sub compiler automatically distributes neural network resources across multiple cores. - - *The Runtime supports direct channel-based communication between Processes running on Loihi NeuroCores, embedded CPUs or host CPUs written in Python or C. Of all combinations, only Python<->C and C<->NeuroCore are currently supported. - - *Added support to access Process Variables on Loihi NeuroCores at runtime via Var.set and Var.get(). -- New tutorials and improved class and method docstrings explain how new Lava features can be used such as *NeuroCore and *embedded processor programming. -- An extended suite of unit tests and new *integration tests validate the correctness of the Lava framework. +- *Added high-level API to develop custom ProcessModels that use Loihi 2 features: + - Loihi NeuroCores can be programmed in Python by allocating neural network resources like Axons, Synapses or Neurons. In particular, Loihi 2 NeuroCore Neurons can be configured by writing highly flexible assembly programs. + - Loihi embedded processors can be programmed in C. But unlike the prior NxSDK, no knowledge of low-level register details is required anymore. Instead, the C API mirrors the high-level Python API to interact with other processes via channels. +- Compiler and Runtime support for Loihi 2: + - General redesign of Compiler and Runtime architecture to support compilation of Processes that execute across a heterogeneous backend of different compute resources. CPU and Loihi are supported via separate sub compilers. + - *The Loihi NeuroCore sub compiler automatically distributes neural network resources across multiple cores. + - *The Runtime supports direct channel-based communication between Processes running on Loihi NeuroCores, embedded CPUs or host CPUs written in Python or C. Of all combinations, only Python<->C and C<->NeuroCore are currently supported. + - *Added support to access Process Variables on Loihi NeuroCores at runtime via Var.set and Var.get() (see the sketch below). +- New tutorials and improved class and method docstrings explain how new Lava features can be used such as *NeuroCore and *embedded processor programming. +- An extended suite of unit tests and new *integration tests validate the correctness of the Lava framework.
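To make the runtime Var access mentioned above concrete, here is a minimal, hypothetical sketch of reading and writing a Process Variable between runs. It assumes the standard LIF Process and a CPU simulation run configuration; the parameter values and the `select_tag` are illustrative, not prescribed by this release.

```python
import numpy as np
from lava.proc.lif.process import LIF
from lava.magma.core.run_conditions import RunSteps
from lava.magma.core.run_configs import Loihi1SimCfg

# Constructor arguments are passed as explicit keywords (values are illustrative).
lif = LIF(shape=(4,), du=0.1, dv=0.1, vth=10.0, bias_mant=2.0)

run_cfg = Loihi1SimCfg(select_tag="floating_pt")
lif.run(condition=RunSteps(num_steps=10), run_cfg=run_cfg)
v_snapshot = lif.v.get()   # read a Var while execution is paused
lif.v.set(np.zeros(4))     # overwrite it before resuming
lif.run(condition=RunSteps(num_steps=10), run_cfg=run_cfg)
lif.stop()
```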
## Bug Fixes and Other Changes -- Support for virtual ports on multiple incoming connections (Python Processes only) (Issue [#223](https://github.com/lava-nc/lava/issues/223), PR [#224](https://github.com/lava-nc/lava/pull/224)) -- Added conda install instructions (PR [#225](https://github.com/lava-nc/lava/pull/225)) -- Var.set/get() works when RunContinuous RunMode is used (Issue [#255](https://github.com/lava-nc/lava/issues/255), PR [#256](https://github.com/lava-nc/lava/pull/256)) -- Successful execution of tutorials now covered by unit tests (Issue [#243](https://github.com/lava-nc/lava/issues/243), PR [#244](https://github.com/lava-nc/lava/pull/244)) -- Fixed PYTHONPATH in tutorial_01 (Issue [#45](https://github.com/lava-nc/lava/issues/45), PR [#239](https://github.com/lava-nc/lava/pull/239)) -- Fixed output of tutorial_07 (Issue [#249](https://github.com/lava-nc/lava/issues/249), PR [#253](https://github.com/lava-nc/lava/pull/253)) +- Support for virtual ports on multiple incoming connections (Python Processes only) (Issue [#223](https://github.com/lava-nc/lava/issues/223), PR [#224](https://github.com/lava-nc/lava/pull/224)) +- Added conda install instructions (PR [#225](https://github.com/lava-nc/lava/pull/225)) +- Var.set/get() works when RunContinuous RunMode is used (Issue [#255](https://github.com/lava-nc/lava/issues/255), PR [#256](https://github.com/lava-nc/lava/pull/256)) +- Successful execution of tutorials now covered by unit tests (Issue [#243](https://github.com/lava-nc/lava/issues/243), PR [#244](https://github.com/lava-nc/lava/pull/244)) +- Fixed PYTHONPATH in tutorial_01 (Issue [#45](https://github.com/lava-nc/lava/issues/45), PR [#239](https://github.com/lava-nc/lava/pull/239)) +- Fixed output of tutorial_07 (Issue [#249](https://github.com/lava-nc/lava/issues/249), PR [#253](https://github.com/lava-nc/lava/pull/253)) ## Breaking Changes -- Process constructors for standard library processes now require explicit keyword/value pairs and do not accept arbitrary input arguments via **kwargs anymore. This might break some workloads. -- use_graded_spike kwarg has been changed to num_message_bits for all the built-in processes. -- shape kwarg has been removed from Dense process. It is automatically inferred from the weight parameter’s shape. -- Conv Process has additional arguments weight_exp and num_weight_bits that are relevant for fixed-point implementations. -- The sign_mode argument in the Dense Process is now an enum rather than an integer. -- New parameters u and v in the LIF Process enable setting initial values for current and voltage. -- The bias parameter in the LIF Process has been renamed to bias_mant. +- Process constructors for standard library processes now require explicit keyword/value pairs and do not accept arbitrary input arguments via **kwargs anymore. This might break some workloads. +- use_graded_spike kwarg has been changed to num_message_bits for all the built-in processes. +- shape kwarg has been removed from Dense process. It is automatically inferred from the weight parameter’s shape. +- Conv Process has additional arguments weight_exp and num_weight_bits that are relevant for fixed-point implementations. +- The sign_mode argument in the Dense Process is now an enum rather than an integer. +- New parameters u and v in the LIF Process enable setting initial values for current and voltage. +- The bias parameter in the LIF Process has been renamed to bias_mant. 
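For readers migrating existing code, the following hedged sketch illustrates how the breaking changes listed above show up in typical constructor calls. The old calls appear only as comments, and the exact signatures may differ slightly between releases.

```python
import numpy as np
from lava.proc.lif.process import LIF
from lava.proc.dense.process import Dense

w = np.ones((5, 3))  # Dense weight matrix (output neurons x input neurons)

# Before v0.4.0 (no longer accepted):
#   LIF(shape=(3,), bias=2, some_unused_kwarg=1)           # arbitrary **kwargs
#   Dense(shape=(5, 3), weights=w, use_graded_spike=True)

# After v0.4.0: explicit keywords only, with renamed or removed arguments.
lif = LIF(shape=(3,), u=0.0, v=0.0, bias_mant=2.0)  # 'bias' renamed to 'bias_mant'; u, v set initial state
dense = Dense(weights=w, num_message_bits=8)        # 'shape' is inferred from the weights;
                                                    # 'use_graded_spike' -> 'num_message_bits'
# 'sign_mode' for Dense now expects an enum value rather than a raw integer.
```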
## Known Issues -- Lava does currently not support on-chip learning, Loihi 1 and a variety of connectivity compression features such as convolutional encoding. -- All Processes in a network must currently be connected via channels. Running unconnected Processes using NcProcessModels in parallel currently gives incorrect results. -- Only one instance of a Process targeting an embedded processor (using CProcessModel) can currently be created. Creating multiple instances in a network, results in an error. As a workaround, the behavior of multiple Processes can be fused into a single CProcessModel. -- Direct channel connections between Processes using a PyProcessModel and NcProcessModel are not supported. -- In the scenario that InputAxons are duplicated across multiple cores and users expect to inject spikes based on the declared port size, then the current implementation leads to buffer overflows and memory corruption. -- Channel communication between PyProcessModels is slow. -- The Lava Compiler is still inefficient and in need of improvement to performance and memory utilization. -- Virtual ports are only supported between Processes using PyProcModels, but not between Processes when CProcModels or NcProcModels are involved. In addition, VirtualPorts do not support concatenation yet. -- Joining and forking of virtual ports is not supported. -- The Monitor Process does currently only support probing of a single Var per Process implemented via a PyProcessModel. The Monitor Process does currently not support probing of Vars mapped to NeuroCores. -- Despite new docstrings, type annotations, and parameter descriptions to most of the public user-facing API, some parts of the code still have limited documentation and are missing type annotations. +- Lava currently does not support on-chip learning, Loihi 1, or a variety of connectivity compression features such as convolutional encoding. +- All Processes in a network must currently be connected via channels. Running unconnected Processes using NcProcessModels in parallel currently gives incorrect results. +- Only one instance of a Process targeting an embedded processor (using CProcessModel) can currently be created. Creating multiple instances in a network results in an error. As a workaround, the behavior of multiple Processes can be fused into a single CProcessModel. +- Direct channel connections between Processes using a PyProcessModel and NcProcessModel are not supported. +- If InputAxons are duplicated across multiple cores and users expect to inject spikes based on the declared port size, the current implementation leads to buffer overflows and memory corruption. +- Channel communication between PyProcessModels is slow. +- The Lava Compiler is still inefficient and in need of improvements to performance and memory utilization. +- Virtual ports are only supported between Processes using PyProcModels, but not between Processes when CProcModels or NcProcModels are involved. In addition, VirtualPorts do not support concatenation yet. +- Joining and forking of virtual ports is not supported. +- The Monitor Process currently supports probing of only a single Var per Process implemented via a PyProcessModel, and it does not support probing of Vars mapped to NeuroCores (see the sketch below). +- Despite new docstrings, type annotations, and parameter descriptions for most of the public user-facing API, some parts of the code still have limited documentation and are missing type annotations.
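As a hypothetical illustration of the Monitor limitation noted above, the sketch below probes a single Var of a Process running with a PyProcessModel. Names follow the standard process library, but the values and the `select_tag` are illustrative assumptions.

```python
from lava.proc.lif.process import LIF
from lava.proc.monitor.process import Monitor
from lava.magma.core.run_conditions import RunSteps
from lava.magma.core.run_configs import Loihi1SimCfg

num_steps = 20
lif = LIF(shape=(2,), du=0.1, dv=0.1, vth=10.0, bias_mant=1.5)

monitor = Monitor()
monitor.probe(target=lif.v, num_steps=num_steps)    # one Var per Process is supported
# monitor.probe(target=lif.u, num_steps=num_steps)  # a second Var of the same Process is not

lif.run(condition=RunSteps(num_steps=num_steps),
        run_cfg=Loihi1SimCfg(select_tag="floating_pt"))
data = monitor.get_data()                           # time series of the probed Var
lif.stop()
```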
## What's Changed -* Virtual ports on multiple incoming connections by @mathisrichter in https://github.com/lava-nc/lava/pull/224 -* Add conda install to README by @Tobias-Fischer in https://github.com/lava-nc/lava/pull/225 -* PYTHONPATH fix in tutorial by @jlubo in https://github.com/lava-nc/lava/pull/239 -* Fix tutorial04_execution.ipynb by @mgkwill in https://github.com/lava-nc/lava/pull/241 -* Tutorial tests by @mgkwill in https://github.com/lava-nc/lava/pull/244 -* Update README.md remove vlab instructions by @mgkwill in https://github.com/lava-nc/lava/pull/248 -* Tutorial bug fix by @PhilippPlank in https://github.com/lava-nc/lava/pull/253 -* Fix get set var by @PhilippPlank in https://github.com/lava-nc/lava/pull/256 -* Update runtime_service.py by @PhilippPlank in https://github.com/lava-nc/lava/pull/258 -* Release/v0.4.0 by @mgkwill in https://github.com/lava-nc/lava/pull/265 +* Virtual ports on multiple incoming connections by @mathisrichter in https://github.com/lava-nc/lava/pull/224 +* Add conda install to README by @Tobias-Fischer in https://github.com/lava-nc/lava/pull/225 +* PYTHONPATH fix in tutorial by @jlubo in https://github.com/lava-nc/lava/pull/239 +* Fix tutorial04_execution.ipynb by @mgkwill in https://github.com/lava-nc/lava/pull/241 +* Tutorial tests by @mgkwill in https://github.com/lava-nc/lava/pull/244 +* Update README.md remove vlab instructions by @mgkwill in https://github.com/lava-nc/lava/pull/248 +* Tutorial bug fix by @PhilippPlank in https://github.com/lava-nc/lava/pull/253 +* Fix get set var by @PhilippPlank in https://github.com/lava-nc/lava/pull/256 +* Update runtime_service.py by @PhilippPlank in https://github.com/lava-nc/lava/pull/258 +* Release/v0.4.0 by @mgkwill in https://github.com/lava-nc/lava/pull/265 ## Thanks to our Contributors -- Intel Corporation: All contributing members of the Intel Neuromorphic Computing Lab +- Intel Corporation: All contributing members of the Intel Neuromorphic Computing Lab ### Open-source community: -- [Tobias-Fischer](https://github.com/Tobias-Fischer), Tobias Fischer -- [jlubo](https://github.com/jlubo), Jannik Luboeinski +- [Tobias-Fischer](https://github.com/Tobias-Fischer), Tobias Fischer +- [jlubo](https://github.com/jlubo), Jannik Luboeinski ## New Contributors -* @jlubo made their first contribution in https://github.com/lava-nc/lava/pull/239 +* @jlubo made their first contribution in https://github.com/lava-nc/lava/pull/239 **Full Changelog**: https://github.com/lava-nc/lava/compare/v0.3.0...v0.4.0 diff --git a/pyproject.toml b/pyproject.toml index de74583ad..80e0f7c7f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -170,11 +170,11 @@ pep8-naming = ["-*"] # mccabe = ["+*"] # pep8-naming = ["+*"] # pyflakes = ["+*"] -# pylint = ["+*"] +pylint = ["+*"] pycodestyle = ["+*", "-W503", "-E203"] -pyflakes = ["-*"] # Disable temporarily until lint fix is pushed -pylint = ["-*"] # Disable temporarily until lint fix is pushed +# pyflakes = ["-*"] # Disable temporarily until lint fix is pushed +# pylint = ["-*"] # Disable temporarily until lint fix is pushed [tool.flakeheaven.exceptions."tests/"] pycodestyle = ["-F401"] # Disable a check diff --git a/src/lava/magma/compiler/builders/channel_builder.py b/src/lava/magma/compiler/builders/channel_builder.py index 5ffdc76ce..9f470535b 100644 --- a/src/lava/magma/compiler/builders/channel_builder.py +++ b/src/lava/magma/compiler/builders/channel_builder.py @@ -190,4 +190,3 @@ def build( Exception Can't build channel of type specified """ - pass diff --git 
a/src/lava/magma/compiler/builders/interfaces.py b/src/lava/magma/compiler/builders/interfaces.py index eed2c90fc..5c78fbf01 100644 --- a/src/lava/magma/compiler/builders/interfaces.py +++ b/src/lava/magma/compiler/builders/interfaces.py @@ -14,7 +14,6 @@ class AbstractBuilder(ABC): @abstractmethod def build(self): """Build the actual process.""" - pass class ResourceAddress(ABC): @@ -24,7 +23,6 @@ class ResourceAddress(ABC): class Resource(ABC): def write(self, hw: ty.Any): """Given hw, write this compiled resource""" - pass class CompiledResource(Resource): diff --git a/src/lava/magma/compiler/channels/pypychannel.py b/src/lava/magma/compiler/channels/pypychannel.py index afcef996b..94489de1f 100644 --- a/src/lava/magma/compiler/channels/pypychannel.py +++ b/src/lava/magma/compiler/channels/pypychannel.py @@ -62,7 +62,7 @@ def __init__(self, name, shm, proto, size, req, ack): self._done = False self._array = [] self._semaphore = None - self.observer = None + self.observer: ty.Optional[ty.Callable[[], ty.Any]] = None self.thread = None @property @@ -210,7 +210,7 @@ def __init__(self, name, shm, proto, size, req, ack): self._done = False self._array = [] self._queue = None - self.observer = None + self.observer: ty.Optional[ty.Callable[[], ty.Any]] = None self.thread = None @property @@ -311,26 +311,30 @@ def _changed(self): with self._cv: self._cv.notify_all() - def _set_observer(self, channel_actions, observer): + @staticmethod + def _set_observer( + channel_actions: ty.Tuple, + observer: ty.Union[ty.Callable[[], ty.Any], None]) -> None: for channel, _ in channel_actions: channel.observer = observer def select( self, - *args: ty.Tuple[ - ty.Union[CspSendPort, CspRecvPort], ty.Callable[[], ty.Any] + *channel_actions: ty.Tuple[ + ty.Union[CspSendPort, CspRecvPort], + ty.Callable[[], ty.Any] ], - ): + ) -> None: """ Wait for any channel to become ready, then execute the corresponding callable and return the result. 
""" with self._cv: - self._set_observer(args, self._changed) + self._set_observer(channel_actions, self._changed) while True: - for channel, action in args: + for channel, action in channel_actions: if channel.probe(): - self._set_observer(args, None) + self._set_observer(channel_actions, None) return action() self._cv.wait() diff --git a/src/lava/magma/compiler/compiler.py b/src/lava/magma/compiler/compiler.py index da050bb29..1a3beec26 100644 --- a/src/lava/magma/compiler/compiler.py +++ b/src/lava/magma/compiler/compiler.py @@ -132,7 +132,7 @@ def compile( proc_builders, channel_map = self._compile_proc_groups( proc_groups, channel_map ) - py_builders, c_builders, nc_builders = split_proc_builders_by_type( + _, c_builders, nc_builders = split_proc_builders_by_type( proc_builders ) diff --git a/src/lava/magma/compiler/compiler_graphs.py b/src/lava/magma/compiler/compiler_graphs.py index 7d36fc140..75a112e62 100644 --- a/src/lava/magma/compiler/compiler_graphs.py +++ b/src/lava/magma/compiler/compiler_graphs.py @@ -8,7 +8,6 @@ import itertools import os import pkgutil -import re import sys import types import typing as ty diff --git a/src/lava/magma/compiler/mapper.py b/src/lava/magma/compiler/mapper.py index 2f570877f..d0d957faf 100644 --- a/src/lava/magma/compiler/mapper.py +++ b/src/lava/magma/compiler/mapper.py @@ -76,7 +76,7 @@ def map_cores(self, executable: Executable, executable: Compiled Executable """ - py_builders, c_builders, nc_builders = split_proc_builders_by_type( + _, c_builders, nc_builders = split_proc_builders_by_type( executable.proc_builders) # Iterate over all the ncbuilder and map them for ncb in nc_builders.values(): @@ -124,7 +124,6 @@ def map_cores(self, executable: Executable, # src or dst and its initializers for port_pair in channel_map: src = port_pair.src - dst = port_pair.dst # Checking if the initializers are same if channel_map[port_pair].src_port_initializer == ports[ port]: diff --git a/src/lava/magma/compiler/node.py b/src/lava/magma/compiler/node.py index 20e0c7261..88a9d5647 100644 --- a/src/lava/magma/compiler/node.py +++ b/src/lava/magma/compiler/node.py @@ -4,7 +4,6 @@ from __future__ import annotations -import typing import typing as ty from collections import UserList, OrderedDict @@ -27,7 +26,7 @@ def __init__( processes: ty.List[AbstractProcess], ): self.id: int = -1 - self.node_type: typing.Type[AbstractNode] = node_type + self.node_type: ty.Type[AbstractNode] = node_type self.processes = processes def add_process(self, process: AbstractProcess): @@ -57,7 +56,7 @@ def __str__(self): result.append(str(self.node_map)) return "\n".join(result) - def append(self, node: Node): + def append(self, node: Node): # pylint: disable=arguments-renamed """Appends a new node to the NodeConfig.""" node.id = self._node_ctr self._node_ctr += 1 diff --git a/src/lava/magma/compiler/subcompilers/channel_builders_factory.py b/src/lava/magma/compiler/subcompilers/channel_builders_factory.py index 6c18eaa5d..f4d1158f7 100644 --- a/src/lava/magma/compiler/subcompilers/channel_builders_factory.py +++ b/src/lava/magma/compiler/subcompilers/channel_builders_factory.py @@ -106,10 +106,10 @@ def from_channel_map( src_pt_init.connected_port_type = LoihiConnectedPortType.C_PY dst_pt_init.connected_port_type = LoihiConnectedPortType.C_PY if ch_type is ChannelType.PyC: - p_port, c_port = src_port, dst_port + p_port = src_port pi = dst_pt_init else: - c_port, p_port = src_port, dst_port + p_port = dst_port pi = src_pt_init lt = getattr(p_port.process.model_class, 
p_port.name).cls if lt in [PyInPort.VEC_DENSE, PyOutPort.VEC_DENSE]: diff --git a/src/lava/magma/compiler/subcompilers/interfaces.py b/src/lava/magma/compiler/subcompilers/interfaces.py index 067c6da02..63147ca30 100644 --- a/src/lava/magma/compiler/subcompilers/interfaces.py +++ b/src/lava/magma/compiler/subcompilers/interfaces.py @@ -20,14 +20,12 @@ class AbstractSubCompiler(ABC): def compile(self, channel_map: ChannelMap) -> ChannelMap: """Partitions all Processes in the SubCompiler's ProcGroup onto the available resources.""" - pass @abstractmethod def get_builders( self, channel_map: ChannelMap ) -> ty.Tuple[ty.Dict[AbstractProcess, AbstractProcessBuilder], ChannelMap]: """After compilation, creates and returns builders for all Processes.""" - pass class SubCompiler(AbstractSubCompiler): diff --git a/src/lava/magma/compiler/utils.py b/src/lava/magma/compiler/utils.py index 73c95a8de..ce9aa7838 100644 --- a/src/lava/magma/compiler/utils.py +++ b/src/lava/magma/compiler/utils.py @@ -129,7 +129,6 @@ class LoihiIOPortInitializer(LoihiPortInitializer): @dataclass class LoihiInPortInitializer(LoihiIOPortInitializer): """Port Initializer for a InPort for C/NC Models""" - pass @dataclass @@ -141,7 +140,6 @@ class LoihiCInPortInitializer(LoihiIOPortInitializer): @dataclass class LoihiOutPortInitializer(LoihiIOPortInitializer): """Port Initializer for a OutPort for C/NC Models""" - pass @dataclass diff --git a/src/lava/magma/core/learning/constants.py b/src/lava/magma/core/learning/constants.py index c4b1cbf86..ab34c9cb9 100644 --- a/src/lava/magma/core/learning/constants.py +++ b/src/lava/magma/core/learning/constants.py @@ -2,7 +2,7 @@ # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ -from enum import IntEnum, auto +from enum import IntEnum import lava.magma.core.learning.string_symbols as str_symbols # --------------------------------------------------------------------------- diff --git a/src/lava/magma/core/learning/symbolic_equation.py b/src/lava/magma/core/learning/symbolic_equation.py index d3d72a78c..1ccbc9fe0 100644 --- a/src/lava/magma/core/learning/symbolic_equation.py +++ b/src/lava/magma/core/learning/symbolic_equation.py @@ -3,14 +3,14 @@ # See: https://spdx.org/licenses/ import re -from abc import abstractmethod +from abc import ABC, abstractmethod import typing as ty import ast import lava.magma.core.learning.string_symbols as str_symbols -class Symbol(object): +class Symbol(ABC): """Super class for all possible symbols.""" def __init__(self, expr: ty.Optional[str] = "") -> None: @@ -50,8 +50,9 @@ def __str__(self): pass @staticmethod - def find_expr(expr: str, reg_expr: str, symbol: "Symbol") \ - -> ty.Tuple[ty.Optional["Symbol"], str]: + def find_expr(expr: str, + reg_expr: str, + symbol: "Symbol") -> ty.Tuple[ty.Optional["Symbol"], str]: """Factory method for creating symbols. 
Matches an expression to a regular expression and if there is a match, @@ -132,7 +133,6 @@ def __str__(self) -> str: class Operator(Symbol): """Abstract super class for operator Symbols.""" - pass class Addition(Operator): @@ -231,8 +231,6 @@ def __str__(self): class FactorSym(Symbol): """Abstract super class for factor Symbols.""" - pass - class Dependency(FactorSym): """Abstract super class for dependency Symbols.""" diff --git a/src/lava/magma/core/learning/utils.py b/src/lava/magma/core/learning/utils.py index 5b7d6998c..480860397 100644 --- a/src/lava/magma/core/learning/utils.py +++ b/src/lava/magma/core/learning/utils.py @@ -4,7 +4,6 @@ import numpy as np import typing as ty -import struct def stochastic_round(values: np.ndarray, diff --git a/src/lava/magma/core/model/interfaces.py b/src/lava/magma/core/model/interfaces.py index a5aa1a064..bae0367be 100644 --- a/src/lava/magma/core/model/interfaces.py +++ b/src/lava/magma/core/model/interfaces.py @@ -27,7 +27,6 @@ def shape(self) -> ty.Tuple[int, ...]: @abstractmethod def csp_ports(self) -> ty.List[AbstractCspPort]: """Returns all csp ports of the port.""" - pass def start(self): """Start all csp ports.""" diff --git a/src/lava/magma/core/model/py/connection.py b/src/lava/magma/core/model/py/connection.py index 4a436efeb..dfa33c440 100644 --- a/src/lava/magma/core/model/py/connection.py +++ b/src/lava/magma/core/model/py/connection.py @@ -307,7 +307,7 @@ def _create_learning_rule_applier( pass @abstractmethod - def _init_randoms(self): + def _init_randoms(self) -> None: pass @property @@ -1561,3 +1561,6 @@ def _saturate_synaptic_variable( f"'tag_1', or 'tag_2'." f"Got {synaptic_variable_name=}." ) + + def _init_randoms(self) -> None: + pass diff --git a/src/lava/magma/core/model/py/model.py b/src/lava/magma/core/model/py/model.py index 0e9433799..11189dc2e 100644 --- a/src/lava/magma/core/model/py/model.py +++ b/src/lava/magma/core/model/py/model.py @@ -128,7 +128,7 @@ def _get_var(self): for value in var_iter: data_port.send(enum_to_np(value, np.float64)) elif isinstance(var, csr_matrix): - dst, src, values = find(var, explicit_zeros=True) + _, _, values = find(var, explicit_zeros=True) num_items = var.data.size data_port.send(enum_to_np(num_items)) for value in values: @@ -242,7 +242,6 @@ def add_ports_for_polling(self): """ Add various ports to poll for communication on ports """ - pass def join(self): """ @@ -257,7 +256,6 @@ def on_var_update(self): """This method is called if a Var is updated. It can be used as callback function to calculate dependent changes.""" - pass class PyLoihiProcessModel(AbstractPyProcessModel): diff --git a/src/lava/magma/core/model/py/ports.py b/src/lava/magma/core/model/py/ports.py index b3b1d31b9..49de9f5af 100644 --- a/src/lava/magma/core/model/py/ports.py +++ b/src/lava/magma/core/model/py/ports.py @@ -51,7 +51,6 @@ def csp_ports(self) -> ty.List[AbstractCspPort]: ------- A list of all CSP Ports connected to the PyPort. """ - pass class AbstractPyIOPort(AbstractPyPort): diff --git a/src/lava/magma/core/process/ports/ports.py b/src/lava/magma/core/process/ports/ports.py index ad4eb66d8..e221bf839 100644 --- a/src/lava/magma/core/process/ports/ports.py +++ b/src/lava/magma/core/process/ports/ports.py @@ -2,6 +2,7 @@ # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ +from __future__ import annotations import typing as ty from abc import ABC, abstractmethod import math @@ -383,8 +384,6 @@ class AbstractIOPort(AbstractPort): type hierarchy needed for validating connections. 
""" - pass - class AbstractRVPort(AbstractPort): """Abstract base class for RefPorts and VarPorts. @@ -392,8 +391,6 @@ class AbstractRVPort(AbstractPort): type hierarchy needed for validating connections. """ - pass - class AbstractSrcPort(ABC): """Interface for source ports such as OutPorts and RefPorts from which @@ -402,8 +399,6 @@ class AbstractSrcPort(ABC): type hierarchy needed for validating connections. """ - pass - class AbstractDstPort(ABC): """Interface for destination ports such as InPorts and VarPorts in which @@ -412,8 +407,6 @@ class AbstractDstPort(ABC): type hierarchy needed for validating connections. """ - pass - class OutPort(AbstractIOPort, AbstractSrcPort): """Output ports are members of a Lava Process and can be connected to @@ -635,7 +628,7 @@ def get_dst_vars(self) -> ty.List[Var]: return [ty.cast(VarPort, p).var for p in self.get_dst_ports()] @staticmethod - def create_implicit_var_port(var: Var) -> "ImplicitVarPort": + def create_implicit_var_port(var: Var) -> ImplicitVarPort: """Creates and returns an ImplicitVarPort for the given Var.""" # Create a VarPort to wrap Var vp = ImplicitVarPort(var) @@ -751,7 +744,9 @@ def connect_from( class ImplicitVarPort(VarPort): """Sub class for VarPort to identify implicitly created VarPorts when a RefPort connects directly to a Var.""" - pass + + def __init__(self, var: Var) -> None: + super().__init__(var) class AbstractVirtualPort(AbstractPort): @@ -828,7 +823,7 @@ class ReshapePort(AbstractVirtualPort): def __init__(self, new_shape: ty.Tuple[int, ...], old_shape: ty.Tuple[int, ...]): - AbstractPort.__init__(self, new_shape) + super().__init__(new_shape) self.old_shape = old_shape def get_transform_func_fwd(self) -> ft.partial: @@ -863,7 +858,7 @@ class ConcatPort(AbstractVirtualPort): tensor-valued data array from the derived to the new shape.""" def __init__(self, ports: ty.List[AbstractPort], axis: int): - AbstractPort.__init__(self, self._get_new_shape(ports, axis)) + super().__init__(self._get_new_shape(ports, axis)) self._connect_backward( ports, AbstractPort, assert_same_shape=False, assert_same_type=True ) @@ -918,7 +913,7 @@ def __init__(self, new_shape: ty.Tuple[int, ...], axes: ty.Tuple[int, ...]): self.axes = axes - AbstractPort.__init__(self, new_shape) + super().__init__(new_shape) def get_transform_func_fwd(self) -> ft.partial: """Returns a function pointer that implements the forward (fwd) @@ -943,18 +938,3 @@ def get_transform_func_bwd(self) -> ft.partial: function_pointer : functools.partial a function pointer that can be applied to incoming data""" return ft.partial(np.transpose, axes=np.argsort(self.axes)) - - -class ReIndexPort(AbstractVirtualPort): - """A ReIndexPort is a virtual port that allows to re-index the elements - of a port before connecting to another port. - It is used by the compiler to map the indices of the underlying - tensor-valued data array from the derived to the new shape. 
- - Example: - out_port = OutPort((2, 2)) - in_port = InPort((2, 2)) - out_port.reindex([3, 1, 0, 2]).connect(in_port) - """ - - pass diff --git a/src/lava/magma/core/process/ports/reduce_ops.py b/src/lava/magma/core/process/ports/reduce_ops.py index 4d9e99ad7..709d77367 100644 --- a/src/lava/magma/core/process/ports/reduce_ops.py +++ b/src/lava/magma/core/process/ports/reduce_ops.py @@ -9,11 +9,7 @@ class AbstractReduceOp(ABC): """Reduce operations are required by InPorts to specify how date from multiple OutPorts connected to the same InPorts gets integrated.""" - pass - class ReduceSum(AbstractReduceOp): """ReduceOp to indicate that multiple inputs to same InPort should be added.""" - - pass diff --git a/src/lava/magma/core/process/process.py b/src/lava/magma/core/process/process.py index aa533fd2e..55840292b 100644 --- a/src/lava/magma/core/process/process.py +++ b/src/lava/magma/core/process/process.py @@ -18,7 +18,6 @@ from lava.magma.core.run_conditions import AbstractRunCondition from lava.magma.core.run_configs import RunConfig from lava.magma.runtime.runtime import Runtime -from lava.magma.runtime.runtime_services.enums import LoihiVersion if ty.TYPE_CHECKING: from lava.magma.core.model.model import AbstractProcessModel @@ -223,7 +222,6 @@ def __del__(self): def __enter__(self): """Executed when Process enters a "with" block of a context manager.""" - pass def __exit__(self, exc_type, exc_val, exc_tb): """Stop the runtime when exiting "with" block of a context manager.""" @@ -283,7 +281,7 @@ def runtime(self, value): def register_sub_procs(self, procs: ty.Dict[str, AbstractProcess]): """Registers other processes as sub processes of this process.""" - for name, p in procs.items(): + for p in procs.values(): if not isinstance(p, AbstractProcess): raise AssertionError p.parent_proc = self diff --git a/src/lava/magma/core/resources.py b/src/lava/magma/core/resources.py index 28bc102e7..3419ae1b8 100644 --- a/src/lava/magma/core/resources.py +++ b/src/lava/magma/core/resources.py @@ -13,89 +13,73 @@ class AbstractResource(ABC): Each ProcessModel lists its required hardware resources with the @requires decorator. 
""" - pass # Compute resources ------------------------------------------------------------ class AbstractComputeResource(AbstractResource): """A compute resource, for example a particular type of neuromorphic processor or CPU.""" - pass class CPU(AbstractComputeResource): """A central processing unit on a regular computer or laptop.""" - pass class HostCPU(AbstractComputeResource): """A central processing unit on a special host system that holds neuromorphic devices.""" - pass class GPU(AbstractComputeResource): """A graphical processing unit.""" - pass class ECPU(AbstractComputeResource): """An embedded central processing unit that is part of a neuromorphic chip.""" - pass class LMT(ECPU): """A Lakemont embedded central processing unit.""" - pass class PB(ECPU): """A Powell Bute embedded central processing unit.""" - pass class NeuroCore(AbstractComputeResource): """A neuromorphic core.""" - pass class Loihi1NeuroCore(NeuroCore): """A neuromorphic core on a Loihi 1 chip.""" - pass class Loihi2NeuroCore(NeuroCore): """A neuromorphic core on a Loihi 2 chip.""" - pass # Peripheral resources --------------------------------------------------------- class AbstractPeripheralResource(AbstractResource): """A hardware resource that is a peripheral device.""" - pass class DVS(AbstractPeripheralResource): """An event-based dynamic vision sensor (DVS).""" - pass class HardDrive(AbstractPeripheralResource): """A hard drive in a computer.""" - pass class HeadNodeHardDrive(AbstractPeripheralResource): """A hard drive attached to a HeadNode (the node on which a user executes code).""" - pass # Nodes ------------------------------------------------------------------------ class AbstractNode(ABC): """A node is a resource that has other compute or peripheral resources.""" - pass class GenericNode(AbstractNode): @@ -112,7 +96,6 @@ class HeadNode(GenericNode): class Loihi1System(AbstractNode): """A neuromorphic system that carries Loihi 1 chips.""" - pass class KapohoBay(Loihi1System): @@ -133,7 +116,6 @@ class Pohoiki(Loihi1System): class Loihi2System(AbstractNode): """A neuromorphic system that carries Loihi 2 chips.""" - pass class OheoGulch(Loihi2System): diff --git a/src/lava/magma/core/run_configs.py b/src/lava/magma/core/run_configs.py index f7144ce4d..1bf6ffda0 100644 --- a/src/lava/magma/core/run_configs.py +++ b/src/lava/magma/core/run_configs.py @@ -79,11 +79,9 @@ def __init__(self, def exclude_nodes(self, nodes: ty.List[AbstractNode]): """Excludes given nodes from consideration by compiler.""" - pass def require_nodes(self, nodes: ty.List[AbstractNode]): """Requires that compiler maps processes to given nodes.""" - pass def select(self, process: AbstractProcess, @@ -168,7 +166,7 @@ def __init__(self, self.exception_proc_model_map = {} def select(self, - proc: AbstractProcess, + process: AbstractProcess, proc_models: ty.List[ty.Type[AbstractProcessModel]]) \ -> ty.Type[AbstractProcessModel]: """ @@ -177,7 +175,7 @@ def select(self, Parameters ---------- - proc: AbstractProcess + process: AbstractProcess Process for which ProcessModel is selected proc_models: List[AbstractProcessModel] List of ProcessModels to select from @@ -191,9 +189,10 @@ def select(self, # ------------------------------ # Raise error if num_pm == 0: - raise AssertionError(f"[{self.__class__.__qualname__}]: No " - f"ProcessModels exist for Process " - f"{proc.name}::{proc.__class__.__qualname__}.") + raise AssertionError( + f"[{self.__class__.__qualname__}]: No ProcessModels exist for " + f"Process 
{process.name}::{process.__class__.__qualname__}." + ) # Required modules and helper functions from lava.magma.core.model.sub.model import AbstractSubProcessModel @@ -206,8 +205,8 @@ def _issubpm(pm: ty.Type[AbstractProcessModel]) -> bool: # ---------------------------- # We will simply return the ProcessModel class associated with a # Process class in the exceptions dictionary - if proc.__class__ in self.exception_proc_model_map: - return self.exception_proc_model_map[proc.__class__] + if process.__class__ in self.exception_proc_model_map: + return self.exception_proc_model_map[process.__class__] # Case 2: Only 1 PM found: # ----------------------- @@ -247,8 +246,8 @@ def _issubpm(pm: ty.Type[AbstractProcessModel]) -> bool: f"[{self.__class__.__qualname__}]: No " f"ProcessModels found with tag " f"'{self.select_tag}' for Process " - f"{proc.name}::" - f"{proc.__class__.__qualname__}.") + f"{process.name}::" + f"{process.__class__.__qualname__}.") # Case 3: Multiple PMs exist: # -------------------------- @@ -269,11 +268,13 @@ def _issubpm(pm: ty.Type[AbstractProcessModel]) -> bool: # Assumption: User doesn't care about tags. We return the first # SubProcessModel found if self.select_tag is None: - self.log.info(f"[{self.__class__.__qualname__}]: Using the" - f" first SubProcessModel " - f"{proc_models[sub_pm_idxs[0]].__qualname__} " - f"available for Process " - f"{proc.name}::{proc.__class__.__qualname__}.") + self.log.info( + f"[{self.__class__.__qualname__}]: Using the first " + f"SubProcessModel " + f"{proc_models[sub_pm_idxs[0]].__qualname__} " + f"available for Process " + f"{process.name}::{process.__class__.__qualname__}." + ) return proc_models[sub_pm_idxs[0]] # Case 3a(iii): User asked for a specific tag: # ------------------------------------------- @@ -286,8 +287,8 @@ def _issubpm(pm: ty.Type[AbstractProcessModel]) -> bool: raise AssertionError(f"[{self.__class__.__qualname__}]: No " f"ProcessModels found with tag " f"{self.select_tag} for Process " - f"{proc.name}::" - f"{proc.__class__.__qualname__}.") + f"{process.name}::" + f"{process.__class__.__qualname__}.") return proc_models[valid_sub_pm_idxs[0]] # Case 3b: User didn't ask for SubProcessModel: # -------------------------------------------- @@ -295,8 +296,8 @@ def _issubpm(pm: ty.Type[AbstractProcessModel]) -> bool: if len(leaf_pm_idxs) == 0: raise AssertionError(f"[{self.__class__.__qualname__}]: " f"No hardware-specific ProcessModels were " - f"found for Process {proc.name}::" - f"{proc.__class__.__qualname__}. " + f"found for Process {process.name}::" + f"{process.__class__.__qualname__}. 
" f"Try setting select_sub_proc_model=True.") # Case 3b(i): User didn't provide select_tag: # ------------------------------------------ @@ -307,7 +308,7 @@ def _issubpm(pm: ty.Type[AbstractProcessModel]) -> bool: f"Hardware-specific ProcessModel " f"{proc_models[leaf_pm_idxs[0]].__qualname__} " f"available for Process " - f"{proc.name}::{proc.__class__.__qualname__}.") + f"{process.name}::{process.__class__.__qualname__}.") return proc_models[leaf_pm_idxs[0]] # Case 3b(ii): User asked for a specific tag: # ------------------------------------------ @@ -320,8 +321,8 @@ def _issubpm(pm: ty.Type[AbstractProcessModel]) -> bool: raise AssertionError(f"[{self.__class__.__qualname__}]: No " f"ProcessModels found with tag " f"'{self.select_tag}' for Process " - f"{proc.name}::" - f"{proc.__class__.__qualname__}.") + f"{process.name}::" + f"{process.__class__.__qualname__}.") return proc_models[valid_leaf_pm_idxs[0]] def _is_hw_supported(self, pm: ty.Type[AbstractProcessModel]) -> bool: diff --git a/src/lava/magma/runtime/message_infrastructure/factory.py b/src/lava/magma/runtime/message_infrastructure/factory.py index 9531b6753..4aa392ce6 100644 --- a/src/lava/magma/runtime/message_infrastructure/factory.py +++ b/src/lava/magma/runtime/message_infrastructure/factory.py @@ -6,14 +6,14 @@ from lava.magma.runtime.message_infrastructure.multiprocessing import \ MultiProcessing -"""Factory class to create the messaging infrastructure""" - class MessageInfrastructureFactory: - """Creates the message infrastructure instance based on type""" + """Factory class to create the messaging infrastructure""" + @staticmethod def create(factory_type: ActorType): - """type of actor framework being chosen""" + """Creates the message infrastructure instance based on type + of actor framework being chosen.""" if factory_type == ActorType.MultiProcessing: return MultiProcessing() else: diff --git a/src/lava/magma/runtime/message_infrastructure/message_infrastructure_interface.py b/src/lava/magma/runtime/message_infrastructure/message_infrastructure_interface.py index 85106909d..7123e316e 100644 --- a/src/lava/magma/runtime/message_infrastructure/message_infrastructure_interface.py +++ b/src/lava/magma/runtime/message_infrastructure/message_infrastructure_interface.py @@ -13,40 +13,33 @@ from lava.magma.compiler.channels.interfaces import ChannelType, Channel from lava.magma.core.sync.domain import SyncDomain -"""A Message Infrastructure Interface which can create actors which would -participate in message passing/exchange, start and stop them as well as -declare the underlying Channel Infrastructure Class to be used for message -passing implementation.""" - class MessageInfrastructureInterface(ABC): - """Interface to provide the ability to create actors which can - communicate via message passing""" + """A Message Infrastructure Interface which can create actors which would + participate in message passing/exchange, start and stop them as well as + declare the underlying Channel Infrastructure Class to be used for message + passing implementation.""" + @abstractmethod def start(self): """Starts the messaging infrastructure""" - pass @abstractmethod def stop(self): """Stops the messaging infrastructure""" - pass @abstractmethod def build_actor(self, target_fn: ty.Callable, builder: ty.Union[ ty.Dict['AbstractProcess', 'PyProcessBuilder'], ty.Dict[ SyncDomain, 'RuntimeServiceBuilder']]): """Given a target_fn starts a system process""" - pass @property @abstractmethod def actors(self) -> ty.List[ty.Any]: """Returns a 
list of actors""" - pass @abstractmethod def channel_class(self, channel_type: ChannelType) -> ty.Type[Channel]: """Given the Channel Type, Return the Channel Implementation to be used during execution""" - pass diff --git a/src/lava/magma/runtime/message_infrastructure/nx.py b/src/lava/magma/runtime/message_infrastructure/nx.py index 1540f89b1..1ab70e319 100644 --- a/src/lava/magma/runtime/message_infrastructure/nx.py +++ b/src/lava/magma/runtime/message_infrastructure/nx.py @@ -17,21 +17,17 @@ class NxBoardMsgInterface(MessageInfrastructureInterface): @property def actors(self): """Returns a list of actors""" - pass def start(self): """Starts the shared memory manager""" - pass def build_actor(self, target_fn: ty.Callable, builder: ty.Union[ ty.Dict['AbstractProcess', 'PyProcessBuilder'], ty.Dict[ SyncDomain, 'RuntimeServiceBuilder']]) -> ty.Any: """Given a target_fn starts a system (os) process""" - pass def stop(self): """Stops the shared memory manager""" - pass def channel_class(self, channel_type: ChannelType) -> ty.Type[ChannelType]: """Given a channel type, returns the shared memory based class diff --git a/src/lava/magma/runtime/mgmt_token_enums.py b/src/lava/magma/runtime/mgmt_token_enums.py index 3c789cd74..bb0795fdf 100644 --- a/src/lava/magma/runtime/mgmt_token_enums.py +++ b/src/lava/magma/runtime/mgmt_token_enums.py @@ -2,12 +2,12 @@ # SPDX-License-Identifier: LGPL 2.1 or later # See: https://spdx.org/licenses/ +"""Defines message tokens for Actions (Commands) and Responses. Also defines +helper functions to convert scalar values to these message tokens.""" + import typing as ty import numpy as np -"""Defines message tokens for Actions (Commands) and Responses. Also defines -helper functions to convert scalar values to these message tokens""" - def enum_to_np(value: ty.Union[int, float], d_type: type = np.float64) -> np.array: diff --git a/src/lava/magma/runtime/runtime.py b/src/lava/magma/runtime/runtime.py index 906e7466e..1eb9b2ddd 100644 --- a/src/lava/magma/runtime/runtime.py +++ b/src/lava/magma/runtime/runtime.py @@ -7,12 +7,10 @@ import logging import sys import traceback -import typing import typing as ty import numpy as np from scipy.sparse import csr_matrix -from lava.magma.compiler.channels.pypychannel import CspRecvPort, CspSendPort from lava.magma.compiler.var_model import AbstractVarModel, LoihiSynapseVarModel from lava.magma.core.process.message_interface_enum import ActorType from lava.magma.runtime.message_infrastructure.factory import \ @@ -115,7 +113,7 @@ def __init__(self, loglevel: int = logging.WARNING): self.log = logging.getLogger(__name__) self.log.setLevel(loglevel) - self._run_cond: typing.Optional[AbstractRunCondition] = None + self._run_cond: ty.Optional[AbstractRunCondition] = None self._executable: Executable = exe self._messaging_infrastructure_type: ActorType = \ @@ -130,6 +128,7 @@ def __init__(self, self.runtime_to_service: ty.Iterable[CspSendPort] = [] self.service_to_runtime: ty.Iterable[CspRecvPort] = [] self._open_ports: ty.List[AbstractCspPort] = [] + self.num_steps: int = 0 def __del__(self): """On destruction, terminate Runtime automatically to diff --git a/src/lava/magma/runtime/runtime_services/channel_broker/channel_broker.py b/src/lava/magma/runtime/runtime_services/channel_broker/channel_broker.py index 11db68cd7..12da2257c 100644 --- a/src/lava/magma/runtime/runtime_services/channel_broker/channel_broker.py +++ b/src/lava/magma/runtime/runtime_services/channel_broker/channel_broker.py @@ -18,7 +18,6 @@ try: from 
nxcore.arch.base.nxboard import NxBoard from nxcore.graph.channel import Channel - from nxcore.graph.processes.phase_enums import Phase from nxcore.graph.processes.embedded.embedded_snip import EmbeddedSnip except ImportError: class NxBoard: diff --git a/src/lava/magma/runtime/runtime_services/runtime_service.py b/src/lava/magma/runtime/runtime_services/runtime_service.py index 0a2604cb5..d62cffdbd 100644 --- a/src/lava/magma/runtime/runtime_services/runtime_service.py +++ b/src/lava/magma/runtime/runtime_services/runtime_service.py @@ -2,6 +2,28 @@ # SPDX-License-Identifier: LGPL 2.1 or later # See: https://spdx.org/licenses/ +"""The RuntimeService interface is responsible for +coordinating the execution of a group of process models belonging to a common +synchronization domain. The domain will follow a SyncProtocol or will be +asynchronous. The processes and their corresponding process models are +selected by the Runtime dependent on the RunConfiguration assigned at the +start of execution. For each group of processes which follow the same +protocol and execute on the same node, the Runtime creates a RuntimeService. +Each RuntimeService coordinates all actions and commands from the Runtime, +transmitting them to the processes under its management and +returning action and command responses back to Runtime. + +RuntimeService Types: + +PyRuntimeService: (Abstract Class) Coordinates process models executing on + the CPU and written in Python. + Concrete Implementations: + a. LoihiPyRuntimeService: Coordinates process models executing on + the CPU and written in Python and following the LoihiProtocol. + b. AsyncPyRuntimeService: Coordinates process models executing on + the CPU and written in Python and following the AsyncProtocol. +""" + import logging import typing as ty from abc import abstractmethod @@ -25,28 +47,6 @@ from lava.magma.runtime.runtime_services.interfaces import \ AbstractRuntimeService -"""The RuntimeService interface is responsible for -coordinating the execution of a group of process models belonging to a common -synchronization domain. The domain will follow a SyncProtocol or will be -asynchronous. The processes and their corresponding process models are -selected by the Runtime dependent on the RunConfiguration assigned at the -start of execution. For each group of processes which follow the same -protocol and execute on the same node, the Runtime creates a RuntimeService. -Each RuntimeService coordinates all actions and commands from the Runtime, - transmitting them to the the processes under it's managment and -returning action and command responses back to Runtime. - -RuntimeService Types: - -PyRuntimeService: (Abstract Class) Coordinates process models executing on - the CPU and written in Python. - Concrete Implementations: - a. LoihiPyRuntimeService: Coordinates process models executing on - the CPU and written in Python and following the LoihiProtocol. - b. AsyncPyRuntimeService: Coordinates process models executing on - the CPU and written in Python and following the AsyncProtocol. -""" - class PyRuntimeService(AbstractRuntimeService): """Abstract RuntimeService for Python, it provides base methods @@ -78,7 +78,6 @@ def run(self): """Override this method to implement the runtime service. 
The run method is invoked upon start which called when the execution is started by the runtime.""" - pass def join(self): """Stop the necessary channels to coordinate with runtime and group @@ -98,7 +97,7 @@ def _relay_to_runtime_data_given_model_id(self, model_id: int): data_relay_port = self.service_to_runtime num_items = data_recv_port.recv() data_relay_port.send(num_items) - for i in range(int(num_items[0])): + for _ in range(int(num_items[0])): value = data_recv_port.recv() data_relay_port.send(value) @@ -113,7 +112,7 @@ def _relay_to_pm_data_given_model_id(self, model_id: int) -> MGMT_RESPONSE: num_items = data_recv_port.recv() data_relay_port.send(num_items) # Receive and relay data1, data2, ... - for i in range(int(num_items[0].item())): + for _ in range(int(num_items[0].item())): data_relay_port.send(data_recv_port.recv()) rsp = resp_port.recv() return rsp diff --git a/src/lava/proc/conv/utils.py b/src/lava/proc/conv/utils.py index 7441abccc..811ab0781 100644 --- a/src/lava/proc/conv/utils.py +++ b/src/lava/proc/conv/utils.py @@ -157,7 +157,7 @@ def output_shape(input_shape: Tuple[int, int, int], return x_out, y_out, out_channels -def conv(input: np.ndarray, +def conv(input_: np.ndarray, weight: np.ndarray, kernel_size: Tuple[int, int], stride: Tuple[int, int], @@ -168,7 +168,7 @@ def conv(input: np.ndarray, Parameters ---------- - input : 3 dimensional np array + input_ : 3 dimensional np array convolution input. weight : 4 dimensional np array convolution kernel weight. @@ -192,7 +192,7 @@ def conv(input: np.ndarray, # with torch.no_grad(): # this seems to cause problems output = F.conv2d( torch.unsqueeze( # torch expects a batch dimension NCHW - torch.FloatTensor(input.transpose([2, 1, 0])), + torch.FloatTensor(input_.transpose([2, 1, 0])), dim=0, ), torch.FloatTensor( @@ -209,13 +209,13 @@ def conv(input: np.ndarray, )[0].cpu().data.numpy().transpose([2, 1, 0]) else: output = conv_scipy( - input, weight, kernel_size, stride, padding, dilation, groups + input_, weight, kernel_size, stride, padding, dilation, groups ) return output.astype(weight.dtype) -def conv_scipy(input: np.ndarray, +def conv_scipy(input_: np.ndarray, weight: np.ndarray, kernel_size: Tuple[int, int], stride: Tuple[int, int], @@ -226,7 +226,7 @@ def conv_scipy(input: np.ndarray, Parameters ---------- - input : 3 dimensional np array + input_ : 3 dimensional np array convolution input. weight : 4 dimensional np array convolution kernel weight. @@ -246,7 +246,7 @@ def conv_scipy(input: np.ndarray, 3 dimensional np array convolution output """ - input_shape = input.shape + input_shape = input_.shape output = np.zeros( output_shape( input_shape, weight.shape[0], @@ -263,12 +263,12 @@ def conv_scipy(input: np.ndarray, dilated_weight[:, ::dilation[0], ::dilation[1], :] = weight input_padded = np.pad( - input, + input_, ((padding[0], padding[0]), (padding[1], padding[1]), (0, 0)), mode='constant', ) - if input.shape[-1] % groups != 0: + if input_.shape[-1] % groups != 0: raise Exception( f'Expected number of in_channels to be divisible by group.' f'Found {weight.shape[3] = } and {groups = }.' 
@@ -280,7 +280,7 @@ def conv_scipy(input: np.ndarray, ) k_grp = output.shape[2] // groups - c_grp = input.shape[2] // groups + c_grp = input_.shape[2] // groups for g in range(groups): for k in range(k_grp): for c in range(c_grp): diff --git a/src/lava/proc/dense/models.py b/src/lava/proc/dense/models.py index 4369733da..5bc69ce69 100644 --- a/src/lava/proc/dense/models.py +++ b/src/lava/proc/dense/models.py @@ -72,9 +72,10 @@ def __init__(self, proc_params): super().__init__(proc_params) # Flag to determine whether weights have already been scaled. self.weights_set = False + self.weight_exp: int = self.proc_params.get("weight_exp", 0) def run_spk(self): - self.weight_exp: int = self.proc_params.get("weight_exp", 0) + self.weight_exp = self.proc_params.get("weight_exp", 0) # Since this Process has no learning, weights are assumed to be static # and only require scaling on the first timestep of run_spk(). @@ -194,6 +195,9 @@ class AbstractPyDelayDenseModel(PyLoihiProcessModel): """Abstract Conn Process with Dense synaptic connections which incorporates delays into the Conn Process. """ + weights: np.ndarray = None + delays: np.ndarray = None + a_buff: np.ndarray = None def calc_act(self, s_in) -> np.ndarray: """ @@ -270,7 +274,7 @@ class PyDelayDenseModelFloat(AbstractPyDelayDenseModel): num_message_bits: np.ndarray = LavaPyType(np.ndarray, int, precision=5) def run_spk(self): - # The a_out sent on a each timestep is a buffered value from dendritic + # The a_out sent on each timestep is a buffered value from dendritic # accumulation at timestep t-1. This prevents deadlocking in # networks with recurrent connectivity structures. self.a_out.send(self.a_buff[:, 0]) diff --git a/src/lava/proc/io/encoder.py b/src/lava/proc/io/encoder.py index 6a2fe39e7..3e284ce85 100644 --- a/src/lava/proc/io/encoder.py +++ b/src/lava/proc/io/encoder.py @@ -15,7 +15,6 @@ from lava.magma.core.resources import HostCPU from lava.magma.core.decorator import implements, requires, tag from lava.magma.core.model.py.model import PyLoihiProcessModel -from lava.proc.sdn.models import AbstractDeltaModel @unique diff --git a/src/lava/proc/lif/process.py b/src/lava/proc/lif/process.py index 426f9b216..767adf3c9 100644 --- a/src/lava/proc/lif/process.py +++ b/src/lava/proc/lif/process.py @@ -5,10 +5,7 @@ import numpy as np import typing as ty -from lava.magma.core.learning.learning_rule import ( - LoihiLearningRule, - Loihi2FLearningRule, -) +from lava.magma.core.learning.learning_rule import Loihi2FLearningRule from lava.magma.core.process.process import LogConfig, AbstractProcess from lava.magma.core.process.variable import Var from lava.magma.core.process.ports.ports import InPort, OutPort diff --git a/src/lava/proc/monitor/process.py b/src/lava/proc/monitor/process.py index b907c4103..0c8b4d17a 100644 --- a/src/lava/proc/monitor/process.py +++ b/src/lava/proc/monitor/process.py @@ -2,7 +2,6 @@ # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ -import matplotlib.pyplot as plt from lava.magma.core.process.process import AbstractProcess from lava.magma.core.process.variable import Var from lava.magma.core.process.ports.ports import InPort, OutPort, RefPort @@ -131,23 +130,19 @@ def probe(self, target, num_steps): # Create names for Ports/Vars to be created in Monitor process for # probing purposes. Names are given incrementally each time probe(..) # method is called. 
- self.new_ref_port_name = "ref_port_" + \ - str(self.proc_params["n_ref_ports"]) - self.new_var_read_name = "var_read_" + \ - str(self.proc_params["n_ref_ports"]) - self.new_in_port_name = "in_port_" + \ - str(self.proc_params["n_in_ports"]) - self.new_out_read_name = "out_read_" + \ - str(self.proc_params["n_in_ports"]) + new_ref_port_name = f"ref_port_{self.proc_params['n_ref_ports']}" + new_var_read_name = f"var_read_{self.proc_params['n_ref_ports']}" + new_in_port_name = f"in_port_{self.proc_params['n_in_ports']}" + new_out_read_name = f"out_read_{self.proc_params['n_in_ports']}" # Create and set new Refport and corresponding Var to store data - setattr(self, self.new_ref_port_name, RefPort(shape=target.shape)) - setattr(self, self.new_var_read_name, + setattr(self, new_ref_port_name, RefPort(shape=target.shape)) + setattr(self, new_var_read_name, Var(shape=(num_steps,) + target.shape, init=0)) # Create and set new InPort and corresponding Var to store data - setattr(self, self.new_in_port_name, InPort(shape=target.shape)) - setattr(self, self.new_out_read_name, + setattr(self, new_in_port_name, InPort(shape=target.shape)) + setattr(self, new_out_read_name, Var(shape=(num_steps,) + target.shape, init=0)) # Add the names of new RefPort and Var_read name to proc_params dict @@ -179,11 +174,11 @@ def probe(self, target, num_steps): self.proc_params.overwrite("n_ref_ports", n_ref_ports + 1) # Connect newly created Refport to the var to be monitored - getattr(self, self.new_ref_port_name).connect_var(target) + getattr(self, new_ref_port_name).connect_var(target) # Add the name of probed Var and its process to the target_names - self.target_names[self.new_var_read_name] = [target.process.name, - target.name] + self.target_names[new_var_read_name] = [target.process.name, + target.name] # If target to be monitored is an OutPort elif isinstance(target, OutPort): @@ -192,11 +187,11 @@ def probe(self, target, num_steps): self.proc_params.overwrite("n_in_ports", n_in_ports + 1) # Connect newly created InPort from the OutPort to be monitored - getattr(self, self.new_in_port_name).connect_from(target) + getattr(self, new_in_port_name).connect_from(target) # Add the name of OutPort and its process to the target_names - self.target_names[self.new_out_read_name] = [target.process.name, - target.name] + self.target_names[new_out_read_name] = [target.process.name, + target.name] # If target is an InPort raise a Type error, as monitoring InPorts is # not supported yet diff --git a/src/lava/proc/sparse/models.py b/src/lava/proc/sparse/models.py index 75bd43206..bb79e10b6 100644 --- a/src/lava/proc/sparse/models.py +++ b/src/lava/proc/sparse/models.py @@ -4,7 +4,6 @@ import numpy as np from scipy.sparse import csr_matrix, spmatrix, vstack, find -import warnings from lava.magma.core.model.py.connection import ( LearningConnectionModelFloat, LearningConnectionModelBitApproximate, @@ -209,6 +208,9 @@ class AbstractPyDelaySparseModel(PyLoihiProcessModel): """Abstract Conn Process with Sparse synaptic connections which incorporates delays into the Conn Process. 
""" + weights: csr_matrix = None + delays: csr_matrix = None + a_buff: np.ndarray = None def calc_act(self, s_in) -> np.ndarray: """ diff --git a/src/lava/proc/sparse/process.py b/src/lava/proc/sparse/process.py index cbc9c3117..d8c52c7ce 100644 --- a/src/lava/proc/sparse/process.py +++ b/src/lava/proc/sparse/process.py @@ -3,7 +3,7 @@ # See: https://spdx.org/licenses/ import numpy as np -from scipy.sparse import spmatrix, csr_matrix +from scipy.sparse import spmatrix import typing as ty from lava.magma.core.process.process import AbstractProcess, LogConfig @@ -260,7 +260,7 @@ def __init__(self, # Variables self.delays = Var(shape=shape, init=delays) - self.a_buff = Var(shape=(shape[0], max_delay + 1) , init=0) + self.a_buff = Var(shape=(shape[0], max_delay + 1), init=0) @staticmethod def _validate_delays(weights: spmatrix, delays: spmatrix) -> None: diff --git a/src/lava/proc/spiker/models.py b/src/lava/proc/spiker/models.py index 17352767f..af5b7d5d9 100644 --- a/src/lava/proc/spiker/models.py +++ b/src/lava/proc/spiker/models.py @@ -5,7 +5,7 @@ import numpy as np from lava.magma.core.decorator import implements, requires from lava.magma.core.model.py.model import PyLoihiProcessModel -from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.ports import PyOutPort from lava.magma.core.model.py.type import LavaPyType from lava.magma.core.resources import CPU from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol diff --git a/src/lava/utils/dataloader/mnist.py b/src/lava/utils/dataloader/mnist.py index c7b9e4eaa..724b25a34 100644 --- a/src/lava/utils/dataloader/mnist.py +++ b/src/lava/utils/dataloader/mnist.py @@ -56,7 +56,8 @@ def download_mnist(path=os.path.join(os.path.dirname(__file__), 'temp')): f.write(res.read()) break else: - raise "Url does not start with http" + raise ValueError(f"Specified URL ({url}) does not " + "start with 'http'.") except urllib.error.URLError as exception: err = exception continue diff --git a/tests/lava/magma/compiler/subcompilers/py/test_pyproc_compiler.py b/tests/lava/magma/compiler/subcompilers/py/test_pyproc_compiler.py index 18b968c9c..e0f9c7c03 100644 --- a/tests/lava/magma/compiler/subcompilers/py/test_pyproc_compiler.py +++ b/tests/lava/magma/compiler/subcompilers/py/test_pyproc_compiler.py @@ -54,7 +54,6 @@ def __init__(self, **kwargs): class MockRuntimeService: __name__ = "MockRuntimeService" - pass # Define minimal Protocol to be implemented. @@ -124,7 +123,7 @@ def test_compile_py_proc_models(self): # There should be three PyProcessBuilders... self.assertEqual(len(builders), 3) - for proc, builder in builders.items(): + for builder in builders.values(): self.assertIsInstance(builder, PyProcessBuilder) # ... one for each Process. 
b1 = ty.cast(PyProcessBuilder, builders[p1]) diff --git a/tests/lava/magma/compiler/subcompilers/test_channel_builders_factory.py b/tests/lava/magma/compiler/subcompilers/test_channel_builders_factory.py index 805feecfb..41dded64a 100644 --- a/tests/lava/magma/compiler/subcompilers/test_channel_builders_factory.py +++ b/tests/lava/magma/compiler/subcompilers/test_channel_builders_factory.py @@ -2,13 +2,11 @@ # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ -from enum import Enum import logging import typing as ty import unittest from unittest.mock import Mock -import numpy as np from lava.magma.compiler.builders.channel_builder import ChannelBuilderMp from lava.magma.compiler.channel_map import ChannelMap, Payload, PortPair from lava.magma.compiler.channels.interfaces import ChannelType @@ -16,9 +14,7 @@ from lava.magma.compiler.subcompilers.channel_builders_factory import \ ChannelBuildersFactory from lava.magma.compiler.utils import LoihiPortInitializer, PortInitializer -from lava.magma.compiler.var_model import LoihiAddress, LoihiVarModel from lava.magma.core.decorator import implements, requires -from lava.magma.core.model.model import AbstractProcessModel from lava.magma.core.model.interfaces import AbstractPortImplementation from lava.magma.core.model.py.model import AbstractPyProcessModel from lava.magma.core.model.py.ports import (PyInPort, PyOutPort, PyRefPort, @@ -30,7 +26,7 @@ from lava.magma.core.process.ports.reduce_ops import ReduceSum from lava.magma.core.process.process import AbstractProcess from lava.magma.core.process.variable import Var -from lava.magma.core.resources import CPU, LMT, NeuroCore +from lava.magma.core.resources import CPU from lava.magma.core.run_configs import RunConfig from lava.magma.core.sync.protocol import AbstractSyncProtocol @@ -67,7 +63,6 @@ def __init__(self, **kwargs): class MockRuntimeService: __name__ = "MockRuntimeService" - pass # Define minimal Protocol to be implemented @@ -153,7 +148,7 @@ def __init__( def select(self, proc, proc_models): py_proc_model = None sub_proc_model = None - c_proc_model = None + # Find PyProcModel or SubProcModel for pm in proc_models: if issubclass(pm, AbstractSubProcessModel): @@ -161,7 +156,6 @@ def select(self, proc, proc_models): if issubclass(pm, AbstractPyProcessModel): py_proc_model = pm # Make selection - if self.select_sub_proc_model and sub_proc_model: return sub_proc_model if py_proc_model and not self.select_lmt: @@ -357,9 +351,6 @@ def test_create_channel_builders(self): channel_builders = self.factory.from_channel_map(channel_map, self.cfg) # This should result in 5 channel builders (one for each arrow above) - from lava.magma.compiler.builders.channel_builder import \ - ChannelBuilderMp - self.assertEqual(len(channel_builders), 5) for cb in channel_builders: self.assertIsInstance(cb, ChannelBuilderMp) diff --git a/tests/lava/magma/compiler/test_channel_map.py b/tests/lava/magma/compiler/test_channel_map.py index abce36844..43b2262c5 100644 --- a/tests/lava/magma/compiler/test_channel_map.py +++ b/tests/lava/magma/compiler/test_channel_map.py @@ -33,7 +33,6 @@ def __init__(self, **kwargs): class MockRuntimeService: __name__ = "MockRuntimeService" - pass # Define minimal Protocol to be implemented diff --git a/tests/lava/magma/compiler/test_compiler.py b/tests/lava/magma/compiler/test_compiler.py index 8d750218b..1046253c3 100644 --- a/tests/lava/magma/compiler/test_compiler.py +++ b/tests/lava/magma/compiler/test_compiler.py @@ -66,7 +66,6 @@ def __init__(self, **kwargs): 
class MockRuntimeService: __name__ = "MockRuntimeService" - pass # Define minimal Protocol to be implemented @@ -436,9 +435,6 @@ def test_extract_proc_builders(self) -> None: # Create some mock Processes. proc1 = Mock(spec_set=AbstractProcess) proc2 = Mock(spec_set=AbstractProcess) - proc3 = Mock(spec_set=AbstractProcess) - proc4 = Mock(spec_set=AbstractProcess) - proc5 = Mock(spec_set=AbstractProcess) # Create some Builders. py_builder1 = PyProcessBuilder(AbstractPyProcessModel, 0) diff --git a/tests/lava/magma/compiler/test_node.py b/tests/lava/magma/compiler/test_node.py index ea2bcfece..c23b84a07 100644 --- a/tests/lava/magma/compiler/test_node.py +++ b/tests/lava/magma/compiler/test_node.py @@ -12,8 +12,6 @@ class MockProcess(AbstractProcess): """A mock process""" - pass - class TestNode(unittest.TestCase): def test_node_creation(self): diff --git a/tests/lava/magma/core/learning/test_learning_rule.py b/tests/lava/magma/core/learning/test_learning_rule.py index 7b1384f00..4c2956553 100644 --- a/tests/lava/magma/core/learning/test_learning_rule.py +++ b/tests/lava/magma/core/learning/test_learning_rule.py @@ -7,30 +7,15 @@ from lava.magma.core.learning.learning_rule import ( LoihiLearningRule, - Loihi2FLearningRule, - Loihi3FLearningRule, + Loihi2FLearningRule ) from lava.magma.core.learning.product_series import ProductSeries from lava.magma.core.run_conditions import RunSteps from lava.magma.core.run_configs import Loihi2SimCfg -from lava.proc.lif.process import LIF, LearningLIF +from lava.proc.lif.process import LIF from lava.proc.dense.process import LearningDense, Dense from lava.proc.monitor.process import Monitor from lava.proc.io.source import RingBuffer as SpikeIn -from lava.magma.core.model.py.neuron import ( - LearningNeuronModelFloat, - LearningNeuronModelFixed, -) -from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol -from lava.magma.core.model.py.ports import PyInPort, PyOutPort -from lava.magma.core.model.py.type import LavaPyType -from lava.magma.core.resources import CPU -from lava.magma.core.decorator import implements, requires, tag -from lava.proc.lif.models import ( - AbstractPyLifModelFloat, - AbstractPyLifModelFixed, -) -from lava.proc.io.source import RingBuffer as SpikeIn def create_network( diff --git a/tests/lava/magma/core/learning/test_random.py b/tests/lava/magma/core/learning/test_random.py index ecbc18cdd..21e6ad52b 100644 --- a/tests/lava/magma/core/learning/test_random.py +++ b/tests/lava/magma/core/learning/test_random.py @@ -3,7 +3,6 @@ # See: https://spdx.org/licenses/ import unittest -import numpy as np from lava.magma.core.learning.random import TraceRandom, ConnVarRandom diff --git a/tests/lava/magma/core/model/py/test_model.py b/tests/lava/magma/core/model/py/test_model.py index 34bab40d8..2af801fe6 100644 --- a/tests/lava/magma/core/model/py/test_model.py +++ b/tests/lava/magma/core/model/py/test_model.py @@ -13,7 +13,6 @@ PyLoihiProcessModel, PyLoihiModelToPyAsyncModel ) -from lava.proc import io from lava.proc.lif.process import LIF from lava.proc.sdn.process import SigmaDelta from lava.proc.dense.process import Dense @@ -127,10 +126,10 @@ def test_sdn_dense_sdn(self): 'state_exp': 6} dense_params = {'weights': weights, 'num_message_bits': 16} - input = SigmaDelta(**input_params) + input_ = SigmaDelta(**input_params) output = SigmaDelta(**output_params) dense = Dense(**dense_params) - input.s_out.connect(dense.s_in) + input_.s_out.connect(dense.s_in) dense.a_out.connect(output.a_in) run_cnd = RunSteps(num_steps=2) @@ 
-141,12 +140,12 @@ def test_sdn_dense_sdn(self): output.stop() # Run the same network in async mode. - # Currently we don't allow the same process to run twice + # Currently, we don't allow the same process to run twice # Copy the model used for pyproc model - input = SigmaDelta(**input_params) + input_ = SigmaDelta(**input_params) output = SigmaDelta(**output_params) dense = Dense(**dense_params) - input.s_out.connect(dense.s_in) + input_.s_out.connect(dense.s_in) dense.a_out.connect(output.a_in) output.run(condition=run_cnd, diff --git a/tests/lava/magma/core/model/test_decorators.py b/tests/lava/magma/core/model/test_decorators.py index 1c92e1665..a40cd74df 100644 --- a/tests/lava/magma/core/model/test_decorators.py +++ b/tests/lava/magma/core/model/test_decorators.py @@ -48,21 +48,21 @@ class TestProtocol(AbstractSyncProtocol): # We must pass a class, not an instance or anything else with self.assertRaises(TypeError): @implements(proc=TestProc(), protocol=TestProtocol) # type: ignore - class TestModel(AbstractProcessModel): # type: ignore + class TestModel(AbstractProcessModel): # pylint: disable=W0612 def run(self): pass # Same for 'protocol' with self.assertRaises(TypeError): @implements(proc=TestProc, protocol=TestProtocol()) # type: ignore - class TestModel2(AbstractProcessModel): + class TestModel2(AbstractProcessModel): # pylint: disable=W0612 def run(self): pass # And we can only decorate a subclass of 'AbstractProcessModel' with self.assertRaises(AssertionError): @implements(proc=TestProc, protocol=TestProtocol) - class TestProcess2(AbstractProcess): + class TestProcess2(AbstractProcess): # pylint: disable=W0612 pass def test_implements_subclassing(self): @@ -125,7 +125,7 @@ def run(self): # attributes must fail with self.assertRaises(AssertionError): @implements(protocol=TestProtocol2) - class SubTestModel(TestModel): + class SubTestModel(TestModel): # pylint: disable=W0612 pass def test_requires(self): @@ -190,7 +190,7 @@ def test_requires_failing(self): # We must decorate a ProcessModel and nothing else: with self.assertRaises(AssertionError): @requires(CPU) - class Something(AbstractProcess): + class Something(AbstractProcess): # pylint: disable=W0612 pass # We must decorate a ProcessModel with an 'AbstractResource' class @@ -198,7 +198,7 @@ class Something(AbstractProcess): with self.assertRaises(TypeError): @requires(CPU()) # type: ignore - class TestModel(AbstractProcessModel): + class TestModel(AbstractProcessModel): # pylint: disable=W0612 def run(self): pass @@ -241,20 +241,20 @@ def test_tags_failing(self): # Only decorating ProcessModels is allowed with self.assertRaises(AssertionError): @tag('some-tag') - class SomeClass(AbstractProcess): + class SomeClass(AbstractProcess): # pylint: disable=W0612 pass # Tags should be just comma-separated keywords with self.assertRaises(AssertionError): @tag('keyword1', ['keyword2', 'keyword3']) - class TestModel2(AbstractProcessModel): + class TestModel2(AbstractProcessModel): # pylint: disable=W0612 def run(self): pass # Tags should be just comma-separated keywords with self.assertRaises(AssertionError): @tag('tag1', [['tag2'], 'tag4']) - class SomeOtherClass(AbstractProcess): + class SomeOtherClass(AbstractProcess): # pylint: disable=W0612 pass diff --git a/tests/lava/magma/core/process/test_lif_dense_lif.py b/tests/lava/magma/core/process/test_lif_dense_lif.py index 591214967..798c47fb9 100644 --- a/tests/lava/magma/core/process/test_lif_dense_lif.py +++ b/tests/lava/magma/core/process/test_lif_dense_lif.py @@ -29,11 +29,11 @@ 
def select(self, process, proc_models): class TestLifDenseLif(unittest.TestCase): def test_lif_dense_lif(self): - self.lif1 = LIF(shape=(1,)) - self.dense = Dense(weights=np.eye(1)) - self.lif2 = LIF(shape=(1,)) - self.lif1.out_ports.s_out.connect(self.dense.in_ports.s_in) - self.dense.out_ports.a_out.connect(self.lif2.in_ports.a_in) - self.lif1.run(condition=RunSteps(num_steps=10), - run_cfg=SimpleRunConfig(sync_domains=[])) - self.lif1.stop() + lif1 = LIF(shape=(1,)) + dense = Dense(weights=np.eye(1)) + lif2 = LIF(shape=(1,)) + lif1.out_ports.s_out.connect(dense.in_ports.s_in) + dense.out_ports.a_out.connect(lif2.in_ports.a_in) + lif1.run(condition=RunSteps(num_steps=10), + run_cfg=SimpleRunConfig(sync_domains=[])) + lif1.stop() diff --git a/tests/lava/magma/core/process/test_process.py b/tests/lava/magma/core/process/test_process.py index ae97b193b..aad895c06 100644 --- a/tests/lava/magma/core/process/test_process.py +++ b/tests/lava/magma/core/process/test_process.py @@ -9,7 +9,6 @@ from lava.magma.compiler.executable import Executable from lava.magma.core.decorator import implements, requires from lava.magma.core.model.py.model import AbstractPyProcessModel -from lava.magma.core.model.sub.model import AbstractSubProcessModel from lava.magma.core.process.ports.ports import ( InPort, diff --git a/tests/lava/magma/runtime/test_async_protocol.py b/tests/lava/magma/runtime/test_async_protocol.py index 52ff37024..c430282c9 100644 --- a/tests/lava/magma/runtime/test_async_protocol.py +++ b/tests/lava/magma/runtime/test_async_protocol.py @@ -69,7 +69,7 @@ def test_async_process_model(self): Verifies the working of Asynchronous Process """ process = AsyncProcess1(shape=(2, 2)) - simple_sync_domain = SyncDomain("simple", AsyncProtocol(), [process]) + _ = SyncDomain("simple", AsyncProtocol(), [process]) process.run(condition=RunContinuous(), run_cfg=Loihi2SimCfg()) process.stop() @@ -79,7 +79,7 @@ def test_async_process_model_pause(self): effect """ process = AsyncProcess1(shape=(2, 2)) - simple_sync_domain = SyncDomain("simple", AsyncProtocol(), [process]) + _ = SyncDomain("simple", AsyncProtocol(), [process]) process.run(condition=RunContinuous(), run_cfg=Loihi2SimCfg()) process.pause() process.stop() @@ -90,7 +90,7 @@ def test_async_process_num_steps(self): implicitly passed as num_steps for the process. """ process = AsyncProcess2(shape=(2, 2)) - simple_sync_domain = SyncDomain("simple", AsyncProtocol(), [process]) + _ = SyncDomain("simple", AsyncProtocol(), [process]) process.run(condition=RunSteps(num_steps=10), run_cfg=Loihi2SimCfg()) process.stop() @@ -100,7 +100,7 @@ def test_async_process_get(self): of the variable after run finishes. 
""" process = AsyncProcess2(shape=(2, 2)) - simple_sync_domain = SyncDomain("simple", AsyncProtocol(), [process]) + _ = SyncDomain("simple", AsyncProtocol(), [process]) process.run(condition=RunSteps(num_steps=10), run_cfg=Loihi2SimCfg()) print(process.u.get()) process.stop() diff --git a/tests/lava/magma/runtime/test_get_set_non_determinism.py b/tests/lava/magma/runtime/test_get_set_non_determinism.py index 5c804faf8..a10002a20 100644 --- a/tests/lava/magma/runtime/test_get_set_non_determinism.py +++ b/tests/lava/magma/runtime/test_get_set_non_determinism.py @@ -41,7 +41,7 @@ class TestNonDeterminismUpdate(unittest.TestCase): def test_non_determinism_update(self): nb_runs = 10000 demo_process = DemoProcess(nb_runs=nb_runs) - for i in range(nb_runs): + for _ in range(nb_runs): demo_process.run(condition=RunSteps(num_steps=1), run_cfg=Loihi1SimCfg()) diff --git a/tests/lava/magma/runtime/test_loihi_with_async_protocol.py b/tests/lava/magma/runtime/test_loihi_with_async_protocol.py index 634b0be84..8c6865b18 100644 --- a/tests/lava/magma/runtime/test_loihi_with_async_protocol.py +++ b/tests/lava/magma/runtime/test_loihi_with_async_protocol.py @@ -18,11 +18,6 @@ from lava.magma.core.sync.protocols.async_protocol import AsyncProtocol from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol -""" -This test checks if Process with Loihi Protocol works properly with -process with Async Protocol. -""" - class AsyncProcessDest(AbstractProcess): def __init__(self, **kwargs): @@ -92,6 +87,10 @@ def run_spk(self): class TestProcess(unittest.TestCase): + """This test checks if Process with Loihi Protocol works properly with + process with Async Protocol. + """ + def test_async_with_loihi_protocol(self): """ Test is to send the data to AsyncProcessSrc from AsyncProcessSrc via diff --git a/tests/lava/proc/conv/test_utils.py b/tests/lava/proc/conv/test_utils.py index 4542973b5..ac1e8b2f5 100644 --- a/tests/lava/proc/conv/test_utils.py +++ b/tests/lava/proc/conv/test_utils.py @@ -36,7 +36,7 @@ def test_conv(self) -> None: kernel_size[0], kernel_size[1], in_channels // groups] weights = np.random.randint(256, size=weight_dims) - 128 - input = np.random.random( + input_ = np.random.random( ( # input needs to be a certain size # to make sure the output dimension is never negative @@ -45,13 +45,13 @@ def test_conv(self) -> None: + [in_channels] ) - out = utils.conv_scipy(input, weights, kernel_size, + out = utils.conv_scipy(input_, weights, kernel_size, stride, padding, dilation, groups) if compare: # if torch is available, compare against it. out_gt = F.conv2d( torch.unsqueeze( # torch expects a batch dimension NCHW - torch.FloatTensor(input.transpose([2, 1, 0])), + torch.FloatTensor(input_.transpose([2, 1, 0])), dim=0, ), torch.FloatTensor( @@ -70,7 +70,7 @@ def test_conv(self) -> None: error = np.abs(out - out_gt).mean() if error >= 1e-3: # small eps to account for float/double calc # Setting failed! Print out the dimensions for debugging. 
- print(f'{input.shape=}') + print(f'{input_.shape=}') print(f'{weights.shape=}') print(f'{kernel_size=}') print(f'{stride=}') diff --git a/tests/lava/proc/io/test_dataloader.py b/tests/lava/proc/io/test_dataloader.py index aeb371483..be53492c7 100644 --- a/tests/lava/proc/io/test_dataloader.py +++ b/tests/lava/proc/io/test_dataloader.py @@ -49,19 +49,19 @@ def __init__(self, shape: tuple) -> None: def __len__(self) -> int: return 10 - def __getitem__(self, id: int) -> Tuple[np.ndarray, int]: - data = np.arange(np.prod(self.shape)).reshape(self.shape) + id + def __getitem__(self, id_: int) -> Tuple[np.ndarray, int]: + data = np.arange(np.prod(self.shape)).reshape(self.shape) + id_ data = data % np.prod(self.shape) - label = id + label = id_ return data, label class SpikeDataset(DummyDataset): - def __getitem__(self, id: int) -> Tuple[np.ndarray, int]: - data = np.arange(np.prod(self.shape)).reshape(self.shape[::-1]) + id + def __getitem__(self, id_: int) -> Tuple[np.ndarray, int]: + data = np.arange(np.prod(self.shape)).reshape(self.shape[::-1]) + id_ data = data.transpose(np.arange(len(self.shape))[::-1]) % 13 data = data >= 10 - label = id + label = id_ return data, label diff --git a/tests/lava/proc/io/test_source_sink.py b/tests/lava/proc/io/test_source_sink.py index b800601d6..134706572 100644 --- a/tests/lava/proc/io/test_source_sink.py +++ b/tests/lava/proc/io/test_source_sink.py @@ -38,11 +38,11 @@ def test_source_sink(self) -> None: """Test whatever is being sent form source is received at sink.""" num_steps = 10 shape = (64, 64, 16) - input = np.random.randint(256, size=shape + (num_steps,)) - input -= 128 + input_ = np.random.randint(256, size=shape + (num_steps,)) + input_ -= 128 # input = 0.5 * input - source = SendProcess(data=input) + source = SendProcess(data=input_) sink = ReceiveProcess(shape=shape, buffer=num_steps) source.out_ports.s_out.connect(sink.in_ports.a_in) @@ -53,8 +53,8 @@ def test_source_sink(self) -> None: sink.stop() self.assertTrue( - np.all(output == input), + np.all(output == input_), f'Input and Ouptut do not match.\n' - f'{output[output!=input]=}\n' - f'{input[output!=input] =}\n' + f'{output[output!=input_]=}\n' + f'{input_[output!=input_] =}\n' ) diff --git a/tests/lava/proc/lif/test_models.py b/tests/lava/proc/lif/test_models.py index e5d03126a..5a8bd80e2 100644 --- a/tests/lava/proc/lif/test_models.py +++ b/tests/lava/proc/lif/test_models.py @@ -13,7 +13,7 @@ from lava.magma.core.process.process import AbstractProcess from lava.magma.core.process.variable import Var from lava.magma.core.resources import CPU -from lava.magma.core.run_configs import Loihi1SimCfg, Loihi2SimCfg, RunConfig +from lava.magma.core.run_configs import Loihi2SimCfg, RunConfig from lava.magma.core.run_conditions import RunSteps from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol from lava.proc.lif.process import LIF, LIFReset, TernaryLIF @@ -244,7 +244,7 @@ def test_float_pm_impulse_dv(self): rcfg = LifRunConfig(select_tag='floating_pt') lif_v = [] # Run 1 timestep at a time and collect state variable u - for j in range(num_steps): + for _ in range(num_steps): lif.run(condition=rcnd, run_cfg=rcfg) lif_v.append(lif.v.get()[0]) lif.stop() @@ -393,7 +393,6 @@ def test_bitacc_pm_scaling_of_bias(self): """ Tests fixed point LIF ProcessModel's scaling of threshold. 
""" - num_steps = 1 bias_mant = 2 ** 12 - 1 bias_exp = 5 # Set up high threshold and high bias current to check for potential diff --git a/tests/lava/proc/rf/test_models.py b/tests/lava/proc/rf/test_models.py index 50906f419..a89a66a67 100644 --- a/tests/lava/proc/rf/test_models.py +++ b/tests/lava/proc/rf/test_models.py @@ -67,15 +67,15 @@ def run_test( self, period: float, alpha: float, - input: np.ndarray, + input_: np.ndarray, state_exp: int = 0, decay_bits: int = 0, vth: float = 1, tag: str = 'floating_pt', ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: - input = np.int32(input.reshape(1, -1)) - num_steps = input.size - source = io.source.RingBuffer(data=input) + input_ = np.int32(input_.reshape(1, -1)) + num_steps = input_.size + source = io.source.RingBuffer(data=input_) rf = RF(shape=(1,), period=period, alpha=alpha, @@ -102,7 +102,7 @@ def run_test( imag = imag_monitor.get_data()[rf.name]["imag"] rf.stop() - return input, real, imag, s_out + return input_, real, imag, s_out def test_float_no_decay(self): """Verify that a neuron with no voltage decay spikes @@ -112,9 +112,9 @@ def test_float_no_decay(self): alpha = 0 num_steps = 100 - input = np.zeros(num_steps) - input[0] = 1.1 # spike at first timestep - _, _, _, s_out = self.run_test(period, alpha, input) + input_ = np.zeros(num_steps) + input_[0] = 1.1 # spike at first timestep + _, _, _, s_out = self.run_test(period, alpha, input_) # observe differences in spike times spike_idx = np.argwhere(s_out[0, :]) @@ -134,9 +134,9 @@ def test_float_decay(self): vth = 1.1 num_steps = 100 - input = np.zeros(num_steps) - input[0] = 1 # spike at first timestep - _, real, _, _ = self.run_test(period, alpha, input, vth=vth) + input_ = np.zeros(num_steps) + input_[0] = 1 # spike at first timestep + _, real, _, _ = self.run_test(period, alpha, input_, vth=vth) ideal_real = np.round((1 - alpha)**np.arange(num_steps), 6) round_real = np.round(real.flatten(), 6) @@ -155,10 +155,10 @@ def test_fixed_pm_no_decay(self): period = 10 num_steps = 100 - input = np.zeros(num_steps) - input[0] = 1 # spike at first timestep + input_ = np.zeros(num_steps) + input_[0] = 1 # spike at first timestep - _, _, _, s_out = self.run_test(period, alpha, input, vth=vth, + _, _, _, s_out = self.run_test(period, alpha, input_, vth=vth, state_exp=state_exp, decay_bits=decay_bits, tag="fixed_pt") @@ -173,7 +173,7 @@ def test_fixed_pm_no_decay(self): # Run Test RF Dynamics real, imag = rf_dynamics(0, 0, sin_decay, cos_decay, - input * (1 << state_exp), + input_ * (1 << state_exp), np.zeros(num_steps), decay_bits) @@ -197,10 +197,10 @@ def test_fixed_pm_decay1(self): period = 10 num_steps = 100 - input = np.zeros(num_steps) - input[0] = 2 # spike at first timestep + input_ = np.zeros(num_steps) + input_[0] = 2 # spike at first timestep - _, _, _, s_out = self.run_test(period, alpha, input, vth=vth, + _, _, _, s_out = self.run_test(period, alpha, input_, vth=vth, state_exp=state_exp, decay_bits=decay_bits, tag="fixed_pt") @@ -215,7 +215,7 @@ def test_fixed_pm_decay1(self): # Run Test RF Dynamics real, imag = rf_dynamics(0, 0, sin_decay, cos_decay, - input * (1 << state_exp), + input_ * (1 << state_exp), np.zeros(num_steps), decay_bits) @@ -248,12 +248,12 @@ def test_fixed_pm_decay2(self): cos_decay = int(cos_decay * (1 << decay_bits)) num_steps = 100 - input = np.zeros(num_steps) - input[0] = 1 # spike at first timestep + input_ = np.zeros(num_steps) + input_[0] = 1 # spike at first timestep decay_bits = 12 state_exp = 6 - _, real, _, _ = self.run_test(period, 
alpha, input, vth=vth, + _, real, _, _ = self.run_test(period, alpha, input_, vth=vth, state_exp=state_exp, decay_bits=decay_bits, tag="fixed_pt") diff --git a/tests/lava/proc/rf_iz/test_models.py b/tests/lava/proc/rf_iz/test_models.py index c1c8a56e4..12e5621db 100644 --- a/tests/lava/proc/rf_iz/test_models.py +++ b/tests/lava/proc/rf_iz/test_models.py @@ -20,15 +20,15 @@ def run_test( self, period: float, alpha: float, - input: np.ndarray, + input_: np.ndarray, state_exp: int = 0, decay_bits: int = 0, vth: float = 1, tag: str = 'floating_pt', ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: - input = input.reshape(1, -1) - num_steps = input.size - source = io.source.RingBuffer(data=input) + input_ = input_.reshape(1, -1) + num_steps = input_.size + source = io.source.RingBuffer(data=input_) rf = RF_IZ(shape=(1,), period=period, alpha=alpha, @@ -55,7 +55,7 @@ def run_test( imag = imag_monitor.get_data()[rf.name]["imag"] rf.stop() - return input, real, imag, s_out + return input_, real, imag, s_out def test_float_reset(self): """Ensure that spikes events are followed by proper rf_iz reset @@ -65,9 +65,9 @@ def test_float_reset(self): eps = 1e-5 num_steps = 100 - input = np.zeros(num_steps) - input[[0, 10, 20]] = 1 # Will ensure 3 spikes - _, real, imag, s_out = self.run_test(period, alpha, input) + input_ = np.zeros(num_steps) + input_[[0, 10, 20]] = 1 # Will ensure 3 spikes + _, real, imag, s_out = self.run_test(period, alpha, input_) s_out = s_out.flatten() == 1 # change to bool self.assertGreaterEqual(s_out.sum(), 1) # ensure network is spiking self.assertListEqual(real.flatten()[s_out].tolist(), @@ -84,9 +84,9 @@ def test_fixed_pm_reset(self): eps = 1 # in fixed point 1 is the smallest value we can have state_exp = 6 num_steps = 100 - input = np.zeros(num_steps) - input[[0, 10, 20]] = 1 # Will ensure 3 spikes - _, real, imag, s_out = self.run_test(period, alpha, input, + input_ = np.zeros(num_steps) + input_[[0, 10, 20]] = 1 # Will ensure 3 spikes + _, real, imag, s_out = self.run_test(period, alpha, input_, tag="fixed_pt", state_exp=state_exp, decay_bits=12) diff --git a/tests/lava/proc/sdn/test_models.py b/tests/lava/proc/sdn/test_models.py index 18ff73b56..76572c531 100644 --- a/tests/lava/proc/sdn/test_models.py +++ b/tests/lava/proc/sdn/test_models.py @@ -24,13 +24,13 @@ def run_test( num_steps: int, tag: str = 'fixed_pt' ) -> Tuple[np.ndarray, np.ndarray]: - input = np.sin(0.1 * np.arange(num_steps).reshape(1, -1)) + input_ = np.sin(0.1 * np.arange(num_steps).reshape(1, -1)) if tag == 'fixed_pt': - input *= (1 << 12) - input = input.astype(int) - input[:, 1:] -= input[:, :-1] + input_ *= (1 << 12) + input_ = input_.astype(int) + input_[:, 1:] -= input_[:, :-1] - source = io.source.RingBuffer(data=input) + source = io.source.RingBuffer(data=input_) sigma = Sigma(shape=(1,)) sink = io.sink.RingBuffer(shape=sigma.shape, buffer=num_steps) @@ -44,18 +44,18 @@ def run_test( output = sink.data.get() sigma.stop() - return input, output + return input_, output def test_sigma_decoding_fixed(self) -> None: """Test sigma decoding with cumulative sum.""" num_steps = 100 - input, output = self.run_test( + input_, output = self.run_test( num_steps=num_steps, tag='fixed_pt' ) - error = np.abs(np.cumsum(input, axis=1) - output).max() + error = np.abs(np.cumsum(input_, axis=1) - output).max() if verbose: print(f'Max abs error = {error}') @@ -65,12 +65,12 @@ def test_sigma_decoding_float(self) -> None: """Test sigma decoding with cumulative sum.""" num_steps = 100 - input, output = 
self.run_test( + input_, output = self.run_test( num_steps=num_steps, tag='floating_pt' ) - error = np.abs(np.cumsum(input, axis=1) - output).max() + error = np.abs(np.cumsum(input_, axis=1) - output).max() if verbose: print(f'Max abs error = {error}') @@ -90,11 +90,11 @@ def run_test( cum_error: bool, tag: str = 'fixed_pt', ) -> Tuple[np.ndarray, np.ndarray]: - input = np.sin(0.1 * np.arange(num_steps).reshape(1, -1)) - input *= (1 << spike_exp + state_exp) - input[:, 1:] -= input[:, :-1] + input_ = np.sin(0.1 * np.arange(num_steps).reshape(1, -1)) + input_ *= (1 << spike_exp + state_exp) + input_[:, 1:] -= input_[:, :-1] - source = io.source.RingBuffer(data=input.astype(int) * (1 << 6)) + source = io.source.RingBuffer(data=input_.astype(int) * (1 << 6)) sdn = SigmaDelta( shape=(1,), vth=vth, @@ -115,10 +115,10 @@ def run_test( output = sink.data.get() sdn.stop() - input = np.cumsum(input, axis=1) + input_ = np.cumsum(input_, axis=1) output = np.cumsum(output, axis=1) - return input, output + return input_, output def test_reconstruction_fixed(self) -> None: """Tests fixed point sigma delta reconstruction. The max absolute @@ -128,7 +128,7 @@ def test_reconstruction_fixed(self) -> None: spike_exp = 6 state_exp = 6 vth = 10 << (spike_exp + state_exp) - input, output = self.run_test( + input_, output = self.run_test( num_steps=num_steps, vth=vth, act_mode=ActivationMode.UNIT, @@ -137,7 +137,7 @@ def test_reconstruction_fixed(self) -> None: cum_error=False, ) - error = np.abs(input - output).max() + error = np.abs(input_ - output).max() if verbose: print(f'Max abs error = {error}') @@ -151,7 +151,7 @@ def test_reconstruction_float(self) -> None: spike_exp = 0 state_exp = 0 vth = 10 - input, output = self.run_test( + input_, output = self.run_test( num_steps=num_steps, vth=vth, act_mode=ActivationMode.UNIT, @@ -161,7 +161,7 @@ def test_reconstruction_float(self) -> None: tag='floating_pt' ) - error = np.abs(input - output).max() + error = np.abs(input_ - output).max() if verbose: print(f'Max abs error = {error}') @@ -175,7 +175,7 @@ def test_reconstruction_cum_error_fixed(self) -> None: spike_exp = 6 state_exp = 6 vth = 10 << (spike_exp + state_exp) - input, output = self.run_test( + input_, output = self.run_test( num_steps=num_steps, vth=vth, act_mode=ActivationMode.UNIT, @@ -184,7 +184,7 @@ def test_reconstruction_cum_error_fixed(self) -> None: cum_error=True, ) - error = np.abs(input - output).max() + error = np.abs(input_ - output).max() if verbose: print(f'Max abs error = {error}') @@ -198,7 +198,7 @@ def test_reconstruction_cum_error_float(self) -> None: spike_exp = 0 state_exp = 0 vth = 10 - input, output = self.run_test( + input_, output = self.run_test( num_steps=num_steps, vth=vth, act_mode=ActivationMode.UNIT, @@ -208,7 +208,7 @@ def test_reconstruction_cum_error_float(self) -> None: tag='floating_pt' ) - error = np.abs(input - output).max() + error = np.abs(input_ - output).max() if verbose: print(f'Max abs error = {error}') @@ -222,7 +222,7 @@ def test_reconstruction_relu_fixed(self) -> None: spike_exp = 0 state_exp = 0 vth = 10 << (spike_exp + state_exp) - input, output = self.run_test( + input_, output = self.run_test( num_steps=num_steps, vth=vth, act_mode=ActivationMode.RELU, @@ -231,7 +231,7 @@ def test_reconstruction_relu_fixed(self) -> None: cum_error=False, ) - error = np.abs(np.maximum(input, 0) - output).max() + error = np.abs(np.maximum(input_, 0) - output).max() if verbose: print(f'Max abs error = {error}') @@ -245,7 +245,7 @@ def test_reconstruction_relu_float(self) 
-> None: vth = 10 spike_exp = 0 state_exp = 0 - input, output = self.run_test( + input_, output = self.run_test( num_steps=num_steps, vth=vth, act_mode=ActivationMode.RELU, @@ -255,7 +255,7 @@ def test_reconstruction_relu_float(self) -> None: tag='floating_pt', ) - error = np.abs(np.maximum(input, 0) - output).max() + error = np.abs(np.maximum(input_, 0) - output).max() if verbose: print(f'Max abs error = {error}') diff --git a/tests/lava/proc/sparse/test_models.py b/tests/lava/proc/sparse/test_models.py index fa803a8f3..3ffb181a0 100644 --- a/tests/lava/proc/sparse/test_models.py +++ b/tests/lava/proc/sparse/test_models.py @@ -3,21 +3,19 @@ # See: https://spdx.org/licenses/ import unittest +import numpy as np from lava.magma.core.learning.learning_rule import Loihi2FLearningRule from scipy.sparse import csr_matrix -from lava.proc.sparse.process import Sparse -from lava.proc.dense.process import Dense, LearningDense +from lava.proc.dense.process import LearningDense from lava.proc.sparse.process import Sparse, DelaySparse, LearningSparse from lava.proc.learning_rules.stdp_learning_rule import STDPLoihi from lava.proc.dense.process import Dense -from lava.proc.sparse.process import Sparse, DelaySparse from lava.proc.sparse.models import AbstractPyDelaySparseModel as APDSM from lava.proc.io.source import RingBuffer as Source from lava.proc.io.sink import RingBuffer as Sink -import unittest + from lava.magma.core.run_configs import Loihi2SimCfg -import numpy as np from lava.magma.core.decorator import implements, requires, tag from lava.magma.core.model.py.model import PyLoihiProcessModel from lava.magma.core.model.py.ports import PyOutPort, PyInPort @@ -30,7 +28,6 @@ from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol from lava.proc.dense.models import AbstractPyDelayDenseModel from lava.utils.weightutils import SignMode -from tests.lava.magma.core import learning def create_network(input_data, conn, weights): @@ -158,7 +155,7 @@ def test_weights_get(self): run_cfg = Loihi2SimCfg(select_tag='floating_pt') conn = Sparse(weights=weights_sparse) - sparse_net = create_network(inp, conn, weights_sparse) + create_network(inp, conn, weights_sparse) conn.run(condition=run_cond, run_cfg=run_cfg) weights_got = conn.weights.get() @@ -187,7 +184,7 @@ def test_weights_set(self): run_cfg = Loihi2SimCfg(select_tag='floating_pt') conn = Sparse(weights=weights_init_sparse) - sparse_net = create_network(inp, conn, weights_init_sparse) + create_network(inp, conn, weights_init_sparse) conn.run(condition=run_cond, run_cfg=run_cfg) new_weights_sparse = conn.weights.init.copy() @@ -404,7 +401,7 @@ def test_consistency_with_learning_dense_random_shape(self): tag_1=weights.copy(), tag_2=weights.copy(), learning_rule=learning_rule) - dense_net = create_learning_network(pre, conn, post) + create_learning_network(pre, conn, post) run_cond = RunSteps(num_steps=simtime) run_cfg = Loihi2SimCfg(select_tag='floating_pt') @@ -422,7 +419,7 @@ def test_consistency_with_learning_dense_random_shape(self): tag_1=weights_sparse.copy(), tag_2=weights_sparse.copy(), learning_rule=learning_rule) - sparse_net = create_learning_network(pre, conn, post) + create_learning_network(pre, conn, post) conn.run(condition=run_cond, run_cfg=run_cfg) weights_got_sparse = conn.weights.get() diff --git a/tests/lava/proc/sparse/test_process.py b/tests/lava/proc/sparse/test_process.py index 1cddbfdff..e26c22ae8 100644 --- a/tests/lava/proc/sparse/test_process.py +++ b/tests/lava/proc/sparse/test_process.py @@ -20,7 +20,7 @@ def 
test_find_with_explicit_zeros(self): spmat = csr_matrix(mat) spmat.data[0] = 0 - dst, src, vals = find(spmat, explicit_zeros=True) + _, _, vals = find(spmat, explicit_zeros=True) self.assertTrue(np.all(spmat.data in vals)) diff --git a/tests/lava/proc/spiker/test_models.py b/tests/lava/proc/spiker/test_models.py index 147dbf269..9d92bc328 100644 --- a/tests/lava/proc/spiker/test_models.py +++ b/tests/lava/proc/spiker/test_models.py @@ -17,7 +17,7 @@ def test_single_spiker_counter(self): "Tests a single spiker for multiple time steps." spiker = Spiker(shape=(1,), period=5) counter = [] - for timestep in range(20): + for _ in range(20): spiker.run(condition=RunSteps(num_steps=1), run_cfg=Loihi2SimCfg()) counter.append(spiker.counter.get()[0]) @@ -31,7 +31,7 @@ def test_multiple_spikers_counter(self): spiker = Spiker(shape=(2,), period=5) counter1 = [] counter2 = [] - for timestep in range(20): + for _ in range(20): spiker.run(condition=RunSteps(num_steps=1), run_cfg=Loihi2SimCfg()) counter1.append(spiker.counter.get()[0]) diff --git a/tests/lava/test_utils/utils.py b/tests/lava/test_utils/utils.py index e1ec81397..27eb9f11d 100644 --- a/tests/lava/test_utils/utils.py +++ b/tests/lava/test_utils/utils.py @@ -3,13 +3,13 @@ # See: https://spdx.org/licenses/ import os -import subprocess +import subprocess # noqa S404 import importlib class Utils: - """Utility Class containing testing helper - code that can be reused between tests + """Utility Class containing testing helper code that can be reused + between tests. """ @staticmethod @@ -26,7 +26,7 @@ def get_bool_env_setting(env_var: str): @staticmethod def is_loihi2_available() -> bool: - """"Checks if Loihi2 is available and can be accessed.""" + """Checks if Loihi 2 is available and can be accessed.""" is_loihi2 = False is_slurm = False @@ -37,7 +37,7 @@ def is_loihi2_available() -> bool: is_slurm = True # Check if Loihi2 is available - sinfo = subprocess.run("sinfo", # noqa: S603, S607 + sinfo = subprocess.run("sinfo", # nosec # noqa: S603, S607 stdout=subprocess.PIPE).stdout.decode( 'utf-8') for line in sinfo.split("\n"): diff --git a/tests/lava/tutorials/test_tutorials.py b/tests/lava/tutorials/test_tutorials.py index 6b33e8594..2b64b8948 100644 --- a/tests/lava/tutorials/test_tutorials.py +++ b/tests/lava/tutorials/test_tutorials.py @@ -5,7 +5,7 @@ import glob import os import platform -import subprocess # noqa: S404 +import subprocess # noqa S404 import sys import tempfile import typing as ty @@ -118,7 +118,7 @@ def _convert_and_execute_notebook( fout.name, notebook, ] - subprocess.check_call(args, env=env) # noqa: S603 + subprocess.check_call(args, env=env) # nosec # noqa: S603 fout.seek(0) return nbformat.read(fout, nbformat.current_nbformat) diff --git a/tests/lava/utils/test_plots.py b/tests/lava/utils/test_plots.py index 04765af1f..4219e5297 100644 --- a/tests/lava/utils/test_plots.py +++ b/tests/lava/utils/test_plots.py @@ -30,7 +30,7 @@ def test_bad_spikes_shape(self) -> None: with self.assertRaises(ValueError) as cm: raster_plot(spikes) - self.assertEquals( + self.assertEqual( str(cm.exception), "Parameter must have exactly two dimensions and " "they must be non-empty.", @@ -43,19 +43,19 @@ def test_non_binary_values(self) -> None: with self.assertRaises(ValueError) as cm: raster_plot(spikes) - self.assertEquals(str(cm.exception), error_msg) + self.assertEqual(str(cm.exception), error_msg) spikes = np.array([[0, -1], [0, 0]]) with self.assertRaises(ValueError) as cm: raster_plot(spikes) - self.assertEquals(str(cm.exception), 
error_msg) + self.assertEqual(str(cm.exception), error_msg) def test_bad_stride(self) -> None: with self.assertRaises(ValueError) as cm: raster_plot(self.spikes, stride=11) - self.assertEquals( + self.assertEqual( str(cm.exception), "Stride must not be greater than the number of neurons.", ) @@ -64,7 +64,7 @@ def test_both_fig_and_figsize_provided(self) -> None: with self.assertRaises(ValueError) as cm: raster_plot(self.spikes, fig=plt.figure(), figsize=(10, 10)) - self.assertEquals( + self.assertEqual( str(cm.exception), "Must use at most one of the following: fig, figsize.", ) diff --git a/tutorials/in_depth/three_factor_learning/utils.py b/tutorials/in_depth/three_factor_learning/utils.py index 230cadfc6..7320fe94d 100644 --- a/tutorials/in_depth/three_factor_learning/utils.py +++ b/tutorials/in_depth/three_factor_learning/utils.py @@ -3,21 +3,14 @@ # See: https://spdx.org/licenses/ import matplotlib.pyplot as plt -import typing as ty import numpy as np -from lava.proc.lif.process import LIF, AbstractLIF, LogConfig, LearningLIF -from lava.proc.io.source import RingBuffer -from lava.proc.dense.process import LearningDense, Dense -from lava.magma.core.process.neuron import LearningNeuronProcess -from lava.proc.learning_rules.r_stdp_learning_rule import RewardModulatedSTDP -from lava.magma.core.process.variable import Var -from lava.magma.core.process.ports.ports import InPort, OutPort +from lava.proc.lif.process import LearningLIF from lava.magma.core.model.py.neuron import ( LearningNeuronModelFloat, LearningNeuronModelFixed ) from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol -from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.ports import PyOutPort from lava.magma.core.model.py.type import LavaPyType from lava.magma.core.resources import CPU from lava.magma.core.decorator import implements, requires, tag @@ -188,8 +181,7 @@ def run_spk(self) -> None: self.s_out_y3.send(self.y3) -def generate_post_spikes(pre_spike_times, - num_steps, spike_prob_post): +def generate_post_spikes(pre_spike_times, num_steps, spike_prob_post): """generates specific post synaptic spikes to demonstrate potentiation and depression. 
""" @@ -211,16 +203,17 @@ def generate_post_spikes(pre_spike_times, return spike_raster_post + def plot_spikes(spikes, figsize, legend, colors, title, num_steps): offsets = list(range(1, len(spikes) + 1)) num_x_ticks = np.arange(0, num_steps+1, 25) plt.figure(figsize=figsize) - spikes_plot = plt.eventplot(positions=spikes, - lineoffsets=offsets, - linelength=0.9, - colors=colors) + plt.eventplot(positions=spikes, + lineoffsets=offsets, + linelength=0.9, + colors=colors) plt.title(title) plt.xlabel("Time steps") @@ -233,9 +226,9 @@ def plot_spikes(spikes, figsize, legend, colors, title, num_steps): plt.yticks(ticks=offsets, labels=legend) - plt.show() + def plot_time_series(time, time_series, ylabel, title, figsize, color): plt.figure(figsize=figsize) plt.step(time, time_series, color=color) @@ -250,7 +243,10 @@ def plot_time_series(time, time_series, ylabel, title, figsize, color): plt.show() -def plot_time_series_subplots(time, time_series_y1, time_series_y2, ylabel, title, figsize, color, legend, leg_loc="upper left"): + +def plot_time_series_subplots(time, time_series_y1, time_series_y2, ylabel, + title, figsize, color, legend, + leg_loc="upper left"): plt.figure(figsize=figsize) plt.step(time, time_series_y1, label=legend[0], color=color[0]) @@ -268,7 +264,9 @@ def plot_time_series_subplots(time, time_series_y1, time_series_y2, ylabel, titl plt.show() -def plot_spikes_time_series(time, time_series, spikes, figsize, legend, colors, title, num_steps): + +def plot_spikes_time_series(time, time_series, spikes, figsize, legend, + colors, title, num_steps): offsets = list(range(1, len(spikes) + 1)) num_x_ticks = np.arange(0, num_steps+1, 25)