From 141e732c74bd4575c115fdaecd5621aac90f3c33 Mon Sep 17 00:00:00 2001 From: bamsumit Date: Tue, 8 Mar 2022 10:23:14 -0800 Subject: [PATCH 01/14] update refport unittest to always wait when it writes to port for consistent behavior Signed-off-by: bamsumit --- poetry.lock | 42 +++++++++---------- pyproject.toml | 4 +- .../lava/magma/runtime/test_ref_var_ports.py | 3 ++ 3 files changed, 26 insertions(+), 23 deletions(-) diff --git a/poetry.lock b/poetry.lock index b1c390b6f..130f4162a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -666,7 +666,7 @@ python-versions = "*" [[package]] name = "numpy" -version = "1.22.2" +version = "1.22.3" description = "NumPy is the fundamental package for array computing with Python." category = "main" optional = false @@ -1311,7 +1311,7 @@ testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest- [metadata] lock-version = "1.1" python-versions = ">=3.8, <3.11" -content-hash = "17e10f1725cfdae4b4a2e8da85965da95263c227a876fb1afc363ed447d49583" +content-hash = "e5bd66852734a32a6fdc9358cf6f54452bffb6d31f4249c1618abdbc47625e39" [metadata.files] alabaster = [ @@ -1791,25 +1791,25 @@ msgpack = [ {file = "msgpack-1.0.3.tar.gz", hash = "sha256:51fdc7fb93615286428ee7758cecc2f374d5ff363bdd884c7ea622a7a327a81e"}, ] numpy = [ - {file = "numpy-1.22.2-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:515a8b6edbb904594685da6e176ac9fbea8f73a5ebae947281de6613e27f1956"}, - {file = "numpy-1.22.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:76a4f9bce0278becc2da7da3b8ef854bed41a991f4226911a24a9711baad672c"}, - {file = "numpy-1.22.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:168259b1b184aa83a514f307352c25c56af111c269ffc109d9704e81f72e764b"}, - {file = "numpy-1.22.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3556c5550de40027d3121ebbb170f61bbe19eb639c7ad0c7b482cd9b560cd23b"}, - {file = "numpy-1.22.2-cp310-cp310-win_amd64.whl", hash = "sha256:aafa46b5a39a27aca566198d3312fb3bde95ce9677085efd02c86f7ef6be4ec7"}, - {file = "numpy-1.22.2-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:55535c7c2f61e2b2fc817c5cbe1af7cb907c7f011e46ae0a52caa4be1f19afe2"}, - {file = "numpy-1.22.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:60cb8e5933193a3cc2912ee29ca331e9c15b2da034f76159b7abc520b3d1233a"}, - {file = "numpy-1.22.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b536b6840e84c1c6a410f3a5aa727821e6108f3454d81a5cd5900999ef04f89"}, - {file = "numpy-1.22.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2638389562bda1635b564490d76713695ff497242a83d9b684d27bb4a6cc9d7a"}, - {file = "numpy-1.22.2-cp38-cp38-win32.whl", hash = "sha256:6767ad399e9327bfdbaa40871be4254d1995f4a3ca3806127f10cec778bd9896"}, - {file = "numpy-1.22.2-cp38-cp38-win_amd64.whl", hash = "sha256:03ae5850619abb34a879d5f2d4bb4dcd025d6d8fb72f5e461dae84edccfe129f"}, - {file = "numpy-1.22.2-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:d76a26c5118c4d96e264acc9e3242d72e1a2b92e739807b3b69d8d47684b6677"}, - {file = "numpy-1.22.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:15efb7b93806d438e3bc590ca8ef2f953b0ce4f86f337ef4559d31ec6cf9d7dd"}, - {file = "numpy-1.22.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:badca914580eb46385e7f7e4e426fea6de0a37b9e06bec252e481ae7ec287082"}, - {file = "numpy-1.22.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94dd11d9f13ea1be17bac39c1942f527cbf7065f94953cf62dfe805653da2f8f"}, - {file = 
"numpy-1.22.2-cp39-cp39-win32.whl", hash = "sha256:8cf33634b60c9cef346663a222d9841d3bbbc0a2f00221d6bcfd0d993d5543f6"}, - {file = "numpy-1.22.2-cp39-cp39-win_amd64.whl", hash = "sha256:59153979d60f5bfe9e4c00e401e24dfe0469ef8da6d68247439d3278f30a180f"}, - {file = "numpy-1.22.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a176959b6e7e00b5a0d6f549a479f869829bfd8150282c590deee6d099bbb6e"}, - {file = "numpy-1.22.2.zip", hash = "sha256:076aee5a3763d41da6bef9565fdf3cb987606f567cd8b104aded2b38b7b47abf"}, + {file = "numpy-1.22.3-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:92bfa69cfbdf7dfc3040978ad09a48091143cffb778ec3b03fa170c494118d75"}, + {file = "numpy-1.22.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8251ed96f38b47b4295b1ae51631de7ffa8260b5b087808ef09a39a9d66c97ab"}, + {file = "numpy-1.22.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48a3aecd3b997bf452a2dedb11f4e79bc5bfd21a1d4cc760e703c31d57c84b3e"}, + {file = "numpy-1.22.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a3bae1a2ed00e90b3ba5f7bd0a7c7999b55d609e0c54ceb2b076a25e345fa9f4"}, + {file = "numpy-1.22.3-cp310-cp310-win_amd64.whl", hash = "sha256:08d9b008d0156c70dc392bb3ab3abb6e7a711383c3247b410b39962263576cd4"}, + {file = "numpy-1.22.3-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:201b4d0552831f7250a08d3b38de0d989d6f6e4658b709a02a73c524ccc6ffce"}, + {file = "numpy-1.22.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f8c1f39caad2c896bc0018f699882b345b2a63708008be29b1f355ebf6f933fe"}, + {file = "numpy-1.22.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:568dfd16224abddafb1cbcce2ff14f522abe037268514dd7e42c6776a1c3f8e5"}, + {file = "numpy-1.22.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ca688e1b9b95d80250bca34b11a05e389b1420d00e87a0d12dc45f131f704a1"}, + {file = "numpy-1.22.3-cp38-cp38-win32.whl", hash = "sha256:e7927a589df200c5e23c57970bafbd0cd322459aa7b1ff73b7c2e84d6e3eae62"}, + {file = "numpy-1.22.3-cp38-cp38-win_amd64.whl", hash = "sha256:07a8c89a04997625236c5ecb7afe35a02af3896c8aa01890a849913a2309c676"}, + {file = "numpy-1.22.3-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:2c10a93606e0b4b95c9b04b77dc349b398fdfbda382d2a39ba5a822f669a0123"}, + {file = "numpy-1.22.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fade0d4f4d292b6f39951b6836d7a3c7ef5b2347f3c420cd9820a1d90d794802"}, + {file = "numpy-1.22.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bfb1bb598e8229c2d5d48db1860bcf4311337864ea3efdbe1171fb0c5da515d"}, + {file = "numpy-1.22.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97098b95aa4e418529099c26558eeb8486e66bd1e53a6b606d684d0c3616b168"}, + {file = "numpy-1.22.3-cp39-cp39-win32.whl", hash = "sha256:fdf3c08bce27132395d3c3ba1503cac12e17282358cb4bddc25cc46b0aca07aa"}, + {file = "numpy-1.22.3-cp39-cp39-win_amd64.whl", hash = "sha256:639b54cdf6aa4f82fe37ebf70401bbb74b8508fddcf4797f9fe59615b8c5813a"}, + {file = "numpy-1.22.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c34ea7e9d13a70bf2ab64a2532fe149a9aced424cd05a2c4ba662fd989e3e45f"}, + {file = "numpy-1.22.3.zip", hash = "sha256:dbc7601a3b7472d559dc7b933b18b4b66f9aa7452c120e87dfb33d02008c8a18"}, ] packaging = [ {file = "packaging-20.9-py2.py3-none-any.whl", hash = "sha256:67714da7f7bc052e064859c05c595155bd1ee9f69f76557e21f051443c20947a"}, diff --git a/pyproject.toml b/pyproject.toml index 
0695e8144..02cb3996c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -53,7 +53,7 @@ unittest2 = "^1.1.0" [tool.poetry.dev-dependencies] bandit = "1.7.2" -coverage = { extras = ["toml"], version = "^6.3.2" } +coverage = "^6.3.2" darglint = "^1.8.1" flake8 = "^4.0.1" flake8-bandit = "^2.1.2" @@ -70,7 +70,7 @@ flakeheaven = "^0.11.1" pep8-naming = "^0.11.1" poetry = "^1.1.13" pytest = "^7.0.1" -pytest-cov = { extras = ["toml"], version = "^3.0.0" } +pytest-cov = "^3.0.0" sphinx = { extras = ["toml"], version = "^4.4.0" } sphinx-tabs = { extras = ["toml"], version = "^3.2.0" } sphinx_rtd_theme = { extras = ["toml"], version = "^1.0.0" } diff --git a/tests/lava/magma/runtime/test_ref_var_ports.py b/tests/lava/magma/runtime/test_ref_var_ports.py index 68bb8fce5..338bb29ec 100644 --- a/tests/lava/magma/runtime/test_ref_var_ports.py +++ b/tests/lava/magma/runtime/test_ref_var_ports.py @@ -96,6 +96,9 @@ def run_post_mgmt(self): ref_data = np.array([5, 5, 5]) + self.time_step self.ref1.write(ref_data) self.ref3.write(ref_data[:2]) + # ensure write() has finished before moving on + self.ref1.wait() + self.ref3.wait() # A minimal PyProcModel implementing P2 From 7097dd8aeb5942ce570c187119cdf95be5d1ec6a Mon Sep 17 00:00:00 2001 From: bamsumit Date: Tue, 8 Mar 2022 11:45:18 -0800 Subject: [PATCH 02/14] Removed pyproject changes Signed-off-by: bamsumit --- poetry.lock | 2 +- pyproject.toml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 130f4162a..5be154e19 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1311,7 +1311,7 @@ testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest- [metadata] lock-version = "1.1" python-versions = ">=3.8, <3.11" -content-hash = "e5bd66852734a32a6fdc9358cf6f54452bffb6d31f4249c1618abdbc47625e39" +content-hash = "17e10f1725cfdae4b4a2e8da85965da95263c227a876fb1afc363ed447d49583" [metadata.files] alabaster = [ diff --git a/pyproject.toml b/pyproject.toml index 02cb3996c..0695e8144 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -53,7 +53,7 @@ unittest2 = "^1.1.0" [tool.poetry.dev-dependencies] bandit = "1.7.2" -coverage = "^6.3.2" +coverage = { extras = ["toml"], version = "^6.3.2" } darglint = "^1.8.1" flake8 = "^4.0.1" flake8-bandit = "^2.1.2" @@ -70,7 +70,7 @@ flakeheaven = "^0.11.1" pep8-naming = "^0.11.1" poetry = "^1.1.13" pytest = "^7.0.1" -pytest-cov = "^3.0.0" +pytest-cov = { extras = ["toml"], version = "^3.0.0" } sphinx = { extras = ["toml"], version = "^4.4.0" } sphinx-tabs = { extras = ["toml"], version = "^3.2.0" } sphinx_rtd_theme = { extras = ["toml"], version = "^1.0.0" } From 1fb7353d788b1b14e96ea4cf4212d5c22d485973 Mon Sep 17 00:00:00 2001 From: bamsumit Date: Fri, 20 May 2022 12:16:35 -0700 Subject: [PATCH 03/14] Fix to convolution tests. Fixed imcompatible mnist_pretrained for old python versions. 
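For context on the convolution test fix: in the diff below, the ground truth is built with a one-time-step shift, since the output observed at step t + 1 corresponds to the input presented at step t. A minimal sketch of that comparison pattern, assuming time is the last axis and conv_fn stands in for the framework's convolution utility (illustrative names, not the actual test fixture):

    import numpy as np

    def shifted_ground_truth(out, inp, conv_fn, **params):
        # Time is assumed to be the last axis of both arrays.
        gt = np.zeros_like(out)
        for t in range(out.shape[-1] - 1):
            # The response to the input of step t appears one step later.
            gt[..., t + 1] = conv_fn(inp[..., t], **params)
        return gt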
Signed-off-by: bamsumit --- tests/lava/proc/conv/test_models.py | 10 ++++------ tutorials/end_to_end/mnist_pretrained.npy | 4 ++-- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/tests/lava/proc/conv/test_models.py b/tests/lava/proc/conv/test_models.py index 0d6fe7291..a55008a55 100644 --- a/tests/lava/proc/conv/test_models.py +++ b/tests/lava/proc/conv/test_models.py @@ -92,7 +92,6 @@ def setup_conv() -> Tuple[ class TestConvProcessModels(unittest.TestCase): """Tests for all ProcessModels of Conv""" - @unittest.skip def test_conv_float(self) -> None: """Test for float conv process.""" num_steps = 10 @@ -117,8 +116,8 @@ def test_conv_float(self) -> None: utils.TORCH_IS_AVAILABLE = TORCH_IS_AVAILABLE output_gt = np.zeros_like(output) - for t in range(output.shape[-1]): - output_gt[..., t] = utils.conv(input[..., t], **params) + for t in range(output.shape[-1] - 1): + output_gt[..., t + 1] = utils.conv(input[..., t], **params) error = np.abs(output - output_gt).mean() @@ -139,7 +138,6 @@ def test_conv_float(self) -> None: f'{output_gt[output!=output_gt]=}\n' ) - @unittest.skip def test_conv_fixed(self) -> None: """Test for fixed point conv process.""" num_steps = 10 @@ -164,8 +162,8 @@ def test_conv_fixed(self) -> None: utils.TORCH_IS_AVAILABLE = TORCH_IS_AVAILABLE output_gt = np.zeros_like(output) - for t in range(output.shape[-1]): - output_gt[..., t] = utils.conv(input[..., t], **params) + for t in range(output.shape[-1] - 1): + output_gt[..., t + 1] = utils.conv(input[..., t], **params) output_gt = utils.signed_clamp(output_gt, bits=24) error = np.abs(output - output_gt).mean() diff --git a/tutorials/end_to_end/mnist_pretrained.npy b/tutorials/end_to_end/mnist_pretrained.npy index 97d4d8e42..99ec90bec 100644 --- a/tutorials/end_to_end/mnist_pretrained.npy +++ b/tutorials/end_to_end/mnist_pretrained.npy @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a55246798c24fb8122ef7be5bd3b8fbb1222a121d9db502d382c3d5590203555 -size 220771 +oid sha256:94f32a3ae7f8dd278cc8933b214642f246ffd859a129d19130ac88208f35c9d6 +size 220767 From 68c817cbe38b2f829de4592db9f11be3a0d556e5 Mon Sep 17 00:00:00 2001 From: bamsumit Date: Thu, 28 Jul 2022 09:44:48 -0700 Subject: [PATCH 04/14] Missing moudle parent fix Signed-off-by: bamsumit --- src/lava/magma/compiler/compiler_graphs.py | 31 +++++++++++----------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/src/lava/magma/compiler/compiler_graphs.py b/src/lava/magma/compiler/compiler_graphs.py index b25e4736d..9197fdb8a 100644 --- a/src/lava/magma/compiler/compiler_graphs.py +++ b/src/lava/magma/compiler/compiler_graphs.py @@ -811,23 +811,24 @@ def _find_proc_models(proc: AbstractProcess) \ if not proc_module.__name__ == "__main__": # Get the parent module. module_spec = importlib.util.find_spec(proc_module.__name__) - parent_module = importlib.import_module(module_spec.parent) - - # Get all the modules inside the parent (namespace) module. This - # is required here, because the namespace module can span multiple - # repositories. - namespace_module_infos = list( - pkgutil.iter_modules( - parent_module.__path__, - parent_module.__name__ + "." + if module_spec.parent: + parent_module = importlib.import_module(module_spec.parent) + + # Get all the modules inside the parent (namespace) module. + # This is required here, because the namespace module can span + # multiple repositories. + namespace_module_infos = list( + pkgutil.iter_modules( + parent_module.__path__, + parent_module.__name__ + "." 
+ ) ) - ) - # Extract the directory name of each module. - for _, name, _ in namespace_module_infos: - module = importlib.import_module(name) - module_dir_name = os.path.dirname(inspect.getfile(module)) - dir_names.append(module_dir_name) + # Extract the directory name of each module. + for _, name, _ in namespace_module_infos: + module = importlib.import_module(name) + module_dir_name = os.path.dirname(inspect.getfile(module)) + dir_names.append(module_dir_name) # Go through all directories and extract all the ProcModels. for dir_name in dir_names: From fd2163cb443794b54e01ac5eab92d1e2d620cc53 Mon Sep 17 00:00:00 2001 From: Joyesh Mishra Date: Fri, 26 Aug 2022 15:09:39 -0700 Subject: [PATCH 05/14] Added ConvVarModel --- src/lava/magma/compiler/utils.py | 11 ++++- src/lava/magma/compiler/var_model.py | 63 ++++++++++++++++++++++++++-- 2 files changed, 69 insertions(+), 5 deletions(-) diff --git a/src/lava/magma/compiler/utils.py b/src/lava/magma/compiler/utils.py index 10014906a..b3c813cac 100644 --- a/src/lava/magma/compiler/utils.py +++ b/src/lava/magma/compiler/utils.py @@ -6,7 +6,7 @@ from lava.magma.compiler.mappable_interface import Mappable from lava.magma.compiler.subcompilers.address import NcLogicalAddress, \ NcVirtualAddress -from lava.magma.compiler.var_model import LoihiVarModel +from lava.magma.compiler.var_model import LoihiVarModel, ConvInVarModel from lava.magma.core.model.spike_type import SpikeType @@ -58,6 +58,10 @@ def get_logical(self) -> ty.List[NcLogicalAddress]: ------- Returns logical address of the port initializer. """ + # TODO: Need to clean this + if isinstance(self.var_model, ConvInVarModel): + return self.var_model.get_logical() + return [NcLogicalAddress(chip_id=addr.logical_chip_id, core_id=addr.logical_core_id) for addr in self.var_model.address] @@ -73,6 +77,11 @@ def set_virtual(self, addrs: ty.List[NcVirtualAddress]): ------- """ + # TODO: Need to clean this + if isinstance(self.var_model, ConvInVarModel): + self.var_model.set_virtual(addrs) + return + if len(addrs) != len(self.var_model.address): raise ValueError("Length of list of address provided doesn't " "match size of the address list of the port " diff --git a/src/lava/magma/compiler/var_model.py b/src/lava/magma/compiler/var_model.py index 9393fe045..a0a6d4b44 100644 --- a/src/lava/magma/compiler/var_model.py +++ b/src/lava/magma/compiler/var_model.py @@ -15,17 +15,20 @@ pass from lava.magma.core.process.variable import Var +ChipIdx: int +CoreIdx: int + @dataclass class LoihiAddress: # physical chip id of the var - physical_chip_id: int + physical_chip_id: ChipIdx # physical core id of the nc var or lmt_id of the spike counter - physical_core_id: int + physical_core_id: CoreIdx # logical chip id used in compilation, before mapping to hardware addresses - logical_chip_id: int + logical_chip_id: ChipIdx # logical core id used in compilation, before mapping to hardware addresses - logical_core_id: int + logical_core_id: CoreIdx # logical address/index of the var; used with nodesets for get/set logical_idx_addr: int # length of the contiguous addresses of var on core_id on chip_id @@ -122,3 +125,55 @@ class CVarModel(LoihiVarModel): @dataclass class NcVarModel(LoihiVarModel): pass + + +@dataclass +class Region: + x_min: int + x_max: int + y_min: int + y_max: int + logical_chip_idx: ChipIdx + logical_core_idx: CoreIdx + physical_chip_idx: ChipIdx = None + physical_core_idx: CoreIdx = None + + +@dataclass +class ConvInVarModel(AbstractVarModel, Mappable): + x_dim: int = 0 + y_dim: int = 0 + f_dim: int 
= 0 + x_split: int = 0 + f_split: int = 0 + regions: ty.List[Region] = None + + def get_logical(self) -> ty.List[NcLogicalAddress]: + """ + + Returns + ------- + Returns logical address of the port initializer. + """ + return [NcLogicalAddress(chip_id=region.logical_chip_idx, + core_id=region.logical_core_idx) for region in + self.regions] + + def set_virtual(self, addrs: ty.List[NcVirtualAddress]): + """ + Sets physical address of the port initializer + Parameters + ---------- + addrs: List of address + + Returns + ------- + + """ + if len(addrs) != len(self.regions): + raise ValueError("Length of list of address provided doesn't " + "match size of the regions list of the port " + "initializer.") + for idx, addr in enumerate(addrs): + self.regions[idx].physical_chip_idx = addr.chip_id + self.regions[idx].physical_core_idx = addr.core_id From c35d0b5c6df60a52e77005539ddeb1a6ceb32c87 Mon Sep 17 00:00:00 2001 From: yashward Date: Mon, 29 Aug 2022 12:05:32 -0700 Subject: [PATCH 06/14] Made changes in mapper to map regions for ConvInVarModel --- src/lava/magma/compiler/mapper.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/src/lava/magma/compiler/mapper.py b/src/lava/magma/compiler/mapper.py index c1f04e0ba..a14782a21 100644 --- a/src/lava/magma/compiler/mapper.py +++ b/src/lava/magma/compiler/mapper.py @@ -128,9 +128,18 @@ def map_cores(self, executable: Executable, # Checking if the initializers are same if channel_map[port_pair].src_port_initializer == ports[ port]: - dst_addr: ty.List[LoihiAddress] = channel_map[ - port_pair].dst_port_initializer.var_model.address - chips = [addr.physical_chip_id for addr in dst_addr] + var_model = channel_map[ + port_pair].dst_port_initializer.var_model + # Checking to see if its ConvInVarModel or not + if hasattr(var_model, "address"): + dst_addr: ty.List[LoihiAddress] = channel_map[ + port_pair].dst_port_initializer.var_model.address + chips = [addr.physical_chip_id for addr in dst_addr] + else: + # Will be here for Conv Regions which will have + # ConvInVarModel + chips = [region.physical_chip_idx for region in + var_model.regions] address.update(chips) # Set address of c ref_var as that of the var port its # pointing to From 340164a6b3a9197ded5396d9455a252d7c404680 Mon Sep 17 00:00:00 2001 From: Joyesh Mishra Date: Mon, 29 Aug 2022 13:18:09 -0700 Subject: [PATCH 07/14] Added ConvNeuronVarModel --- src/lava/magma/compiler/var_model.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/lava/magma/compiler/var_model.py b/src/lava/magma/compiler/var_model.py index a0a6d4b44..ac28040bc 100644 --- a/src/lava/magma/compiler/var_model.py +++ b/src/lava/magma/compiler/var_model.py @@ -177,3 +177,9 @@ def set_virtual(self, addrs: ty.List[NcVirtualAddress]): for idx, addr in enumerate(addrs): self.regions[idx].physical_chip_idx = addr.chip_id self.regions[idx].physical_core_idx = addr.core_id + + +@dataclass +class ConvNeuronVarModel(LoihiNeuronVarModel): + alloc_dims: ty.List[ty.Tuple[int, int, int]] = None + valid_dims: ty.List[ty.Tuple[int, int, int]] = None \ No newline at end of file From 25d633ad2942c0ea458cb241f448663c9e36a07f Mon Sep 17 00:00:00 2001 From: Joyesh Mishra Date: Tue, 30 Aug 2022 16:46:06 -0700 Subject: [PATCH 08/14] Implement ConvNeuronVarModel --- src/lava/magma/compiler/var_model.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/lava/magma/compiler/var_model.py b/src/lava/magma/compiler/var_model.py index ac28040bc..d43e1fe5b 100644 --- 
a/src/lava/magma/compiler/var_model.py +++ b/src/lava/magma/compiler/var_model.py @@ -182,4 +182,5 @@ def set_virtual(self, addrs: ty.List[NcVirtualAddress]): @dataclass class ConvNeuronVarModel(LoihiNeuronVarModel): alloc_dims: ty.List[ty.Tuple[int, int, int]] = None - valid_dims: ty.List[ty.Tuple[int, int, int]] = None \ No newline at end of file + valid_dims: ty.List[ty.Tuple[int, int, int]] = None + var_shape: ty.Tuple[int, int, int] = None From 3aa972e3ada5a4fdbcce2188b120ab7e2cc4bfa1 Mon Sep 17 00:00:00 2001 From: bamsumit Date: Fri, 2 Sep 2022 08:06:12 -0700 Subject: [PATCH 09/14] Fixed lifrest doc Signed-off-by: bamsumit --- src/lava/proc/lif/process.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/lava/proc/lif/process.py b/src/lava/proc/lif/process.py index 5c54e9130..d34d581c0 100644 --- a/src/lava/proc/lif/process.py +++ b/src/lava/proc/lif/process.py @@ -194,6 +194,10 @@ class LIFReset(LIF): Neuron threshold voltage, exceeding which, the neuron will spike. Currently, only a single threshold can be set for the entire population of neurons. + reset_interval : int, optional + The interval of neuron state reset. By default 1 timestep. + reset_offset : int, optional + The phase/offset of neuron reset. By defalt at 0th timestep. See Also From 5abcd865a629b871093a8a85f8cd175d34056624 Mon Sep 17 00:00:00 2001 From: Joyesh Mishra Date: Mon, 12 Sep 2022 15:36:11 -0700 Subject: [PATCH 10/14] Add embedded core allocation order --- src/lava/magma/compiler/channel_map.py | 18 ++++++++++++++++-- src/lava/magma/compiler/compiler.py | 3 +++ .../magma/compiler/subcompilers/constants.py | 10 ++++++++++ src/lava/magma/core/run_configs.py | 14 ++++++++++++-- 4 files changed, 41 insertions(+), 4 deletions(-) diff --git a/src/lava/magma/compiler/channel_map.py b/src/lava/magma/compiler/channel_map.py index 9ce8d5702..0ff48d913 100644 --- a/src/lava/magma/compiler/channel_map.py +++ b/src/lava/magma/compiler/channel_map.py @@ -11,6 +11,9 @@ from lava.magma.compiler.utils import PortInitializer from lava.magma.core.process.ports.ports import AbstractPort from lava.magma.core.process.ports.ports import AbstractSrcPort, AbstractDstPort +from lava.magma.core.run_configs import RunConfig, AbstractLoihiHWRunCfg +from lava.magma.compiler.subcompilers.constants import \ + EMBEDDED_CORE_ALLOCATION_ORDER, MAX_EMBEDDED_CORES_PER_CHIP @dataclass(eq=True, frozen=True) @@ -35,7 +38,17 @@ class ChannelMap(dict): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._initializers_lookup = dict() - self._lmt_allocation_dict: ty.Dict[int, int] = defaultdict(lambda: -1) + self.embedded_core_allocation_order: EMBEDDED_CORE_ALLOCATION_ORDER = \ + EMBEDDED_CORE_ALLOCATION_ORDER.NORMAL + self._lmt_allocation_dict: ty.Dict[int, int] = \ + defaultdict(self._get_embedded_initial_allocation_id) + + def _get_embedded_initial_allocation_id(self) -> int: + if self.embedded_core_allocation_order is \ + EMBEDDED_CORE_ALLOCATION_ORDER.NORMAL: + return -1 + else: + return MAX_EMBEDDED_CORES_PER_CHIP def __setitem__( self, key: PortPair, value: Payload, dict_setitem=dict.__setitem__ @@ -61,7 +74,8 @@ def lmt_allocation_dict(self) -> ty.Dict[int, int]: return self._lmt_allocation_dict @classmethod - def from_proc_groups(self, proc_groups: ty.List[ProcGroup]) -> "ChannelMap": + def from_proc_groups(self, + proc_groups: ty.List[ProcGroup]) -> "ChannelMap": """Initializes a ChannelMap from a list of process ProcGroups extracting the ports from every process group. 
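A note on the ChannelMap change above: the per-chip LMT (embedded core) allocation counter is seeded from a default factory that depends on the requested allocation order, starting at -1 and counting up in NORMAL mode, or starting at MAX_EMBEDDED_CORES_PER_CHIP and counting down in REVERSED mode. A minimal sketch of that defaultdict-with-bound-method-factory pattern, using stand-in names rather than the actual Lava classes:

    from collections import defaultdict
    from enum import IntEnum

    class AllocationOrder(IntEnum):   # stand-in for EMBEDDED_CORE_ALLOCATION_ORDER
        NORMAL = 1
        REVERSED = -1

    MAX_CORES_PER_CHIP = 3            # stand-in for MAX_EMBEDDED_CORES_PER_CHIP

    class AllocationMap:
        def __init__(self, order=AllocationOrder.NORMAL):
            self.order = order
            # The factory is a bound method, so a missing chip id picks up
            # whatever order is configured at lookup time.
            self.lmt_allocation = defaultdict(self._initial_id)

        def _initial_id(self):
            # NORMAL counts up from -1; REVERSED counts down from the maximum.
            if self.order is AllocationOrder.NORMAL:
                return -1
            return MAX_CORES_PER_CHIP

Later patches in this series move this state out of ChannelMap and instead pass the allocation order from the run configuration to the runtime service builder.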
diff --git a/src/lava/magma/compiler/compiler.py b/src/lava/magma/compiler/compiler.py index f878f5cd2..8b047d158 100644 --- a/src/lava/magma/compiler/compiler.py +++ b/src/lava/magma/compiler/compiler.py @@ -129,6 +129,9 @@ def compile( proc_group_digraph = ProcGroupDiGraphs(process, run_cfg) proc_groups: ty.List[ProcGroup] = proc_group_digraph.get_proc_groups() channel_map = ChannelMap.from_proc_groups(proc_groups) + if isinstance(run_cfg, AbstractLoihiHWRunCfg): + channel_map.embedded_core_allocation_order = \ + run_cfg.embedded_core_allocation_order proc_builders, channel_map = self._compile_proc_groups( proc_groups, channel_map ) diff --git a/src/lava/magma/compiler/subcompilers/constants.py b/src/lava/magma/compiler/subcompilers/constants.py index 1b8b7089e..cb2e03c12 100644 --- a/src/lava/magma/compiler/subcompilers/constants.py +++ b/src/lava/magma/compiler/subcompilers/constants.py @@ -1,6 +1,7 @@ # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: LGPL 2.1 or later # See: https://spdx.org/licenses/ +from enum import IntEnum COUNTERS_PER_EMBEDDED_CORE = 900 EMBEDDED_CORE_COUNTER_START_INDEX = 33 @@ -9,3 +10,12 @@ NUM_VIRTUAL_CORES_L3 = 120 MAX_EMBEDDED_CORES_PER_CHIP = 3 + +class EMBEDDED_CORE_ALLOCATION_ORDER(IntEnum): + NORMAL = 1 + """Allocate embedded cores in normal order 0, 1, 2""" + REVERSED = -1 + """Allocate embedded cores in reverse order 2, 1, 0. This is useful in + situations in case of certain tasks which take longer than others and + need to be scheduled on embedded core 0 to ensure nxcore does not stop + communicating on channels""" diff --git a/src/lava/magma/core/run_configs.py b/src/lava/magma/core/run_configs.py index ccba6133e..c7c629175 100644 --- a/src/lava/magma/core/run_configs.py +++ b/src/lava/magma/core/run_configs.py @@ -21,6 +21,8 @@ class AbstractNcProcessModel: pass from lava.magma.core.sync.domain import SyncDomain +from lava.magma.compiler.subcompilers.constants import \ + EMBEDDED_CORE_ALLOCATION_ORDER if ty.TYPE_CHECKING: from lava.magma.core.process.process import AbstractProcess @@ -371,7 +373,9 @@ def __init__(self, AbstractProcessModel]]] = None, loglevel: int = logging.WARNING, pre_run_fxs: ty.List[ty.Callable] = [], - post_run_fxs: ty.List[ty.Callable] = []): + post_run_fxs: ty.List[ty.Callable] = [], + embedded_core_allocation_order = \ + EMBEDDED_CORE_ALLOCATION_ORDER.NORMAL): super().__init__(custom_sync_domains, select_tag, select_sub_proc_model, @@ -379,6 +383,8 @@ def __init__(self, loglevel) self.pre_run_fxs: ty.List[ty.Callable] = pre_run_fxs self.post_run_fxs: ty.List[ty.Callable] = post_run_fxs + self.embedded_core_allocation_order: EMBEDDED_CORE_ALLOCATION_ORDER = \ + embedded_core_allocation_order def _order_according_to_resources(self, proc_models: ty.List[ty.Type[ AbstractProcessModel]]) -> ty.List[int]: @@ -430,7 +436,9 @@ def __init__(self, AbstractProcessModel]]] = None, loglevel: int = logging.WARNING, pre_run_fxs: ty.List[ty.Callable] = [], - post_run_fxs: ty.List[ty.Callable] = []): + post_run_fxs: ty.List[ty.Callable] = [], + embedded_core_allocation_order = \ + EMBEDDED_CORE_ALLOCATION_ORDER.NORMAL): super().__init__(custom_sync_domains, select_tag, select_sub_proc_model, @@ -438,6 +446,8 @@ def __init__(self, loglevel) self.pre_run_fxs: ty.List[ty.Callable] = pre_run_fxs self.post_run_fxs: ty.List[ty.Callable] = post_run_fxs + self.embedded_core_allocation_order: EMBEDDED_CORE_ALLOCATION_ORDER = \ + embedded_core_allocation_order def _order_according_to_resources(self, proc_models: ty.List[ty.Type[ 
AbstractProcessModel]]) -> ty.List[int]: From cff2a2d2d90018a1e9a0747f787e110527c2794a Mon Sep 17 00:00:00 2001 From: Joyesh Mishra Date: Mon, 12 Sep 2022 16:03:07 -0700 Subject: [PATCH 11/14] Fix embedded core allocation order --- src/lava/magma/compiler/channel_map.py | 16 +--------------- src/lava/magma/compiler/compiler.py | 5 ++--- 2 files changed, 3 insertions(+), 18 deletions(-) diff --git a/src/lava/magma/compiler/channel_map.py b/src/lava/magma/compiler/channel_map.py index 0ff48d913..4d8e11000 100644 --- a/src/lava/magma/compiler/channel_map.py +++ b/src/lava/magma/compiler/channel_map.py @@ -11,10 +11,6 @@ from lava.magma.compiler.utils import PortInitializer from lava.magma.core.process.ports.ports import AbstractPort from lava.magma.core.process.ports.ports import AbstractSrcPort, AbstractDstPort -from lava.magma.core.run_configs import RunConfig, AbstractLoihiHWRunCfg -from lava.magma.compiler.subcompilers.constants import \ - EMBEDDED_CORE_ALLOCATION_ORDER, MAX_EMBEDDED_CORES_PER_CHIP - @dataclass(eq=True, frozen=True) class PortPair: @@ -38,17 +34,7 @@ class ChannelMap(dict): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._initializers_lookup = dict() - self.embedded_core_allocation_order: EMBEDDED_CORE_ALLOCATION_ORDER = \ - EMBEDDED_CORE_ALLOCATION_ORDER.NORMAL - self._lmt_allocation_dict: ty.Dict[int, int] = \ - defaultdict(self._get_embedded_initial_allocation_id) - - def _get_embedded_initial_allocation_id(self) -> int: - if self.embedded_core_allocation_order is \ - EMBEDDED_CORE_ALLOCATION_ORDER.NORMAL: - return -1 - else: - return MAX_EMBEDDED_CORES_PER_CHIP + self._lmt_allocation_dict: ty.Dict[int, int] = defaultdict(lambda: -1) def __setitem__( self, key: PortPair, value: Payload, dict_setitem=dict.__setitem__ diff --git a/src/lava/magma/compiler/compiler.py b/src/lava/magma/compiler/compiler.py index 8b047d158..c8ac39807 100644 --- a/src/lava/magma/compiler/compiler.py +++ b/src/lava/magma/compiler/compiler.py @@ -129,9 +129,6 @@ def compile( proc_group_digraph = ProcGroupDiGraphs(process, run_cfg) proc_groups: ty.List[ProcGroup] = proc_group_digraph.get_proc_groups() channel_map = ChannelMap.from_proc_groups(proc_groups) - if isinstance(run_cfg, AbstractLoihiHWRunCfg): - channel_map.embedded_core_allocation_order = \ - run_cfg.embedded_core_allocation_order proc_builders, channel_map = self._compile_proc_groups( proc_groups, channel_map ) @@ -709,6 +706,8 @@ def _create_runtime_service_builder( if isinstance(run_cfg, AbstractLoihiHWRunCfg): rs_kwargs["pre_run_fxs"] = run_cfg.pre_run_fxs rs_kwargs["post_run_fxs"] = run_cfg.post_run_fxs + rs_kwargs["embedded_core_allocation_order"] = \ + run_cfg.embedded_core_allocation_order rs_builder = RuntimeServiceBuilder( rs_class, From 3c5a52bec2d4254ca4b84c41a83cf802189e9eae Mon Sep 17 00:00:00 2001 From: bamsumit Date: Fri, 16 Sep 2022 15:06:55 -0700 Subject: [PATCH 12/14] Delta encoder implementation Signed-off-by: bamsumit --- src/lava/proc/io/__init__.py | 4 +- src/lava/proc/io/encoder.py | 244 +++++++++++++++++++++++++++++++++++ src/lava/utils/system.py | 55 ++++++++ 3 files changed, 301 insertions(+), 2 deletions(-) create mode 100644 src/lava/proc/io/encoder.py create mode 100644 src/lava/utils/system.py diff --git a/src/lava/proc/io/__init__.py b/src/lava/proc/io/__init__.py index 757a71f38..4db497580 100644 --- a/src/lava/proc/io/__init__.py +++ b/src/lava/proc/io/__init__.py @@ -2,6 +2,6 @@ # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ -from . 
import reset, source, sink, dataloader +from . import reset, source, sink, dataloader, encoder -__all__ = ['reset', 'source', 'sink', 'dataloader'] +__all__ = ['reset', 'source', 'sink', 'dataloader', 'encoder'] diff --git a/src/lava/proc/io/encoder.py b/src/lava/proc/io/encoder.py new file mode 100644 index 000000000..68b6ea814 --- /dev/null +++ b/src/lava/proc/io/encoder.py @@ -0,0 +1,244 @@ +# Copyright (C) 2021-22 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +from typing import Dict, Tuple, Union, Optional +import numpy as np +from enum import Enum, unique + +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.ports.ports import InPort, OutPort +from lava.magma.core.process.variable import Var +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.resources import HostCPU +from lava.magma.core.decorator import implements, requires, tag +from lava.magma.core.model.py.model import PyLoihiProcessModel +from lava.proc.sdn.models import AbstractDeltaModel + + +@unique +class Compression(Enum): + """Enumeration of message compression mode. + + Atrributes + ---------- + DENSE: + No compression. Raw 32 bit data is communicated as it is. + SPARSE: + Sparse 32 bit data and index is communicated. + PACKED_4: + Four 8 bit data packed into 32 bit message. NOTE: only works for 8 bit + data. + DELTA_SPARSE_8: + 8 bit data and 8 bit delta encoded index. NOTE: only works for 8 bit + data. + """ + DENSE = 0 + SPARSE = 1 + PACKED_4 = 2 + DELTA_SPARSE_8 = 3 + + +class DeltaEncoder(AbstractProcess): + """Delta encoding with threshold. + + Delta encoding looks at the difference of new input and sends only the + difference (delta) when it is more than a positive threshold. + + Delta dynamics: + delta = act_new - act + residue # delta encoding + s_out = delta if abs(delta) > vth else 0 # spike mechanism + residue = delta - s_out # residue accumulation + act = act_new + + Parameters + ---------- + shape: Tuple + Shape of the sigma process. + vth: int or float + Threshold of the delta encoder. + spike_exp: int + Scaling exponent with base 2 for the spike message. + Note: This should only be used for fixed point models. + Default is 0. + compression : Compression + Data compression mode, by default DENSE compression. 
+ """ + def __init__(self, + *, + shape: Tuple[int, ...], + vth: Union[int, float], + spike_exp: Optional[int] = 0, + compression: Compression = Compression.DENSE) -> None: + super().__init__(shape=shape, vth=vth, cum_error=False, + spike_exp=spike_exp, state_exp=0) + + vth = vth * (1 << (spike_exp)) + + self.a_in = InPort(shape=shape) + self.s_out = OutPort(shape=shape) + + self.vth = Var(shape=(1,), init=vth) + self.act = Var(shape=shape, init=0) + self.residue = Var(shape=shape, init=0) + self.spike_exp = Var(shape=(1,), init=spike_exp) + self.proc_params['compression'] = compression + + @property + def shape(self) -> Tuple[int, ...]: + return self.proc_params['shape'] + + +@requires(HostCPU) +class AbstractPyDeltaEncoderModel(PyLoihiProcessModel): + """Implementation of Delta encoder.""" + a_in = None + s_out = None + + vth: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24) + act: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24) + residue: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24) + spike_exp: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=3) + + def encode_delta(self, act_new): + delta = act_new - self.act + self.residue + s_out = np.where(np.abs(delta) >= self.vth, delta, 0) + self.residue = delta - s_out + self.act = act_new + return s_out + + +@implements(proc=DeltaEncoder, protocol=LoihiProtocol) +@tag('dense_out') +class PyDeltaEncoderModelDense(AbstractPyDeltaEncoderModel): + """Dense (No) compression Model of PyDeltaEncoder.""" + a_in = LavaPyType(PyInPort.VEC_DENSE, np.int32, precision=24) + s_out = LavaPyType(PyOutPort.VEC_DENSE, np.int32, precision=24) + + def __init__(self, proc_params: Optional[Dict] = None): + super().__init__(proc_params) + self.s_out_buf = np.zeros(self.proc_params['shape']) + self.compression = self.proc_params['compression'] + if self.compression != Compression.DENSE: + raise RuntimeError('Wrong process model selected. ' + 'Expected DENSE compression mode. ' + f'Found {self.compression=}.') + + def run_spk(self): + self.s_out.send(self.s_out_buf) + a_in_data = np.left_shift(self.a_in.recv(), self.spike_exp) + self.s_out_buf = self.encode_delta(a_in_data) + + +@implements(proc=DeltaEncoder, protocol=LoihiProtocol) +@tag('sparse_out') +class PyDeltaEncoderModelSparse(AbstractPyDeltaEncoderModel): + """Sparse compression Model of PyDeltaEncoder. + + Based on compression mode, it can be + * SPARSE: 32 bit data and 32 bit index used for messaging sparse data. + * PACKED_4: Four 8 bit data packed into one 32 bit data for messaging. + * DELTA_SPARSE_8: 8 bit index and 8 bit data messaging. + """ + a_in = LavaPyType(PyInPort.VEC_DENSE, np.int32, precision=24) + s_out = LavaPyType(PyOutPort.VEC_SPARSE, np.int32, precision=24) + + def __init__(self, proc_params: Optional[Dict] = None): + super().__init__(proc_params) + self.data = np.array([0]) + self.idx = np.array([0]) + self.compression = self.proc_params['compression'] + if not( + self.compression == Compression.SPARSE + or self.compression == Compression.PACKED_4 + or self.compression == Compression.DELTA_SPARSE_8 + ): + raise RuntimeError('Wrong process model selected. ' + 'Expected SPARSE or PACKED_4 or DELTA_SPARSE_8 ' + 'compression mode. 
' + f'Found {self.compression=}.') + + def encode_sparse(self, s_out): + """Basic sparse encoding.""" + idx = np.argwhere(s_out.flatten() != 0) + data = s_out.flatten()[idx] + if len(idx) == 0: + idx = np.array([0]) + data = np.array([0]) + return data, idx + + def encode_packed_4(self, s_out): + """4x 8bit data encodig into one 32 bit data.""" + padded = np.zeros(int(np.ceil(np.prod(s_out.shape) / 8) * 8)) + padded[:np.prod(s_out.shape)] = np.bitwise_and(s_out.flatten(), 0xFF) + padded = padded.astype(np.int32) + packed = (np.left_shift(padded[3::4], 24) + + np.left_shift(padded[2::4], 16) + + np.left_shift(padded[1::4], 8) + + padded[0::4]) + return packed[0::2], packed[1::2] + + def encode_delta_sparse_8(self, s_out): + """8 bit compressed data and index encoding.""" + idx = np.argwhere(s_out.flatten() != 0) + data = s_out.flatten()[idx] + if len(idx) == 0: + idx = np.array([0]) + data = np.array([0]) + + # 8 bit index encoding + idx[1:] = idx[1:] - idx[:-1] - 1 # default increment of 1 + delta_idx = [] + delta_data = [] + max_idx = 0xFF + start = 0 + for i in np.argwhere(idx >= max_idx)[:, 0]: + delta_idx.append((idx[start:i].flatten()) % max_idx) + delta_data.append(data[start:i].flatten()) + delta_idx.append(np.array([max_idx - 1] * (idx[i] // max_idx))) + delta_data.append(np.array([0] * (idx[i] // max_idx))) + start = i + if len(delta_idx) > 0: + delta_idx = np.concatenate(delta_idx) + delta_data = np.concatenate(delta_data) + else: + delta_idx = idx.flatten() + delta_data = data.flatten() + + # Decoding + # idx = delta_idx + # idx[1:] += 1 + # idx = np.cumsum(idx) + # data = delta_data + padded_idx = np.zeros(int(np.ceil(len(delta_idx) / 4) * 4)) + padded_data = np.zeros(int(np.ceil(len(delta_data) / 4) * 4)) + + padded_idx[:len(delta_idx)] = np.bitwise_and(delta_idx, 0xFF) + padded_data[:len(delta_data)] = np.bitwise_and(delta_data, 0xFF) + + padded_idx = padded_idx.astype(np.int32) + padded_data = padded_data.astype(np.int32) + + packed_idx = (np.left_shift(padded_idx[3::4], 24) + + np.left_shift(padded_idx[2::4], 16) + + np.left_shift(padded_idx[1::4], 8) + + padded_idx[0::4]) + packed_data = (np.left_shift(padded_data[3::4], 24) + + np.left_shift(padded_data[2::4], 16) + + np.left_shift(padded_data[1::4], 8) + + padded_data[0::4]) + return packed_data, packed_idx + + def run_spk(self): + self.s_out.send(self.data, self.idx) + # Receive synaptic input + a_in_data = np.left_shift(self.a_in.recv(), self.spike_exp) + s_out = self.encode_delta(a_in_data) + if self.compression == Compression.SPARSE: + self.data, self.idx = self.encode_sparse(s_out) + elif self.compression == Compression.PACKED_4: + self.data, self.idx = self.encode_packed_4(s_out) + elif self.compression == Compression.DELTA_SPARSE_8: + self.data, self.idx = self.encode_delta_sparse_8(s_out) diff --git a/src/lava/utils/system.py b/src/lava/utils/system.py new file mode 100644 index 000000000..b13358692 --- /dev/null +++ b/src/lava/utils/system.py @@ -0,0 +1,55 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import os + +class staticproperty(property): + """Wraps static member function of a class as a static property of that + class. + """ + def __get__(self, cls, owner): + return staticmethod(self.fget).__get__(None, owner)() + +class Loihi2: + preferred_partition: str = 'kp_stack' + + @staticmethod + def set_environ_settings(partititon: str = 'kp_stack') -> None: + """Sets the os environment for execution on Loihi. 
+ + Parameters + ---------- + partititon : str, optional + Loihi partition name, by default 'kp_stack' + """ + os.environ['SLURM'] = '1' + os.environ['LOIHI_GEN'] = 'N3B3' + os.environ['PARTITION'] = partititon + + @staticproperty + def is_loihi2_available() -> bool: + """Checks if Loihi2 compiler is available and sets the environment + vairables. + + Returns + ------- + bool + Flag indicating whether Loih 2 is available or not. + """ + try: + from lava.magma.compiler.subcompilers.nc.ncproc_compiler import \ + CompilerOptions + CompilerOptions.verbose = True + except ModuleNotFoundError: + # Loihi2 compiler is not availabe + return False + Loihi2.set_environ_settings(Loihi2.preferred_partition) + return True + + @staticproperty + def partition(): + """Get the partition information.""" + if 'PARTITION' in os.environ.keys(): + return os.environ['PARTITION'] + return 'Unspecified' From bef9b023db97f91bcbcff012be6e98ec7b4ee0d7 Mon Sep 17 00:00:00 2001 From: bamsumit Date: Fri, 16 Sep 2022 15:24:40 -0700 Subject: [PATCH 13/14] Input encoding and compression for PilotNet Signed-off-by: bamsumit --- src/lava/magma/compiler/channel_map.py | 1 + src/lava/magma/compiler/mapper.py | 5 ++-- .../magma/compiler/subcompilers/constants.py | 9 +++--- src/lava/magma/core/run_configs.py | 18 ++++++------ src/lava/proc/io/encoder.py | 29 ++++++++++--------- src/lava/utils/system.py | 3 ++ 6 files changed, 36 insertions(+), 29 deletions(-) diff --git a/src/lava/magma/compiler/channel_map.py b/src/lava/magma/compiler/channel_map.py index 4d8e11000..79039cd0a 100644 --- a/src/lava/magma/compiler/channel_map.py +++ b/src/lava/magma/compiler/channel_map.py @@ -12,6 +12,7 @@ from lava.magma.core.process.ports.ports import AbstractPort from lava.magma.core.process.ports.ports import AbstractSrcPort, AbstractDstPort + @dataclass(eq=True, frozen=True) class PortPair: src: AbstractSrcPort diff --git a/src/lava/magma/compiler/mapper.py b/src/lava/magma/compiler/mapper.py index a14782a21..815d83eb9 100644 --- a/src/lava/magma/compiler/mapper.py +++ b/src/lava/magma/compiler/mapper.py @@ -132,8 +132,9 @@ def map_cores(self, executable: Executable, port_pair].dst_port_initializer.var_model # Checking to see if its ConvInVarModel or not if hasattr(var_model, "address"): - dst_addr: ty.List[LoihiAddress] = channel_map[ - port_pair].dst_port_initializer.var_model.address + vm = channel_map[ + port_pair].dst_port_initializer.var_model + dst_addr: ty.List[LoihiAddress] = vm.address chips = [addr.physical_chip_id for addr in dst_addr] else: # Will be here for Conv Regions which will have diff --git a/src/lava/magma/compiler/subcompilers/constants.py b/src/lava/magma/compiler/subcompilers/constants.py index cb2e03c12..805f97a38 100644 --- a/src/lava/magma/compiler/subcompilers/constants.py +++ b/src/lava/magma/compiler/subcompilers/constants.py @@ -11,11 +11,12 @@ MAX_EMBEDDED_CORES_PER_CHIP = 3 -class EMBEDDED_CORE_ALLOCATION_ORDER(IntEnum): + +class EMBEDDED_ALLOCATION_ORDER(IntEnum): NORMAL = 1 """Allocate embedded cores in normal order 0, 1, 2""" REVERSED = -1 - """Allocate embedded cores in reverse order 2, 1, 0. This is useful in - situations in case of certain tasks which take longer than others and - need to be scheduled on embedded core 0 to ensure nxcore does not stop + """Allocate embedded cores in reverse order 2, 1, 0. 
This is useful in + situations in case of certain tasks which take longer than others and + need to be scheduled on embedded core 0 to ensure nxcore does not stop communicating on channels""" diff --git a/src/lava/magma/core/run_configs.py b/src/lava/magma/core/run_configs.py index c7c629175..2b551d748 100644 --- a/src/lava/magma/core/run_configs.py +++ b/src/lava/magma/core/run_configs.py @@ -22,7 +22,7 @@ class AbstractNcProcessModel: from lava.magma.core.sync.domain import SyncDomain from lava.magma.compiler.subcompilers.constants import \ - EMBEDDED_CORE_ALLOCATION_ORDER + EMBEDDED_ALLOCATION_ORDER if ty.TYPE_CHECKING: from lava.magma.core.process.process import AbstractProcess @@ -364,6 +364,7 @@ class Loihi1HwCfg(AbstractLoihiHWRunCfg): a tag provided by the user. This RunConfig will default to a PyProcModel if no Loihi1-compatible ProcModel is being found. .""" + def __init__(self, custom_sync_domains: ty.Optional[ty.List[SyncDomain]] = None, select_tag: ty.Optional[str] = None, @@ -374,8 +375,7 @@ def __init__(self, loglevel: int = logging.WARNING, pre_run_fxs: ty.List[ty.Callable] = [], post_run_fxs: ty.List[ty.Callable] = [], - embedded_core_allocation_order = \ - EMBEDDED_CORE_ALLOCATION_ORDER.NORMAL): + embedded_allocation_order=EMBEDDED_ALLOCATION_ORDER.NORMAL): super().__init__(custom_sync_domains, select_tag, select_sub_proc_model, @@ -383,8 +383,8 @@ def __init__(self, loglevel) self.pre_run_fxs: ty.List[ty.Callable] = pre_run_fxs self.post_run_fxs: ty.List[ty.Callable] = post_run_fxs - self.embedded_core_allocation_order: EMBEDDED_CORE_ALLOCATION_ORDER = \ - embedded_core_allocation_order + self.embedded_allocation_order: EMBEDDED_ALLOCATION_ORDER = \ + embedded_allocation_order def _order_according_to_resources(self, proc_models: ty.List[ty.Type[ AbstractProcessModel]]) -> ty.List[int]: @@ -427,6 +427,7 @@ class Loihi2HwCfg(AbstractLoihiHWRunCfg): a tag provided by the user. This RunConfig will default to a PyProcModel if no Loihi2-compatible ProcModel is being found. """ + def __init__(self, custom_sync_domains: ty.Optional[ty.List[SyncDomain]] = None, select_tag: ty.Optional[str] = None, @@ -437,8 +438,7 @@ def __init__(self, loglevel: int = logging.WARNING, pre_run_fxs: ty.List[ty.Callable] = [], post_run_fxs: ty.List[ty.Callable] = [], - embedded_core_allocation_order = \ - EMBEDDED_CORE_ALLOCATION_ORDER.NORMAL): + embedded_allocation_order=EMBEDDED_ALLOCATION_ORDER.NORMAL): super().__init__(custom_sync_domains, select_tag, select_sub_proc_model, @@ -446,8 +446,8 @@ def __init__(self, loglevel) self.pre_run_fxs: ty.List[ty.Callable] = pre_run_fxs self.post_run_fxs: ty.List[ty.Callable] = post_run_fxs - self.embedded_core_allocation_order: EMBEDDED_CORE_ALLOCATION_ORDER = \ - embedded_core_allocation_order + self.embedded_allocation_order: EMBEDDED_ALLOCATION_ORDER = \ + embedded_allocation_order def _order_according_to_resources(self, proc_models: ty.List[ty.Type[ AbstractProcessModel]]) -> ty.List[int]: diff --git a/src/lava/proc/io/encoder.py b/src/lava/proc/io/encoder.py index 68b6ea814..cb67473b3 100644 --- a/src/lava/proc/io/encoder.py +++ b/src/lava/proc/io/encoder.py @@ -66,6 +66,7 @@ class DeltaEncoder(AbstractProcess): compression : Compression Data compression mode, by default DENSE compression. """ + def __init__(self, *, shape: Tuple[int, ...], @@ -136,7 +137,7 @@ def run_spk(self): @tag('sparse_out') class PyDeltaEncoderModelSparse(AbstractPyDeltaEncoderModel): """Sparse compression Model of PyDeltaEncoder. 
- + Based on compression mode, it can be * SPARSE: 32 bit data and 32 bit index used for messaging sparse data. * PACKED_4: Four 8 bit data packed into one 32 bit data for messaging. @@ -159,7 +160,7 @@ def __init__(self, proc_params: Optional[Dict] = None): 'Expected SPARSE or PACKED_4 or DELTA_SPARSE_8 ' 'compression mode. ' f'Found {self.compression=}.') - + def encode_sparse(self, s_out): """Basic sparse encoding.""" idx = np.argwhere(s_out.flatten() != 0) @@ -174,10 +175,10 @@ def encode_packed_4(self, s_out): padded = np.zeros(int(np.ceil(np.prod(s_out.shape) / 8) * 8)) padded[:np.prod(s_out.shape)] = np.bitwise_and(s_out.flatten(), 0xFF) padded = padded.astype(np.int32) - packed = (np.left_shift(padded[3::4], 24) + - np.left_shift(padded[2::4], 16) + - np.left_shift(padded[1::4], 8) + - padded[0::4]) + packed = (np.left_shift(padded[3::4], 24) + + np.left_shift(padded[2::4], 16) + + np.left_shift(padded[1::4], 8) + + padded[0::4]) return packed[0::2], packed[1::2] def encode_delta_sparse_8(self, s_out): @@ -221,14 +222,14 @@ def encode_delta_sparse_8(self, s_out): padded_idx = padded_idx.astype(np.int32) padded_data = padded_data.astype(np.int32) - packed_idx = (np.left_shift(padded_idx[3::4], 24) + - np.left_shift(padded_idx[2::4], 16) + - np.left_shift(padded_idx[1::4], 8) + - padded_idx[0::4]) - packed_data = (np.left_shift(padded_data[3::4], 24) + - np.left_shift(padded_data[2::4], 16) + - np.left_shift(padded_data[1::4], 8) + - padded_data[0::4]) + packed_idx = (np.left_shift(padded_idx[3::4], 24) + + np.left_shift(padded_idx[2::4], 16) + + np.left_shift(padded_idx[1::4], 8) + + padded_idx[0::4]) + packed_data = (np.left_shift(padded_data[3::4], 24) + + np.left_shift(padded_data[2::4], 16) + + np.left_shift(padded_data[1::4], 8) + + padded_data[0::4]) return packed_data, packed_idx def run_spk(self): diff --git a/src/lava/utils/system.py b/src/lava/utils/system.py index b13358692..9d824edc0 100644 --- a/src/lava/utils/system.py +++ b/src/lava/utils/system.py @@ -4,13 +4,16 @@ import os + class staticproperty(property): """Wraps static member function of a class as a static property of that class. """ + def __get__(self, cls, owner): return staticmethod(self.fget).__get__(None, owner)() + class Loihi2: preferred_partition: str = 'kp_stack' From b62e771e4644e32d317e681ed474cf6c2157fdb6 Mon Sep 17 00:00:00 2001 From: bamsumit Date: Fri, 16 Sep 2022 15:58:05 -0700 Subject: [PATCH 14/14] Refactoring fix Signed-off-by: bamsumit --- src/lava/magma/compiler/compiler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lava/magma/compiler/compiler.py b/src/lava/magma/compiler/compiler.py index c8ac39807..a658aa2bd 100644 --- a/src/lava/magma/compiler/compiler.py +++ b/src/lava/magma/compiler/compiler.py @@ -706,8 +706,8 @@ def _create_runtime_service_builder( if isinstance(run_cfg, AbstractLoihiHWRunCfg): rs_kwargs["pre_run_fxs"] = run_cfg.pre_run_fxs rs_kwargs["post_run_fxs"] = run_cfg.post_run_fxs - rs_kwargs["embedded_core_allocation_order"] = \ - run_cfg.embedded_core_allocation_order + rs_kwargs["embedded_allocation_order"] = \ + run_cfg.embedded_allocation_order rs_builder = RuntimeServiceBuilder( rs_class,
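To close out the encoder-related patches: the thresholded delta dynamics added in PATCH 12 (which the compression modes above build on) follow the DeltaEncoder docstring: delta = act_new - act + residue, a value is sent only when |delta| crosses the threshold, and the unsent remainder is carried forward as residue. A self-contained sketch of one such step, outside of any Lava process:

    import numpy as np

    def delta_step(act_new, act, residue, vth):
        # One step of thresholded delta encoding.
        delta = act_new - act + residue                   # change plus carried residue
        s_out = np.where(np.abs(delta) >= vth, delta, 0)  # send only above threshold
        residue = delta - s_out                           # keep what was not sent
        return s_out, act_new, residue

    # Example: with vth = 5, a change of 3 is held back while a change of 7 is sent.
    s, act, res = delta_step(np.array([3, 7]), np.array([0, 0]), np.array([0, 0]), vth=5)
    # s == [0, 7], res == [3, 0]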