Input compression features for large dimension inputs and infrastructure for convolution feature #344

Merged: 32 commits, Sep 18, 2022
Commits
141e732
update refport unittest to always wait when it writes to port for con…
bamsumit Mar 8, 2022
7097dd8
Removed pyproject changes
bamsumit Mar 8, 2022
5234c1b
Merge branch 'lava-nc:main' into main
bamsumit Mar 8, 2022
2644255
Merge branch 'lava-nc:main' into main
bamsumit Mar 8, 2022
a3116bb
Merge branch 'lava-nc:main' into main
bamsumit Apr 20, 2022
31715fc
Merge branch 'lava-nc:main' into main
bamsumit Apr 21, 2022
f90595d
Merge branch 'main' of github.com:lava-nc/lava into main
bamsumit Apr 21, 2022
c988ad8
Merge branch 'main' of github.com:bamsumit/lava into main
bamsumit Apr 21, 2022
be2ba52
Merge branch 'lava-nc:main' into main
bamsumit May 20, 2022
1fb7353
Fix to convolution tests. Fixed incompatible mnist_pretrained for old…
bamsumit May 20, 2022
4a58d97
merged with upstream
bamsumit Jul 15, 2022
d4636c3
Merge branch 'main' of github.com:lava-nc/lava into main
bamsumit Jul 27, 2022
68c817c
Missing module parent fix
bamsumit Jul 28, 2022
1b59e58
Merge branch 'main' of github.com:lava-nc/lava into main
bamsumit Jul 29, 2022
9099529
Merge branch 'main' of github.com:lava-nc/lava into main
bamsumit Aug 1, 2022
fd5b813
Merge branch 'main' of github.com:lava-nc/lava into main
bamsumit Aug 25, 2022
fd2163c
Added ConvVarModel
joyeshmishra Aug 26, 2022
2fb345c
Merge branch 'main' of github.com:lava-nc/lava into main
bamsumit Aug 29, 2022
8e73642
Merge branch 'conv_manager' of github.com:lava-nc/lava into main
bamsumit Aug 29, 2022
c35d0b5
Made changes in mapper to map regions for ConvInVarModel
ysingh7 Aug 29, 2022
340164a
Added ConvNeuronVarModel
joyeshmishra Aug 29, 2022
25d633a
Implement ConvNeuronVarModel
joyeshmishra Aug 30, 2022
7e8d7a6
Merge branch 'conv_manager' of github.com:lava-nc/lava into conv_manager
bamsumit Aug 30, 2022
3aa972e
Fixed lifrest doc
bamsumit Sep 2, 2022
5abcd86
Add embedded core allocation order
joyeshmishra Sep 12, 2022
01270bf
Merge branch 'conv_manager' of github.com:lava-nc/lava into conv_manager
bamsumit Sep 12, 2022
cff2a2d
Fix embedded core allocation order
joyeshmishra Sep 12, 2022
9ff0823
Merge branch 'conv_manager' of github.com:lava-nc/lava into conv_manager
bamsumit Sep 12, 2022
3c5a52b
Delta encoder implementation
bamsumit Sep 16, 2022
bef9b02
Input encoding and compression for PilotNet
bamsumit Sep 16, 2022
99a7942
Merge branch 'main' into conv_manager
bamsumit Sep 16, 2022
b62e771
Refactoring fix
bamsumit Sep 16, 2022
3 changes: 2 additions & 1 deletion src/lava/magma/compiler/channel_map.py
@@ -61,7 +61,8 @@ def lmt_allocation_dict(self) -> ty.Dict[int, int]:
return self._lmt_allocation_dict

@classmethod
def from_proc_groups(self, proc_groups: ty.List[ProcGroup]) -> "ChannelMap":
def from_proc_groups(self,
proc_groups: ty.List[ProcGroup]) -> "ChannelMap":
"""Initializes a ChannelMap from a list of process ProcGroups
extracting the ports from every process group.

2 changes: 2 additions & 0 deletions src/lava/magma/compiler/compiler.py
@@ -706,6 +706,8 @@ def _create_runtime_service_builder(
if isinstance(run_cfg, AbstractLoihiHWRunCfg):
rs_kwargs["pre_run_fxs"] = run_cfg.pre_run_fxs
rs_kwargs["post_run_fxs"] = run_cfg.post_run_fxs
rs_kwargs["embedded_allocation_order"] = \
run_cfg.embedded_allocation_order

rs_builder = RuntimeServiceBuilder(
rs_class,
16 changes: 13 additions & 3 deletions src/lava/magma/compiler/mapper.py
@@ -128,9 +128,19 @@ def map_cores(self, executable: Executable,
# Checking if the initializers are same
if channel_map[port_pair].src_port_initializer == ports[
port]:
dst_addr: ty.List[LoihiAddress] = channel_map[
port_pair].dst_port_initializer.var_model.address
chips = [addr.physical_chip_id for addr in dst_addr]
var_model = channel_map[
port_pair].dst_port_initializer.var_model
# Checking to see if its ConvInVarModel or not
if hasattr(var_model, "address"):
vm = channel_map[
port_pair].dst_port_initializer.var_model
dst_addr: ty.List[LoihiAddress] = vm.address
chips = [addr.physical_chip_id for addr in dst_addr]
else:
# Will be here for Conv Regions which will have
# ConvInVarModel
chips = [region.physical_chip_idx for region in
var_model.regions]
address.update(chips)
# Set address of c ref_var as that of the var port its
# pointing to
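Stripped of the channel-map indexing, the mapper change above reduces to a dispatch on the kind of destination var model. The sketch below is illustrative only; the destination_chips helper is hypothetical and not part of this PR, but the attribute names match the diff.

import typing as ty


def destination_chips(var_model) -> ty.Set[int]:
    """Collect the physical chip ids spanned by a destination port.

    Address-based var models (LoihiVarModel) carry a flat list of
    LoihiAddress entries; the convolution input model (ConvInVarModel)
    instead describes rectangular regions, each pinned to a chip and core.
    """
    if hasattr(var_model, "address"):
        return {addr.physical_chip_id for addr in var_model.address}
    # ConvInVarModel: no flat address list, so walk the regions instead.
    return {region.physical_chip_idx for region in var_model.regions}

The diff keys this branch on hasattr(var_model, "address") rather than an isinstance check, which keeps mapper.py from importing the convolution-specific model just for this decision.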
11 changes: 11 additions & 0 deletions src/lava/magma/compiler/subcompilers/constants.py
@@ -1,6 +1,7 @@
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: LGPL 2.1 or later
# See: https://spdx.org/licenses/
from enum import IntEnum

COUNTERS_PER_EMBEDDED_CORE = 900
EMBEDDED_CORE_COUNTER_START_INDEX = 33
@@ -9,3 +10,13 @@
NUM_VIRTUAL_CORES_L3 = 120

MAX_EMBEDDED_CORES_PER_CHIP = 3


class EMBEDDED_ALLOCATION_ORDER(IntEnum):
NORMAL = 1
"""Allocate embedded cores in normal order 0, 1, 2"""
REVERSED = -1
"""Allocate embedded cores in reverse order 2, 1, 0. This is useful in
situations in case of certain tasks which take longer than others and
need to be scheduled on embedded core 0 to ensure nxcore does not stop
communicating on channels"""
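The enum's values double as a step direction, so an allocator can turn them straight into a core visit order. A minimal sketch, assuming only the constants defined above; the core_allocation_order helper is hypothetical and not necessarily how the runtime service builder consumes the enum:

from lava.magma.compiler.subcompilers.constants import \
    EMBEDDED_ALLOCATION_ORDER, MAX_EMBEDDED_CORES_PER_CHIP


def core_allocation_order(order: EMBEDDED_ALLOCATION_ORDER) -> list:
    """Return embedded core ids in the requested allocation order."""
    cores = list(range(MAX_EMBEDDED_CORES_PER_CHIP))  # [0, 1, 2]
    # NORMAL == 1 keeps the natural order; REVERSED == -1 walks it backwards.
    return cores[::order.value]


core_allocation_order(EMBEDDED_ALLOCATION_ORDER.NORMAL)    # [0, 1, 2]
core_allocation_order(EMBEDDED_ALLOCATION_ORDER.REVERSED)  # [2, 1, 0]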
11 changes: 10 additions & 1 deletion src/lava/magma/compiler/utils.py
@@ -6,7 +6,7 @@
from lava.magma.compiler.mappable_interface import Mappable
from lava.magma.compiler.subcompilers.address import NcLogicalAddress, \
NcVirtualAddress
from lava.magma.compiler.var_model import LoihiVarModel
from lava.magma.compiler.var_model import LoihiVarModel, ConvInVarModel
from lava.magma.core.model.spike_type import SpikeType


@@ -58,6 +58,10 @@ def get_logical(self) -> ty.List[NcLogicalAddress]:
-------
Returns logical address of the port initializer.
"""
# TODO: Need to clean this
if isinstance(self.var_model, ConvInVarModel):
return self.var_model.get_logical()

return [NcLogicalAddress(chip_id=addr.logical_chip_id,
core_id=addr.logical_core_id) for addr in
self.var_model.address]
@@ -73,6 +77,11 @@ def set_virtual(self, addrs: ty.List[NcVirtualAddress]):
-------

"""
# TODO: Need to clean this
if isinstance(self.var_model, ConvInVarModel):
self.var_model.set_virtual(addrs)
return

if len(addrs) != len(self.var_model.address):
raise ValueError("Length of list of address provided doesn't "
"match size of the address list of the port "
70 changes: 66 additions & 4 deletions src/lava/magma/compiler/var_model.py
@@ -15,17 +15,20 @@
pass
from lava.magma.core.process.variable import Var

ChipIdx: int
CoreIdx: int


@dataclass
class LoihiAddress:
# physical chip id of the var
physical_chip_id: int
physical_chip_id: ChipIdx
# physical core id of the nc var or lmt_id of the spike counter
physical_core_id: int
physical_core_id: CoreIdx
# logical chip id used in compilation, before mapping to hardware addresses
logical_chip_id: int
logical_chip_id: ChipIdx
# logical core id used in compilation, before mapping to hardware addresses
logical_core_id: int
logical_core_id: CoreIdx
# logical address/index of the var; used with nodesets for get/set
logical_idx_addr: int
# length of the contiguous addresses of var on core_id on chip_id
@@ -122,3 +125,62 @@ class CVarModel(LoihiVarModel):
@dataclass
class NcVarModel(LoihiVarModel):
pass


@dataclass
class Region:
x_min: int
x_max: int
y_min: int
y_max: int
logical_chip_idx: ChipIdx
logical_core_idx: CoreIdx
physical_chip_idx: ChipIdx = None
physical_core_idx: CoreIdx = None


@dataclass
class ConvInVarModel(AbstractVarModel, Mappable):
x_dim: int = 0
y_dim: int = 0
f_dim: int = 0
x_split: int = 0
f_split: int = 0
regions: ty.List[Region] = None

def get_logical(self) -> ty.List[NcLogicalAddress]:
"""

Returns
-------
Returns logical address of the port initializer.
"""
return [NcLogicalAddress(chip_id=region.logical_chip_idx,
core_id=region.logical_core_idx) for region in
self.regions]

def set_virtual(self, addrs: ty.List[NcVirtualAddress]):
"""
Sets physical address of the port initializer
Parameters
----------
addrs: List of address

Returns
-------

"""
if len(addrs) != len(self.regions):
raise ValueError("Length of list of address provided doesn't "
"match size of the regions list of the port "
"initializer.")
for idx, addr in enumerate(addrs):
self.regions[idx].physical_chip_idx = addr.chip_id
self.regions[idx].physical_core_idx = addr.core_id


@dataclass
class ConvNeuronVarModel(LoihiNeuronVarModel):
alloc_dims: ty.List[ty.Tuple[int, int, int]] = None
valid_dims: ty.List[ty.Tuple[int, int, int]] = None
var_shape: ty.Tuple[int, int, int] = None
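The Region/ConvInVarModel pair gives the compiler a Mappable description of a convolution input: get_logical exposes one (chip, core) pair per region, and set_virtual writes the mapper's answer back into those regions. The round trip is sketched below with free functions over a Region list so it runs without constructing a full ConvInVarModel (whose inherited AbstractVarModel fields are omitted here); it assumes NcVirtualAddress mirrors NcLogicalAddress's chip_id/core_id fields, as the set_virtual body above suggests.

from lava.magma.compiler.subcompilers.address import NcLogicalAddress, \
    NcVirtualAddress
from lava.magma.compiler.var_model import Region

# Two regions of a 64x32 input, split along x across two logical cores.
regions = [
    Region(x_min=0, x_max=31, y_min=0, y_max=31,
           logical_chip_idx=0, logical_core_idx=0),
    Region(x_min=32, x_max=63, y_min=0, y_max=31,
           logical_chip_idx=0, logical_core_idx=1),
]

# get_logical(): one logical (chip, core) pair per region.
logical = [NcLogicalAddress(chip_id=r.logical_chip_idx,
                            core_id=r.logical_core_idx) for r in regions]

# set_virtual(): the mapper replies with physical addresses in region order.
virtual = [NcVirtualAddress(chip_id=0, core_id=i)
           for i, _ in enumerate(logical)]
for region, addr in zip(regions, virtual):
    region.physical_chip_idx = addr.chip_id
    region.physical_core_idx = addr.core_id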
14 changes: 12 additions & 2 deletions src/lava/magma/core/run_configs.py
@@ -21,6 +21,8 @@ class AbstractNcProcessModel:
pass

from lava.magma.core.sync.domain import SyncDomain
from lava.magma.compiler.subcompilers.constants import \
EMBEDDED_ALLOCATION_ORDER

if ty.TYPE_CHECKING:
from lava.magma.core.process.process import AbstractProcess
@@ -365,6 +367,7 @@ class Loihi1HwCfg(AbstractLoihiHWRunCfg):
a tag provided by the user. This RunConfig will default to a PyProcModel
if no Loihi1-compatible ProcModel is being found.
."""

def __init__(self,
custom_sync_domains: ty.Optional[ty.List[SyncDomain]] = None,
select_tag: ty.Optional[str] = None,
@@ -374,14 +377,17 @@ def __init__(self,
AbstractProcessModel]]] = None,
loglevel: int = logging.WARNING,
pre_run_fxs: ty.List[ty.Callable] = [],
post_run_fxs: ty.List[ty.Callable] = []):
post_run_fxs: ty.List[ty.Callable] = [],
embedded_allocation_order=EMBEDDED_ALLOCATION_ORDER.NORMAL):
super().__init__(custom_sync_domains,
select_tag,
select_sub_proc_model,
exception_proc_model_map,
loglevel)
self.pre_run_fxs: ty.List[ty.Callable] = pre_run_fxs
self.post_run_fxs: ty.List[ty.Callable] = post_run_fxs
self.embedded_allocation_order: EMBEDDED_ALLOCATION_ORDER = \
embedded_allocation_order

def _order_according_to_resources(self, proc_models: ty.List[ty.Type[
AbstractProcessModel]]) -> ty.List[int]:
@@ -424,6 +430,7 @@ class Loihi2HwCfg(AbstractLoihiHWRunCfg):
a tag provided by the user. This RunConfig will default to a PyProcModel
if no Loihi2-compatible ProcModel is being found.
"""

def __init__(self,
custom_sync_domains: ty.Optional[ty.List[SyncDomain]] = None,
select_tag: ty.Optional[str] = None,
@@ -433,14 +440,17 @@ def __init__(self,
AbstractProcessModel]]] = None,
loglevel: int = logging.WARNING,
pre_run_fxs: ty.List[ty.Callable] = [],
post_run_fxs: ty.List[ty.Callable] = []):
post_run_fxs: ty.List[ty.Callable] = [],
embedded_allocation_order=EMBEDDED_ALLOCATION_ORDER.NORMAL):
super().__init__(custom_sync_domains,
select_tag,
select_sub_proc_model,
exception_proc_model_map,
loglevel)
self.pre_run_fxs: ty.List[ty.Callable] = pre_run_fxs
self.post_run_fxs: ty.List[ty.Callable] = post_run_fxs
self.embedded_allocation_order: EMBEDDED_ALLOCATION_ORDER = \
embedded_allocation_order

def _order_according_to_resources(self, proc_models: ty.List[ty.Type[
AbstractProcessModel]]) -> ty.List[int]:
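On the user side, the new keyword is passed to the hardware run configuration and forwarded by the compiler to the runtime service builder (see the compiler.py change above). A usage sketch, assuming a Loihi 2 setup; the callback lists are shown only to mirror the existing parameters:

from lava.magma.core.run_configs import Loihi2HwCfg
from lava.magma.compiler.subcompilers.constants import \
    EMBEDDED_ALLOCATION_ORDER

# Hand out embedded cores as 2, 1, 0 instead of 0, 1, 2 (see the enum's
# docstring above for when the reversed order is useful).
run_cfg = Loihi2HwCfg(
    pre_run_fxs=[],
    post_run_fxs=[],
    embedded_allocation_order=EMBEDDED_ALLOCATION_ORDER.REVERSED)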
4 changes: 2 additions & 2 deletions src/lava/proc/io/__init__.py
@@ -2,6 +2,6 @@
# SPDX-License-Identifier: BSD-3-Clause
# See: https://spdx.org/licenses/

from . import reset, source, sink, dataloader
from . import reset, source, sink, dataloader, encoder

__all__ = ['reset', 'source', 'sink', 'dataloader']
__all__ = ['reset', 'source', 'sink', 'dataloader', 'encoder']
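The newly exported encoder module ties into the PR's input-compression goal (see the "Delta encoder implementation" and PilotNet commits). As a rough illustration of the idea rather than of that module's API, a thresholded delta encoding sends the first frame in full and afterwards only significant changes, which keeps large, slowly changing inputs sparse:

import numpy as np


def delta_encode(frames: np.ndarray, threshold: float = 0.0) -> np.ndarray:
    """Thresholded delta encoding of a (time, ...) array of frames."""
    deltas = np.zeros_like(frames)
    deltas[0] = frames[0]            # first frame is sent in full
    reference = frames[0].copy()     # what the receiver has reconstructed
    for t in range(1, len(frames)):
        residual = frames[t] - reference
        send = np.abs(residual) > threshold
        deltas[t] = np.where(send, residual, 0)
        # Only update the reference where something was actually sent.
        reference = np.where(send, frames[t], reference)
    return deltas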