Input compression features for large dimension inputs and infrastructure for convolution feature (lava-nc#344)

* Update refport unittest to always wait when it writes to a port, for consistent behavior

Signed-off-by: bamsumit <[email protected]>

* Removed pyproject changes

Signed-off-by: bamsumit <[email protected]>

* Fixes to convolution tests. Fixed incompatible mnist_pretrained for old Python versions.

Signed-off-by: bamsumit <[email protected]>

* Missing module parent fix

Signed-off-by: bamsumit <[email protected]>

* Added ConvVarModel

* Made changes in mapper to map regions for ConvInVarModel

* Added ConvNeuronVarModel

* Implement ConvNeuronVarModel

* Fixed lifrest doc

Signed-off-by: bamsumit <[email protected]>

* Add embedded core allocation order

* Fix embedded core allocation order

* Delta encoder implementation

Signed-off-by: bamsumit <[email protected]>

* Input encoding and compression for PilotNet

Signed-off-by: bamsumit <[email protected]>

* Refactoring fix

Signed-off-by: bamsumit <[email protected]>

Signed-off-by: bamsumit <[email protected]>
Co-authored-by: Joyesh Mishra <[email protected]>
Co-authored-by: yashward <[email protected]>
3 people committed Sep 18, 2022
1 parent 97b5073 commit caa284a
Showing 11 changed files with 425 additions and 13 deletions.
3 changes: 2 additions & 1 deletion src/lava/magma/compiler/channel_map.py
@@ -61,7 +61,8 @@ def lmt_allocation_dict(self) -> ty.Dict[int, int]:
return self._lmt_allocation_dict

@classmethod
def from_proc_groups(self, proc_groups: ty.List[ProcGroup]) -> "ChannelMap":
def from_proc_groups(self,
proc_groups: ty.List[ProcGroup]) -> "ChannelMap":
"""Initializes a ChannelMap from a list of process ProcGroups
extracting the ports from every process group.
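A minimal usage sketch for the reflowed classmethod (assuming `proc_groups` is the `ty.List[ProcGroup]` assembled earlier in compilation):

```python
from lava.magma.compiler.channel_map import ChannelMap

# proc_groups: ty.List[ProcGroup], produced by the compiler before
# channel mapping; ChannelMap pulls the ports out of every group.
channel_map = ChannelMap.from_proc_groups(proc_groups)
```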
2 changes: 2 additions & 0 deletions src/lava/magma/compiler/compiler.py
@@ -706,6 +706,8 @@ def _create_runtime_service_builder(
if isinstance(run_cfg, AbstractLoihiHWRunCfg):
rs_kwargs["pre_run_fxs"] = run_cfg.pre_run_fxs
rs_kwargs["post_run_fxs"] = run_cfg.post_run_fxs
rs_kwargs["embedded_allocation_order"] = \
run_cfg.embedded_allocation_order

rs_builder = RuntimeServiceBuilder(
rs_class,
16 changes: 13 additions & 3 deletions src/lava/magma/compiler/mapper.py
@@ -128,9 +128,19 @@ def map_cores(self, executable: Executable,
# Checking if the initializers are the same
if channel_map[port_pair].src_port_initializer == ports[
port]:
dst_addr: ty.List[LoihiAddress] = channel_map[
port_pair].dst_port_initializer.var_model.address
chips = [addr.physical_chip_id for addr in dst_addr]
var_model = channel_map[
port_pair].dst_port_initializer.var_model
# Checking whether the var model is a ConvInVarModel or not
if hasattr(var_model, "address"):
vm = channel_map[
port_pair].dst_port_initializer.var_model
dst_addr: ty.List[LoihiAddress] = vm.address
chips = [addr.physical_chip_id for addr in dst_addr]
else:
# This branch handles conv regions, which come with a
# ConvInVarModel
chips = [region.physical_chip_idx for region in
var_model.regions]
address.update(chips)
# Set address of c ref_var as that of the var port its
# pointing to
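The mapper now dispatches on the shape of the destination var model: ordinary Loihi var models expose an `address` list, while a `ConvInVarModel` records placement per region. A simplified, standalone sketch of that dispatch (not the mapper's actual helper):

```python
import typing as ty

def destination_chips(var_model) -> ty.Set[int]:
    """Mirror the mapper's branch above: collect physical chip ids."""
    if hasattr(var_model, "address"):
        # LoihiVarModel-style: one LoihiAddress per allocated element.
        return {addr.physical_chip_id for addr in var_model.address}
    # ConvInVarModel-style: each conv region carries its chip placement.
    return {region.physical_chip_idx for region in var_model.regions}
```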
11 changes: 11 additions & 0 deletions src/lava/magma/compiler/subcompilers/constants.py
@@ -1,6 +1,7 @@
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: LGPL 2.1 or later
# See: https://spdx.org/licenses/
from enum import IntEnum

COUNTERS_PER_EMBEDDED_CORE = 900
EMBEDDED_CORE_COUNTER_START_INDEX = 33
@@ -9,3 +10,13 @@
NUM_VIRTUAL_CORES_L3 = 120

MAX_EMBEDDED_CORES_PER_CHIP = 3


class EMBEDDED_ALLOCATION_ORDER(IntEnum):
NORMAL = 1
"""Allocate embedded cores in normal order: 0, 1, 2."""
REVERSED = -1
"""Allocate embedded cores in reverse order: 2, 1, 0. This is useful
when certain tasks take longer than others and need to be scheduled
on embedded core 0 to ensure the nxcore does not stop communicating
on its channels."""
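Because the enum values are 1 and -1, a member can directly drive the iteration direction over core ids. A minimal sketch (the `core_ids` helper is illustrative, not part of the commit):

```python
from lava.magma.compiler.subcompilers.constants import (
    EMBEDDED_ALLOCATION_ORDER, MAX_EMBEDDED_CORES_PER_CHIP)

def core_ids(order: EMBEDDED_ALLOCATION_ORDER) -> list:
    # NORMAL (1) walks 0, 1, 2; REVERSED (-1) walks 2, 1, 0.
    return list(range(MAX_EMBEDDED_CORES_PER_CHIP))[::int(order)]

assert core_ids(EMBEDDED_ALLOCATION_ORDER.NORMAL) == [0, 1, 2]
assert core_ids(EMBEDDED_ALLOCATION_ORDER.REVERSED) == [2, 1, 0]
```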
11 changes: 10 additions & 1 deletion src/lava/magma/compiler/utils.py
@@ -6,7 +6,7 @@
from lava.magma.compiler.mappable_interface import Mappable
from lava.magma.compiler.subcompilers.address import NcLogicalAddress, \
NcVirtualAddress
from lava.magma.compiler.var_model import LoihiVarModel
from lava.magma.compiler.var_model import LoihiVarModel, ConvInVarModel
from lava.magma.core.model.spike_type import SpikeType


@@ -58,6 +58,10 @@ def get_logical(self) -> ty.List[NcLogicalAddress]:
-------
Returns logical address of the port initializer.
"""
# TODO: Need to clean this
if isinstance(self.var_model, ConvInVarModel):
return self.var_model.get_logical()

return [NcLogicalAddress(chip_id=addr.logical_chip_id,
core_id=addr.logical_core_id) for addr in
self.var_model.address]
@@ -73,6 +77,11 @@ def set_virtual(self, addrs: ty.List[NcVirtualAddress]):
-------
"""
# TODO: Need to clean this
if isinstance(self.var_model, ConvInVarModel):
self.var_model.set_virtual(addrs)
return

if len(addrs) != len(self.var_model.address):
raise ValueError("Length of list of address provided doesn't "
"match size of the address list of the port "
70 changes: 66 additions & 4 deletions src/lava/magma/compiler/var_model.py
@@ -15,17 +15,20 @@
pass
from lava.magma.core.process.variable import Var

ChipIdx: int
CoreIdx: int


@dataclass
class LoihiAddress:
# physical chip id of the var
physical_chip_id: int
physical_chip_id: ChipIdx
# physical core id of the nc var or lmt_id of the spike counter
physical_core_id: int
physical_core_id: CoreIdx
# logical chip id used in compilation, before mapping to hardware addresses
logical_chip_id: int
logical_chip_id: ChipIdx
# logical core id used in compilation, before mapping to hardware addresses
logical_core_id: int
logical_core_id: CoreIdx
# logical address/index of the var; used with nodesets for get/set
logical_idx_addr: int
# length of the contiguous addresses of var on core_id on chip_id
@@ -122,3 +125,62 @@ class CVarModel(LoihiVarModel):
@dataclass
class NcVarModel(LoihiVarModel):
pass


@dataclass
class Region:
x_min: int
x_max: int
y_min: int
y_max: int
logical_chip_idx: ChipIdx
logical_core_idx: CoreIdx
physical_chip_idx: ChipIdx = None
physical_core_idx: CoreIdx = None


@dataclass
class ConvInVarModel(AbstractVarModel, Mappable):
x_dim: int = 0
y_dim: int = 0
f_dim: int = 0
x_split: int = 0
f_split: int = 0
regions: ty.List[Region] = None

def get_logical(self) -> ty.List[NcLogicalAddress]:
"""
Returns
-------
Returns logical address of the port initializer.
"""
return [NcLogicalAddress(chip_id=region.logical_chip_idx,
core_id=region.logical_core_idx) for region in
self.regions]

def set_virtual(self, addrs: ty.List[NcVirtualAddress]):
"""
Sets physical address of the port initializer
Parameters
----------
addrs: List of addresses
Returns
-------
"""
if len(addrs) != len(self.regions):
raise ValueError("Length of list of address provided doesn't "
"match size of the regions list of the port "
"initializer.")
for idx, addr in enumerate(addrs):
self.regions[idx].physical_chip_idx = addr.chip_id
self.regions[idx].physical_core_idx = addr.core_id


@dataclass
class ConvNeuronVarModel(LoihiNeuronVarModel):
alloc_dims: ty.List[ty.Tuple[int, int, int]] = None
valid_dims: ty.List[ty.Tuple[int, int, int]] = None
var_shape: ty.Tuple[int, int, int] = None
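A sketch of the new region bookkeeping, with two invented regions splitting a 32x32 conv input along x. A full `ConvInVarModel` would additionally need the fields inherited from `AbstractVarModel`, and the `NcVirtualAddress` construction assumes the same `chip_id`/`core_id` fields that `set_virtual` reads:

```python
from lava.magma.compiler.subcompilers.address import NcVirtualAddress
from lava.magma.compiler.var_model import Region

# Two hypothetical regions produced by an x-split of a 32x32 input.
regions = [
    Region(x_min=0, x_max=16, y_min=0, y_max=32,
           logical_chip_idx=0, logical_core_idx=0),
    Region(x_min=16, x_max=32, y_min=0, y_max=32,
           logical_chip_idx=0, logical_core_idx=1),
]

# set_virtual() pairs addresses with regions by index, exactly one
# address per region; this loop mirrors its effect.
addrs = [NcVirtualAddress(chip_id=0, core_id=2),
         NcVirtualAddress(chip_id=0, core_id=3)]
for region, addr in zip(regions, addrs):
    region.physical_chip_idx = addr.chip_id
    region.physical_core_idx = addr.core_id
```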
14 changes: 12 additions & 2 deletions src/lava/magma/core/run_configs.py
@@ -21,6 +21,8 @@ class AbstractNcProcessModel:
pass

from lava.magma.core.sync.domain import SyncDomain
from lava.magma.compiler.subcompilers.constants import \
EMBEDDED_ALLOCATION_ORDER

if ty.TYPE_CHECKING:
from lava.magma.core.process.process import AbstractProcess
@@ -365,6 +367,7 @@ class Loihi1HwCfg(AbstractLoihiHWRunCfg):
a tag provided by the user. This RunConfig will default to a PyProcModel
if no Loihi1-compatible ProcModel is found."""

def __init__(self,
custom_sync_domains: ty.Optional[ty.List[SyncDomain]] = None,
select_tag: ty.Optional[str] = None,
@@ -374,14 +377,17 @@ def __init__(self,
AbstractProcessModel]]] = None,
loglevel: int = logging.WARNING,
pre_run_fxs: ty.List[ty.Callable] = [],
post_run_fxs: ty.List[ty.Callable] = []):
post_run_fxs: ty.List[ty.Callable] = [],
embedded_allocation_order=EMBEDDED_ALLOCATION_ORDER.NORMAL):
super().__init__(custom_sync_domains,
select_tag,
select_sub_proc_model,
exception_proc_model_map,
loglevel)
self.pre_run_fxs: ty.List[ty.Callable] = pre_run_fxs
self.post_run_fxs: ty.List[ty.Callable] = post_run_fxs
self.embedded_allocation_order: EMBEDDED_ALLOCATION_ORDER = \
embedded_allocation_order

def _order_according_to_resources(self, proc_models: ty.List[ty.Type[
AbstractProcessModel]]) -> ty.List[int]:
@@ -424,6 +430,7 @@ class Loihi2HwCfg(AbstractLoihiHWRunCfg):
a tag provided by the user. This RunConfig will default to a PyProcModel
if no Loihi2-compatible ProcModel is found.
"""

def __init__(self,
custom_sync_domains: ty.Optional[ty.List[SyncDomain]] = None,
select_tag: ty.Optional[str] = None,
@@ -433,14 +440,17 @@ def __init__(self,
AbstractProcessModel]]] = None,
loglevel: int = logging.WARNING,
pre_run_fxs: ty.List[ty.Callable] = [],
post_run_fxs: ty.List[ty.Callable] = []):
post_run_fxs: ty.List[ty.Callable] = [],
embedded_allocation_order=EMBEDDED_ALLOCATION_ORDER.NORMAL):
super().__init__(custom_sync_domains,
select_tag,
select_sub_proc_model,
exception_proc_model_map,
loglevel)
self.pre_run_fxs: ty.List[ty.Callable] = pre_run_fxs
self.post_run_fxs: ty.List[ty.Callable] = post_run_fxs
self.embedded_allocation_order: EMBEDDED_ALLOCATION_ORDER = \
embedded_allocation_order

def _order_according_to_resources(self, proc_models: ty.List[ty.Type[
AbstractProcessModel]]) -> ty.List[int]:
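With the new keyword argument, the allocation order is selected per run configuration and forwarded by the compiler to the `RuntimeServiceBuilder` via `rs_kwargs` (see the compiler.py hunk above). A minimal sketch:

```python
from lava.magma.core.run_configs import Loihi2HwCfg
from lava.magma.compiler.subcompilers.constants import \
    EMBEDDED_ALLOCATION_ORDER

# Allocate embedded cores 2, 1, 0 so that a long-running task can be
# scheduled on core 0 (see the enum's docstring in constants.py).
run_cfg = Loihi2HwCfg(
    embedded_allocation_order=EMBEDDED_ALLOCATION_ORDER.REVERSED)
```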
4 changes: 2 additions & 2 deletions src/lava/proc/io/__init__.py
@@ -2,6 +2,6 @@
# SPDX-License-Identifier: BSD-3-Clause
# See: https://spdx.org/licenses/

from . import reset, source, sink, dataloader
from . import reset, source, sink, dataloader, encoder

__all__ = ['reset', 'source', 'sink', 'dataloader']
__all__ = ['reset', 'source', 'sink', 'dataloader', 'encoder']
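The newly exported `encoder` module (presumably the delta encoder from the commit message) is importable alongside the other io submodules:

```python
from lava.proc import io

print(io.__all__)  # ['reset', 'source', 'sink', 'dataloader', 'encoder']
```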