Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fixed point (bit-accurate with Loihi) and floating point ProcessModels for LIF process #39

Closed
wants to merge 12 commits into from
Closed
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions lava/__init__.py
Original file line number Diff line number Diff line change
@@ -1 +1,5 @@
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# See: https://spdx.org/licenses/

__import__("pkg_resources").declare_namespace(__name__)
2 changes: 1 addition & 1 deletion lava/magma/compiler/builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -337,7 +337,7 @@ def build(self):
if issubclass(lt.cls, np.ndarray):
var = lt.cls(v.shape, lt.d_type)
var[:] = v.value
elif issubclass(lt.cls, int):
elif issubclass(lt.cls, int) or issubclass(lt.cls, float):
srrisbud marked this conversation as resolved.
Show resolved Hide resolved
var = v.value
else:
raise NotImplementedError
Expand Down
62 changes: 62 additions & 0 deletions lava/magma/core/decorator.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,3 +102,65 @@ def decorate_process_model(cls: type):
return cls

return decorate_process_model


def tag(*args: str):
    """
    Decorator for ProcessModel to add a class variable (a list of tags) to
    the ProcessModel class, which further distinguishes ProcessModels
    implementing the same Process, with the same type and requiring the same
    ComputeResources.

    For example, a user may write multiple ProcessModels in Python
    (`PyProcessModels`), requiring CPU for execution (`@requires(CPU)`). The
    compiler selects the appropriate ProcessModel via `RunConfig` using
    the keywords stored in the list of tags set by this decorator.

    The list of tags is additive over inheritance. Which means, if `@tag`
    decorates a child class, whose parent is already decorated, then the new
    keywords are appended to the tag-list inherited from the parent.

    Parameters
    ----------
    args : str
        Comma-separated keywords (plain strings) that tag a ProcessModel.

    Returns
    -------
    Decorated class

    Raises
    ------
    AssertionError
        If any argument is not a plain string, or if the decorated class is
        not a subclass of AbstractProcessModel.

    Examples
    --------
    @implements(proc=ExampleProcess)
    @tag('bit-accurate', 'loihi')
    class ExampleProcModel(AbstractProcessModel):...

    -> these tags identify a particular ProcessModel as being
    bit-accurate with Loihi hardware platform. Which means,
    the numerical output produced by such a ProcessModel on a CPU would be
    exactly same as that of Loihi.
    """
    # Validate eagerly so misuse is reported at the decoration site,
    # before any class is decorated. Only plain strings are accepted.
    for arg in args:
        if not isinstance(arg, str):
            raise AssertionError("Invalid input to the 'tag' decorator. "
                                 "Valid input should be comma-separated "
                                 "keywords as strings")
    tags = list(args)

    def decorate_process_model(cls: type):
        if not issubclass(cls, AbstractProcessModel):
            raise AssertionError("Decorated class must be a subclass "
                                 "of 'AbstractProcessModel'.")
        # Check existing 'tags' from parent in case of sub-classing.
        # Copy the parent's list so the parent class is not mutated.
        if hasattr(cls, 'tags'):
            setattr(cls, 'tags', cls.tags.copy() + tags)
        else:
            setattr(cls, 'tags', list(tags))
        return cls

    return decorate_process_model
107 changes: 97 additions & 10 deletions lava/proc/lif/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,29 +7,116 @@
from lava.magma.core.model.py.ports import PyInPort, PyOutPort
from lava.magma.core.model.py.type import LavaPyType
from lava.magma.core.resources import CPU
from lava.magma.core.decorator import implements, requires
from lava.magma.core.decorator import implements, requires, tag
from lava.magma.core.model.py.model import PyLoihiProcessModel
from lava.proc.lif.process import LIF


@implements(proc=LIF, protocol=LoihiProtocol)
@requires(CPU)
@tag('floating_pt')
class PyLifModel1(PyLoihiProcessModel):
    """Floating point implementation of the Leaky-Integrate-and-Fire (LIF)
    neural process.

    Implements the abstract LIF dynamics:
        u[t] = u[t-1] * (1 - du) + a_in
        v[t] = v[t-1] * (1 - dv) + u[t] + bias * 2**bias_exp
        s_out = v[t] >= vth
        v[t] = 0 after a spike
    """
    # NOTE: `np.float` was removed from NumPy; the builtin `float` is the
    # equivalent scalar type.
    a_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, float)
    s_out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, bool, precision=1)
    u: np.ndarray = LavaPyType(np.ndarray, float)
    v: np.ndarray = LavaPyType(np.ndarray, float)
    bias: np.ndarray = LavaPyType(np.ndarray, float)
    bias_exp: np.ndarray = LavaPyType(np.ndarray, float)
    du: float = LavaPyType(float, float)
    dv: float = LavaPyType(float, float)
    vth: float = LavaPyType(float, float)

    def run_spk(self):
        """Run one timestep of the spiking phase."""
        a_in_data = self.a_in.recv()
        # Decay current, then integrate synaptic input
        self.u[:] = self.u * (1 - self.du)
        self.u[:] += a_in_data
        # Effective bias = mantissa * 2**exponent
        bias = self.bias * (2 ** self.bias_exp)
        # Decay voltage and integrate current plus bias
        self.v[:] = self.v * (1 - self.dv) + self.u + bias
        s_out = self.v >= self.vth
        self.v[s_out] = 0  # Reset voltage to 0
        self.s_out.send(s_out)


@implements(proc=LIF, protocol=LoihiProtocol)
@requires(CPU)
@tag('bit_accurate_loihi', 'fixed_pt')
class PyLifModel2(PyLoihiProcessModel):
    """Implementation of Leaky-Integrate-and-Fire neural process bit-accurate
    with Loihi's hardware LIF dynamics, which means, it mimics Loihi
    behaviour bit-by-bit.

    Currently missing features (compared to Loihi hardware):
    - refractory period after spiking
    - axonal delays

    Precisions of state variables
    -----------------------------
    du: unsigned 12-bit integer (0 to 4095)
    dv: unsigned 12-bit integer (0 to 4095)
    bias: signed 13-bit integer (-4096 to 4095). Mantissa part of neuron bias.
    bias_exp: unsigned 3-bit integer (0 to 7). Exponent part of neuron bias.
    vth: unsigned 17-bit integer (0 to 131071)
    """
    a_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, np.int16, precision=16)
    s_out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, bool, precision=1)
    u: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24)
    v: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24)
    du: int = LavaPyType(int, np.uint16, precision=12)
    dv: int = LavaPyType(int, np.uint16, precision=12)
    bias: np.ndarray = LavaPyType(np.ndarray, np.int16, precision=13)
    bias_exp: np.ndarray = LavaPyType(np.ndarray, np.int16, precision=3)
    vth: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=17)

    def __init__(self):
        super().__init__()
        # Hardware-specific 1-bit offsets added to the current (ds) and
        # voltage (dm) decay constants.
        self.ds_offset = 1
        self.dm_offset = 0

    def run_spk(self):
        """Run one timestep of the spiking phase, bit-accurate with Loihi."""
        # Receive synaptic input
        a_in_data = self.a_in.recv()

        # Update current
        # --------------
        decay_const_u = self.du + self.ds_offset
        # Below, u is promoted to int64 to avoid overflow of the product
        # between u and decay constant beyond int32. Subsequent right shift
        # by 12 brings us back within 24-bits (and hence, within 32-bits)
        decayed_curr = np.right_shift(
            np.int64(self.u) * (np.left_shift(1, 12) - decay_const_u), 12,
            dtype=np.int32)
        # Hardware left-shifts synaptic input for MSB alignment
        a_in_data = np.left_shift(a_in_data, 6)
        # Add synaptic input to decayed current
        decayed_curr += a_in_data
        # Check if value of current is within 24-bit signed bounds
        neg_current_limit = -np.left_shift(1, 23, dtype=np.int32)
        pos_current_limit = np.left_shift(1, 23, dtype=np.int32) - 1
        clipped_curr = np.clip(decayed_curr, neg_current_limit,
                               pos_current_limit)
        self.u[:] = clipped_curr

        # Update voltage
        # --------------
        decay_const_v = self.dv + self.dm_offset
        neg_voltage_limit = -np.left_shift(1, 23, dtype=np.int32) + 1
        pos_voltage_limit = np.left_shift(1, 23, dtype=np.int32) - 1
        # Decaying voltage similar to current. See the comment above to
        # understand the need for each of the operations below.
        decayed_volt = np.right_shift(
            np.int64(self.v) * (np.left_shift(1, 12) - decay_const_v), 12,
            dtype=np.int32)
        bias = np.left_shift(self.bias, self.bias_exp, dtype=np.int32)
        updated_volt = decayed_volt + self.u + bias
        self.v[:] = np.clip(updated_volt, neg_voltage_limit,
                            pos_voltage_limit)

        # Spike when exceeds threshold
        # ----------------------------
        # In Loihi, user specified threshold is just the mantissa, with a
        # constant exponent of 6
        vth = np.left_shift(self.vth, 6, dtype=np.int32)
        s_out = self.v >= vth
        # Reset voltage of spiked neurons to 0
        self.v[s_out] = 0
        self.s_out.send(s_out)
39 changes: 28 additions & 11 deletions lava/proc/lif/process.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,23 +8,40 @@


class LIF(AbstractProcess):
    """Leaky-Integrate-and-Fire (LIF) neural Process.

    LIF dynamics abstracts to:
    u[t] = u[t-1] * (1-du) + a_in          # neuron current
    v[t] = v[t-1] * (1-dv) + u[t] + bias   # neuron voltage
    s_out = v[t] > vth                     # spike if threshold is exceeded
    v[t] = 0                               # reset at spike

    Parameters
    ----------
    shape: Number of neurons in the process (default: (1,)).
    du: Inverse of decay time-constant for current decay.
    dv: Inverse of decay time-constant for voltage decay.
    bias: Mantissa part of neuron bias.
    bias_exp: Exponent part of neuron bias, if needed. Mostly for fixed point
              implementations. Unnecessary for floating point
              implementations. If specified, bias = bias * 2**bias_exp.
    vth: Neuron threshold voltage, exceeding which, the neuron will spike.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        shape = kwargs.get("shape", (1,))
        du = kwargs.pop("du", 0)
        dv = kwargs.pop("dv", 0)
        bias = kwargs.pop("bias", 0)
        bias_exp = kwargs.pop("bias_exp", 0)
        vth = kwargs.pop("vth", 10)

        self.shape = shape
        self.a_in = InPort(shape=shape)
        self.s_out = OutPort(shape=shape)
        self.u = Var(shape=shape, init=0)
        self.v = Var(shape=shape, init=0)
        self.du = Var(shape=(1,), init=du)
        self.dv = Var(shape=(1,), init=dv)
        self.bias = Var(shape=shape, init=bias)
        self.bias_exp = Var(shape=shape, init=bias_exp)
        self.vth = Var(shape=(1,), init=vth)
57 changes: 56 additions & 1 deletion tests/magma/core/model/test_decorators.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
# See: https://spdx.org/licenses/
import unittest

from lava.magma.core.decorator import implements, requires
from lava.magma.core.decorator import implements, requires, tag
from lava.magma.core.process.process import AbstractProcess
from lava.magma.core.model.model import AbstractProcessModel
from lava.magma.core.sync.protocol import AbstractSyncProtocol
Expand Down Expand Up @@ -201,6 +201,61 @@ class TestModel(AbstractProcessModel):
def run(self):
pass

def test_tags(self):
"""Checks 'tag' decorator"""
# Define minimal ProcModel and tag it
@tag('keyword1', 'keyword2')
class TestModel(AbstractProcessModel):
def run(self):
pass

self.assertListEqual(TestModel.tags, ['keyword1', 'keyword2'])

def test_tags_subclassing(self):
"""Checks that tags are additive over sub-classing/inheritance"""

# Define minimal ProcModel and tag it with first tag
@tag('loihi-1')
class TestModel(AbstractProcessModel):
def run(self):
pass

self.assertEqual(TestModel.tags, ['loihi-1'])

# Sub classes can add further tags
@tag('hardware')
class SubTestModel(TestModel):
pass

# Sub-classed ProcessModel should inherit parent's tags...
self.assertEqual(SubTestModel.tags,
['loihi-1', 'hardware'])

# ...but does not modify the tags of parent class
self.assertEqual(TestModel.tags, ['loihi-1'])

def test_tags_failing(self):
"""Checks if 'tag' decorator fails appropriately"""

# Only decorating ProcessModels is allowed
with self.assertRaises(AssertionError):
@tag('some-tag')
class SomeClass(AbstractProcess):
pass

# Tags should be just comma-separated keywords
with self.assertRaises(AssertionError):
@tag('keyword1', ['keyword2', 'keyword3'])
class TestModel2(AbstractProcessModel):
def run(self):
pass

# Tags should be just comma-separated keywords
with self.assertRaises(AssertionError):
@tag('tag1', [['tag2'], 'tag4'])
class SomeOtherClass(AbstractProcess):
pass


if __name__ == "__main__":
unittest.main()
5 changes: 5 additions & 0 deletions tests/proc/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# See: https://spdx.org/licenses/

__import__("pkg_resources").declare_namespace(__name__)
5 changes: 5 additions & 0 deletions tests/proc/lif/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# See: https://spdx.org/licenses/

__import__("pkg_resources").declare_namespace(__name__)
Loading