Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Synaptic delays for Dense connections #624

Merged
merged 37 commits into from
Feb 17, 2023
Merged
Show file tree
Hide file tree
Changes from 36 commits
Commits
Show all changes
37 commits
Select commit Hold shift + click to select a range
f2109ca
Added Delay connection process and process model (floating point).
kds300 May 26, 2022
59d36f5
fixed bug in Delay() process. s_buff was initialized to the size of t…
kds300 May 27, 2022
ccda8c1
Merge branch 'lava-nc:main' into main
kds300 Jun 14, 2022
44291d2
updated docstring for delay process
kds300 Jun 28, 2022
03c2145
Merge branch 'main' of https://github.com/kds300/lava into main
kds300 Jun 28, 2022
d79ddfe
added license information
kds300 Jul 7, 2022
da9f65c
reformatting to improve line lengths
kds300 Jul 7, 2022
0027a02
updating formatting based on linting results
kds300 Jul 7, 2022
18c9752
added unit tests for delay process
kds300 Jul 8, 2022
d06b99d
finished adding unittests and fixed linting errors
kds300 Jul 8, 2022
492a12d
added __init__.py file to the test directory for the delay process
kds300 Jul 12, 2022
50daa27
test_models.py import from tests.lava
mgkwill Jul 12, 2022
1ca8563
Merge branch 'main' into main
mgkwill Jul 13, 2022
2f01a1b
Merge branch 'main' into main
mgkwill Aug 5, 2022
a36cb72
Merge branch 'main' into main
mgkwill Sep 21, 2022
f7ec1dc
Merge branch 'main' into main
mgkwill Sep 26, 2022
37a2409
Added DenseDelay Process class, which inherits from Dense and adds de…
kds300 Sep 27, 2022
acd5970
Merge branch 'main' into main
kds300 Sep 27, 2022
ce26653
Merge branch 'main' into main
mgkwill Sep 27, 2022
426c8ac
Merge branch 'main' into main
kds300 Sep 29, 2022
cf8eaf6
added PyDenseDelayModelFloat, the floating-point implementation for t…
kds300 Oct 25, 2022
f258040
Merge branch 'main' of https://github.com/kds300/lava into main
kds300 Oct 25, 2022
9b9c366
Merge branch 'main' of github.com:lava-nc/lava into lava-nc-main
kds300 Dec 5, 2022
03de565
Merge branch 'lava-nc-main' into main
kds300 Dec 5, 2022
b1c391a
Merge branch 'main' into main
PhilippPlank Feb 9, 2023
a9bf917
Merge branch 'main' into main
PhilippPlank Feb 10, 2023
c6d44f4
small fixes
PhilippPlank Feb 10, 2023
ed0f3ef
Added DelayDense process and process model
PhilippPlank Feb 15, 2023
bd61c97
Merge branch 'lava-nc:main' into main
PhilippPlank Feb 15, 2023
c6838d7
Merge branch 'main' of https://github.com/PhilippPlank/lava into dela…
PhilippPlank Feb 15, 2023
a7e5b7e
fix linting
PhilippPlank Feb 15, 2023
ce2ac72
deleted unused files
PhilippPlank Feb 15, 2023
11f4672
deleted unused files
PhilippPlank Feb 15, 2023
4296936
Merge branch 'main' into delay_dense
PhilippPlank Feb 16, 2023
4dcf585
modification from review suggestions
PhilippPlank Feb 17, 2023
894c61b
modifications from reviewer suggestions
PhilippPlank Feb 17, 2023
c97df21
Merge branch 'main' into delay_dense
bamsumit Feb 17, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
151 changes: 150 additions & 1 deletion src/lava/proc/dense/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
from lava.magma.core.resources import CPU
from lava.magma.core.decorator import implements, requires, tag
from lava.magma.core.model.py.model import PyLoihiProcessModel
from lava.proc.dense.process import Dense, LearningDense
from lava.proc.dense.process import Dense, LearningDense, DelayDense
from lava.utils.weightutils import SignMode, determine_sign_mode,\
truncate_weights, clip_weights

Expand Down Expand Up @@ -188,3 +188,152 @@ def run_spk(self):
)

self.recv_traces(s_in)


class AbstractPyDelayDenseModel(PyLoihiProcessModel):
    """Abstract Conn Process with Dense synaptic connections that
    incorporates per-synapse delays into the Conn Process.
    """

    @staticmethod
    def get_del_wgts(weights, delays) -> np.ndarray:
        """Expand the weight matrix into per-delay sub-matrices.

        Builds a 2D matrix of shape
        (num_flat_output_neurons * (max_delay + 1), num_flat_input_neurons)
        where the row slice
        [k * num_flat_output_neurons : (k + 1) * num_flat_output_neurons, :]
        holds exactly those weights whose synaptic delay equals k (all other
        entries are zero). This layout allows the activation buffer to be
        updated with a single matrix product per time step.
        """
        max_delay = np.max(delays)
        per_delay = [np.where(delays == k, weights, 0)
                     for k in range(max_delay + 1)]
        return np.vstack(per_delay)

    def calc_act(self, s_in) -> np.ndarray:
        """Compute the per-delay activations for the incoming spikes.

        Multiplies the delay-expanded weight matrix by ``s_in`` and sums each
        row, producing one activation per (output neuron, delay) pair. The
        flat result is reshaped to (max_delay + 1, n_flat_output_neurons) and
        transposed, so column k of the returned matrix holds the activations
        that must arrive k steps in the future.
        """
        num_delay_slots = np.max(self.delays) + 1
        num_outputs = self.weights.shape[0]
        expanded = self.get_del_wgts(self.weights, self.delays)
        act_per_delay = np.sum(expanded * s_in, axis=1)
        return act_per_delay.reshape(num_delay_slots, num_outputs).T

    def update_act(self, s_in):
        """Advance the activation buffer by one time step.

        Clears the just-sent first column, rolls the buffer so each delay
        slot moves one step closer to delivery (the cleared zeros end up in
        the last column), then accumulates the activations caused by the
        current spikes. This order of operations ensures that a delay of 0
        corresponds to delivery on the next time step.
        """
        self.a_buff[:, 0] = 0
        self.a_buff = np.roll(self.a_buff, -1)
        self.a_buff += self.calc_act(s_in)


@implements(proc=DelayDense, protocol=LoihiProtocol)
@requires(CPU)
@tag("floating_pt")
class PyDelayDenseModelFloat(AbstractPyDelayDenseModel):
    """Floating-point implementation of a Conn Process with dense synaptic
    connections and synaptic delays. This short and simple ProcessModel is
    suited to quick algorithmic prototyping, without engaging with the
    nuances of a fixed-point implementation.
    """
    s_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, bool, precision=1)
    a_out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, float)
    a_buff: np.ndarray = LavaPyType(np.ndarray, float)
    # weights: 2D matrix of form (num_flat_output_neurons,
    # num_flat_input_neurons) in C-order (row major).
    weights: np.ndarray = LavaPyType(np.ndarray, float)
    # delays: 2D matrix of the same form and ordering as weights.
    delays: np.ndarray = LavaPyType(np.ndarray, int)
    num_message_bits: np.ndarray = LavaPyType(np.ndarray, int, precision=5)

    def run_spk(self):
        # Send the value buffered from dendritic accumulation at timestep
        # t-1 before receiving new spikes; this prevents deadlocking in
        # networks with recurrent connectivity structures.
        self.a_out.send(self.a_buff[:, 0])
        raw_spikes = self.s_in.recv()
        if self.num_message_bits.item() > 0:
            s_in = raw_spikes  # graded spikes carry payload bits
        else:
            s_in = raw_spikes.astype(bool)  # binary spikes
        self.update_act(s_in)


@implements(proc=DelayDense, protocol=LoihiProtocol)
@requires(CPU)
@tag("bit_accurate_loihi", "fixed_pt")
class PyDelayDenseModelBitAcc(AbstractPyDelayDenseModel):
    """Implementation of Conn Process with Dense synaptic connections that is
    bit-accurate with Loihi's hardware implementation of Dense, which means,
    it mimics Loihi behaviour bit-by-bit. DelayDense incorporates delays into
    the Conn Process. Loihi 2 has a maximum of 6 bits for delays, meaning a
    spike can be delayed by 0 to 63 time steps."""

    s_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, bool, precision=1)
    a_out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, np.int32, precision=16)
    # Activation buffer; column 0 is the value sent on the current step.
    a_buff: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=16)
    # weights is a 2D matrix of form (num_flat_output_neurons,
    # num_flat_input_neurons) in C-order (row major).
    weights: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=8)
    # Per-synapse delays; 6-bit precision matches Loihi 2's delay field.
    delays: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=6)
    # 0 -> binary spikes; > 0 -> graded spikes. 5-bit precision on Loihi.
    num_message_bits: np.ndarray = LavaPyType(np.ndarray, int, precision=5)

    def __init__(self, proc_params):
        """Initialize the model; weight scaling is deferred to run_spk()."""
        super().__init__(proc_params)
        # Flag to determine whether weights have already been scaled.
        self.weights_set = False

    def run_spk(self):
        # Re-read weight_exp each step from proc_params (default 0).
        self.weight_exp: int = self.proc_params.get("weight_exp", 0)

        # Since this Process has no learning, weights are assumed to be static
        # and only require scaling on the first timestep of run_spk().
        if not self.weights_set:
            num_weight_bits: int = self.proc_params.get("num_weight_bits", 8)
            # Fall back to inferring the sign mode from the weight values
            # when the user did not supply one.
            sign_mode: SignMode = self.proc_params.get("sign_mode") \
                or determine_sign_mode(self.weights)

            # Clip to the 8-bit Loihi weight range, then truncate to the
            # requested precision, mimicking hardware quantization.
            self.weights = clip_weights(self.weights, sign_mode, num_bits=8)
            self.weights = truncate_weights(self.weights,
                                            sign_mode,
                                            num_weight_bits)
            self.weights_set = True

            # Check if delays are within Loihi 2 constraints
            if np.max(self.delays) > 63:
                raise ValueError("DelayDense Process 'delays' expects values "
                                 f"between 0 and 63 for Loihi, got "
                                 f"{self.delays}.")

        # The a_out sent at each timestep is a buffered value from dendritic
        # accumulation at timestep t-1. This prevents deadlocking in
        # networks with recurrent connectivity structures.
        self.a_out.send(self.a_buff[:, 0])
        if self.num_message_bits.item() > 0:
            s_in = self.s_in.recv()
        else:
            s_in = self.s_in.recv().astype(bool)

        # Advance the buffer (zero the sent column, shift delay slots by
        # one) and accumulate the new activations, scaled by weight_exp:
        # positive exponents shift left, non-positive shift right.
        a_accum = self.calc_act(s_in)
        self.a_buff[:, 0] = 0
        self.a_buff = np.roll(self.a_buff, -1)
        self.a_buff += (
            np.left_shift(a_accum, self.weight_exp)
            if self.weight_exp > 0
            else np.right_shift(a_accum, -self.weight_exp)
        )
90 changes: 90 additions & 0 deletions src/lava/proc/dense/process.py
Original file line number Diff line number Diff line change
Expand Up @@ -144,3 +144,93 @@ def __init__(self,
log_config=log_config,
learning_rule=learning_rule,
**kwargs)


class DelayDense(Dense):
    def __init__(self,
                 *,
                 weights: np.ndarray,
                 delays: ty.Union[np.ndarray, int],
                 max_delay: ty.Optional[int] = 0,
                 name: ty.Optional[str] = None,
                 num_message_bits: ty.Optional[int] = 0,
                 log_config: ty.Optional[LogConfig] = None,
                 **kwargs) -> None:
        """Dense, delayed connections between neurons. Realizes the following
        abstract behavior: a_out = weights * s_in

        Parameters
        ----------
        weights : numpy.ndarray
            2D connection weight matrix of form (num_flat_output_neurons,
            num_flat_input_neurons) in C-order (row major).

        delays : numpy.ndarray, int
            2D connection delay matrix of form (num_flat_output_neurons,
            num_flat_input_neurons) in C-order (row major) or integer value if
            the same delay should be used for all synapses.

        max_delay: int, optional
            Maximum expected delay. Should be set if delays change during
            execution. Default value is 0, in this case the maximum delay
            will be determined from the values given in 'delays'.

        weight_exp : int, optional
            Shared weight exponent of base 2 used to scale magnitude of
            weights, if needed. Mostly for fixed point implementations.
            Unnecessary for floating point implementations.
            Default value is 0.

        num_weight_bits : int, optional
            Shared weight width/precision used by weight. Mostly for fixed
            point implementations. Unnecessary for floating point
            implementations.
            Default is for weights to use full 8 bit precision.

        sign_mode : SignMode, optional
            Shared indicator whether synapse is of type SignMode.NULL,
            SignMode.MIXED, SignMode.EXCITATORY, or SignMode.INHIBITORY. If
            SignMode.MIXED, the sign of the weight is
            included in the weight bits and the fixed point weight used for
            inference is scaled by 2.
            Unnecessary for floating point implementations.

            In the fixed point implementation, weights are scaled according to
            the following equations:
            w_scale = 8 - num_weight_bits + weight_exp + isMixed()
            weights = weights * (2 ** w_scale)

        num_message_bits : int, optional
            Determines whether the Dense Process deals with the incoming
            spikes as binary spikes (num_message_bits = 0) or as graded
            spikes (num_message_bits > 0). Default is 0.

        Raises
        ------
        ValueError
            If 'delays' contains negative values, does not match the shape
            of 'weights' (when given as an array), or is not of an integer
            type.
        """

        super().__init__(weights=weights,
                         num_message_bits=num_message_bits,
                         name=name,
                         log_config=log_config,
                         **kwargs)

        self._validate_delays(weights, delays)
        shape = weights.shape
        # A user-supplied max_delay of 0 means "derive it from the delays".
        if max_delay == 0:
            max_delay = int(np.max(delays))

        # Variables
        self.delays = Var(shape=shape, init=delays)
        # One buffer column per delay slot; column 0 is sent next step.
        self.a_buff = Var(shape=(shape[0], max_delay + 1), init=0)

    @staticmethod
    def _validate_delays(weights: np.ndarray,
                         delays: ty.Union[np.ndarray, int]) -> None:
        """Validate that 'delays' is non-negative, integer-typed, and (when
        given as an array) matches the shape of 'weights'."""
        if np.min(delays) < 0:
            raise ValueError("DelayDense Process 'delays' expects only "
                             f"positive values, got {delays}.")
        if not isinstance(delays, int):
            if np.shape(weights) != np.shape(delays):
                raise ValueError("DelayDense Process 'delays' expects same "
                                 f"shape than the weight matrix or int, got "
                                 f"{delays}.")
            # Accept any NumPy integer dtype (int8..int64, signed or
            # unsigned). The previous check `delays.dtype != int` compared
            # against the platform-default integer only, wrongly rejecting
            # e.g. an int32 delay matrix on platforms where int maps to
            # int64.
            if not np.issubdtype(delays.dtype, np.integer):
                raise ValueError("DelayDense Process 'delays' expects integer "
                                 f"value(s), got {delays}.")
Loading