diff --git a/src/lava/proc/dense/models.py b/src/lava/proc/dense/models.py
index 2d7b30502..f3d354263 100644
--- a/src/lava/proc/dense/models.py
+++ b/src/lava/proc/dense/models.py
@@ -14,7 +14,7 @@ from lava.magma.core.resources import CPU
 from lava.magma.core.decorator import implements, requires, tag
 from lava.magma.core.model.py.model import PyLoihiProcessModel
-from lava.proc.dense.process import Dense, LearningDense
+from lava.proc.dense.process import Dense, LearningDense, DelayDense
 from lava.utils.weightutils import SignMode, determine_sign_mode,\
     truncate_weights, clip_weights
@@ -188,3 +188,152 @@ def run_spk(self):
         )
 
         self.recv_traces(s_in)
+
+
+class AbstractPyDelayDenseModel(PyLoihiProcessModel):
+    """Abstract Conn Process with Dense synaptic connections which
+    incorporates delays into the Conn Process.
+    """
+
+    @staticmethod
+    def get_del_wgts(weights, delays) -> np.ndarray:
+        """
+        Use the weights and delays matrices to create a matrix in which the
+        weights are separated by delay. Returns a 2D matrix of form
+        (num_flat_output_neurons * (max_delay + 1), num_flat_input_neurons)
+        where
+        del_wgts[
+            k * num_flat_output_neurons : (k + 1) * num_flat_output_neurons, :
+        ]
+        contains the weights for all connections with a delay equal to k.
+        This allows updating the activation buffer and the weights.
+        """
+        return np.vstack([
+            np.where(delays == k, weights, 0)
+            for k in range(np.max(delays) + 1)
+        ])
+
+    def calc_act(self, s_in) -> np.ndarray:
+        """
+        Calculate the activations by performing del_wgts * s_in. This matrix
+        is summed across each row to get the activations to the output
+        neurons for the different delays. The resulting activation vector is
+        reshaped to a matrix of form
+        (max_delay + 1, num_flat_output_neurons), which is then transposed
+        to get the activation matrix.
+        """
+        return np.reshape(
+            np.sum(self.get_del_wgts(self.weights,
+                                     self.delays) * s_in, axis=1),
+            (np.max(self.delays) + 1, self.weights.shape[0])).T
+
+    def update_act(self, s_in):
+        """
+        Updates the activations for the connection.
+        Clears the first column of a_buff and rolls it to the last column.
+        Finally, calculates the activations for the current time step and
+        adds them to a_buff.
+        This order of operations ensures that delays of 0 correspond to
+        the next time step.
+        """
+        self.a_buff[:, 0] = 0
+        self.a_buff = np.roll(self.a_buff, -1)
+        self.a_buff += self.calc_act(s_in)
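Editorial note (not part of the patch): the delay-expansion scheme above is easiest to see on a toy example. The following self-contained sketch uses made-up weights and delays and plain NumPy to replay what get_del_wgts and calc_act compute:

    import numpy as np

    # Toy connectivity: 2 output neurons, 3 input neurons.
    weights = np.array([[1, 0, 2],
                        [4, 3, 0]])
    delays = np.array([[0, 0, 1],
                       [2, 0, 0]])   # max_delay = 2

    # One weight slab per delay value k = 0 .. max_delay, stacked vertically.
    del_wgts = np.vstack([np.where(delays == k, weights, 0)
                          for k in range(np.max(delays) + 1)])
    # del_wgts.shape == (6, 3):
    #   rows 0-1 (k=0): [[1, 0, 0], [0, 3, 0]]
    #   rows 2-3 (k=1): [[0, 0, 2], [0, 0, 0]]
    #   rows 4-5 (k=2): [[0, 0, 0], [4, 0, 0]]

    # With input spikes s_in = [1, 1, 0], calc_act yields one column per
    # delay: output neuron 0 accumulates 1 at delay 0; output neuron 1
    # accumulates 3 at delay 0 and 4 at delay 2.
    s_in = np.array([1, 1, 0])
    act = np.reshape(np.sum(del_wgts * s_in, axis=1),
                     (np.max(delays) + 1, weights.shape[0])).T
    # act == [[1, 0, 0],
    #         [3, 0, 4]]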
+
+
+@implements(proc=DelayDense, protocol=LoihiProtocol)
+@requires(CPU)
+@tag("floating_pt")
+class PyDelayDenseModelFloat(AbstractPyDelayDenseModel):
+    """Implementation of Conn Process with Dense synaptic connections in
+    floating point precision. This short and simple ProcessModel can be used
+    for quick algorithmic prototyping, without engaging with the nuances of a
+    fixed point implementation. DelayDense incorporates delays into the Conn
+    Process.
+    """
+    s_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, bool, precision=1)
+    a_out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, float)
+    a_buff: np.ndarray = LavaPyType(np.ndarray, float)
+    # weights is a 2D matrix of form (num_flat_output_neurons,
+    # num_flat_input_neurons) in C-order (row major).
+    weights: np.ndarray = LavaPyType(np.ndarray, float)
+    # delays is a 2D matrix of form (num_flat_output_neurons,
+    # num_flat_input_neurons) in C-order (row major).
+    delays: np.ndarray = LavaPyType(np.ndarray, int)
+    num_message_bits: np.ndarray = LavaPyType(np.ndarray, int, precision=5)
+
+    def run_spk(self):
+        # The a_out sent at each timestep is a buffered value from dendritic
+        # accumulation at timestep t-1. This prevents deadlocking in
+        # networks with recurrent connectivity structures.
+        self.a_out.send(self.a_buff[:, 0])
+        if self.num_message_bits.item() > 0:
+            s_in = self.s_in.recv()
+        else:
+            s_in = self.s_in.recv().astype(bool)
+        self.update_act(s_in)
+
+
+@implements(proc=DelayDense, protocol=LoihiProtocol)
+@requires(CPU)
+@tag("bit_accurate_loihi", "fixed_pt")
+class PyDelayDenseModelBitAcc(AbstractPyDelayDenseModel):
+    """Implementation of Conn Process with Dense synaptic connections that
+    is bit-accurate with Loihi's hardware implementation of Dense, meaning
+    it mimics Loihi behaviour bit-by-bit. DelayDense incorporates delays
+    into the Conn Process. Loihi 2 has a maximum of 6 bits for delays,
+    meaning a spike can be delayed by 0 to 63 time steps."""
+
+    s_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, bool, precision=1)
+    a_out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, np.int32, precision=16)
+    a_buff: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=16)
+    # weights is a 2D matrix of form (num_flat_output_neurons,
+    # num_flat_input_neurons) in C-order (row major).
+    weights: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=8)
+    delays: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=6)
+    num_message_bits: np.ndarray = LavaPyType(np.ndarray, int, precision=5)
+
+    def __init__(self, proc_params):
+        super().__init__(proc_params)
+        # Flag to determine whether weights have already been scaled.
+        self.weights_set = False
+
+    def run_spk(self):
+        self.weight_exp: int = self.proc_params.get("weight_exp", 0)
+
+        # Since this Process has no learning, weights are assumed to be
+        # static and only require scaling on the first timestep of run_spk().
+        if not self.weights_set:
+            num_weight_bits: int = self.proc_params.get("num_weight_bits", 8)
+            sign_mode: SignMode = self.proc_params.get("sign_mode") \
+                or determine_sign_mode(self.weights)
+
+            self.weights = clip_weights(self.weights, sign_mode, num_bits=8)
+            self.weights = truncate_weights(self.weights,
+                                            sign_mode,
+                                            num_weight_bits)
+            self.weights_set = True
+
+            # Check that delays are within Loihi 2 constraints.
+            if np.max(self.delays) > 63:
+                raise ValueError("DelayDense Process 'delays' expects values "
+                                 f"between 0 and 63 for Loihi, got "
+                                 f"{self.delays}.")
+
+        # The a_out sent at each timestep is a buffered value from dendritic
+        # accumulation at timestep t-1. This prevents deadlocking in
+        # networks with recurrent connectivity structures.
+        self.a_out.send(self.a_buff[:, 0])
+        if self.num_message_bits.item() > 0:
+            s_in = self.s_in.recv()
+        else:
+            s_in = self.s_in.recv().astype(bool)
+
+        a_accum = self.calc_act(s_in)
+        self.a_buff[:, 0] = 0
+        self.a_buff = np.roll(self.a_buff, -1)
+        self.a_buff += (
+            np.left_shift(a_accum, self.weight_exp)
+            if self.weight_exp > 0
+            else np.right_shift(a_accum, -self.weight_exp)
+        )
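Editorial note (not part of the patch): the buffering above means a spike that arrives at timestep t through a synapse with delay k is sent at timestep t + k + 1 (delay 0 still costs one step, matching the dendritic-accumulation comment). A minimal replay of update_act with made-up values, NumPy only:

    import numpy as np

    n_out, max_delay = 1, 2
    weights = np.array([[1.0]])
    delays = np.array([[2]])              # single synapse, delay of 2
    a_buff = np.zeros((n_out, max_delay + 1))
    del_wgts = np.vstack([np.where(delays == k, weights, 0)
                          for k in range(max_delay + 1)])

    for t in range(1, 8):                 # 1-indexed timesteps, as in tests
        a_out = a_buff[:, 0].copy()       # buffered value is sent first
        s_in = np.array([t == 4])         # the input spikes once, at t=4
        a_buff[:, 0] = 0
        a_buff = np.roll(a_buff, -1)
        a_buff += np.reshape(np.sum(del_wgts * s_in, axis=1),
                             (max_delay + 1, n_out)).T
        print(t, a_out)                   # prints [1.] only at t = 4 + 2 + 1 = 7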
diff --git a/src/lava/proc/dense/process.py b/src/lava/proc/dense/process.py
index b160110f6..d80b5d2fc 100644
--- a/src/lava/proc/dense/process.py
+++ b/src/lava/proc/dense/process.py
@@ -144,3 +144,93 @@ def __init__(self,
                          log_config=log_config,
                          learning_rule=learning_rule,
                          **kwargs)
+
+
+class DelayDense(Dense):
+    def __init__(self,
+                 *,
+                 weights: np.ndarray,
+                 delays: ty.Union[np.ndarray, int],
+                 max_delay: ty.Optional[int] = 0,
+                 name: ty.Optional[str] = None,
+                 num_message_bits: ty.Optional[int] = 0,
+                 log_config: ty.Optional[LogConfig] = None,
+                 **kwargs) -> None:
+        """Dense, delayed connections between neurons. Realizes the following
+        abstract behavior: a_out = weights * s_in, where the contribution of
+        each synapse is delivered after the per-synapse delay given in
+        'delays'.
+
+        Parameters
+        ----------
+        weights : numpy.ndarray
+            2D connection weight matrix of form (num_flat_output_neurons,
+            num_flat_input_neurons) in C-order (row major).
+
+        delays : numpy.ndarray or int
+            2D connection delay matrix of form (num_flat_output_neurons,
+            num_flat_input_neurons) in C-order (row major), or an integer
+            value if the same delay should be used for all synapses.
+
+        max_delay : int, optional
+            Maximum expected delay. Should be set if delays change during
+            execution. Default value is 0, in which case the maximum delay
+            is determined from the values given in 'delays'.
+
+        weight_exp : int, optional
+            Shared weight exponent of base 2 used to scale the magnitude of
+            weights, if needed. Mostly for fixed point implementations.
+            Unnecessary for floating point implementations.
+            Default value is 0.
+
+        num_weight_bits : int, optional
+            Shared weight width/precision used by weight. Mostly for fixed
+            point implementations. Unnecessary for floating point
+            implementations.
+            Default is for weights to use full 8 bit precision.
+
+        sign_mode : SignMode, optional
+            Shared indicator whether synapse is of type SignMode.NULL,
+            SignMode.MIXED, SignMode.EXCITATORY, or SignMode.INHIBITORY. If
+            SignMode.MIXED, the sign of the weight is included in the weight
+            bits and the fixed point weight used for inference is scaled by
+            2. Unnecessary for floating point implementations.
+
+            In the fixed point implementation, weights are scaled according
+            to the following equations:
+            w_scale = 8 - num_weight_bits + weight_exp + isMixed()
+            weights = weights * (2 ** w_scale)
+
+        num_message_bits : int, optional
+            Determines whether the Dense Process deals with the incoming
+            spikes as binary spikes (num_message_bits = 0) or as graded
+            spikes (num_message_bits > 0). Default is 0.
+        """
+
+        super().__init__(weights=weights,
+                         num_message_bits=num_message_bits,
+                         name=name,
+                         log_config=log_config,
+                         **kwargs)
+
+        self._validate_delays(weights, delays)
+        shape = weights.shape
+        if max_delay == 0:
+            max_delay = int(np.max(delays))
+
+        # Variables
+        self.delays = Var(shape=shape, init=delays)
+        self.a_buff = Var(shape=(shape[0], max_delay + 1), init=0)
+
+    @staticmethod
+    def _validate_delays(weights: np.ndarray, delays: np.ndarray) -> None:
+        if np.min(delays) < 0:
+            raise ValueError("DelayDense Process 'delays' expects only "
+                             f"non-negative values, got {delays}.")
+        if not isinstance(delays, int):
+            if np.shape(weights) != np.shape(delays):
+                raise ValueError("DelayDense Process 'delays' expects the "
+                                 f"same shape as the weight matrix or an "
+                                 f"int, got {delays}.")
+            if delays.dtype != int:
+                raise ValueError("DelayDense Process 'delays' expects "
+                                 f"integer value(s), got {delays}.")
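Editorial note (not part of the patch): a minimal usage sketch of the new Process, with made-up shapes and values; the a_buff assertion mirrors test_init_max_delay below:

    import numpy as np
    from lava.proc.dense.process import DelayDense

    weights = np.zeros((2, 3))            # 2 outputs, 3 inputs
    weights[1, 0] = 4.0
    delays = np.zeros((2, 3), dtype=int)
    delays[1, 0] = 3                      # delivered 3 + 1 timesteps later

    conn = DelayDense(weights=weights, delays=delays)
    # One buffer column per delay slot: (2, max_delay + 1) == (2, 4).
    assert conn.a_buff.shape == (2, 4)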
+ """ + + super().__init__(weights=weights, + num_message_bits=num_message_bits, + name=name, + log_config=log_config, + **kwargs) + + self._validate_delays(weights, delays) + shape = weights.shape + if max_delay == 0: + max_delay = int(np.max(delays)) + + # Variables + self.delays = Var(shape=shape, init=delays) + self.a_buff = Var(shape=(shape[0], max_delay + 1) , init=0) + + @staticmethod + def _validate_delays(weights: np.ndarray, delays: np.ndarray) -> None: + if np.min(delays) < 0: + raise ValueError("DelayDense Process 'delays' expects only " + f"positive values, got {delays}.") + if not isinstance(delays, int): + if np.shape(weights) != np.shape(delays): + raise ValueError("DelayDense Process 'delays' expects same " + f"shape than the weight matrix or int, got " + f"{delays}.") + if delays.dtype != int: + raise ValueError("DelayDense Process 'delays' expects integer " + f"value(s), got {delays}.") diff --git a/tests/lava/proc/dense/test_models.py b/tests/lava/proc/dense/test_models.py index 2613ce571..dd97a6adb 100644 --- a/tests/lava/proc/dense/test_models.py +++ b/tests/lava/proc/dense/test_models.py @@ -15,8 +15,9 @@ from lava.magma.core.run_configs import RunConfig from lava.magma.core.run_conditions import RunSteps from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol -from lava.proc.dense.process import Dense +from lava.proc.dense.process import Dense, DelayDense from lava.utils.weightutils import SignMode +from lava.proc.dense.models import AbstractPyDelayDenseModel class DenseRunConfig(RunConfig): @@ -553,3 +554,504 @@ def test_bitacc_pm_recurrence(self): rcfg = DenseRunConfig(select_tag='fixed_pt') dense.run(condition=rcnd, run_cfg=rcfg) dense.stop() + + +class TestDelayDenseProcessModel(unittest.TestCase): + """Tests for ProcessModels of Dense with synaptic delay.""" + + def test_matrix_weight_delay_expansion(self): + """""" + shape = (3, 4) + weights = np.zeros(shape, dtype=float) + weights[2, 2] = 1 + delays = np.zeros(shape, dtype=int) + delays[2, 2] = 2 + max_delay = np.max(delays) + wgt_dly = AbstractPyDelayDenseModel.get_del_wgts(weights, delays) + # Expected shape is maximum delay=2 + 1 = 3 times first dimension of + # original shape (3, 4) => (9, 4) + expected_shape = ((max_delay + 1) * 3, 4) + self.assertTrue(np.shape(wgt_dly) == expected_shape) + # Expected matrix stacks n zero matrices of original shape of the + # weights matrix vertically to create the wgt_dly matrix. n being the + # maximum delay in the delay matrix. + expected_wgt_dly = np.zeros(expected_shape) + expected_wgt_dly[8, 2] = 1 + self.assertTrue(np.all(wgt_dly == expected_wgt_dly)) + + def test_float_pm_buffer_delay(self): + """Tests floating point Dense ProcessModel connectivity and temporal + dynamics. All input 'neurons' from the VecSendandRcv fire + once at time t=4, and only 1 connection weight + in the Dense Process is non-zero. The value of the delay matrix for + this weight is 2. The non-zero connection should have an activation of + 1 at timestep t=7. 
+ """ + shape = (3, 4) + num_steps = 8 + # Set up external input to emulate every neuron spiking once on + # timestep 4 + vec_to_send = np.ones((shape[1],), dtype=float) + send_at_times = np.repeat(False, (num_steps,)) + send_at_times[3] = True + sps = VecSendandRecvProcess(shape=(shape[1],), num_steps=num_steps, + vec_to_send=vec_to_send, + send_at_times=send_at_times) + # Set up Dense Process with a single non-zero connection weight at + # entry [2, 2] of the connectivity matrix and a delay of 2 at entry + # [2, 2] in the delay matrix. + weights = np.zeros(shape, dtype=float) + weights[2, 2] = 1 + delays = np.zeros(shape, dtype=int) + delays[2, 2] = 2 + dense = DelayDense(weights=weights, delays=delays) + # Receive neuron spikes + spr = VecRecvProcess(shape=(num_steps, shape[0])) + sps.s_out.connect(dense.s_in) + dense.a_out.connect(spr.s_in) + # Configure execution and run + rcnd = RunSteps(num_steps=num_steps) + rcfg = DenseRunConfig(select_tag='floating_pt') + dense.run(condition=rcnd, run_cfg=rcfg) + # Gather spike data and stop + spk_data_through_run = spr.spk_data.get() + dense.stop() + # Gold standard for the test + # a_out will be equal to 1 at timestep 7, because the dendritic + # accumulators work on inputs from the previous timestep + 2. + expected_spk_data = np.zeros((num_steps, shape[0])) + expected_spk_data[6, 2] = 1. + self.assertTrue(np.all(expected_spk_data == spk_data_through_run)) + + def test_float_pm_fan_in_delay(self): + """ + Tests floating point Dense ProcessModel dendritic accumulation + behavior when the fan-in to a receiving neuron is greater than 1 + and synaptic delays are configured. + """ + shape = (3, 4) + num_steps = 10 + # Set up external input to emulate every neuron spiking once on + # timestep 4 + vec_to_send = np.ones((shape[1],), dtype=float) + send_at_times = np.repeat(False, (num_steps,)) + send_at_times[3] = True + sps = VecSendandRecvProcess(shape=(shape[1],), num_steps=num_steps, + vec_to_send=vec_to_send, + send_at_times=send_at_times) + # Set up a Dense Process where all input layer neurons project to a + # single output layer neuron with varying delays. + weights = np.zeros(shape, dtype=float) + weights[2, :] = [2, -3, 4, -5] + delays = np.zeros(shape, dtype=int) + delays[2, :] = [1, 2, 2, 4] + dense = DelayDense(weights=weights, delays=delays) + # Receive neuron spikes + spr = VecRecvProcess(shape=(num_steps, shape[0])) + sps.s_out.connect(dense.s_in) + dense.a_out.connect(spr.s_in) + # Configure execution and run + rcnd = RunSteps(num_steps=num_steps) + rcfg = DenseRunConfig(select_tag='floating_pt') + dense.run(condition=rcnd, run_cfg=rcfg) + # Gather spike data and stop + spk_data_through_run = spr.spk_data.get() + dense.stop() + # Gold standard for the test + # Expected behavior is that a_out corresponding to output + # neuron 3 will be equal to 2 at timestep 6, 1=-3+4 at timestep 7 and + # -5 at timestep 9 + expected_spk_data = np.zeros((num_steps, shape[0])) + expected_spk_data[5, 2] = 2 + expected_spk_data[6, 2] = 1 + expected_spk_data[8, 2] = -5 + self.assertTrue(np.all(expected_spk_data == spk_data_through_run)) + + def test_float_pm_fan_out_delay(self): + """ + Tests floating point Dense ProcessModel dendritic accumulation + behavior when the fan-out of a projecting neuron is greater than 1 + and synaptic delays are configured. + """ + shape = (3, 4) + num_steps = 8 + # Set up external input to emulate every neuron spiking once on + # timestep t=4. 
+        vec_to_send = np.ones((shape[1],), dtype=float)
+        send_at_times = np.repeat(False, (num_steps,))
+        send_at_times[3] = True
+        sps = VecSendandRecvProcess(shape=(shape[1],), num_steps=num_steps,
+                                    vec_to_send=vec_to_send,
+                                    send_at_times=send_at_times)
+        # Set up a Dense Process where a single input layer neuron projects
+        # to all output layer neurons with a delay of 2 for all synapses.
+        weights = np.zeros(shape, dtype=float)
+        weights[:, 2] = [3, 4, 5]
+        delays = 2
+        dense = DelayDense(weights=weights, delays=delays)
+        # Receive neuron spikes
+        spr = VecRecvProcess(shape=(num_steps, shape[0]))
+        sps.s_out.connect(dense.s_in)
+        dense.a_out.connect(spr.s_in)
+        # Configure execution and run
+        rcnd = RunSteps(num_steps=num_steps)
+        rcfg = DenseRunConfig(select_tag='floating_pt')
+        dense.run(condition=rcnd, run_cfg=rcfg)
+        # Gather spike data and stop
+        spk_data_through_run = spr.spk_data.get()
+        dense.stop()
+        # Gold standard for the test
+        # Expected behavior is that a_out corresponding to output neurons
+        # 1-3 will be equal to 3, 4, and 5, respectively, at timestep 7.
+        expected_spk_data = np.zeros((num_steps, shape[0]))
+        expected_spk_data[6, :] = [3, 4, 5]
+        self.assertTrue(np.all(expected_spk_data == spk_data_through_run))
+
+    def test_float_pm_fan_out_delay_2(self):
+        """
+        Tests floating point Dense ProcessModel dendritic accumulation
+        behavior when the fan-out of a projecting neuron is greater than 1
+        and synaptic delays are configured.
+        """
+        shape = (3, 4)
+        num_steps = 8
+        # Set up external input to emulate every neuron spiking once on
+        # timestep t=4.
+        vec_to_send = np.ones((shape[1],), dtype=float)
+        send_at_times = np.repeat(False, (num_steps,))
+        send_at_times[3] = True
+        sps = VecSendandRecvProcess(shape=(shape[1],), num_steps=num_steps,
+                                    vec_to_send=vec_to_send,
+                                    send_at_times=send_at_times)
+        # Set up a Dense Process where a single input layer neuron projects
+        # to all output layer neurons with varying delays.
+        weights = np.zeros(shape, dtype=float)
+        weights[:, 2] = [3, 4, 5]
+        delays = np.zeros(shape, dtype=int)
+        delays[:, 2] = [0, 1, 2]
+        dense = DelayDense(weights=weights, delays=delays)
+        # Receive neuron spikes
+        spr = VecRecvProcess(shape=(num_steps, shape[0]))
+        sps.s_out.connect(dense.s_in)
+        dense.a_out.connect(spr.s_in)
+        # Configure execution and run
+        rcnd = RunSteps(num_steps=num_steps)
+        rcfg = DenseRunConfig(select_tag='floating_pt')
+        dense.run(condition=rcnd, run_cfg=rcfg)
+        # Gather spike data and stop
+        spk_data_through_run = spr.spk_data.get()
+        dense.stop()
+        # Gold standard for the test
+        # Expected behavior is that a_out corresponding to output neurons
+        # 1-3 will be equal to 3, 4, and 5 at timesteps 5, 6, and 7,
+        # respectively.
+        expected_spk_data = np.zeros((num_steps, shape[0]))
+        expected_spk_data[4, 0] = 3
+        expected_spk_data[5, 1] = 4
+        expected_spk_data[6, 2] = 5
+        self.assertTrue(np.all(expected_spk_data == spk_data_through_run))
+
+    def test_float_pm_recurrence_delays(self):
+        """
+        Tests that the floating point Dense ProcessModel has non-blocking
+        dynamics for recurrent connectivity architectures when synaptic
+        delays are configured.
+        """
+        shape = (3, 3)
+        num_steps = 8
+        # Set up external input to emulate every neuron spiking on every
+        # timestep.
+        vec_to_send = np.ones((shape[1],), dtype=float)
+        send_at_times = np.repeat(True, (num_steps,))
+        sps = VecSendandRecvProcess(shape=(shape[1],), num_steps=num_steps,
+                                    vec_to_send=vec_to_send,
+                                    send_at_times=send_at_times)
+        # Set up Dense Process with a fully connected recurrent connectivity
+        # architecture and a delay of 2 for all synapses.
+        weights = np.ones(shape, dtype=float)
+        delays = 2
+        dense = DelayDense(weights=weights, delays=delays)
+        # Receive neuron spikes
+        sps.s_out.connect(dense.s_in)
+        dense.a_out.connect(sps.a_in)
+        # Configure execution and run
+        rcnd = RunSteps(num_steps=num_steps)
+        rcfg = DenseRunConfig(select_tag='floating_pt')
+        dense.run(condition=rcnd, run_cfg=rcfg)
+        dense.stop()
+
+    def test_bitacc_pm_fan_out_excitatory_delay(self):
+        """
+        Tests fixed-point Dense ProcessModel dendritic accumulation
+        behavior when the fan-out of a projecting neuron is greater than 1,
+        all connections are excitatory (sign_mode = 2), and synaptic delays
+        are configured.
+        """
+        shape = (3, 4)
+        num_steps = 8
+        # Set up external input to emulate every neuron spiking once on
+        # timestep 4.
+        vec_to_send = np.ones((shape[1],), dtype=float)
+        send_at_times = np.repeat(False, (num_steps,))
+        send_at_times[3] = True
+        sps = VecSendandRecvProcess(shape=(shape[1],), num_steps=num_steps,
+                                    vec_to_send=vec_to_send,
+                                    send_at_times=send_at_times)
+        # Set up Dense Process in which a single input neuron projects to
+        # all output neurons.
+        weights = np.zeros(shape, dtype=float)
+        weights[:, 2] = [0.5, 300, 40]
+        delays = np.zeros(shape, dtype=int)
+        delays[:, 2] = [0, 1, 2]
+        dense = DelayDense(weights=weights, delays=delays,
+                           sign_mode=SignMode.EXCITATORY)
+        # Receive neuron spikes
+        spr = VecRecvProcess(shape=(num_steps, shape[0]))
+        sps.s_out.connect(dense.s_in)
+        dense.a_out.connect(spr.s_in)
+        # Configure execution and run
+        rcnd = RunSteps(num_steps=num_steps)
+        rcfg = DenseRunConfig(select_tag='fixed_pt')
+        dense.run(condition=rcnd, run_cfg=rcfg)
+        # Gather spike data and stop
+        spk_data_through_run = spr.spk_data.get()
+        dense.stop()
+        # Gold standard for the test
+        # Expected behavior is that a_out corresponding to output neurons
+        # 1-3 will be equal to 0, 255, and 40 at timesteps 5, 6, and 7,
+        # respectively, because a_out can only have integer values between
+        # 0 and 255 and the synapses have delays of 0, 1, and 2,
+        # respectively.
+        expected_spk_data = np.zeros((num_steps, shape[0]))
+        expected_spk_data[4, 0] = 0
+        expected_spk_data[5, 1] = 255
+        expected_spk_data[6, 2] = 40
+        self.assertTrue(np.all(expected_spk_data == spk_data_through_run))
+
+    def test_bitacc_pm_fan_out_mixed_sign_delay(self):
+        """
+        Tests fixed-point Dense ProcessModel dendritic accumulation
+        behavior when the fan-out of a projecting neuron is greater than 1
+        and connections are both excitatory and inhibitory (sign_mode = 1).
+        When using mixed sign weights and full 8 bit weight precision,
+        a_out can take even values from -256 to 254. A delay of 2 is
+        configured for all synapses.
+        """
+        shape = (3, 4)
+        num_steps = 8
+        # Set up external input to emulate every neuron spiking once on
+        # timestep 4.
+        vec_to_send = np.ones((shape[1],), dtype=float)
+        send_at_times = np.repeat(False, (num_steps,))
+        send_at_times[3] = True
+        sps = VecSendandRecvProcess(shape=(shape[1],), num_steps=num_steps,
+                                    vec_to_send=vec_to_send,
+                                    send_at_times=send_at_times)
+        # Set up Dense Process in which a single input neuron projects to
+        # all output neurons with both excitatory and inhibitory weights.
+        weights = np.zeros(shape, dtype=float)
+        weights[:, 2] = [300, -300, 39]
+        delays = 2
+        dense = DelayDense(weights=weights, delays=delays,
+                           sign_mode=SignMode.MIXED)
+        # Receive neuron spikes
+        spr = VecRecvProcess(shape=(num_steps, shape[0]))
+        sps.s_out.connect(dense.s_in)
+        dense.a_out.connect(spr.s_in)
+        # Configure execution and run
+        rcnd = RunSteps(num_steps=num_steps)
+        rcfg = DenseRunConfig(select_tag='fixed_pt')
+        dense.run(condition=rcnd, run_cfg=rcfg)
+        # Gather spike data and stop
+        spk_data_through_run = spr.spk_data.get()
+        dense.stop()
+        # Gold standard for the test
+        # Expected behavior is that a_out corresponding to output neurons
+        # 1-3 will be equal to 254, -256, and 38, respectively, at timestep
+        # 7, because a_out can only have even values between -256 and 254
+        # and a delay of 2 is configured.
+        expected_spk_data = np.zeros((num_steps, shape[0]))
+        expected_spk_data[6, :] = [254, -256, 38]
+        self.assertTrue(np.all(expected_spk_data == spk_data_through_run))
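Editorial note (not part of the patch): the gold-standard values in the fixed-point tests can be reproduced with a back-of-the-envelope model of the mixed-sign weight pipeline. This is an illustrative sketch of clip/truncate/scale, not the actual lava.utils.weightutils implementation, which may differ in detail:

    import numpy as np

    def mixed_fixed_point(w, num_weight_bits=8, weight_exp=0):
        # Clip to the mixed-sign range, zero the truncated low bits (the
        # sign shares the 8 weight bits, hence the extra dropped bit), then
        # apply the weight exponent.
        w = np.clip(np.asarray(w, dtype=np.int64), -256, 255)
        drop = 8 - num_weight_bits + 1
        w = (w >> drop) << drop
        return w * 2 ** weight_exp if weight_exp >= 0 else w >> -weight_exp

    print(mixed_fixed_point([300, -300, 39]))                     # [ 254 -256   38]
    print(mixed_fixed_point([300, -300, 39], num_weight_bits=7))  # [ 252 -256   36]
    print(mixed_fixed_point([300, -300, 39], weight_exp=1))       # [ 508 -512   76]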
+
+    def test_bitacc_pm_fan_out_weight_exp_delay(self):
+        """
+        Tests fixed-point Dense ProcessModel dendritic accumulation
+        behavior when the fan-out of a projecting neuron is greater than 1,
+        connections are both excitatory and inhibitory (sign_mode = 1), and
+        weight_exp = 1.
+        When using mixed sign weights, full 8 bit weight precision, and
+        weight_exp = 1, a_out can take even values from -512 to 508. As a
+        result of setting weight_exp = 1, the expected a_out is 2x that of
+        the previous unit test. Delays of 0, 1, and 2 are configured on the
+        respective synapses.
+        """
+        shape = (3, 4)
+        num_steps = 8
+        # Set up external input to emulate every neuron spiking once on
+        # timestep 4.
+        vec_to_send = np.ones((shape[1],), dtype=float)
+        send_at_times = np.repeat(False, (num_steps,))
+        send_at_times[3] = True
+        sps = VecSendandRecvProcess(shape=(shape[1],), num_steps=num_steps,
+                                    vec_to_send=vec_to_send,
+                                    send_at_times=send_at_times)
+        # Set up Dense Process in which a single input neuron projects to
+        # all output neurons with mixed sign connection weights.
+        weights = np.zeros(shape, dtype=float)
+        weights[:, 2] = [300, -300, 39]
+        delays = np.zeros(shape, dtype=int)
+        delays[:, 2] = [0, 1, 2]
+        # Set weight_exp = 1. This affects weight scaling.
+        dense = DelayDense(weights=weights, weight_exp=1, delays=delays)
+        # Receive neuron spikes
+        spr = VecRecvProcess(shape=(num_steps, shape[0]))
+        sps.s_out.connect(dense.s_in)
+        dense.a_out.connect(spr.s_in)
+        # Configure execution and run
+        rcnd = RunSteps(num_steps=num_steps)
+        rcfg = DenseRunConfig(select_tag='fixed_pt')
+        dense.run(condition=rcnd, run_cfg=rcfg)
+        # Gather spike data and stop
+        spk_data_through_run = spr.spk_data.get()
+        dense.stop()
+        # Gold standard for the test
+        # Expected behavior is that a_out corresponding to output neurons
+        # 1-3 will be equal to 508, -512, and 76 at timesteps 5, 6, and 7,
+        # respectively, because a_out can only have values between -512 and
+        # 508 such that a_out % 4 = 0.
+        expected_spk_data = np.zeros((num_steps, shape[0]))
+        expected_spk_data[4, 0] = 508
+        expected_spk_data[5, 1] = -512
+        expected_spk_data[6, 2] = 76
+        self.assertTrue(np.all(expected_spk_data == spk_data_through_run))
+
+    def test_bitacc_pm_fan_out_weight_precision_delay(self):
+        """
+        Tests fixed-point Dense ProcessModel dendritic accumulation
+        behavior when the fan-out of a projecting neuron is greater than 1,
+        connections are both excitatory and inhibitory (sign_mode = 1), and
+        num_weight_bits = 7.
+        When using mixed sign weights and 7 bit weight precision, a_out can
+        take values from -256 to 252 such that a_out % 4 = 0. All synapses
+        have a delay of 2 configured.
+        """
+        shape = (3, 4)
+        num_steps = 8
+        # Set up external input to emulate every neuron spiking once on
+        # timestep 4.
+        vec_to_send = np.ones((shape[1],), dtype=float)
+        send_at_times = np.repeat(False, (num_steps,))
+        send_at_times[3] = True
+        sps = VecSendandRecvProcess(shape=(shape[1],), num_steps=num_steps,
+                                    vec_to_send=vec_to_send,
+                                    send_at_times=send_at_times)
+        # Set up Dense Process in which a single input neuron projects to
+        # all output neurons with mixed sign connection weights.
+        weights = np.zeros(shape, dtype=float)
+        weights[:, 2] = [300, -300, 39]
+        delays = 2
+        # Set num_weight_bits = 7. This affects weight scaling.
+        dense = DelayDense(weights=weights, num_weight_bits=7, delays=delays)
+        # Receive neuron spikes
+        spr = VecRecvProcess(shape=(num_steps, shape[0]))
+        sps.s_out.connect(dense.s_in)
+        dense.a_out.connect(spr.s_in)
+        # Configure execution and run
+        rcnd = RunSteps(num_steps=num_steps)
+        rcfg = DenseRunConfig(select_tag='fixed_pt')
+        dense.run(condition=rcnd, run_cfg=rcfg)
+        # Gather spike data and stop
+        spk_data_through_run = spr.spk_data.get()
+        dense.stop()
+        # Gold standard for the test
+        # Expected behavior is that a_out corresponding to output neurons
+        # 1-3 will be equal to 252, -256, and 36, respectively, at timestep
+        # 7, because a_out can only have values between -256 and 252 such
+        # that a_out % 4 = 0.
+        expected_spk_data = np.zeros((num_steps, shape[0]))
+        expected_spk_data[6, :] = [252, -256, 36]
+        self.assertTrue(np.all(expected_spk_data == spk_data_through_run))
+
+    def test_bitacc_pm_fan_in_mixed_sign_delay(self):
+        """
+        Tests fixed-point Dense ProcessModel dendritic accumulation
+        behavior when the fan-in of a receiving neuron is greater than 1
+        and connections are both excitatory and inhibitory (sign_mode = 1).
+        When using mixed sign weights and full 8 bit weight precision,
+        a_out can take even values from -256 to 254. All synapses have a
+        delay of 2 configured.
+        """
+        shape = (3, 4)
+        num_steps = 8
+        # Set up external input to emulate every neuron spiking once on
+        # timestep 4.
+        vec_to_send = np.ones((shape[1],), dtype=float)
+        send_at_times = np.repeat(False, (num_steps,))
+        send_at_times[3] = True
+        sps = VecSendandRecvProcess(shape=(shape[1],), num_steps=num_steps,
+                                    vec_to_send=vec_to_send,
+                                    send_at_times=send_at_times)
+        # Set up Dense Process in which all input layer neurons project to
+        # a single output layer neuron with both excitatory and inhibitory
+        # weights.
+        weights = np.zeros(shape, dtype=float)
+        weights[2, :] = [300, -300, 39, -0.4]
+        delays = 2
+        dense = DelayDense(weights=weights, sign_mode=SignMode.MIXED,
+                           delays=delays)
+        # Receive neuron spikes
+        spr = VecRecvProcess(shape=(num_steps, shape[0]))
+        sps.s_out.connect(dense.s_in)
+        dense.a_out.connect(spr.s_in)
+        # Configure execution and run
+        rcnd = RunSteps(num_steps=num_steps)
+        rcfg = DenseRunConfig(select_tag='fixed_pt')
+        dense.run(condition=rcnd, run_cfg=rcfg)
+        # Gather spike data and stop
+        spk_data_through_run = spr.spk_data.get()
+        dense.stop()
+        # Gold standard for the test
+        # Expected behavior is that a_out corresponding to output neuron 3
+        # will be equal to 36 = 254 - 256 + 38 - 0 at timestep 7, because
+        # weights can only take even values between -256 and 254.
+        expected_spk_data = np.zeros((num_steps, shape[0]))
+        expected_spk_data[6, 2] = 36
+        self.assertTrue(np.all(expected_spk_data == spk_data_through_run))
+
+    def test_bitacc_pm_recurrence_delay(self):
+        """
+        Tests that the bit-accurate Dense ProcessModel has non-blocking
+        dynamics for recurrent connectivity architectures. All synapses
+        have a delay of 2 configured.
+        """
+        shape = (3, 3)
+        num_steps = 6
+        # Set up external input to emulate every neuron spiking on every
+        # timestep.
+        vec_to_send = np.ones((shape[1],), dtype=float)
+        send_at_times = np.repeat(True, (num_steps,))
+        sps = VecSendandRecvProcess(shape=(shape[1],), num_steps=num_steps,
+                                    vec_to_send=vec_to_send,
+                                    send_at_times=send_at_times)
+        # Set up Dense Process with a fully connected recurrent connectivity
+        # architecture.
+        weights = np.ones(shape, dtype=float)
+        delays = 2
+        dense = DelayDense(weights=weights, delays=delays)
+        # Receive neuron spikes
+        sps.s_out.connect(dense.s_in)
+        dense.a_out.connect(sps.a_in)
+        # Configure execution and run
+        rcnd = RunSteps(num_steps=num_steps)
+        rcfg = DenseRunConfig(select_tag='fixed_pt')
+        dense.run(condition=rcnd, run_cfg=rcfg)
+        dense.stop()
diff --git a/tests/lava/proc/dense/test_process.py b/tests/lava/proc/dense/test_process.py
index ba0772d90..569766209 100644
--- a/tests/lava/proc/dense/test_process.py
+++ b/tests/lava/proc/dense/test_process.py
@@ -5,7 +5,7 @@ import unittest
 import numpy as np
 
-from lava.proc.dense.process import Dense, LearningDense
+from lava.proc.dense.process import Dense, LearningDense, DelayDense
 
 from lava.proc.learning_rules.stdp_learning_rule import STDPLoihi
@@ -49,3 +49,55 @@ def test_init(self):
         self.assertEqual(np.shape(conn.weights.init), shape)
 
         np.testing.assert_array_equal(conn.weights.init, weights)
+
+
+class TestDelayDenseProcess(unittest.TestCase):
+    """Tests for DelayDense class"""
+
+    def test_init(self):
+        """Tests instantiation of DelayDense"""
+        shape = (100, 200)
+        weights = np.random.randint(100, size=shape)
+        delays = np.random.randint(10, size=shape)
+
+        conn = DelayDense(weights=weights, delays=delays)
+
+        self.assertEqual(np.shape(conn.weights.init), shape)
+        np.testing.assert_array_equal(conn.weights.init, weights)
+        np.testing.assert_array_equal(conn.delays.init, delays)
+
+    def test_init_max_delay(self):
+        """Tests that the parameter 'max_delay' creates an appropriately
+        sized buffer 'a_buff'. If 'max_delay' is 15 and 'delays' is 5,
+        a_buff should have shape (shape[0], 15 + 1).
+        """
+        shape = (100, 200)
+        weights = np.random.randint(100, size=shape)
+        delays = 5
+        max_delay = 15
+        expected_a_buff_shape = (shape[0], max_delay + 1)
+
+        conn = DelayDense(weights=weights, delays=delays, max_delay=max_delay)
+
+        self.assertEqual(np.shape(conn.weights.init), shape)
+        np.testing.assert_array_equal(conn.weights.init, weights)
+        np.testing.assert_array_equal(conn.delays.init, delays)
+        np.testing.assert_array_equal(conn.a_buff.shape, expected_a_buff_shape)
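Editorial note (not part of the patch): max_delay matters because a_buff is allocated once at construction time. If delays may grow during execution, reserve room up front; otherwise the buffer is sized from the initial delays. A hypothetical sketch mirroring the test above:

    import numpy as np
    from lava.proc.dense.process import DelayDense

    weights = np.random.randint(10, size=(3, 4))
    # Delays start at 2 everywhere but may be raised later, up to 20.
    conn = DelayDense(weights=weights, delays=2, max_delay=20)
    assert conn.a_buff.shape == (3, 21)   # (num_outputs, max_delay + 1)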
+ """ + shape = (100, 200) + weights = np.random.randint(100, size=shape) + delays = 5 + max_delay = 15 + expected_a_buff_shape = (shape[0], max_delay + 1) + + conn = DelayDense(weights=weights, delays=delays, max_delay=max_delay) + + self.assertEqual(np.shape(conn.weights.init), shape) + np.testing.assert_array_equal(conn.weights.init, weights) + np.testing.assert_array_equal(conn.delays.init, delays) + np.testing.assert_array_equal(conn.a_buff.shape, expected_a_buff_shape) + + def test_input_validation_delays(self): + """Tests input validation on the dimensions and values of 'delays'. + (Must be 2D and positive values.)""" + weights = np.random.randint(100, size=(2, 4)) + delays = np.random.randint(10, size=(3, 4)) + + with self.assertRaises(ValueError): + DelayDense(weights=weights, delays=delays) + delays = -1 + with self.assertRaises(ValueError): + DelayDense(weights=weights, delays=delays) + delays = 1.2 + with self.assertRaises(ValueError): + DelayDense(weights=weights, delays=delays) + delays = np.random.rand(3, 4) + with self.assertRaises(ValueError): + DelayDense(weights=weights, delays=delays)