Skip to content

Commit

Permalink
Linting fixes
Browse files Browse the repository at this point in the history
  • Loading branch information
elvinhajizada committed Jul 4, 2023
1 parent 3e4a4fc commit e24f412
Show file tree
Hide file tree
Showing 5 changed files with 60 additions and 47 deletions.
9 changes: 5 additions & 4 deletions src/lava/proc/clp/nsm/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,12 +93,13 @@ def run_spk(self) -> None:
print("Correct")
infer_check = 1
else:
# If the error occurs, trigger allocation by sending an allocation signal
# If the error occurs, trigger allocation by sending an
# allocation signal
print("False")
infer_check = -1
allocation_trigger = True

# If this prototpye has a pseudo-label, then we label it with
# If this prototype has a pseudo-label, then we label it with
# the user-provided label and do not send any feedback (because
# we did not have an actual prediction)

Expand Down Expand Up @@ -131,13 +132,13 @@ class PyAllocatorModel(PyLoihiProcessModel):
def __init__(self, proc_params):
super().__init__(proc_params)
self.n_protos = proc_params['n_protos']
self.next_alloc_id = 0 # The id of the next neuron to be allocated

def run_spk(self) -> None:
# Allocation signal, initialized to a vector of zeros
alloc_signal = np.zeros(shape=self.allocate_out.shape)

# Check the input, if a trigger for allocation is received then we send allocation signal to the next neuron
# Check the input, if a trigger for allocation is received then we
# send allocation signal to the next neuron
allocating = self.trigger_in.recv()[0]
if allocating:
# Choose the specific element of the OutPort to send allocate
Expand Down
19 changes: 11 additions & 8 deletions src/lava/proc/clp/nsm/process.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,27 +58,30 @@ def __init__(self, *,


class Allocator(AbstractProcess):
""" Allocator process of CLP system. When triggered by other processes it will send a one-hot-encoded allocation
signal to the prototype population, specifically targeting next neuron to be allocated. It holds the reference to
the id of the next neuron to be allocated.
""" Allocator process of CLP system. When triggered by other processes
it will send a one-hot-encoded allocation signal to the prototype
population, specifically targeting the next neuron to be allocated. It holds
the reference to the id of the next neuron to be allocated.
Parameters
----------
n_protos : int
n_protos: int
The number of prototypes that this Allocator process can
target. Each time an allocation trigger input is received the next unallocated
prototype will be targeted by the output of the Allocator
process.
target. Each time an allocation trigger input is received the
next unallocated prototype will be targeted by the output of the
Allocator process.
"""

def __init__(self, *,
n_protos: int) -> None:

super().__init__(n_protos=n_protos)

self.trigger_in = InPort(shape=(1,)) # input for triggering allocation
self.allocate_out = OutPort(shape=(n_protos,)) # one-hot-encoded output for allocating specific prototype
# input for triggering allocation
self.trigger_in = InPort(shape=(1,))
# one-hot-encoded output for allocating specific prototype
self.allocate_out = OutPort(shape=(n_protos,))

# The id of the next prototype to be allocated
self.next_alloc_id = Var(shape=(1,), init=0)
58 changes: 30 additions & 28 deletions src/lava/proc/clp/prototype_lif/process.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,8 @@

from lava.magma.core.process.variable import Var
from lava.proc.lif.process import LearningLIF
from lava.magma.core.learning.learning_rule import Loihi2FLearningRule, Loihi3FLearningRule
from lava.magma.core.learning.learning_rule import Loihi2FLearningRule, \
Loihi3FLearningRule
from lava.magma.core.process.process import LogConfig


Expand All @@ -17,35 +18,36 @@ class PrototypeLIF(LearningLIF):
Prototypes (CLP) algorithm. """

def __init__(
self,
*,
shape: ty.Tuple[int, ...],
u: ty.Optional[ty.Union[float, list, np.ndarray]] = 0,
v: ty.Optional[ty.Union[float, list, np.ndarray]] = 0,
du: ty.Optional[float] = 0,
dv: ty.Optional[float] = 0,
bias_mant: ty.Optional[ty.Union[float, list, np.ndarray]] = 0,
bias_exp: ty.Optional[ty.Union[float, list, np.ndarray]] = 0,
vth: ty.Optional[float] = 10,
name: ty.Optional[str] = None,
log_config: ty.Optional[LogConfig] = None,
learning_rule: ty.Union[Loihi2FLearningRule, Loihi3FLearningRule] = None,
lr_init: ty.Optional[float] = 127,
**kwargs,
self,
*,
shape: ty.Tuple[int, ...],
u: ty.Optional[ty.Union[float, list, np.ndarray]] = 0,
v: ty.Optional[ty.Union[float, list, np.ndarray]] = 0,
du: ty.Optional[float] = 0,
dv: ty.Optional[float] = 0,
bias_mant: ty.Optional[ty.Union[float, list, np.ndarray]] = 0,
bias_exp: ty.Optional[ty.Union[float, list, np.ndarray]] = 0,
vth: ty.Optional[float] = 10,
name: ty.Optional[str] = None,
log_config: ty.Optional[LogConfig] = None,
learning_rule: ty.Union[
Loihi2FLearningRule, Loihi3FLearningRule] = None,
lr_init: ty.Optional[float] = 127,
**kwargs,
) -> None:
super().__init__(
shape=shape,
u=u,
v=v,
du=du,
dv=dv,
vth=vth,
bias_mant=bias_mant,
bias_exp=bias_exp,
name=name,
log_config=log_config,
learning_rule=learning_rule,
**kwargs,
shape=shape,
u=u,
v=v,
du=du,
dv=dv,
vth=vth,
bias_mant=bias_mant,
bias_exp=bias_exp,
name=name,
log_config=log_config,
learning_rule=learning_rule,
**kwargs,
)

self.lr = Var(shape=(1,), init=lr_init)
12 changes: 8 additions & 4 deletions tests/lava/proc/clp/integration/novelty_det_prototypes.py
Original file line number Diff line number Diff line change
Expand Up @@ -424,7 +424,8 @@ def test_nvl_detection_triggers_one_shot_learning(self):

print(expected_weights)

np.testing.assert_array_almost_equal(expected_weights, result_weights, decimal=0)
np.testing.assert_array_almost_equal(expected_weights, result_weights,
decimal=0)

def test_allocation_triggered_by_erroneous_classification(self):
# General params
Expand Down Expand Up @@ -541,7 +542,8 @@ def test_allocation_triggered_by_erroneous_classification(self):
# Sending y1 spike
prototypes.s_out_y1.connect(dense_proto.s_in_y1)

# Prototype Neurons' outputs connect to the inference input of the Readout process
# Prototype Neurons' outputs connect to the inference input of the
# Readout process
prototypes.s_out.connect(readout.inference_in)

# Label input to the Readout process
Expand Down Expand Up @@ -575,7 +577,8 @@ def test_allocation_triggered_by_erroneous_classification(self):
result_protos = result_protos[prototypes.name][prototypes.s_out.name].T

result_alloc = monitor_alloc.get_data()
result_alloc = result_alloc[allocator.name][allocator.allocate_out.name].T
result_alloc = result_alloc[allocator.name][
allocator.allocate_out.name].T

print("Readout layer allocation trigger:", result_alloc)

Expand All @@ -592,7 +595,8 @@ def test_allocation_triggered_by_erroneous_classification(self):
expected_alloc[2, 30] = 1

expected_proto_out = np.zeros((n_protos, t_run))
expected_proto_out[0, [9, 24]] = 1 # 1) novelty-based allocation triggered, 2) erroneous prediction
# 1) novelty-based allocation triggered, 2) erroneous prediction
expected_proto_out[0, [9, 24]] = 1
expected_proto_out[1, 19] = 1 # novelty-based allocation triggered
expected_proto_out[2, 30] = 1  # error-based allocation triggered

Expand Down
9 changes: 6 additions & 3 deletions tests/lava/proc/clp/nsm/test_nsm.py
Original file line number Diff line number Diff line change
Expand Up @@ -183,7 +183,8 @@ def test_feedback_and_allocation_output(self):
result_fb = result_fb[readout_layer.name][readout_layer.feedback.name].T

result_alloc = monitor_alloc.get_data()
result_alloc = result_alloc[readout_layer.name][readout_layer.trigger_alloc.name].T
result_alloc = result_alloc[readout_layer.name][
readout_layer.trigger_alloc.name].T

infer_in.stop()
# Validate the novelty detection output
Expand All @@ -194,7 +195,8 @@ def test_feedback_and_allocation_output(self):
np.testing.assert_array_equal(result_fb, expected_fb)

expected_alloc = np.zeros(shape=(1, t_run))
expected_alloc[0, 17] = 1 # We expect allocation trigger output when there is a mismatch
# We expect allocation trigger output when there is a mismatch
expected_alloc[0, 17] = 1
np.testing.assert_array_equal(result_alloc, expected_alloc)


Expand Down Expand Up @@ -226,7 +228,8 @@ def test_allocator_output(self):
allocator.run(condition=run_cond, run_cfg=run_cfg)

result_alloc = monitor_alloc.get_data()
result_alloc = result_alloc[allocator.name][allocator.allocate_out.name].T
result_alloc = result_alloc[allocator.name][
allocator.allocate_out.name].T

allocator.stop()
# Validate the allocation output
Expand Down

0 comments on commit e24f412

Please sign in to comment.