From e24f412b80ad591e017387acff5987f526ca7a4b Mon Sep 17 00:00:00 2001 From: Elvin Hajizada Date: Tue, 4 Jul 2023 13:03:58 +0200 Subject: [PATCH] Linting fixes --- src/lava/proc/clp/nsm/models.py | 9 +-- src/lava/proc/clp/nsm/process.py | 19 +++--- src/lava/proc/clp/prototype_lif/process.py | 58 ++++++++++--------- .../clp/integration/novelty_det_prototypes.py | 12 ++-- tests/lava/proc/clp/nsm/test_nsm.py | 9 ++- 5 files changed, 60 insertions(+), 47 deletions(-) diff --git a/src/lava/proc/clp/nsm/models.py b/src/lava/proc/clp/nsm/models.py index 77cdae4ad..3a0a2c2a0 100644 --- a/src/lava/proc/clp/nsm/models.py +++ b/src/lava/proc/clp/nsm/models.py @@ -93,12 +93,13 @@ def run_spk(self) -> None: print("Correct") infer_check = 1 else: - # If the error occurs, trigger allocation by sending an allocation signal + # If the error occurs, trigger allocation by sending an + # allocation signal print("False") infer_check = -1 allocation_trigger = True - # If this prototpye has a pseudo-label, then we label it with + # If this prototype has a pseudo-label, then we label it with # the user-provided label and do not send any feedback (because # we did not have an actual prediction) @@ -131,13 +132,13 @@ class PyAllocatorModel(PyLoihiProcessModel): def __init__(self, proc_params): super().__init__(proc_params) self.n_protos = proc_params['n_protos'] - self.next_alloc_id = 0 # The id of the next neuron to be allocated def run_spk(self) -> None: # Allocation signal, initialized to a vector of zeros alloc_signal = np.zeros(shape=self.allocate_out.shape) - # Check the input, if a trigger for allocation is received then we send allocation signal to the next neuron + # Check the input, if a trigger for allocation is received then we + # send allocation signal to the next neuron allocating = self.trigger_in.recv()[0] if allocating: # Choose the specific element of the OutPort to send allocate diff --git a/src/lava/proc/clp/nsm/process.py b/src/lava/proc/clp/nsm/process.py index 
0710ae936..b19da39fd 100644 --- a/src/lava/proc/clp/nsm/process.py +++ b/src/lava/proc/clp/nsm/process.py @@ -58,18 +58,19 @@ def __init__(self, *, class Allocator(AbstractProcess): - """ Allocator process of CLP system. When triggered by other processes it will send a one-hot-encoded allocation - signal to the prototype population, specifically targeting next neuron to be allocated. It holds the reference to - the id of the next neuron to be allocated. + """ Allocator process of CLP system. When triggered by other processes + it will send a one-hot-encoded allocation signal to the prototype + population, specifically targeting the next neuron to be allocated. It holds + the reference to the id of the next neuron to be allocated. Parameters ---------- n_protos : int n_protos: int The number of prototypes that this Allocator process can - target. Each time a allocation trigger input is received the next unallocated - prototype will be targeted by the output of the Allocator - process. + target. Each time an allocation trigger input is received the + next unallocated prototype will be targeted by the output of the + Allocator process. 
""" def __init__(self, *, @@ -77,8 +78,10 @@ def __init__(self, *, super().__init__(n_protos=n_protos) - self.trigger_in = InPort(shape=(1,)) # input for triggering allocation - self.allocate_out = OutPort(shape=(n_protos,)) # one-hot-encoded output for allocating specific prototype + # input for triggering allocation + self.trigger_in = InPort(shape=(1,)) + # one-hot-encoded output for allocating specific prototype + self.allocate_out = OutPort(shape=(n_protos,)) # The id of the next prototype to be allocated self.next_alloc_id = Var(shape=(1,), init=0) diff --git a/src/lava/proc/clp/prototype_lif/process.py b/src/lava/proc/clp/prototype_lif/process.py index 19f8d8b6b..3011e9988 100644 --- a/src/lava/proc/clp/prototype_lif/process.py +++ b/src/lava/proc/clp/prototype_lif/process.py @@ -7,7 +7,8 @@ from lava.magma.core.process.variable import Var from lava.proc.lif.process import LearningLIF -from lava.magma.core.learning.learning_rule import Loihi2FLearningRule, Loihi3FLearningRule +from lava.magma.core.learning.learning_rule import Loihi2FLearningRule, \ + Loihi3FLearningRule from lava.magma.core.process.process import LogConfig @@ -17,35 +18,36 @@ class PrototypeLIF(LearningLIF): Prototypes (CLP) algorithm. 
""" def __init__( - self, - *, - shape: ty.Tuple[int, ...], - u: ty.Optional[ty.Union[float, list, np.ndarray]] = 0, - v: ty.Optional[ty.Union[float, list, np.ndarray]] = 0, - du: ty.Optional[float] = 0, - dv: ty.Optional[float] = 0, - bias_mant: ty.Optional[ty.Union[float, list, np.ndarray]] = 0, - bias_exp: ty.Optional[ty.Union[float, list, np.ndarray]] = 0, - vth: ty.Optional[float] = 10, - name: ty.Optional[str] = None, - log_config: ty.Optional[LogConfig] = None, - learning_rule: ty.Union[Loihi2FLearningRule, Loihi3FLearningRule] = None, - lr_init: ty.Optional[float] = 127, - **kwargs, + self, + *, + shape: ty.Tuple[int, ...], + u: ty.Optional[ty.Union[float, list, np.ndarray]] = 0, + v: ty.Optional[ty.Union[float, list, np.ndarray]] = 0, + du: ty.Optional[float] = 0, + dv: ty.Optional[float] = 0, + bias_mant: ty.Optional[ty.Union[float, list, np.ndarray]] = 0, + bias_exp: ty.Optional[ty.Union[float, list, np.ndarray]] = 0, + vth: ty.Optional[float] = 10, + name: ty.Optional[str] = None, + log_config: ty.Optional[LogConfig] = None, + learning_rule: ty.Union[ + Loihi2FLearningRule, Loihi3FLearningRule] = None, + lr_init: ty.Optional[float] = 127, + **kwargs, ) -> None: super().__init__( - shape=shape, - u=u, - v=v, - du=du, - dv=dv, - vth=vth, - bias_mant=bias_mant, - bias_exp=bias_exp, - name=name, - log_config=log_config, - learning_rule=learning_rule, - **kwargs, + shape=shape, + u=u, + v=v, + du=du, + dv=dv, + vth=vth, + bias_mant=bias_mant, + bias_exp=bias_exp, + name=name, + log_config=log_config, + learning_rule=learning_rule, + **kwargs, ) self.lr = Var(shape=(1,), init=lr_init) diff --git a/tests/lava/proc/clp/integration/novelty_det_prototypes.py b/tests/lava/proc/clp/integration/novelty_det_prototypes.py index 4909bba12..8106e7e27 100644 --- a/tests/lava/proc/clp/integration/novelty_det_prototypes.py +++ b/tests/lava/proc/clp/integration/novelty_det_prototypes.py @@ -424,7 +424,8 @@ def test_nvl_detection_triggers_one_shot_learning(self): 
print(expected_weights) - np.testing.assert_array_almost_equal(expected_weights, result_weights, decimal=0) + np.testing.assert_array_almost_equal(expected_weights, result_weights, + decimal=0) def test_allocation_triggered_by_erroneous_classification(self): # General params @@ -541,7 +542,8 @@ def test_allocation_triggered_by_erroneous_classification(self): # Sending y1 spike prototypes.s_out_y1.connect(dense_proto.s_in_y1) - # Prototype Neurons' outputs connect to the inference input of the Readout process + # Prototype Neurons' outputs connect to the inference input of the + # Readout process prototypes.s_out.connect(readout.inference_in) # Label input to the Readout proces @@ -575,7 +577,8 @@ def test_allocation_triggered_by_erroneous_classification(self): result_protos = result_protos[prototypes.name][prototypes.s_out.name].T result_alloc = monitor_alloc.get_data() - result_alloc = result_alloc[allocator.name][allocator.allocate_out.name].T + result_alloc = result_alloc[allocator.name][ + allocator.allocate_out.name].T print("Readout layer allocation trigger:", result_alloc) @@ -592,7 +595,8 @@ def test_allocation_triggered_by_erroneous_classification(self): expected_alloc[2, 30] = 1 expected_proto_out = np.zeros((n_protos, t_run)) - expected_proto_out[0, [9, 24]] = 1 # 1) novelty-based allocation triggered, 2) erroneous prediction + # 1) novelty-based allocation triggered, 2) erroneous prediction + expected_proto_out[0, [9, 24]] = 1 expected_proto_out[1, 19] = 1 # novelty-based allocation triggered expected_proto_out[2, 30] = 1 # error-based allocation triggerd diff --git a/tests/lava/proc/clp/nsm/test_nsm.py b/tests/lava/proc/clp/nsm/test_nsm.py index cf365f098..3b2d42588 100644 --- a/tests/lava/proc/clp/nsm/test_nsm.py +++ b/tests/lava/proc/clp/nsm/test_nsm.py @@ -183,7 +183,8 @@ def test_feedback_and_allocation_output(self): result_fb = result_fb[readout_layer.name][readout_layer.feedback.name].T result_alloc = monitor_alloc.get_data() - result_alloc = 
result_alloc[readout_layer.name][readout_layer.trigger_alloc.name].T + result_alloc = result_alloc[readout_layer.name][ + readout_layer.trigger_alloc.name].T infer_in.stop() # Validate the novelty detection output @@ -194,7 +195,8 @@ def test_feedback_and_allocation_output(self): np.testing.assert_array_equal(result_fb, expected_fb) expected_alloc = np.zeros(shape=(1, t_run)) - expected_alloc[0, 17] = 1 # We expect allocation trigger output when there is a mismatch + # We expect allocation trigger output when there is a mismatch + expected_alloc[0, 17] = 1 np.testing.assert_array_equal(result_alloc, expected_alloc) @@ -226,7 +228,8 @@ def test_allocator_output(self): allocator.run(condition=run_cond, run_cfg=run_cfg) result_alloc = monitor_alloc.get_data() - result_alloc = result_alloc[allocator.name][allocator.allocate_out.name].T + result_alloc = result_alloc[allocator.name][ + allocator.allocate_out.name].T allocator.stop() # Validate the allocation output