The Initial version of CLP in CPU backend (lava-nc#707)
* CLP initial commit: PrototypeLIF, NoveltyDetector, Readout procs/tests

* small linting fix

* Novelty detector upgraded to target next neuron; codacy errors fixed

* integration test; small fixes

* removed duplicate code in prototypeLIF process; linting fixes

* linting fixes

* Linting and codacy fixes

* remove duplicate test; some more codacy fixes

* PrototypeLIF spikes when it receives a 3rd factor input

* a test for PrototypeLIF output spike after 3rd factor input

* Allocation & prototype id tracking is abstracted away from
NoveltyDetector

* Allocator process; Readout proc sends allocation trigger if error

* introduce learning rate Var in PrototypeLIF

* updated integration tests; full system test included

* Linting fixes

* Another small linting fix

* PrototypeLIF hard reset capability to enable faster temporal WTA

* allocation mechanism changed; proc interfaces changes; dense conns
added; lr var removed

* small linting fix

* small codacy fix

* prints removed, spelling mistakes fixed

* ignoring one check in an integration test

* Revert "small linting fix"

This reverts commit bde4fa9.

* Fix linting in test_models.py

* Test fix in utils.py

* Fix test of bug fix in utils.py

* Fix utils.py

* Implemented individual threadsafe random call

Signed-off-by: bamsumit <[email protected]>

---------

Signed-off-by: bamsumit <[email protected]>
Co-authored-by: PhilippPlank <[email protected]>
Co-authored-by: Marcus G K Williams <[email protected]>
Co-authored-by: bamsumit <[email protected]>
4 people committed Jul 18, 2023
1 parent 3c10654 commit bcbd465
Showing 16 changed files with 1,747 additions and 22 deletions.
71 changes: 71 additions & 0 deletions src/lava/proc/clp/novelty_detector/models.py
@@ -0,0 +1,71 @@
# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# See: https://spdx.org/licenses/

import numpy as np

from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol
from lava.magma.core.model.py.ports import PyInPort, PyOutPort
from lava.magma.core.model.py.type import LavaPyType
from lava.magma.core.resources import CPU
from lava.magma.core.decorator import implements, requires, tag
from lava.magma.core.model.py.model import PyLoihiProcessModel

from lava.proc.clp.novelty_detector.process import NoveltyDetector


@implements(proc=NoveltyDetector, protocol=LoihiProtocol)
@requires(CPU)
@tag("fixed_pt", 'bit_accurate_loihi')
class PyNoveltyDetectorModel(PyLoihiProcessModel):
    """Python implementation of the NoveltyDetector process."""

    input_aval_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, np.int32)
    output_aval_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, np.int32)

    novelty_detected_out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, np.int32)
    t_wait: np.int32 = LavaPyType(np.ndarray, np.int32, precision=32)

    def __init__(self, proc_params):
        super().__init__(proc_params)
        self.waiting = False  # Whether we are currently waiting for an output
        self.t_passed = 0  # Time passed since the injection of the input
        self.novelty_detected = False

    def run_spk(self) -> None:

        # If an input is available, start the clock while waiting for an
        # output.
        a_in = self.input_aval_in.recv()
        if a_in != 0:
            self.waiting = True
            self.t_passed = 0

        # If an output is available, the input is a known pattern,
        # so we stop waiting and reset the clock.
        a_in = self.output_aval_in.recv()
        if a_in != 0:
            self.waiting = False
            self.t_passed = 0

        # If not, check whether the waiting time limit has been exceeded.
        # If so, we assume this is a novel pattern.
        elif self.t_passed > self.t_wait:
            self.novelty_detected = True
            self.waiting = False
            self.t_passed = 0

        # If we are still waiting, increment the time counter
        if self.waiting:
            self.t_passed += 1

        # If we have detected novelty, send this signal downstream and set
        # the flag back to False
        if self.novelty_detected:
            self.novelty_detected_out.send(np.array([1]))
            self.novelty_detected = False
            self.waiting = False

        else:
            # Otherwise, just send zeros (i.e. no signal)
            self.novelty_detected_out.send(np.array([0]))
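
To make the timing behaviour of run_spk above concrete, here is a minimal plain-Python sketch of the same waiting/time-out state machine, independent of the Lava runtime (the function name and the example input sequences are illustrative assumptions, not part of this commit):

def simulate_novelty_detector(input_aval, output_aval, t_wait):
    """Replay the NoveltyDetector timing logic over pre-recorded per-timestep
    input-available / output-available signals; return the novelty signal."""
    waiting = False
    t_passed = 0
    novelty = []
    for inp, out in zip(input_aval, output_aval):
        novelty_detected = False
        if inp != 0:                 # an input was injected: start waiting
            waiting = True
            t_passed = 0
        if out != 0:                 # an output arrived in time: known pattern
            waiting = False
            t_passed = 0
        elif t_passed > t_wait:      # waited too long: novel pattern
            novelty_detected = True
            waiting = False
            t_passed = 0
        if waiting:
            t_passed += 1
        novelty.append(1 if novelty_detected else 0)
    return novelty


# Input injected at t=0 and never answered within t_wait=3 timesteps:
# the novelty signal fires at t=4.
print(simulate_novelty_detector([1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0], t_wait=3))
# -> [0, 0, 0, 0, 1, 0, 0]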
44 changes: 44 additions & 0 deletions src/lava/proc/clp/novelty_detector/process.py
@@ -0,0 +1,44 @@
# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# See: https://spdx.org/licenses/

from lava.magma.core.process.ports.ports import InPort, OutPort
from lava.magma.core.process.process import AbstractProcess
from lava.magma.core.process.variable import Var


class NoveltyDetector(AbstractProcess):
    """Novelty detection process.

    This process detects a mismatch between the injection of an input into
    the system and the generation of an output by the system. If the system
    processes an input but does not generate an output (i.e. all the
    Prototype neurons stay silent) within a given time window after the
    beginning of the input processing, then the NoveltyDetector process
    generates a signal. This signal means that a novel (unknown) input has
    been detected.

    Parameters
    ----------
    t_wait : int
        The amount of time the process will wait after receiving the
        signal about input injection into the system before sending out a
        novelty detection signal. If the system (the Prototype neurons)
        generates an output within this time window, then the process is
        reset and NO novelty detection signal is sent out.
    """

    def __init__(self, *,
                 t_wait: int
                 ) -> None:
        super().__init__()

        # An input is being processed by the system
        self.input_aval_in = InPort(shape=(1,))

        # An output is generated by the system
        self.output_aval_in = InPort(shape=(1,))

        # OutPort for sending out the novelty detection signal
        self.novelty_detected_out = OutPort(shape=(1,))

        self.t_wait = Var(shape=(1,), init=t_wait)
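
As a usage sketch of this process (hedged: the upstream and downstream processes named in the comments are placeholders, not the actual CLP wiring in this commit), it can be instantiated and connected through its three ports:

from lava.proc.clp.novelty_detector.process import NoveltyDetector

# Wait up to 10 timesteps for a prototype response before flagging novelty.
nd = NoveltyDetector(t_wait=10)

# Illustrative connections (placeholder process and port names):
# input_source.input_aval_out.connect(nd.input_aval_in)       # "input injected"
# prototype_layer.output_aval_out.connect(nd.output_aval_in)  # "output produced"
# nd.novelty_detected_out.connect(downstream.novelty_in)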
149 changes: 149 additions & 0 deletions src/lava/proc/clp/nsm/models.py
@@ -0,0 +1,149 @@
# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# See: https://spdx.org/licenses/

import numpy as np

from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol
from lava.magma.core.model.py.ports import PyInPort, PyOutPort
from lava.magma.core.model.py.type import LavaPyType
from lava.magma.core.resources import CPU
from lava.magma.core.decorator import implements, requires, tag
from lava.magma.core.model.py.model import PyLoihiProcessModel

from lava.proc.clp.nsm.process import Readout
from lava.proc.clp.nsm.process import Allocator


@implements(proc=Readout, protocol=LoihiProtocol)
@requires(CPU)
@tag("fixed_pt")
class PyReadoutModel(PyLoihiProcessModel):
    """Python implementation of the Readout process.

    This process runs on the superhost and is the main interface
    process with the user.
    """
    inference_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, np.int32,
                                        precision=24)
    label_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, np.int32)

    user_output: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, np.int32)
    trigger_alloc: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, np.int32)
    feedback: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, np.int32)
    proto_labels: np.ndarray = LavaPyType(np.ndarray, np.int32)
    last_winner_id: np.int32 = LavaPyType(np.ndarray, np.int32)

    def run_spk(self) -> None:
        # Read the output of the prototype neurons
        output_vec = self.inference_in.recv()
        # Read the user-provided label
        user_label = self.label_in.recv()[0]
        # Feedback about the correctness of the prediction: +1 if correct,
        # -1 if incorrect, 0 if no label is provided by the user at this point.
        infer_check = 0

        # If there is an active prototype neuron, this will temporarily store
        # the label of that neuron
        inferred_label = 0

        # Flag for the allocation trigger
        allocation_trigger = False

        # If any prototype neuron is active, we go here. We assume there is
        # only one active neuron in the prototype population.
        if output_vec.any():

            # Find the id of the winner neuron and store it
            winner_proto_id = np.nonzero(output_vec)[0][0]
            self.last_winner_id = winner_proto_id

            # Get the label of this neuron from the label list
            inferred_label = self.proto_labels[winner_proto_id]

            # If this label is zero, this prototype is not labeled.
            if inferred_label == 0:
                # So, we give a pseudo-label to the unknown winner.
                # These are temporary negative labels based on the id
                # of the prototype, generated as follows.
                self.proto_labels[winner_proto_id] = -1 * (winner_proto_id + 1)

                # Now this pseudo-label is our inferred label.
                inferred_label = self.proto_labels[winner_proto_id]

        # Next we check whether a user-provided label is available.
        if user_label != 0:

            # If so, we need to access the most recent winner's label,
            # assuming temporal causality between the prediction made by
            # the system and the label provided by the user.
            last_inferred_label = self.proto_labels[self.last_winner_id]

            # If the most recently predicted label (i.e. the one for the
            # current input, which the user-provided label also refers to)
            # is an actual label (not a pseudo one), then we check the
            # correctness of the predicted label against the user-provided
            # one.

            if last_inferred_label > 0:  # "Known Known class"
                if last_inferred_label == user_label:
                    infer_check = 1
                else:
                    # If an error occurs, trigger allocation by sending an
                    # allocation signal
                    infer_check = -1
                    allocation_trigger = True

            # If this prototype has a pseudo-label, then we label it with
            # the user-provided label and do not send any feedback (because
            # we did not have an actual prediction)

            elif last_inferred_label < 0:  # "Known Unknown class"
                self.proto_labels[self.last_winner_id] = user_label
                inferred_label = user_label

        # Send out the predicted label (if any) and the feedback about the
        # correctness of this prediction once the user provides the actual
        # label
        self.user_output.send(np.array([inferred_label]))
        self.feedback.send(np.array([infer_check]))
        if allocation_trigger:
            self.trigger_alloc.send(np.array([1]))
        else:
            self.trigger_alloc.send(np.array([0]))


@implements(proc=Allocator, protocol=LoihiProtocol)
@requires(CPU)
@tag("fixed_pt")
class PyAllocatorModel(PyLoihiProcessModel):
    """Python implementation of the Allocator process."""

    trigger_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, np.int32)
    allocate_out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, np.int32)
    next_alloc_id: np.int32 = LavaPyType(np.ndarray, np.int32)
    n_protos: np.int32 = LavaPyType(np.ndarray, np.int32)

    def __init__(self, proc_params):
        super().__init__(proc_params)

    def run_spk(self) -> None:
        # Allocation signal, initialized to a vector of zeros
        alloc_signal = np.zeros(shape=self.allocate_out.shape, dtype=np.int32)

        # Check the input; if a trigger for allocation is received, send the
        # allocation signal to the next neuron
        allocating = self.trigger_in.recv()[0]
        if allocating:
            # Choose the specific element of the OutPort to send the allocate
            # signal. This is a single graded spike whose payload is the id
            # of the next neuron to be allocated. Note that these ids start
            # from id=1, as a graded value of zero means no signal. Hence,
            # the initial value of next_alloc_id is one, and after each
            # allocation it is incremented by one.
            alloc_signal[0] = self.next_alloc_id

            # Increment this counter to point to the next neuron
            self.next_alloc_id += 1

        # Send the allocation signal (all zeros, i.e. no signal, if no
        # trigger was received)
        self.allocate_out.send(alloc_signal)
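
The pseudo-labelling and feedback bookkeeping of PyReadoutModel above can be illustrated with a small self-contained sketch, independent of the Lava runtime (the function and variable names below are illustrative, not part of this commit):

import numpy as np


def readout_step(output_vec, user_label, proto_labels, last_winner_id):
    """One step of the Readout labelling logic. Returns the inferred label,
    the feedback (+1 correct, -1 incorrect, 0 no label yet), whether an
    allocation should be triggered, and the updated last-winner id."""
    infer_check = 0
    inferred_label = 0
    allocate = False

    if output_vec.any():
        winner = np.nonzero(output_vec)[0][0]
        last_winner_id = winner
        inferred_label = proto_labels[winner]
        if inferred_label == 0:                   # unlabelled prototype wins
            proto_labels[winner] = -(winner + 1)  # give it a pseudo-label
            inferred_label = proto_labels[winner]

    if user_label != 0:
        last_label = proto_labels[last_winner_id]
        if last_label > 0:                        # real label: check prediction
            if last_label == user_label:
                infer_check = 1
            else:
                infer_check = -1                  # error: trigger allocation
                allocate = True
        elif last_label < 0:                      # pseudo-label: adopt user label
            proto_labels[last_winner_id] = user_label
            inferred_label = user_label

    return inferred_label, infer_check, allocate, last_winner_id


labels = np.zeros(3, dtype=int)
# Prototype 1 wins before any label is known: it receives pseudo-label -2.
print(readout_step(np.array([0, 1, 0]), 0, labels, 0))
# The user then provides label 7: the pseudo-label is replaced by 7.
print(readout_step(np.array([0, 0, 0]), 7, labels, 1))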
87 changes: 87 additions & 0 deletions src/lava/proc/clp/nsm/process.py
@@ -0,0 +1,87 @@
# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# See: https://spdx.org/licenses/

import typing as ty
import numpy as np

from lava.magma.core.process.ports.ports import InPort, OutPort
from lava.magma.core.process.process import AbstractProcess
from lava.magma.core.process.variable import Var


class Readout(AbstractProcess):
    """Readout process of the CLP system. It receives the output spikes from
    the PrototypeLIF neurons, looks up the label of the winning prototype and
    sends it out to the user as the inferred label.

    Additionally, if the winning neuron does not have a label, this process
    assigns a pseudo-label (a negative-valued label) for the time being.
    When a user-provided label is available, this process refers to the most
    recently predicted label. If that is a pseudo-label, then it assigns the
    user-provided label to this neuron. On the other hand, if that is a
    normal label (i.e. a positive number), then the process checks the
    correctness of the predicted label and provides feedback through another
    channel.

    Parameters
    ----------
    n_protos : int
        Number of Prototype LIF neurons that this process needs to read from.
    proto_labels : numpy.ndarray, optional
        Initial labels of the Prototype LIF neurons. If not provided,
        this array is initialized with zeros, meaning the prototypes
        are not labelled.
    """

    def __init__(self, *,
                 n_protos: int,
                 proto_labels: ty.Optional[np.ndarray] = None) -> None:
        # If not provided by the user, initialize it to zeros
        if proto_labels is None:
            proto_labels = np.zeros(shape=(n_protos,), dtype=int)

        super().__init__(proto_labels=proto_labels, n_protos=n_protos)

        self.inference_in = InPort(shape=(n_protos,))  # To read output spikes
        self.label_in = InPort(shape=(1,))  # User-provided labels go in here
        self.user_output = OutPort(shape=(1,))  # Output for predicted labels

        # Feedback to the user about the correctness of the prediction
        self.feedback = OutPort(shape=(1,))
        self.trigger_alloc = OutPort(shape=(1,))

        # The array for the labels of the prototype neurons
        self.proto_labels = Var(shape=(n_protos,), init=proto_labels)

        # The id of the most recent winner prototype
        self.last_winner_id = Var(shape=(1,), init=0)


class Allocator(AbstractProcess):
    """Allocator process of the CLP system. When triggered by other
    processes, it sends a one-hot-encoded allocation signal to the prototype
    population, specifically targeting the next neuron to be allocated. It
    holds a reference to the id of the next neuron to be allocated.

    Parameters
    ----------
    n_protos : int
        The number of prototypes that this Allocator process can
        target. Each time an allocation trigger input is received, the
        next unallocated prototype is targeted by the output of the
        Allocator process.
    """

    def __init__(self, *,
                 n_protos: int) -> None:

        super().__init__()

        # Input for triggering allocation
        self.trigger_in = InPort(shape=(1,))
        # One-hot-encoded output for allocating a specific prototype
        self.allocate_out = OutPort(shape=(1,))

        # The id of the next prototype to be allocated
        self.next_alloc_id = Var(shape=(1,), init=1)
        self.n_protos = Var(shape=(1,), init=n_protos)
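
A minimal wiring sketch for these two processes, under the assumption that a readout error is what triggers allocation (the full CLP network in this commit also wires in PrototypeLIF and NoveltyDetector processes; those connections and the commented-out port names below are assumptions, not part of this diff):

import numpy as np

from lava.proc.clp.nsm.process import Allocator, Readout

# Readout over four prototypes, none labelled yet; Allocator over the same four.
readout = Readout(n_protos=4, proto_labels=np.zeros(4, dtype=int))
allocator = Allocator(n_protos=4)

# A misclassification reported by Readout triggers allocation of the next
# unallocated prototype.
readout.trigger_alloc.connect(allocator.trigger_in)

# In the full system (sketch only, hypothetical process/port names):
# prototype_lif.s_out.connect(readout.inference_in)
# allocator.allocate_out.connect(prototype_lif_allocation_input)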