Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add rf and rf_iz neurons to lava #378

Merged
merged 34 commits into from
Nov 7, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
34 commits
Select commit Hold shift + click to select a range
05385c1
Added code for floating point rf and rf_iz neurons. Need to move towa…
Sep 16, 2022
0d10c46
Fixed RF process.
Sep 19, 2022
aea022d
Added fixed point for rf neurons. Floating point rf behavior looks weird
Sep 21, 2022
22eb1cb
Added modified rf activation for floating point
Sep 21, 2022
0060c12
fixed fixed_pt rf_iz neuron
Sep 21, 2022
0a9a675
moved location of scale threshold function to be in line with lif
Sep 26, 2022
a6c9a49
moving scale_threshold back to abstract rf model class since rf_iz us…
Sep 27, 2022
c51fa18
Merge branch 'main' into rf_neurons
PhilippPlank Oct 7, 2022
b61d497
added state_exp. Removed most of the deepcopies. Removed some default…
Oct 10, 2022
3c6709a
removed accidental print statement
Oct 10, 2022
b3c3693
Refactored rf and rf_iz class to look like sdn. Added comments and te…
Oct 18, 2022
4d32aed
Cleaned up test_resonator_process.py
Oct 18, 2022
4784106
Added fixed point unit tests for rf and rf_iz
Oct 21, 2022
80d9143
Merge branch 'main' into rf_neurons
PhilippPlank Oct 23, 2022
d4b52f7
Lava implementation of RF neurons matches lava-dl. RF unit test curre…
Oct 24, 2022
76fa909
Merge branch 'rf_neurons' of https://github.com/Michaeljurado24/lava …
Oct 24, 2022
da8d86b
Removed temporary testing files. Fixed linting errors
Michaeljurado42 Oct 24, 2022
6a40bd0
Fixed typo
Michaeljurado42 Oct 24, 2022
86322ec
Clarified comments
Michaeljurado42 Oct 24, 2022
558918c
increase specificity of unit tests
Michaeljurado42 Oct 24, 2022
dcaf949
Merge branch 'main' into rf_neurons
Michaeljurado42 Oct 24, 2022
81fb6d8
Merge branch 'main' into rf_neurons
PhilippPlank Oct 26, 2022
f65ed3c
Added comments and cleaned up test code. Added additional fixed point…
Michaeljurado42 Oct 28, 2022
8b22aa9
Merge branch 'main' into rf_neurons
Michaeljurado42 Oct 28, 2022
7ef1818
Rf unit test fails to spike due to floating point error on some machi…
Michaeljurado42 Oct 28, 2022
c6fbf68
Merge branch 'main' into rf_neurons
PhilippPlank Oct 28, 2022
7cdb458
Increase magnitude of input to rf float no decay unit test to encoura…
Michaeljurado42 Oct 31, 2022
b12a3af
Merge branch 'main' into rf_neurons
Michaeljurado42 Oct 31, 2022
6b118b4
Made floating point rf test possibly more robust to floating point er…
Nov 4, 2022
dcd6cba
fixed linting errors
Nov 4, 2022
e810d4b
Removed useless abs() op in unit test. Added small enhancement to make…
Nov 4, 2022
876187d
Slight change to test float no decay to test for periodic spiking
Michaeljurado42 Nov 6, 2022
8275d26
Add abs value to statement checking for proper spike periodicity for …
Michaeljurado42 Nov 6, 2022
a565b76
Simplify unit test and see if it passes run ci
Michaeljurado42 Nov 6, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
145 changes: 145 additions & 0 deletions src/lava/proc/rf/models.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,145 @@
# Copyright (C) 2021-22 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# See: https://spdx.org/licenses/

import numpy as np
from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol
from lava.magma.core.model.py.ports import PyInPort, PyOutPort
from lava.magma.core.model.py.type import LavaPyType
from lava.magma.core.resources import CPU
from lava.magma.core.decorator import implements, requires, tag
from lava.magma.core.model.py.model import PyLoihiProcessModel
from lava.proc.rf.process import RF


class AbstractPyRFModelFloat(PyLoihiProcessModel):
    """Shared floating point behavior of Resonate-and-Fire ProcessModels.

    Declares the port/variable types and implements the complex rotation
    (resonator) dynamics that concrete RF models build their spiking
    activation on top of.
    """

    a_real_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, float)
    a_imag_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, float)
    s_out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, bool, precision=1)
    real: np.ndarray = LavaPyType(np.ndarray, float)
    imag: np.ndarray = LavaPyType(np.ndarray, float)
    sin_decay: float = LavaPyType(float, float)
    cos_decay: float = LavaPyType(float, float)
    state_exp: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=3)
    decay_bits: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=3)
    vth: float = LavaPyType(float, float)

    def scale_volt(self, voltage):
        """Identity: floating point voltages require no downscaling."""
        return voltage

    def resonator_dynamics(self, a_real_in_data, a_imag_in_data, real, imag):
        """Advance the complex resonator voltage by one timestep.

        Rotates/decays the complex voltage (real, imag) and adds the
        incoming currents.

        Parameters
        ----------
        a_real_in_data : np.ndarray
            Real component input current
        a_imag_in_data : np.ndarray
            Imaginary component input current
        real : np.ndarray
            Real component voltage to be updated
        imag : np.ndarray
            Imag component voltage to be updated

        Returns
        -------
        np.ndarray, np.ndarray
            updated real and imaginary components
        """
        # Each product is individually rescaled (a no-op in floating point,
        # a right shift in fixed point) before the terms are combined.
        rotated_real = self.scale_volt(self.cos_decay * real) \
            - self.scale_volt(self.sin_decay * imag)
        rotated_imag = self.scale_volt(self.sin_decay * real) \
            + self.scale_volt(self.cos_decay * imag)

        return rotated_real + a_real_in_data, rotated_imag + a_imag_in_data

    def run_spk(self):
        raise NotImplementedError("spiking activation() cannot be called from "
                                  "an abstract ProcessModel")


@implements(proc=RF, protocol=LoihiProtocol)
@requires(CPU)
@tag('floating_pt')
class PyRFModelFloat(AbstractPyRFModelFloat):
    """Floating point ProcessModel of the Resonate-and-Fire neuron.

    Short, simple implementation intended for quick algorithmic
    prototyping without the nuances of a fixed point implementation.
    """

    def run_spk(self):
        real_input = self.a_real_in.recv()
        imag_input = self.a_imag_in.recv()

        real, imag = self.resonator_dynamics(real_input, imag_input,
                                             self.real, self.imag)

        # Spike when the real voltage exceeds threshold while the imaginary
        # voltage crosses zero from below (old imag < 0, new imag >= 0).
        spikes = (real >= self.vth) * (imag >= 0) * (self.imag < 0)

        self.real[:] = real
        self.imag[:] = imag
        self.s_out.send(spikes)


class AbstractPyRFModelFixed(AbstractPyRFModelFloat):
    """Shared fixed point behavior of Resonate-and-Fire ProcessModels.

    Overrides the floating point type declarations with fixed point ones
    and implements voltage downscaling via an arithmetic right shift.
    """

    a_real_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE,
                                     np.int16, precision=24)
    a_imag_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE,
                                     np.int16, precision=24)
    s_out = LavaPyType(PyOutPort.VEC_DENSE, np.int32, precision=24)
    real: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24)
    imag: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24)
    sin_decay: int = LavaPyType(int, np.uint16, precision=12)
    cos_decay: int = LavaPyType(int, np.uint16, precision=12)
    state_exp: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=3)
    decay_bits: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=3)
    vth: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24)

    def __init__(self, proc_params):
        super().__init__(proc_params)

        # Real/imaginary state is kept at twice the decay precision.
        self.ri_bitwidth = self.sin_decay.precision * 2
        limit = 2 ** (self.ri_bitwidth - 1)
        self.neg_voltage_limit = -np.int32(limit) + 1
        self.pos_voltage_limit = np.int32(limit) - 1

    def scale_volt(self, voltage):
        """Right-shift the voltage magnitude by decay_bits, keeping the
        sign — i.e. an arithmetic shift that rounds toward zero."""
        magnitude = np.right_shift(np.abs(voltage), self.decay_bits)
        return np.sign(voltage) * magnitude

    def run_spk(self):
        raise NotImplementedError("spiking activation() cannot be called from "
                                  "an abstract ProcessModel")


@implements(proc=RF, protocol=LoihiProtocol)
@requires(CPU)
@tag('fixed_pt')
class PyRFModelFixed(AbstractPyRFModelFixed):
    """Fixed point implementation of the Resonate-and-Fire neuron."""

    def run_spk(self):
        # Scale incoming activations into the state's fixed point format.
        real_input = np.left_shift(self.a_real_in.recv(), self.state_exp)
        imag_input = np.left_shift(self.a_imag_in.recv(), self.state_exp)

        # Run the rotation in 64 bit to avoid intermediate overflow,
        # then saturate back to the state bitwidth.
        real, imag = self.resonator_dynamics(real_input, imag_input,
                                             np.int64(self.real),
                                             np.int64(self.imag))
        real = np.clip(real, self.neg_voltage_limit, self.pos_voltage_limit)
        imag = np.clip(imag, self.neg_voltage_limit, self.pos_voltage_limit)

        # Spike when real voltage exceeds threshold while the imaginary
        # voltage crosses zero from below (old imag < 0, new imag >= 0).
        spikes = (real >= self.vth) * (imag >= 0) * (self.imag < 0)

        self.real[:] = real
        self.imag[:] = imag
        self.s_out.send(spikes)
88 changes: 88 additions & 0 deletions src/lava/proc/rf/process.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
# Copyright (C) 2021-22 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# See: https://spdx.org/licenses/

import typing as ty
import numpy as np
from lava.magma.core.process.process import AbstractProcess, LogConfig
from lava.magma.core.process.variable import Var
from lava.magma.core.process.ports.ports import InPort, OutPort


class RF(AbstractProcess):
    def __init__(self,
                 shape: ty.Tuple[int, ...],
                 period: float,
                 alpha: float,
                 state_exp: ty.Optional[int] = 0,
                 decay_bits: ty.Optional[int] = 0,
                 vth: ty.Optional[float] = 1,
                 name: ty.Optional[str] = None,
                 log_config: ty.Optional[LogConfig] = None):
        """Resonate and Fire (RF) neural Process.

        The RF dynamics are:
        Re[t] = (1 - a) * (cos(theta)* Re[t-1] - sin(theta) * Im[t-1]) + re_inp
        Im[t] = (1 - a) * (sin(theta)* Re[t-1] + cos(theta) * Im[t-1]) + im_inp
        s[t] = (Re[t] >= vth) & (Im[t] >= 0) & (Im[t -1] < 0)

        where Re[t]/Im[t] are the real/imaginary voltage components,
        re_inp/im_inp the real/imaginary inputs at timestep t, a the alpha
        decay, theta = 2*pi/period the per-step rotation angle, and s[t]
        the output spikes.

        Parameters
        ----------
        shape : tuple(int)
            Number and topology of RF neurons.
        period : float
            Number of timesteps for one full resonator rotation.
        alpha : float
            Decay applied to the real and imaginary voltage each timestep.
        state_exp : int, optional
            Scaling exponent with base 2 for the spike message.
            Only meaningful for fixed point models. Default is 0.
        decay_bits : int, optional
            Desired bit precision of the neuronal decay. Default is 0.
        vth : float, optional
            Neuron threshold voltage, exceeding which, the neuron will
            spike. Currently a single threshold is shared by the entire
            population of neurons.

        Example
        -------
        >>> rf = RF(shape=(200, 15), period=10, alpha=.07)
        This will create 200x15 RF neurons that all share a period of 10
        and an alpha decay of .07
        """
        # Per-timestep rotation angle of the resonator.
        theta = np.pi * 2 * 1 / period
        sin_decay = (1 - alpha) * np.sin(theta)
        cos_decay = (1 - alpha) * np.cos(theta)

        # proc_params keep the unscaled (floating point) decays; the Vars
        # below may instead hold fixed point versions.
        super().__init__(shape=shape, sin_decay=sin_decay, cos_decay=cos_decay,
                         state_exp=state_exp, decay_bits=decay_bits, vth=vth,
                         name=name, log_config=log_config)

        # Convert threshold and decays to fixed point when bit widths are set.
        if state_exp > 0:
            vth = int(vth * (1 << state_exp))
        if decay_bits > 0:
            sin_decay = int(sin_decay * (1 << decay_bits))
            cos_decay = int(cos_decay * (1 << decay_bits))

        self.a_real_in = InPort(shape=shape)
        self.a_imag_in = InPort(shape=shape)
        self.s_out = OutPort(shape=shape)
        self.real = Var(shape=shape, init=0)
        self.imag = Var(shape=shape, init=0)
        self.sin_decay = Var(shape=(1,), init=sin_decay)
        self.cos_decay = Var(shape=(1,), init=cos_decay)
        self.state_exp = Var(shape=(1,), init=state_exp)
        self.decay_bits = Var(shape=(1,), init=decay_bits)
        self.vth = Var(shape=(1,), init=vth)

    @property
    def shape(self) -> ty.Tuple[int, ...]:
        """Shape (topology) of this Process' neuron population."""
        return self.proc_params['shape']
58 changes: 58 additions & 0 deletions src/lava/proc/rf_iz/models.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
# Copyright (C) 2021-22 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# See: https://spdx.org/licenses/

from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol
from lava.magma.core.resources import CPU
import numpy as np
from lava.magma.core.decorator import implements, requires, tag
from lava.proc.rf_iz.process import RF_IZ
from lava.proc.rf.models import AbstractPyRFModelFloat, AbstractPyRFModelFixed


@implements(proc=RF_IZ, protocol=LoihiProtocol)
@requires(CPU)
@tag('floating_pt')
class PyRF_IZModelFloat(AbstractPyRFModelFloat):
    """Floating point implementation of the Resonate and Fire Izhikevich
    neuron."""

    def run_spk(self):
        real_input = self.a_real_in.recv()
        imag_input = self.a_imag_in.recv()

        real, imag = self.resonator_dynamics(real_input, imag_input,
                                             self.real, self.imag)

        # RF-Izhikevich spikes on the imaginary voltage crossing threshold.
        spikes = imag >= self.vth

        # Reset dynamics: spiking neurons have their real voltage zeroed
        # and their imaginary voltage pinned just below threshold — the
        # 1e-5 margin ensures they do not immediately spike again.
        self.real[:] = real * (1 - spikes)
        self.imag[:] = spikes * (self.vth - 1e-5) + (1 - spikes) * imag
        self.s_out.send(spikes)


@implements(proc=RF_IZ, protocol=LoihiProtocol)
@requires(CPU)
@tag('fixed_pt')
class PyRF_IZModelFixed(AbstractPyRFModelFixed):
    """Fixed point implementation of the Resonate and Fire Izhikevich
    neuron."""

    def run_spk(self):
        # Scale incoming activations into the state's fixed point format.
        real_input = np.left_shift(self.a_real_in.recv(), self.state_exp)
        imag_input = np.left_shift(self.a_imag_in.recv(), self.state_exp)

        # Run the rotation in 64 bit to avoid intermediate overflow,
        # then saturate back to the state bitwidth.
        real, imag = self.resonator_dynamics(real_input, imag_input,
                                             np.int64(self.real),
                                             np.int64(self.imag))
        real = np.clip(real, self.neg_voltage_limit, self.pos_voltage_limit)
        imag = np.clip(imag, self.neg_voltage_limit, self.pos_voltage_limit)

        # RF-Izhikevich spikes on the imaginary voltage crossing threshold.
        spikes = imag >= self.vth

        # Reset dynamics: spiking neurons have their real voltage zeroed
        # and their imaginary voltage pinned one integer step below
        # threshold so they do not immediately spike again.
        self.real[:] = real * (1 - spikes)
        self.imag[:] = spikes * (self.vth - 1) + (1 - spikes) * imag
        self.s_out.send(spikes)
9 changes: 9 additions & 0 deletions src/lava/proc/rf_iz/process.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# Copyright (C) 2021-22 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# See: https://spdx.org/licenses/

from lava.proc.rf.process import RF


class RF_IZ(RF):
    """Resonate and Fire Izhikevich (RF-IZ) neural Process.

    Shares the RF Process interface, parameters and Vars unchanged; the
    differing spiking/reset dynamics live in the RF_IZ ProcessModels
    registered via @implements(proc=RF_IZ, ...).
    """
    pass
Empty file added tests/lava/proc/rf/__init__.py
Empty file.
Loading