Skip to content

Commit

Permalink
Merge branch 'main' into dev/sdn_error_tracking_updated
Browse files Browse the repository at this point in the history
  • Loading branch information
bamsumit authored Aug 16, 2024
2 parents 0dd87c3 + f71eaaa commit a1296a3
Show file tree
Hide file tree
Showing 11 changed files with 197 additions and 108 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ jobs:
runs-on: ${{ matrix.operating-system }}
strategy:
matrix:
operating-system: [ubuntu-latest, windows-latest, macos-13]
operating-system: [ubuntu-latest, macos-13]
steps:
- uses: actions/checkout@v3
with:
Expand Down
82 changes: 0 additions & 82 deletions RELEASE.md

This file was deleted.

2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ packages = [
{include = "tests"}
]
include = ["tutorials"]
version = "0.9.0.dev0"
version = "0.11.0.dev0"
readme = "README.md"
description = "A Software Framework for Neuromorphic Computing"
homepage = "https://lava-nc.org/"
Expand Down
13 changes: 10 additions & 3 deletions src/lava/magma/compiler/compiler.py
Original file line number Diff line number Diff line change
Expand Up @@ -247,6 +247,9 @@ def _compile_proc_groups(
f"Cache {cache_dir}\n")
return proc_builders, channel_map

# Get manual partitioning, if available
partitioning = self._compile_config.get("partitioning", None)

# Create the global ChannelMap that is passed between
# SubCompilers to communicate about Channels between Processes.

Expand All @@ -266,7 +269,8 @@ def _compile_proc_groups(
subcompilers.append(pg_subcompilers)

# Compile this ProcGroup.
self._compile_proc_group(pg_subcompilers, channel_map)
self._compile_proc_group(pg_subcompilers, channel_map,
partitioning)

# Flatten the list of all SubCompilers.
subcompilers = list(itertools.chain.from_iterable(subcompilers))
Expand Down Expand Up @@ -403,7 +407,8 @@ def _create_subcompilers(

@staticmethod
def _compile_proc_group(
subcompilers: ty.List[AbstractSubCompiler], channel_map: ChannelMap
subcompilers: ty.List[AbstractSubCompiler], channel_map: ChannelMap,
partitioning: ty.Dict[str, ty.Dict]
) -> None:
"""For a given list of SubCompilers that have been initialized with
the Processes of a single ProcGroup, iterate through the compilation
Expand All @@ -419,6 +424,8 @@ def _compile_proc_group(
channel_map : ChannelMap
The global ChannelMap that contains information about Channels
between Processes.
partitioning: ty.Dict
Optional manual mapping dictionary used by ncproc compiler.
"""
channel_map_prev = None

Expand All @@ -431,7 +438,7 @@ def _compile_proc_group(
for subcompiler in subcompilers:
# Compile the Processes registered with each SubCompiler and
# update the ChannelMap.
channel_map = subcompiler.compile(channel_map)
channel_map = subcompiler.compile(channel_map, partitioning)

@staticmethod
def _extract_proc_builders(
Expand Down
3 changes: 2 additions & 1 deletion src/lava/magma/compiler/subcompilers/py/pyproc_compiler.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,8 @@ def __init__(
super().__init__(proc_group, compile_config)
self._spike_io_counter_offset: Offset = Offset()

def compile(self, channel_map: ChannelMap,
            partitioning: ty.Optional[ty.Dict] = None) -> ChannelMap:
    """Compile the Processes registered with this SubCompiler.

    Parameters
    ----------
    channel_map : ChannelMap
        The global ChannelMap shared between SubCompilers.
    partitioning : ty.Optional[ty.Dict]
        Optional manual partitioning mapping. Accepted only to match
        the SubCompiler.compile interface; Py process compilation does
        not use it and it is ignored here.

    Returns
    -------
    ChannelMap
        The channel map updated with this SubCompiler's channel info.
    """
    return self._update_channel_map(channel_map)

def __del__(self):
Expand Down
40 changes: 39 additions & 1 deletion src/lava/proc/graded/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,8 @@
from lava.magma.core.decorator import implements, requires, tag
from lava.magma.core.model.py.model import PyLoihiProcessModel

from lava.proc.graded.process import GradedVec, NormVecDelay, InvSqrt
from lava.proc.graded.process import (GradedVec, GradedReluVec,
NormVecDelay, InvSqrt)


class AbstractGradedVecModel(PyLoihiProcessModel):
Expand Down Expand Up @@ -51,6 +52,43 @@ class PyGradedVecModelFixed(AbstractGradedVecModel):
exp: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24)


class AbstractGradedReluVecModel(PyLoihiProcessModel):
    """Behavioral model of the GradedReluVec Process.

    Each timestep the synaptic input is accumulated into the membrane
    variable, the accumulated value is emitted as a graded spike
    wherever it exceeds the threshold (ReLU-style gating), and the
    membrane is then reset to zero (no dynamics across timesteps).
    """

    # Concrete LavaPyType declarations are provided by subclasses.
    a_in = None   # input port
    s_out = None  # output port
    v = None      # membrane state accumulated each timestep
    vth = None    # spiking threshold
    exp = None    # fixed-point exponent

    def run_spk(self) -> None:
        """Perform one spiking-phase step of the LoihiProtocol."""
        # Integrate this timestep's input into the membrane state.
        self.v += self.a_in.recv()

        # Pass the accumulated value through only where it is strictly
        # above threshold; everywhere else the output is zero.
        graded_out = self.v * (self.v > self.vth)

        # Clear the membrane in place before sending.
        self.v[:] = 0

        self.s_out.send(graded_out)


@implements(proc=GradedReluVec, protocol=LoihiProtocol)
@requires(CPU)
@tag('fixed_pt')
class PyGradedReluVecModelFixed(AbstractGradedReluVecModel):
    """Fixed point implementation of GradedReluVec."""
    # 24-bit fixed-point ports and state, matching Loihi precision.
    a_in = LavaPyType(PyInPort.VEC_DENSE, np.int32, precision=24)
    s_out = LavaPyType(PyOutPort.VEC_DENSE, np.int32, precision=24)
    vth: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24)
    v: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24)
    exp: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24)


@implements(proc=NormVecDelay, protocol=LoihiProtocol)
@requires(CPU)
@tag('fixed_pt')
Expand Down
39 changes: 39 additions & 0 deletions src/lava/proc/graded/process.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,45 @@ class GradedVec(AbstractProcess):
Graded spike vector layer. Transmits accumulated input as
graded spike with no dynamics.
v[t] = a_in
s_out = v[t] * (|v[t]| > vth)
Parameters
----------
shape: tuple(int)
number and topology of neurons
vth: int
threshold for spiking
exp: int
fixed point base
"""

def __init__(
        self,
        shape: ty.Tuple[int, ...],
        vth: ty.Optional[int] = 1,
        exp: ty.Optional[int] = 0) -> None:
    """Create a GradedVec Process.

    Parameters
    ----------
    shape : tuple(int)
        Number and topology of neurons.
    vth : int, optional
        Threshold for spiking. Default is 1.
    exp : int, optional
        Fixed-point base exponent. Default is 0.
    """
    super().__init__(shape=shape)

    # Input and output ports share the population topology.
    self.a_in = InPort(shape=shape)
    self.s_out = OutPort(shape=shape)

    # Per-neuron membrane state; threshold and exponent are scalars.
    self.v = Var(shape=shape, init=0)
    self.vth = Var(shape=(1,), init=vth)
    self.exp = Var(shape=(1,), init=exp)

@property
def shape(self) -> ty.Tuple[int, ...]:
    """Number and topology of neurons in this Process."""
    topology: ty.Tuple[int, ...] = self.proc_params['shape']
    return topology


class GradedReluVec(AbstractProcess):
"""GradedReluVec
Graded spike vector layer. Transmits accumulated input as
graded spike with no dynamics.
v[t] = a_in
s_out = v[t] * (v[t] > vth)
Expand Down
9 changes: 3 additions & 6 deletions src/lava/proc/io/extractor.py
Original file line number Diff line number Diff line change
Expand Up @@ -130,25 +130,22 @@ def run_spk(self) -> None:
@requires(CPU)
class PyLoihiExtractorModelAsync(PyAsyncProcessModel):
in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, float)
out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, float)

def __init__(self, proc_params: dict) -> None:
super().__init__(proc_params=proc_params)

channel_config = self.proc_params["channel_config"]
self._pm_to_p_src_port = self.proc_params["pm_to_p_src_port"]
self._pm_to_p_src_port.start()

self._send = channel_config.get_send_full_function()
self.time_step = 1

def run_async(self) -> None:
while self.time_step != self.num_steps + 1:
self._send(self._pm_to_p_src_port, self.in_port.recv())
self._send(self.out_port.csp_ports[-1],
self.in_port.recv())
self.time_step += 1

def __del__(self) -> None:
self._pm_to_p_src_port.join()


class VarWire(AbstractProcess):
"""VarWire allows non-Lava code, such as a third-party Python library
Expand Down
12 changes: 6 additions & 6 deletions src/lava/proc/io/injector.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,7 @@ class Injector(AbstractProcess):
buffer is full and how the dst_port behaves when the buffer is empty
and not empty.
"""

def __init__(self,
shape: ty.Tuple[int, ...],
buffer_size: ty.Optional[int] = 50,
Expand Down Expand Up @@ -133,16 +134,14 @@ def __del__(self) -> None:
@requires(CPU)
class PyLoihiInjectorModelAsync(PyAsyncProcessModel):
"""PyAsyncProcessModel for the Injector Process."""
in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, float)
out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, float)

def __init__(self, proc_params: dict) -> None:
super().__init__(proc_params=proc_params)

shape = self.proc_params["shape"]
channel_config = self.proc_params["channel_config"]
self._p_to_pm_dst_port = self.proc_params["p_to_pm_dst_port"]
self._p_to_pm_dst_port.start()

self._zeros = np.zeros(shape)

self._receive_when_empty = channel_config.get_receive_empty_function()
Expand All @@ -153,17 +152,18 @@ def __init__(self, proc_params: dict) -> None:
def run_async(self) -> None:
while self.time_step != self.num_steps + 1:
self._zeros.fill(0)
elements_in_buffer = self._p_to_pm_dst_port._queue.qsize()
elements_in_buffer = self.in_port.csp_ports[-1]._queue.qsize()

if elements_in_buffer == 0:
data = self._receive_when_empty(
self._p_to_pm_dst_port,
self.in_port,
self._zeros)
else:
data = self._receive_when_not_empty(
self._p_to_pm_dst_port,
self.in_port,
self._zeros,
elements_in_buffer)

self.out_port.send(data)
self.time_step += 1

Expand Down
13 changes: 8 additions & 5 deletions tests/lava/magma/compiler/test_compiler.py
Original file line number Diff line number Diff line change
Expand Up @@ -217,7 +217,8 @@ def create_patches(
and the compile() method returns the given ChannelMap unchanged.
."""

def compile_return(channel_map: ChannelMap,
                   partitioning=None) -> ChannelMap:
    # Identity stand-in for SubCompiler.compile: accepts the optional
    # partitioning argument (matching the updated signature under test)
    # and returns the given ChannelMap unchanged.
    return channel_map

py_patch = patch(
Expand Down Expand Up @@ -391,13 +392,13 @@ def test_compile_proc_group_single_loop(self) -> None:
subcompilers = [py_proc_compiler]

# Call the method to be tested.
self.compiler._compile_proc_group(subcompilers, channel_map)
self.compiler._compile_proc_group(subcompilers, channel_map, None)

# Check that it called compile() on every SubCompiler instance
# exactly once. After that, the while loop should exit because the
# ChannelMap instance has not changed.
for sc in subcompilers:
sc.compile.assert_called_once_with({})
sc.compile.assert_called_once_with({}, None)

def test_compile_proc_group_multiple_loops(self) -> None:
"""Test whether the correct methods are called on all objects when
Expand All @@ -424,13 +425,15 @@ def test_compile_proc_group_multiple_loops(self) -> None:
subcompilers = [py_proc_compiler]

# Call the method to be tested.
self.compiler._compile_proc_group(subcompilers, channel_map)
self.compiler._compile_proc_group(subcompilers, channel_map,
None)

# Check that it called compile() on every SubCompiler instance
# exactly once. After that, the while loop should exit because the
# ChannelMap instance has not changed.
for sc in subcompilers:
sc.compile.assert_called_with({**channel_map1, **channel_map2})
sc.compile.assert_called_with({**channel_map1, **channel_map2},
None)
self.assertEqual(sc.compile.call_count, 3)

def test_extract_proc_builders(self) -> None:
Expand Down
Loading

0 comments on commit a1296a3

Please sign in to comment.