From a82abc146bc4e6c93d8953b7f82c7e756751f531 Mon Sep 17 00:00:00 2001
From: PhilippPlank <32519998+PhilippPlank@users.noreply.github.com>
Date: Thu, 1 Aug 2024 17:31:50 +0200
Subject: [PATCH 1/5] Enable manual partitioning (#876)

* enable manual partitioning
* fix codacy complaint
* fix unit tests
* fixed unit tests
* fixed linting
---
 src/lava/magma/compiler/compiler.py              | 13 ++++++++++---
 .../compiler/subcompilers/py/pyproc_compiler.py  |  3 ++-
 tests/lava/magma/compiler/test_compiler.py       | 13 ++++++++-----
 3 files changed, 20 insertions(+), 9 deletions(-)

diff --git a/src/lava/magma/compiler/compiler.py b/src/lava/magma/compiler/compiler.py
index 4a45b5fe5..e447b488c 100644
--- a/src/lava/magma/compiler/compiler.py
+++ b/src/lava/magma/compiler/compiler.py
@@ -247,6 +247,9 @@ def _compile_proc_groups(
                     f"Cache {cache_dir}\n")
             return proc_builders, channel_map
 
+        # Get manual partitioning, if available
+        partitioning = self._compile_config.get("partitioning", None)
+
         # Create the global ChannelMap that is passed between
         # SubCompilers to communicate about Channels between Processes.
 
@@ -266,7 +269,8 @@ def _compile_proc_groups(
             subcompilers.append(pg_subcompilers)
 
             # Compile this ProcGroup.
-            self._compile_proc_group(pg_subcompilers, channel_map)
+            self._compile_proc_group(pg_subcompilers, channel_map,
+                                     partitioning)
 
         # Flatten the list of all SubCompilers.
         subcompilers = list(itertools.chain.from_iterable(subcompilers))
@@ -403,7 +407,8 @@ def _create_subcompilers(
 
     @staticmethod
     def _compile_proc_group(
-        subcompilers: ty.List[AbstractSubCompiler], channel_map: ChannelMap
+        subcompilers: ty.List[AbstractSubCompiler], channel_map: ChannelMap,
+        partitioning: ty.Dict[str, ty.Dict]
     ) -> None:
         """For a given list of SubCompilers that have been initialized with
         the Processes of a single ProcGroup, iterate through the compilation
@@ -419,6 +424,8 @@ def _compile_proc_group(
         channel_map : ChannelMap
             The global ChannelMap that contains information about Channels
             between Processes.
+        partitioning: ty.Dict
+            Optional manual mapping dictionary used by ncproc compiler.
         """
         channel_map_prev = None
 
@@ -431,7 +438,7 @@ def _compile_proc_group(
             for subcompiler in subcompilers:
                 # Compile the Processes registered with each SubCompiler and
                 # update the ChannelMap.
-                channel_map = subcompiler.compile(channel_map)
+                channel_map = subcompiler.compile(channel_map, partitioning)
 
     @staticmethod
     def _extract_proc_builders(
diff --git a/src/lava/magma/compiler/subcompilers/py/pyproc_compiler.py b/src/lava/magma/compiler/subcompilers/py/pyproc_compiler.py
index 9c74c92d8..c5948a399 100644
--- a/src/lava/magma/compiler/subcompilers/py/pyproc_compiler.py
+++ b/src/lava/magma/compiler/subcompilers/py/pyproc_compiler.py
@@ -89,7 +89,8 @@ def __init__(
         super().__init__(proc_group, compile_config)
         self._spike_io_counter_offset: Offset = Offset()
 
-    def compile(self, channel_map: ChannelMap) -> ChannelMap:
+    def compile(self, channel_map: ChannelMap,
+                partitioning: ty.Dict = None) -> ChannelMap:
         return self._update_channel_map(channel_map)
 
     def __del__(self):
diff --git a/tests/lava/magma/compiler/test_compiler.py b/tests/lava/magma/compiler/test_compiler.py
index 0d4e6d07f..876def248 100644
--- a/tests/lava/magma/compiler/test_compiler.py
+++ b/tests/lava/magma/compiler/test_compiler.py
@@ -217,7 +217,8 @@ def create_patches(
        and the compile() method returns the given ChannelMap unchanged.
        ."""
 
-        def compile_return(channel_map: ChannelMap) -> ChannelMap:
+        def compile_return(channel_map: ChannelMap,
+                           partitioning=None) -> ChannelMap:
            return channel_map
 
        py_patch = patch(
@@ -391,13 +392,13 @@ def test_compile_proc_group_single_loop(self) -> None:
        subcompilers = [py_proc_compiler]
 
        # Call the method to be tested.
-        self.compiler._compile_proc_group(subcompilers, channel_map)
+        self.compiler._compile_proc_group(subcompilers, channel_map, None)
 
        # Check that it called compile() on every SubCompiler instance
        # exactly once. After that, the while loop should exit because the
        # ChannelMap instance has not changed.
        for sc in subcompilers:
-            sc.compile.assert_called_once_with({})
+            sc.compile.assert_called_once_with({}, None)
 
    def test_compile_proc_group_multiple_loops(self) -> None:
        """Test whether the correct methods are called on all objects when
@@ -424,13 +425,15 @@ def test_compile_proc_group_multiple_loops(self) -> None:
        subcompilers = [py_proc_compiler]
 
        # Call the method to be tested.
-        self.compiler._compile_proc_group(subcompilers, channel_map)
+        self.compiler._compile_proc_group(subcompilers, channel_map,
+                                          None)
 
        # Check that it called compile() on every SubCompiler instance
        # exactly once. After that, the while loop should exit because the
        # ChannelMap instance has not changed.
        for sc in subcompilers:
-            sc.compile.assert_called_with({**channel_map1, **channel_map2})
+            sc.compile.assert_called_with({**channel_map1, **channel_map2},
+                                          None)
            self.assertEqual(sc.compile.call_count, 3)
 
    def test_extract_proc_builders(self) -> None:
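A minimal sketch of how a manual partitioning could be supplied through the new compile_config key. The contents of the mapping dictionary below are hypothetical; its real schema belongs to the ncproc sub compiler and is not defined in this patch.

    from lava.magma.compiler.compiler import Compiler

    # Hypothetical mapping structure; only the "partitioning" key itself
    # is established by the patch above.
    partitioning = {"lif_0": {"chip": 0, "core": 4}}

    # The Compiler reads compile_config["partitioning"] in
    # _compile_proc_groups() and forwards it to every SubCompiler.compile()
    # call; omitting the key (or passing None) keeps automatic partitioning.
    compiler = Compiler(compile_config={"partitioning": partitioning})
    # executable = compiler.compile(process, run_cfg)  # as usual
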
From d22e8292e41cdb94aec9500dd252ca6814c49bf1 Mon Sep 17 00:00:00 2001
From: "E. Paxon Frady"
Date: Mon, 5 Aug 2024 13:55:37 -0400
Subject: [PATCH 2/5] Graded relu (#860)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* GradedReluVec process and tests.

* changed test to use thresh not 0.

* removed duplicate docstring line.

* Bump tornado from 6.4 to 6.4.1 (#863)

Bumps [tornado](https://github.com/tornadoweb/tornado) from 6.4 to 6.4.1.
- [Changelog](https://github.com/tornadoweb/tornado/blob/master/docs/releases.rst)
- [Commits](https://github.com/tornadoweb/tornado/compare/v6.4.0...v6.4.1)

---
updated-dependencies:
- dependency-name: tornado
  dependency-type: indirect
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

* Fix: subthreshold dynamics equation of refractory lif (#842)

* Fix: subthreshold dynamics equation of refractory lif

* Fix: RefractoryLIF unit test to test the voltage dynamics

* Bump urllib3 from 2.2.1 to 2.2.2 (#865)

Bumps [urllib3](https://github.com/urllib3/urllib3) from 2.2.1 to 2.2.2.
- [Release notes](https://github.com/urllib3/urllib3/releases)
- [Changelog](https://github.com/urllib3/urllib3/blob/main/CHANGES.rst)
- [Commits](https://github.com/urllib3/urllib3/compare/2.2.1...2.2.2)

---
updated-dependencies:
- dependency-name: urllib3
  dependency-type: indirect
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: PhilippPlank <32519998+PhilippPlank@users.noreply.github.com>

---------

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: João Gil
Co-authored-by: PhilippPlank <32519998+PhilippPlank@users.noreply.github.com>
Co-authored-by: Marcus G K Williams <168222+mgkwill@users.noreply.github.com>
---
 src/lava/proc/graded/models.py        | 40 +++++++++++-
 src/lava/proc/graded/process.py       | 39 ++++++++++++
 tests/lava/proc/graded/test_graded.py | 90 ++++++++++++++++++++++++++-
 3 files changed, 166 insertions(+), 3 deletions(-)

diff --git a/src/lava/proc/graded/models.py b/src/lava/proc/graded/models.py
index 969caa1d4..870b0b301 100644
--- a/src/lava/proc/graded/models.py
+++ b/src/lava/proc/graded/models.py
@@ -11,7 +11,8 @@
 from lava.magma.core.decorator import implements, requires, tag
 from lava.magma.core.model.py.model import PyLoihiProcessModel
 
-from lava.proc.graded.process import GradedVec, NormVecDelay, InvSqrt
+from lava.proc.graded.process import (GradedVec, GradedReluVec,
+                                      NormVecDelay, InvSqrt)
 
 
 class AbstractGradedVecModel(PyLoihiProcessModel):
@@ -51,6 +52,43 @@ class PyGradedVecModelFixed(AbstractGradedVecModel):
     exp: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24)
 
 
+class AbstractGradedReluVecModel(PyLoihiProcessModel):
+    """Implementation of GradedReluVec"""
+
+    a_in = None
+    s_out = None
+    v = None
+    vth = None
+    exp = None
+
+    def run_spk(self) -> None:
+        """The run function that performs the actual computation during
+        execution orchestrated by a PyLoihiProcessModel using the
+        LoihiProtocol.
+        """
+        a_in_data = self.a_in.recv()
+        self.v += a_in_data
+
+        is_spike = self.v > self.vth
+        sp_out = self.v * is_spike
+
+        self.v[:] = 0
+
+        self.s_out.send(sp_out)
+
+
+@implements(proc=GradedReluVec, protocol=LoihiProtocol)
+@requires(CPU)
+@tag('fixed_pt')
+class PyGradedReluVecModelFixed(AbstractGradedReluVecModel):
+    """Fixed point implementation of GradedReluVec"""
+    a_in = LavaPyType(PyInPort.VEC_DENSE, np.int32, precision=24)
+    s_out = LavaPyType(PyOutPort.VEC_DENSE, np.int32, precision=24)
+    vth: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24)
+    v: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24)
+    exp: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24)
+
+
 @implements(proc=NormVecDelay, protocol=LoihiProtocol)
 @requires(CPU)
 @tag('fixed_pt')
diff --git a/src/lava/proc/graded/process.py b/src/lava/proc/graded/process.py
index ed7c41b3f..25c881b50 100644
--- a/src/lava/proc/graded/process.py
+++ b/src/lava/proc/graded/process.py
@@ -34,6 +34,45 @@ class GradedVec(AbstractProcess):
     Graded spike vector layer. Transmits accumulated input as
     graded spike with no dynamics.
 
+    v[t] = a_in
+    s_out = v[t] * (|v[t]| > vth)
+
+    Parameters
+    ----------
+    shape: tuple(int)
+        number and topology of neurons
+    vth: int
+        threshold for spiking
+    exp: int
+        fixed point base
+    """
+
+    def __init__(
+            self,
+            shape: ty.Tuple[int, ...],
+            vth: ty.Optional[int] = 1,
+            exp: ty.Optional[int] = 0) -> None:
+
+        super().__init__(shape=shape)
+
+        self.a_in = InPort(shape=shape)
+        self.s_out = OutPort(shape=shape)
+
+        self.v = Var(shape=shape, init=0)
+        self.vth = Var(shape=(1,), init=vth)
+        self.exp = Var(shape=(1,), init=exp)
+
+    @property
+    def shape(self) -> ty.Tuple[int, ...]:
+        """Return shape of the Process."""
+        return self.proc_params['shape']
+
+
+class GradedReluVec(AbstractProcess):
+    """GradedReluVec
+    Graded spike vector layer. Transmits accumulated input as
+    graded spike with no dynamics.
+
     v[t] = a_in
     s_out = v[t] * (v[t] > vth)
 
diff --git a/tests/lava/proc/graded/test_graded.py b/tests/lava/proc/graded/test_graded.py
index b2acbe882..3c65b6087 100644
--- a/tests/lava/proc/graded/test_graded.py
+++ b/tests/lava/proc/graded/test_graded.py
@@ -6,7 +6,8 @@
 import numpy as np
 from scipy.sparse import csr_matrix
 
-from lava.proc.graded.process import GradedVec, NormVecDelay, InvSqrt
+from lava.proc.graded.process import (GradedVec, GradedReluVec,
+                                      NormVecDelay, InvSqrt)
 from lava.proc.graded.models import inv_sqrt
 from lava.proc.dense.process import Dense
 from lava.proc.sparse.process import Sparse
@@ -59,7 +60,7 @@ def test_gradedvec_dot_dense(self):
         self.assertTrue(np.all(out_data[:, (3, 7)] == expected_out[:, (2, 6)]))
 
     def test_gradedvec_dot_sparse(self):
-        """Tests that GradedVec and Dense computes dot product."""
+        """Tests that GradedVec and Sparse compute the dot product."""
         num_steps = 10
         v_thresh = 1
 
@@ -99,6 +100,91 @@ def test_gradedvec_dot_sparse(self):
         self.assertTrue(np.all(out_data[:, (3, 7)] == expected_out[:, (2, 6)]))
 
 
+class TestGradedReluVecProc(unittest.TestCase):
+    """Tests for GradedReluVec"""
+
+    def test_gradedreluvec_dot_dense(self):
+        """Tests that GradedReluVec and Dense compute the dot product."""
+        num_steps = 10
+        v_thresh = 1
+
+        weights1 = np.zeros((10, 1))
+        weights1[:, 0] = (np.arange(10) - 5) * 0.2
+
+        inp_data = np.zeros((weights1.shape[1], num_steps))
+        inp_data[:, 2] = 1000
+        inp_data[:, 6] = 20000
+
+        weight_exp = 7
+        weights1 *= 2**weight_exp
+        weights1 = weights1.astype('int')
+
+        dense1 = Dense(weights=weights1, num_message_bits=24,
+                       weight_exp=-weight_exp)
+        vec1 = GradedReluVec(shape=(weights1.shape[0],),
+                             vth=v_thresh)
+
+        generator = io.source.RingBuffer(data=inp_data)
+        logger = io.sink.RingBuffer(shape=(weights1.shape[0],),
+                                    buffer=num_steps)
+
+        generator.s_out.connect(dense1.s_in)
+        dense1.a_out.connect(vec1.a_in)
+        vec1.s_out.connect(logger.a_in)
+
+        vec1.run(condition=RunSteps(num_steps=num_steps),
+                 run_cfg=Loihi2SimCfg(select_tag='fixed_pt'))
+        out_data = logger.data.get().astype('int')
+        vec1.stop()
+
+        ww = np.floor(weights1 / 2) * 2
+        expected_out = np.floor((ww @ inp_data) / 2**weight_exp)
+        expected_out *= expected_out > v_thresh
+
+        self.assertTrue(np.all(out_data[:, (3, 7)] == expected_out[:, (2, 6)]))
+
+    def test_gradedreluvec_dot_sparse(self):
+        """Tests that GradedReluVec and Sparse compute the dot product."""
+        num_steps = 10
+        v_thresh = 1
+
+        weights1 = np.zeros((10, 1))
+        weights1[:, 0] = (np.arange(10) - 5) * 0.2
+
+        inp_data = np.zeros((weights1.shape[1], num_steps))
+        inp_data[:, 2] = 1000
+        inp_data[:, 6] = 20000
+
+        weight_exp = 7
+        weights1 *= 2**weight_exp
+        weights1 = weights1.astype('int')
+
+        sparse1 = Sparse(weights=csr_matrix(weights1),
+                         num_message_bits=24,
+                         weight_exp=-weight_exp)
+        vec1 = GradedReluVec(shape=(weights1.shape[0],),
+                             vth=v_thresh)
+
+        generator = io.source.RingBuffer(data=inp_data)
+        logger = io.sink.RingBuffer(shape=(weights1.shape[0],),
+                                    buffer=num_steps)
+
+        generator.s_out.connect(sparse1.s_in)
+        sparse1.a_out.connect(vec1.a_in)
+        vec1.s_out.connect(logger.a_in)
+
+        vec1.run(condition=RunSteps(num_steps=num_steps),
+                 run_cfg=Loihi2SimCfg(select_tag='fixed_pt'))
+        out_data = logger.data.get().astype('int')
+        vec1.stop()
+
+        ww = np.floor(weights1 / 2) * 2
+        expected_out = np.floor((ww @ inp_data) / 2**weight_exp)
+        expected_out *= expected_out > v_thresh
+
+        self.assertTrue(np.all(out_data[:, (3, 7)] == expected_out[:, (2, 6)]))
+
+
 class TestInvSqrtProc(unittest.TestCase):
     """Tests for inverse square process."""
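The only behavioral difference between GradedVec and GradedReluVec introduced above is the threshold test: GradedVec gates on the magnitude (|v[t]| > vth), while GradedReluVec gates one-sidedly (v[t] > vth), like a ReLU. A minimal NumPy-only sketch of a single run_spk step of each, to make the difference concrete:

    import numpy as np

    # Accumulated input for one timestep; values are arbitrary.
    a_in = np.array([-300, -2, 0, 2, 300])
    vth = 1

    # GradedVec: symmetric threshold on the magnitude, so large negative
    # activations pass through as negative graded spikes.
    v = a_in.copy()
    graded_out = v * (np.abs(v) > vth)   # -> [-300, -2, 0, 2, 300]

    # GradedReluVec: one-sided (ReLU-like) threshold, so only activations
    # strictly above vth produce output.
    v = a_in.copy()
    relu_out = v * (v > vth)             # -> [0, 0, 0, 2, 300]

In both processes the voltage is reset to zero after every timestep, so the layers transmit the accumulated input with no carry-over dynamics.
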
From efbe872616492e62d44f74ed96f731f4f052386d Mon Sep 17 00:00:00 2001
From: bamsumit
Date: Wed, 7 Aug 2024 18:55:05 -0700
Subject: [PATCH 3/5] Extractor and Injector async models fixed (#881)

* Fixes to injector and extractor async models

Signed-off-by: bamsumit

* Deleted redundant time increase

Signed-off-by: bamsumit

---------

Signed-off-by: bamsumit
---
 src/lava/proc/io/extractor.py |  9 +++------
 src/lava/proc/io/injector.py  | 12 ++++++------
 2 files changed, 9 insertions(+), 12 deletions(-)

diff --git a/src/lava/proc/io/extractor.py b/src/lava/proc/io/extractor.py
index 053bcf4b1..48ea87de6 100644
--- a/src/lava/proc/io/extractor.py
+++ b/src/lava/proc/io/extractor.py
@@ -130,25 +130,22 @@ def run_spk(self) -> None:
 @requires(CPU)
 class PyLoihiExtractorModelAsync(PyAsyncProcessModel):
     in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, float)
+    out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, float)
 
     def __init__(self, proc_params: dict) -> None:
         super().__init__(proc_params=proc_params)
         channel_config = self.proc_params["channel_config"]
-        self._pm_to_p_src_port = self.proc_params["pm_to_p_src_port"]
-        self._pm_to_p_src_port.start()
 
         self._send = channel_config.get_send_full_function()
         self.time_step = 1
 
     def run_async(self) -> None:
         while self.time_step != self.num_steps + 1:
-            self._send(self._pm_to_p_src_port, self.in_port.recv())
+            self._send(self.out_port.csp_ports[-1],
+                       self.in_port.recv())
             self.time_step += 1
 
-    def __del__(self) -> None:
-        self._pm_to_p_src_port.join()
-
 
 class VarWire(AbstractProcess):
     """VarWire allows non-Lava code, such as a third-party Python library
diff --git a/src/lava/proc/io/injector.py b/src/lava/proc/io/injector.py
index 381e4d646..65ad0c8d0 100644
--- a/src/lava/proc/io/injector.py
+++ b/src/lava/proc/io/injector.py
@@ -46,6 +46,7 @@ class Injector(AbstractProcess):
     buffer is full and how the dst_port behaves when the buffer is empty
     and not empty.
     """
+
     def __init__(self,
                  shape: ty.Tuple[int, ...],
                  buffer_size: ty.Optional[int] = 50,
@@ -133,6 +134,7 @@ def __del__(self) -> None:
 @requires(CPU)
 class PyLoihiInjectorModelAsync(PyAsyncProcessModel):
     """PyAsyncProcessModel for the Injector Process."""
+
     in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, float)
     out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, float)
 
     def __init__(self, proc_params: dict) -> None:
@@ -140,9 +142,6 @@ def __init__(self, proc_params: dict) -> None:
         shape = self.proc_params["shape"]
         channel_config = self.proc_params["channel_config"]
 
-        self._p_to_pm_dst_port = self.proc_params["p_to_pm_dst_port"]
-        self._p_to_pm_dst_port.start()
-
         self._zeros = np.zeros(shape)
 
         self._receive_when_empty = channel_config.get_receive_empty_function()
@@ -153,17 +152,18 @@ def __init__(self, proc_params: dict) -> None:
     def run_async(self) -> None:
         while self.time_step != self.num_steps + 1:
             self._zeros.fill(0)
-            elements_in_buffer = self._p_to_pm_dst_port._queue.qsize()
+            elements_in_buffer = self.in_port.csp_ports[-1]._queue.qsize()
             if elements_in_buffer == 0:
                 data = self._receive_when_empty(
-                    self._p_to_pm_dst_port,
+                    self.in_port,
                     self._zeros)
             else:
                 data = self._receive_when_not_empty(
-                    self._p_to_pm_dst_port,
+                    self.in_port,
                     self._zeros,
                     elements_in_buffer)
+            self.out_port.send(data)
             self.time_step += 1
From 223307ecfad126442e59bedf36322401a38adf3b Mon Sep 17 00:00:00 2001
From: Marcus G K Williams <168222+mgkwill@users.noreply.github.com>
Date: Wed, 7 Aug 2024 21:22:37 -0700
Subject: [PATCH 4/5] Release 0.10.0 (#882)

* Update pyproject.toml
* Delete RELEASE.md
* Update ci.yml
---
 .github/workflows/ci.yml |  2 +-
 RELEASE.md               | 82 ----------------------------------------
 pyproject.toml           |  2 +-
 3 files changed, 2 insertions(+), 84 deletions(-)
 delete mode 100644 RELEASE.md

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 200c8dd8e..497a33c34 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -55,7 +55,7 @@ jobs:
     runs-on: ${{ matrix.operating-system }}
     strategy:
       matrix:
-        operating-system: [ubuntu-latest, windows-latest, macos-13]
+        operating-system: [ubuntu-latest, macos-13]
     steps:
     - uses: actions/checkout@v3
       with:
diff --git a/RELEASE.md b/RELEASE.md
deleted file mode 100644
index 1852313be..000000000
--- a/RELEASE.md
+++ /dev/null
@@ -1,82 +0,0 @@
-_Lava_ v0.4.0 brings initial support to compile and run models on Loihi 2 via Intel’s cloud hosted Loihi systems through participation in the Intel Neuromorphic Research Community (INRC). In addition, new tutorials and documentation explain how to build Lava Processes written in Python or C for CPU and Loihi backends (C and Loihi tutorials available via the INRC).
-
-While this release offers few high-level application examples, Lava v0.4.0 provides major enhancements to the overall Lava architecture. It forms the basis for the open-source community to enable the full Loihi feature set, such as on-chip learning, convolutional connectivity, or accelerated spike IO. The Lava Compiler and Runtime architecture has also been generalized allowing extension to other backends or neuromorphic processors.
-
-## New Features and Improvements
-Features marked with * are available as part of the Loihi 2 extension.
-- *Extended Process library including new ProcessModels and additional improvements:
-  - LIF, Sigma-Delta, and Dense Processes execute on Loihi NeuroCores.
-  - Prototype Convolutional Process added.
-  - Sending and receiving spikes to NeuroCores via embedded processes that can be programmed in C with examples included.
-  - All Lava Processes now list all constructor arguments explicitly with type annotations.
-- *Added high-level API to develop custom ProcessModels that use Loihi 2 features:
-  - Loihi NeuroCores can be programmed in Python by allocating neural network resources like Axons, Synapses or Neurons. In particular, Loihi 2 NeuroCore Neurons can be configured by writing highly flexible assembly programs.
-  - Loihi embedded processors can be programmed in C. But unlike the prior NxSDK, no knowledge of low-level registers details is required anymore. Instead, the C API mirrors the high-level Python API to interact with other processes via channels.
-- Compiler and Runtime support for Loihi 2:
-  - General redesign of Compiler and Runtime architecture to support compilation of Processes that execute across a heterogenous backend of different compute resources. CPU and Loihi are supported via separate sub compilers.
-  - *The Loihi NeuroCore sub compiler automatically distributes neural network resources across multiple cores.
-  - *The Runtime supports direct channel-based communication between Processes running on Loihi NeuroCores, embedded CPUs or host CPUs written in Python or C. Of all combinations, only Python<->C and C<->NeuroCore are currently supported.
-  - *Added support to access Process Variables on Loihi NeuroCores at runtime via Var.set and Var.get().
-- New tutorials and improved class and method docstrings explain how new Lava features can be used such as *NeuroCore and *embedded processor programming.
-- An extended suite of unit tests and new *integration tests validate the correctness of the Lava framework.
-
-
-## Bug Fixes and Other Changes
-
-- Support for virtual ports on multiple incoming connections (Python Processes only) (Issue [#223](https://github.com/lava-nc/lava/issues/223), PR [#224](https://github.com/lava-nc/lava/pull/224))
-- Added conda install instructions (PR [#225](https://github.com/lava-nc/lava/pull/225))
-- Var.set/get() works when RunContinuous RunMode is used (Issue [#255](https://github.com/lava-nc/lava/issues/255), PR [#256](https://github.com/lava-nc/lava/pull/256))
-- Successful execution of tutorials now covered by unit tests (Issue [#243](https://github.com/lava-nc/lava/issues/243), PR [#244](https://github.com/lava-nc/lava/pull/244))
-- Fixed PYTHONPATH in tutorial_01 (Issue [#45](https://github.com/lava-nc/lava/issues/45), PR [#239](https://github.com/lava-nc/lava/pull/239))
-- Fixed output of tutorial_07 (Issue [#249](https://github.com/lava-nc/lava/issues/249), PR [#253](https://github.com/lava-nc/lava/pull/253))
-
-## Breaking Changes
-
-- Process constructors for standard library processes now require explicit keyword/value pairs and do not accept arbitrary input arguments via **kwargs anymore. This might break some workloads.
-- use_graded_spike kwarg has been changed to num_message_bits for all the built-in processes.
-- shape kwarg has been removed from Dense process. It is automatically inferred from the weight parameter’s shape.
-- Conv Process has additional arguments weight_exp and num_weight_bits that are relevant for fixed-point implementations.
-- The sign_mode argument in the Dense Process is now an enum rather than an integer.
-- New parameters u and v in the LIF Process enable setting initial values for current and voltage.
-- The bias parameter in the LIF Process has been renamed to bias_mant.
-
-
-## Known Issues
-
-- Lava does currently not support on-chip learning, Loihi 1 and a variety of connectivity compression features such as convolutional encoding.
-- All Processes in a network must currently be connected via channels. Running unconnected Processes using NcProcessModels in parallel currently gives incorrect results.
-- Only one instance of a Process targeting an embedded processor (using CProcessModel) can currently be created. Creating multiple instances in a network, results in an error. As a workaround, the behavior of multiple Processes can be fused into a single CProcessModel.
-- Direct channel connections between Processes using a PyProcessModel and NcProcessModel are not supported.
-- In the scenario that InputAxons are duplicated across multiple cores and users expect to inject spikes based on the declared port size, then the current implementation leads to buffer overflows and memory corruption.
-- Channel communication between PyProcessModels is slow.
-- The Lava Compiler is still inefficient and in need of improvement to performance and memory utilization.
-- Virtual ports are only supported between Processes using PyProcModels, but not between Processes when CProcModels or NcProcModels are involved. In addition, VirtualPorts do not support concatenation yet.
-- Joining and forking of virtual ports is not supported.
-- The Monitor Process does currently only support probing of a single Var per Process implemented via a PyProcessModel. The Monitor Process does currently not support probing of Vars mapped to NeuroCores.
-- Despite new docstrings, type annotations, and parameter descriptions to most of the public user-facing API, some parts of the code still have limited documentation and are missing type annotations.
-
-
-## What's Changed
-* Virtual ports on multiple incoming connections by @mathisrichter in https://github.com/lava-nc/lava/pull/224
-* Add conda install to README by @Tobias-Fischer in https://github.com/lava-nc/lava/pull/225
-* PYTHONPATH fix in tutorial by @jlubo in https://github.com/lava-nc/lava/pull/239
-* Fix tutorial04_execution.ipynb by @mgkwill in https://github.com/lava-nc/lava/pull/241
-* Tutorial tests by @mgkwill in https://github.com/lava-nc/lava/pull/244
-* Update README.md remove vlab instructions by @mgkwill in https://github.com/lava-nc/lava/pull/248
-* Tutorial bug fix by @PhilippPlank in https://github.com/lava-nc/lava/pull/253
-* Fix get set var by @PhilippPlank in https://github.com/lava-nc/lava/pull/256
-* Update runtime_service.py by @PhilippPlank in https://github.com/lava-nc/lava/pull/258
-* Release/v0.4.0 by @mgkwill in https://github.com/lava-nc/lava/pull/265
-
-## Thanks to our Contributors
-
-- Intel Corporation: All contributing members of the Intel Neuromorphic Computing Lab
-
-### Open-source community:
-- [Tobias-Fischer](https://github.com/Tobias-Fischer), Tobias Fischer
-- [jlubo](https://github.com/jlubo), Jannik Luboeinski
-
-## New Contributors
-* @jlubo made their first contribution in https://github.com/lava-nc/lava/pull/239
-
-**Full Changelog**: https://github.com/lava-nc/lava/compare/v0.3.0...v0.4.0
diff --git a/pyproject.toml b/pyproject.toml
index 6751f518e..a95e711c5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -10,7 +10,7 @@ packages = [
     {include = "tests"}
 ]
 include = ["tutorials"]
-version = "0.9.0.dev0"
+version = "0.10.0"
 readme = "README.md"
 description = "A Software Framework for Neuromorphic Computing"
 homepage = "https://lava-nc.org/"

From f71eaaaccc16debe26e01b84589e8bf0a439db08 Mon Sep 17 00:00:00 2001
From: Marcus G K Williams <168222+mgkwill@users.noreply.github.com>
Date: Wed, 7 Aug 2024 22:04:12 -0700
Subject: [PATCH 5/5] Set version back to dev0

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index a95e711c5..2f27f3f75 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -10,7 +10,7 @@ packages = [
     {include = "tests"}
 ]
 include = ["tutorials"]
-version = "0.10.0"
+version = "0.11.0.dev0"
 readme = "README.md"
 description = "A Software Framework for Neuromorphic Computing"
 homepage = "https://lava-nc.org/"