Commit

Merge branch 'lava-nc:main' into main
bamsumit authored Sep 26, 2023
2 parents d180d97 + 05f51e7 commit 272a217
Showing 9 changed files with 821 additions and 71 deletions.
1 change: 1 addition & 0 deletions README.md
@@ -142,6 +142,7 @@ $ pip install lava-dl-0.2.0.tar.gz

**End to end training tutorials**
* [Oxford spike train regression](https://github.com/lava-nc/lava-dl/blob/main/tutorials/lava/lib/dl/slayer/oxford/train.ipynb)
+* [XOR regression](https://github.com/lava-nc/lava-dl/blob/main/tutorials/lava/lib/dl/slayer/xor_regression/xor_regression.ipynb)
* [MNIST digit classification](https://github.com/lava-nc/lava-dl/blob/main/tutorials/lava/lib/dl/bootstrap/mnist/train.ipynb)
* [NMNIST digit classification](https://github.com/lava-nc/lava-dl/blob/main/tutorials/lava/lib/dl/slayer/nmnist/train.ipynb)
* [PilotNet steering angle prediction](https://github.com/lava-nc/lava-dl/blob/main/tutorials/lava/lib/dl/slayer/pilotnet/train.ipynb)
104 changes: 64 additions & 40 deletions poetry.lock

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions pyproject.toml
@@ -11,6 +11,7 @@ packages = [
]
include = ["tutorials"]
version = "0.4.0.dev0"
+readme = "README.md"
description = "A library of deep learning tools, which consists of lava.lib.dl.slayer and lava.lib.dl.netx for training and deployment of event-based deep neural networks on traditional as well as neuromorphic backends. Lava-DL is part of Lava Framework"
homepage = "https://lava-nc.org/"
repository = "https://github.com/lava-nc/lava-dl"
7 changes: 5 additions & 2 deletions src/lava/lib/dl/slayer/block/base.py
@@ -391,8 +391,11 @@ def delay(d):
self.delay.clamp() # clamp the delay value
handle.create_dataset('delay', data=delay(self.delay))

-        # for key, value in self.neuron.device_params.items():
-        #     handle.create_dataset(f'neuron/{key}', data=value)
+        if self.dynamics is True:
+            for key, value in self.neuron.device_params.items():
+                if key == 'vThMant':
+                    value = (1 << 18) - 1  # set the maximum possible threshold
+                handle.create_dataset(f'neuron/{key}', data=value)


class AbstractTimeDecimation(torch.nn.Module):
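With this change, a block exported with `dynamics=True` writes its neuron device parameters under the `neuron/` key of the HDF5 group, clamping `vThMant` to the maximum representable value `(1 << 18) - 1 = 262143`. Below is a minimal sketch of inspecting such an exported file with h5py; the file name is hypothetical, and the `layer/0/neuron` layout follows the export code above:

```python
import h5py
import numpy as np

# Hypothetical file produced by export_hdf5 on a block with dynamics=True
with h5py.File("cuba_affine_dynamics_true.net", "r") as hf:
    neuron = hf["layer"]["0"]["neuron"]
    for key in neuron.keys():
        print(key, np.array(neuron[key]))
    # the threshold mantissa is clamped to the maximum possible value
    assert np.array(neuron["vThMant"]) == (1 << 18) - 1  # 262143
```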
9 changes: 8 additions & 1 deletion src/lava/lib/dl/slayer/block/cuba.py
@@ -3,6 +3,7 @@

"""CUBA-LIF layer blocks"""

+import numpy as np
import torch

from . import base
@@ -68,7 +69,13 @@ def __init__(self, *args, **kwargs):
self.synapse = synapse.Dense(**self.synapse_params)
if 'pre_hook_fx' not in kwargs.keys():
self.synapse.pre_hook_fx = self.neuron.quantize_8bit
-        self.neuron._threshold = None
+        # if dynamics=True, set the threshold to a non-None sentinel value
+        if self.dynamics:
+            self.neuron._threshold = -1
+        else:
+            self.neuron._threshold = None
+        # set the shape according to the synapse output
+        self.neuron.shape = torch.Size([self.synapse.out_channels])
# this disables spike and reset in dynamics
del self.synapse_params

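The effect of the flag is visible directly on a constructed block: with `dynamics=True` the private `_threshold` becomes a `-1` sentinel instead of `None`, and the neuron shape is derived from the synapse's output channels. A short sketch, assuming the `Affine` block accepts the same keyword arguments used in the tests later in this commit:

```python
import lava.lib.dl.slayer as slayer

neuron_param = {"threshold": 0.5, "current_decay": 0.5, "voltage_decay": 0.5}

# dynamics=True: the neuron keeps a non-None (-1) threshold sentinel
net_dyn = slayer.block.cuba.Affine(
    neuron_params=neuron_param, in_neurons=10, out_neurons=5, dynamics=True
)
assert net_dyn.neuron._threshold == -1

# dynamics=False: the threshold stays None, as before this change
net_plain = slayer.block.cuba.Affine(
    neuron_params=neuron_param, in_neurons=10, out_neurons=5, dynamics=False
)
assert net_plain.neuron._threshold is None
```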
76 changes: 76 additions & 0 deletions src/lava/lib/dl/slayer/loss.py
@@ -210,3 +210,79 @@ def forward(self, input, label):
label.flatten(),
reduction=self.reduction,
)


+class SpikeMoid(torch.nn.Module):
+    """SpikeMoid (BCE) loss.
+
+    .. math::
+
+        \\text{if sliding window:} \\quad
+        p(t) = \\sigma\\left(\\frac{r(t) - \\theta}{\\alpha}\\right) \\\\
+        \\text{otherwise:} \\quad
+        p = \\sigma\\left(\\frac{r - \\theta}{\\alpha}\\right)
+
+    where :math:`r` is the spike rate calculated over the time dimension.
+
+    .. math::
+
+        \\mathcal{L} = \\begin{cases}
+            -\\int_T \\hat{y}(t) \\cdot \\log{p(t)}
+                + (1 - \\hat{y}(t)) \\cdot \\log{(1 - p(t))}\\,\\text{d}t
+            &\\text{if sliding window} \\\\
+            -\\left(\\hat{y} \\cdot \\log{p}
+                + (1 - \\hat{y}) \\cdot \\log{(1 - p)}\\right)
+            &\\text{otherwise}
+        \\end{cases}
+
+    Note: the input is always collapsed in the spatial dimensions.
+
+    Parameters
+    ----------
+    moving_window : int
+        size of the moving window. If not None, the label is assumed to be
+        specified at every time step. Defaults to None.
+    reduction : str
+        loss reduction method. One of 'sum'|'mean'. Defaults to 'sum'.
+    alpha : float
+        sigmoid temperature parameter. Defaults to 1.
+    theta : float
+        bias term for the logits. Defaults to 0.
+    """
+    def __init__(
+        self, moving_window=None, reduction='sum', alpha=1, theta=0
+    ):
+        super(SpikeMoid, self).__init__()
+        if moving_window is not None:
+            self.window = MovingWindow(moving_window)
+        else:
+            self.window = None
+        self.reduction = reduction
+        self.alpha = alpha
+        self.theta = theta
+
+    def forward(self, input, label):
+        """Forward computation of loss."""
+        # flatten all spatial dimensions: (batch, num_neurons, time)
+        input = input.reshape(input.shape[0], -1, input.shape[-1])
+        if self.window is None:  # one label for each sample in a batch
+            scaled_input = (input - self.theta) / self.alpha
+            probs = torch.sigmoid(scaled_input.mean(-1)).flatten(0, 1)
+            return F.binary_cross_entropy(
+                probs,
+                label.flatten(),
+                reduction=self.reduction
+            )
+        else:
+            # assume label is in (batch, num_classes, time) form;
+            # replicate a static (batch, num_classes) label over time
+            if len(label.shape) == 2:
+                label = replicate(label, input.shape[-1])
+            rates = self.window.rate(input)
+            probs = torch.sigmoid((rates - self.theta) / self.alpha)
+            return F.binary_cross_entropy(
+                probs.flatten(),
+                label.flatten(),
+                reduction=self.reduction
+            )
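As a usage illustration for the rate-based path (no moving window), the loss applies a temperature-scaled sigmoid to the mean spike rate and compares the result against one binary label per class. A minimal sketch with illustrative shapes; the `alpha` and `theta` values here are arbitrary:

```python
import torch
from lava.lib.dl.slayer.loss import SpikeMoid

batch, num_classes, time_steps = 2, 4, 100

# binary spike trains: (batch, num_classes, time)
spikes = (torch.rand(batch, num_classes, time_steps) > 0.8).float()
# one binary label per class per sample: (batch, num_classes)
labels = torch.randint(0, 2, (batch, num_classes)).float()

loss_fn = SpikeMoid(reduction="mean", alpha=0.1, theta=0.05)
loss = loss_fn(spikes, labels)  # BCE on sigmoid((rate - theta) / alpha)
print(loss.item())
```

With `moving_window` set, the same loss instead computes windowed spike rates via `MovingWindow.rate` and expects labels either replicated over time or supplied in `(batch, num_classes, time)` form.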
117 changes: 89 additions & 28 deletions tests/lava/lib/dl/slayer/block/test_cuba.py
@@ -15,8 +15,9 @@
from lava.proc.conv import utils
from lava.proc import io

-verbose = True if (('-v' in sys.argv) or ('--verbose' in sys.argv)) else False
-# Enabling torch sometimes causes multiprocessing error, especially in unittests
+verbose = True if (("-v" in sys.argv) or ("--verbose" in sys.argv)) else False
+# Enabling torch sometimes causes multiprocessing error,
+# especially in unittests
utils.TORCH_IS_AVAILABLE = False

# seed = np.random.randint(1000)
@@ -25,27 +26,81 @@
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
if verbose:
-    print(f'{seed=}')
+    print(f"{seed=}")

if torch.cuda.is_available():
-    device = torch.device('cuda')
+    device = torch.device("cuda")
else:
if verbose:
-        print('CUDA is not available in the system. '
-              'Testing for CPU version only.')
-    device = torch.device('cpu')
+        print(
+            "CUDA is not available in the system. "
+            "Testing for CPU version only."
+        )
+    device = torch.device("cpu")

-tempdir = os.path.dirname(__file__) + '/temp'
+tempdir = os.path.dirname(__file__) + "/temp"
os.makedirs(tempdir, exist_ok=True)

-neuron_param = {'threshold': 0.5,
-                'current_decay': 0.5,
-                'voltage_decay': 0.5}
+neuron_param = {"threshold": 0.5, "current_decay": 0.5, "voltage_decay": 0.5}


class TestCUBA(unittest.TestCase):
"""Test CUBA blocks"""

+    def test_affine_block_hdf5_export_dynamics_false(self):
+        """Test affine block hdf5 export in dynamics=false mode."""
+        in_features = 10
+        out_features = 5
+
+        net = slayer.block.cuba.Affine(
+            neuron_params=neuron_param,
+            in_neurons=in_features,
+            out_neurons=out_features,
+            dynamics=False,
+            count_log=False,
+        )
+
+        # export slayer network
+        h = h5py.File(tempdir + "/cuba_affine_dynamics_false.net", "w")
+        net.export_hdf5(h.create_group("layer/0"))
+
+        # reload net from h5 and check that 'neuron' is absent
+        lava_net = netx.hdf5.Network(
+            net_config=tempdir + "/cuba_affine_dynamics_false.net"
+        )
+
+        self.assertTrue("neuron" not in lava_net.net_config["layer"][0].keys())
+
+    def test_affine_block_hdf5_export_dynamics_true(self):
+        """Test affine block hdf5 export in dynamics=true mode."""
+        in_features = 10
+        out_features = 5
+
+        net = slayer.block.cuba.Affine(
+            neuron_params=neuron_param,
+            in_neurons=in_features,
+            out_neurons=out_features,
+            dynamics=True,
+            count_log=False,
+        )
+
+        # export slayer network
+        h = h5py.File(tempdir + "/cuba_affine_dynamics_true.net", "w")
+        net.export_hdf5(h.create_group("layer/0"))
+
+        # load the network file and check that 'vThMant' is (1 << 18) - 1
+        with h5py.File(tempdir + "/cuba_affine_dynamics_true.net", "r") as hf:
+            vThMant = np.array(hf["layer"]["0"]["neuron"]["vThMant"])
+
+        self.assertTrue(vThMant == (1 << 18) - 1)
+
def test_dense_block(self):
"""Test dense block with lava process implementation."""
in_features = 10
Expand All @@ -58,27 +113,28 @@ def test_dense_block(self):
y = net(x)

# export slayer network
-        net.export_hdf5(h5py.File(tempdir + '/cuba_dense.net',
-                                  'w').create_group('layer/0'))
+        net.export_hdf5(
+            h5py.File(tempdir + "/cuba_dense.net", "w").create_group("layer/0")
+        )

# create equivalent lava network using netx and evaluate output
-        lava_net = netx.hdf5.Network(net_config=tempdir + '/cuba_dense.net')
+        lava_net = netx.hdf5.Network(net_config=tempdir + "/cuba_dense.net")
source = io.source.RingBuffer(data=x[0])
sink = io.sink.RingBuffer(shape=lava_net.out.shape, buffer=time_steps)
source.s_out.connect(lava_net.inp)
lava_net.out.connect(sink.a_in)
run_condition = RunSteps(num_steps=time_steps)
-        run_config = Loihi1SimCfg(select_tag='fixed_pt')
+        run_config = Loihi1SimCfg(select_tag="fixed_pt")
lava_net.run(condition=run_condition, run_cfg=run_config)
output = sink.data.get()
lava_net.stop()

if verbose:
print()
print(lava_net)
-            print('slayer output:')
+            print("slayer output:")
print(y[0])
-            print('lava output:')
+            print("lava output:")
print(output)

self.assertTrue(np.abs(y[0].data.numpy() - output).sum() == 0)
@@ -93,35 +149,40 @@ def test_conv_block(self):
time_steps = 10

# create slayer network and evaluate output
-        net = slayer.block.cuba.Conv(neuron_param,
-                                     in_features, out_features, kernel_size)
-        x = (torch.rand([1, in_features,
-                         height, width, time_steps]) > 0.5).float()
+        net = slayer.block.cuba.Conv(
+            neuron_param, in_features, out_features, kernel_size
+        )
+        x = (
+            torch.rand([1, in_features, height, width, time_steps]) > 0.5
+        ).float()
y = net(x).permute((0, 3, 2, 1, 4))

# export slayer network
-        net.export_hdf5(h5py.File(tempdir + '/cuba_conv.net',
-                                  'w').create_group('layer/0'))
+        net.export_hdf5(
+            h5py.File(tempdir + "/cuba_conv.net", "w").create_group("layer/0")
+        )

# create equivalent lava network using netx and evaluate output
-        lava_net = netx.hdf5.Network(net_config=tempdir + '/cuba_conv.net',
-                                     input_shape=(width, height, in_features))
+        lava_net = netx.hdf5.Network(
+            net_config=tempdir + "/cuba_conv.net",
+            input_shape=(width, height, in_features),
+        )
source = io.source.RingBuffer(data=x[0].permute((2, 1, 0, 3)))
sink = io.sink.RingBuffer(shape=lava_net.out.shape, buffer=time_steps)
source.s_out.connect(lava_net.inp)
lava_net.out.connect(sink.a_in)
run_condition = RunSteps(num_steps=time_steps)
-        run_config = Loihi1SimCfg(select_tag='fixed_pt')
+        run_config = Loihi1SimCfg(select_tag="fixed_pt")
lava_net.run(condition=run_condition, run_cfg=run_config)
output = sink.data.get()
lava_net.stop()

if verbose:
print()
print(lava_net)
-            print('slayer output:')
+            print("slayer output:")
print(y[0][0, 0, 0])
-            print('lava output:')
+            print("lava output:")
print(output[0, 0, 0])

self.assertTrue(np.abs(y[0].data.numpy() - output).sum() == 0)