Commit

Merge branch 'lava-nc:main' into main
bamsumit authored Oct 10, 2023
2 parents 9e6219d + 7e7cf5a commit 65543d8
Showing 2 changed files with 66 additions and 10 deletions.
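
This change threads a new spike_exp argument from netx.hdf5.Network through get_neuron_params and the input, dense, and conv layer builders, replacing the value of 6 that was previously hard-coded in the neuron parameters. A minimal usage sketch follows, with a hypothetical network file path, assuming spike_exp counts the base-2 fractional bits of the graded spike payload (consistent with the scale = 1 << (6 - spike_exp) used in the new test):

    from lava.lib.dl import netx

    # Hypothetical path to a network description exported by lava-dl.
    net_config = 'trained_pilotnet/network.net'

    # spike_exp=6 (the default) matches the previous hard-coded behavior;
    # spike_exp=0 drops all fractional bits from the graded spike payload.
    net = netx.hdf5.Network(net_config=net_config, spike_exp=0)
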
42 changes: 32 additions & 10 deletions src/lava/lib/dl/netx/hdf5.py
@@ -53,6 +53,8 @@ class Network(AbstractProcess):
neuron's reset parameter. None means no reset. Defaults to None.
reset_offset: int
determines the phase shift of network reset if enabled. Defaults to 0.
spike_exp: int
determines the number of fixed-point fractional bits of the graded spike. Defaults to 6.
sparse_fc_layer : boolean, optional
If True, all fully-connected layer synapses will be interpreted as
Sparse types in Lava.
@@ -66,6 +68,7 @@ def __init__(self,
input_shape: Optional[Tuple[int, ...]] = None,
reset_interval: Optional[int] = None,
reset_offset: int = 0,
spike_exp: int = 6,
sparse_fc_layer: bool = False) -> None:
super().__init__(net_config=net_config,
num_layers=num_layers,
@@ -79,6 +82,7 @@ def __init__(self,
self.input_shape = input_shape
self.reset_interval = reset_interval
self.reset_offset = reset_offset
self.spike_exp = spike_exp
self.sparse_fc_layer = sparse_fc_layer

self.net_str = ''
@@ -107,7 +111,8 @@ def __len__(self) -> int:
def get_neuron_params(neuron_config: h5py.Group,
input: bool = False,
reset_interval: Optional[int] = None,
reset_offset: int = 0) -> AbstractProcess:
reset_offset: int = 0,
spike_exp: int = 6) -> AbstractProcess:
"""Provides the correct neuron configuration process and parameters
from the neuron description in hdf5 config.
@@ -123,6 +128,8 @@ def get_neuron_params(neuron_config: h5py.Group,
reset_offset: int
the offset/phase of reset. It is only valid if reset_interval is
not None.
spike_exp: int
determines the number of fixed-point fractional bits of the graded spike. Defaults to 6.
Returns
-------
@@ -164,7 +171,7 @@ def get_neuron_params(neuron_config: h5py.Group,
neuron_process = Delta
neuron_params = {'neuron_proc': neuron_process,
'vth': neuron_config['vThMant'],
'spike_exp': 6,
'spike_exp': spike_exp,
'state_exp': 6,
'num_message_bits': num_message_bits}
elif 'sigma_output' in neuron_config.keys():
@@ -175,7 +182,7 @@
neuron_process = SigmaDelta
neuron_params = {'neuron_proc': neuron_process,
'vth': neuron_config['vThMant'],
'spike_exp': 6,
'spike_exp': spike_exp,
'state_exp': 6,
'num_message_bits': num_message_bits}
return neuron_params
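
For reference, a minimal sketch of the fixed-point convention assumed above, where spike_exp counts the base-2 fractional bits of a graded spike payload; the helper names below are illustrative and not part of this change:

    def to_graded_spike(x: float, spike_exp: int = 6) -> int:
        # Quantize a real value onto a grid of 2 ** -spike_exp;
        # spike_exp=0 keeps only whole numbers.
        return int(round(x * (1 << spike_exp)))

    def from_graded_spike(q: int, spike_exp: int = 6) -> float:
        # Recover the approximate real value from its integer payload.
        return q / (1 << spike_exp)

    # Example: 2.0 encodes to 128 at spike_exp=6 but to 2 at spike_exp=0,
    # a ratio of 1 << (6 - 0) = 64, the same scale used in the new test below.
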
@@ -238,7 +245,8 @@ def array_entry(data: Union[int, Tuple[int, int], None]) -> str:
@staticmethod
def create_input(layer_config: h5py.Group,
reset_interval: Optional[int] = None,
reset_offset: int = 0) -> Tuple[Input, str]:
reset_offset: int = 0,
spike_exp: int = 6) -> Tuple[Input, str]:
"""Creates input layer from layer configuration.
Parameters
@@ -250,6 +258,8 @@ def create_input(layer_config: h5py.Group,
reset_offset: int
the offset/phase of reset. It is only valid if reset_interval is
not None.
spike_exp: int
determines the number of fixed-point fractional bits of the graded spike. Defaults to 6.
Returns
-------
@@ -262,7 +272,8 @@ def create_input(layer_config: h5py.Group,
neuron_params = Network.get_neuron_params(layer_config['neuron'],
reset_interval=reset_interval,
reset_offset=reset_offset,
input=True)
input=True,
spike_exp=spike_exp)

if 'weight' in layer_config.keys():
weight = int(layer_config['weight'])
@@ -304,6 +315,7 @@ def create_dense(layer_config: h5py.Group,
input_message_bits: int = 0,
reset_interval: Optional[int] = None,
reset_offset: int = 0,
spike_exp: int = 6,
sparse_synapse: bool = False) -> Tuple[Dense, str]:
"""Creates dense layer from layer configuration
@@ -319,6 +331,8 @@
reset_offset: int
the offset/phase of reset. It is only valid if reset_interval is
not None.
spike_exp: int
determines the number of fixed-point fractional bits of the graded spike. Defaults to 6.
Returns
-------
@@ -341,7 +355,8 @@
'gradedSpike': False}
neuron_params = Network.get_neuron_params(neuron_config,
reset_interval=reset_interval,
reset_offset=reset_offset)
reset_offset=reset_offset,
spike_exp=spike_exp)
if "weight/imag" in layer_config.f:
weight_real = layer_config['weight/real']
weight_imag = layer_config['weight/imag']
@@ -430,7 +445,8 @@ def create_conv(layer_config: h5py.Group,
input_shape: Tuple[int, int, int],
input_message_bits: int = 0,
reset_interval: Optional[int] = None,
reset_offset: int = 0) -> Tuple[Conv, str]:
reset_offset: int = 0,
spike_exp: int = 6) -> Tuple[Conv, str]:
"""Creates conv layer from layer configuration
Parameters
@@ -447,6 +463,8 @@
reset_offset: int
the offset/phase of reset. It is only valid if reset_interval is
not None.
spike_exp: int
determines the number of fixed-point fractional bits of the graded spike. Defaults to 6.
Returns
-------
@@ -462,7 +480,8 @@ def expand(x):
shape = tuple(layer_config['shape'][::-1]) # WHC (XYZ)
neuron_params = Network.get_neuron_params(layer_config['neuron'],
reset_interval=reset_interval,
reset_offset=reset_offset)
reset_offset=reset_offset,
spike_exp=spike_exp)
weight = layer_config['weight'][:, :, ::-1, ::-1]
weight = weight.reshape(weight.shape[:4]).transpose((0, 3, 2, 1))
stride = expand(layer_config['stride'])
@@ -546,7 +565,8 @@ def _create(self) -> List[AbstractProcess]:
layer, table = self.create_input(
layer_config[i],
reset_interval=reset_interval,
reset_offset=reset_offset)
reset_offset=reset_offset,
spike_exp=self.spike_exp)
if i >= self.skip_layers:
layers.append(layer)
reset_offset += 1
@@ -571,7 +591,8 @@ def _create(self) -> List[AbstractProcess]:
input_shape=input_shape,
input_message_bits=input_message_bits,
reset_interval=reset_interval,
reset_offset=reset_offset)
reset_offset=reset_offset,
spike_exp=self.spike_exp)
if i >= self.skip_layers:
layers.append(layer)
reset_offset += 1
@@ -600,6 +621,7 @@ def _create(self) -> List[AbstractProcess]:
input_message_bits=input_message_bits,
reset_interval=reset_interval,
reset_offset=reset_offset,
spike_exp=self.spike_exp,
sparse_synapse=self.sparse_fc_layer)
if i >= self.skip_layers:
layers.append(layer)
34 changes: 34 additions & 0 deletions tests/lava/lib/dl/netx/test_hdf5.py
@@ -176,6 +176,40 @@ def test_pilotnet_sdnn(self) -> None:
f'Error was {error}.'
)

def test_pilotnet_sdnn_spike_exp(self) -> None:
"""Tests the output of pilotnet sdnn with spike exp."""
net_config = root + '/gts/pilotnet_sdnn/network.net'
net = netx.hdf5.Network(net_config=net_config, spike_exp=0)
input = np.load(root + '/gts/pilotnet_sdnn/input.npy')
source = io.source.RingBuffer(data=input)
sink = io.sink.RingBuffer(shape=net.out_layer.shape,
buffer=len(net.layers))
source.s_out.connect(net.in_layer.neuron.a_in)
net.out_layer.out.connect(sink.a_in)

num_steps = len(net.layers)
run_condition = RunSteps(num_steps=num_steps)
run_config = TestRunConfig(select_tag='fixed_pt')
net.run(condition=run_condition, run_cfg=run_config)
output = sink.data.get()
net.stop()

scale = (1 << (6 - net.spike_exp))
gt = np.load(root + '/gts/pilotnet_sdnn/output.npy') / scale
error = np.abs(output - gt).mean()
if verbose:
print('Network:')
print(net)
print(f'{output=}')
print('PilotNet SDNN spike error:', error)

self.assertTrue(
error < 2 * scale,
f'Output spike and ground truth do not match for PilotNet SDNN. '
f'Found {output[output != gt] = } and {gt[output != gt] = }. '
f'Error was {error}.'
)
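
A quick check of the scaling used in the assertion above, assuming the stored ground truth was generated at the previously hard-coded spike_exp of 6:

    # With spike_exp=0 the output carries 6 fewer fractional bits, so the
    # ground-truth activations are divided by 2 ** (6 - 0) = 64 before comparison.
    spike_exp = 0
    scale = 1 << (6 - spike_exp)
    assert scale == 64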

def test_sparse_pilotnet_sdnn(self) -> None:
"""Tests sparse_fc_layer Network arg on Dense blocks"""
net_config = root + '/gts/pilotnet_sdnn/network.net'