% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
% Record FZJ-2025-03123 (JuSER 1044235): conference abstract, CNS 2025, Florence.
% Cleanups relative to the repository export:
%   - added the booktitle field required by @inproceedings (meeting name taken
%     from the existing "organization" field);
%   - month now uses the standard unquoted macro (month = jul), not {Jul};
%   - dropped the redundant single-letter brace in the title ({T}ime);
%   - replaced the math-mode ampersand $\&$ in the abstract with the plain
%     text escape \&.
% Repository-specific fields (reportid, subtyp, cin, cid, pnm, pid, typ) are
% ignored by standard styles and kept verbatim as institutional metadata.
@inproceedings{ObersteFrielinghaus:1044235,
  author       = {Oberste-Frielinghaus, Jonas and Kurth, Anno and Göltz,
                  Julian and Kriener, Laura and Ito, Junji and Petrovici,
                  Mihai A. and Grün, Sonja},
  title        = {Time-to-first-spike encoding in layered networks evokes
                  label-specific synfire chain activity},
  booktitle    = {34th Annual Computational Neuroscience Meeting},
  reportid     = {FZJ-2025-03123},
  year         = {2025},
  month        = jul,
  date         = {2025-07-05},
  abstract     = {INTRODUCTIONWhile artificial neural networks (ANNs) have
                  achieved remarkable success in various tasks, they lack two
                  major characteristic features of biological neural networks:
                  spiking activity and operation in continuous time.This makes
                  it difficult to leverage knowledge about ANNs to gain
                  insights into the computational principles of the real
                  brains.However, training methods for spiking neural networks
                  (SNNs) have recently been developed to create functional SNN
                  models [1].In this study we analyze the activity of a
                  multilayer feedforward SNN trained for image classification
                  and uncover the structures in both connectivity and dynamics
                  that underlie its functional performance.METHODSOur network
                  is composed of an input layer (784 neurons), 4 hidden layers
                  (300 excitatory and 100 inhibitory neurons in each layer),
                  and an output layer (10 neurons).We trained it with
                  backpropagation to classify the MNIST dataset, based on
                  time-to-first-spike coding: each neuron encodes information
                  in the timing of its first spike; the first neuron to spike
                  in the output layer defines the inferred input image class
                  [1].The MNIST input is also provided as spike timing: dark
                  pixels spike early, lighter pixels later. Based on the
                  connection weights after training, neurons that have strong
                  excitatory effects on each of the output neurons are
                  identified in each layer. Note that one neuron can have
                  strong effects on multiple output neurons.RESULTSIn response
                  to a sample, the input layer generates a volley of spikes,
                  identified as a pulse packet (PP) [2], which propagates
                  through the hidden layers (Fig. 1).In deeper layers, spikes
                  in a PP get more synchronized and the neurons providing
                  spikes to the PP become more specific to the sample
                  label.This leads to a characteristic sparse representation
                  of the sample label in deep layers.The analysis of
                  connection weights reveals that a correct classification is
                  achieved by propagating spikes through a specific pathway
                  across layers, composed of neurons with strong excitatory
                  effects on the correct output neuron.Pathways for different
                  output neurons become more separate in deeper layers, with
                  less overlap of neurons between pathways.DISCUSSIONThe
                  revealed connectivity structure and the propagation of
                  spikes as a PP agree with the notion of the synfire chain
                  (SFC) [3,4].To our knowledge, this is the first example of
                  SFC formation by training of a functional network. In our
                  network, multiple parallel SFCs emerge through the training
                  for MNIST classification, representing each input label by
                  activation of one particular SFC.Such a representation
                  naturally leads to sparser encoding of the input label in
                  deeper layers, and also increases the linear separability of
                  layer-wise activity.Thus, the use of SFCs for information
                  representation can have multiple advantages for achieving
                  efficient computation, besides the stable transmission of
                  information through the network.REFERENCES1. Göltz et
                  al. (2021). Fast and energy-efficient neuromorphic deep
                  learning with first-spike times. Nature Machine
                  Intelligence, 3(9),
                  823–835.https://doi.org/10.1038/s42256-021-00388-x2.
                  Diesmann, Gewaltig, \& Aertsen (1999). Stable propagation
                  of synchronous spiking in cortical neural networks. Nature,
                  402(6761), 529–533. https://doi.org/10.1038/9901013.
                  Abeles (1982). Local Cortical Circuits: An
                  Electrophysiological Study. Springer-Verlag.4. Abeles
                  (1991). Corticonics: Neural Circuits of the Cerebral Cortex.
                  Cambridge University Press.},
  organization = {34th Annual Computational Neuroscience
                  Meeting, Florence (Italy), 5 Jul 2025 -
                  9 Jul 2025},
  subtyp       = {After Call},
  cin          = {IAS-6 / INM-10},
  cid          = {I:(DE-Juel1)IAS-6-20130828 / I:(DE-Juel1)INM-10-20170113},
  pnm          = {5231 - Neuroscientific Foundations (POF4-523) / HBP SGA2 -
                  Human Brain Project Specific Grant Agreement 2 (785907) /
                  HBP SGA3 - Human Brain Project Specific Grant Agreement 3
                  (945539) / Algorithms of Adaptive Behavior and their
                  Neuronal Implementation in Health and Disease
                  (iBehave-20220812) / JL SMHB - Joint Lab Supercomputing and
                  Modeling for the Human Brain (JL SMHB-2021-2027)},
  pid          = {G:(DE-HGF)POF4-5231 / G:(EU-Grant)785907 /
                  G:(EU-Grant)945539 / G:(DE-Juel-1)iBehave-20220812 /
                  G:(DE-Juel1)JL SMHB-2021-2027},
  typ          = {PUB:(DE-HGF)24},
  url          = {https://juser.fz-juelich.de/record/1044235},
}