% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
@INPROCEEDINGS{Bouss:1041674,
author = {Bouss, Peter and Nestler, Sandra and Fischer, Kirsten and
Merger, Claudia Lioba and Rene, Alexandre and Helias,
Moritz},
title = {{E}xploring {N}eural {M}anifold {C}haracteristics {U}sing
{A}dapted {N}ormalizing {F}lows},
reportid = {FZJ-2025-02379},
year = {2024},
  abstract     = {Despite the large number of active neurons in the cortex,
                  the activity of neural populations for different brain
                  regions is expected to live on a low-dimensional manifold
                  [1]. Variants of principal component analysis (PCA) are
                  frequently employed to estimate this manifold. However,
                  these methods are limited by the assumption that the data
                  conform to a Gaussian distribution, neglecting additional
                  features such as the curvature of the manifold.
                  Consequently, their performance as generative models tends
                  to be subpar.

                  To fully learn the statistics of neural activity and to
                  generate artificial samples, we use Normalizing Flows
                  (NFs) [2, 3]. These neural networks learn a
                  dimension-preserving estimator of the probability
                  distribution of the data. They differ from other
                  generative networks by their simplicity and by their
                  ability to compute the likelihood exactly.

                  Our adaptation of NFs focuses on distinguishing between
                  relevant (in-manifold) and noise (out-of-manifold)
                  dimensions. This is achieved by training the NF to
                  represent maximal data variance in minimal dimensions,
                  akin to PCA's linear model but allowing for nonlinear
                  mappings. Our adaptation allows us to estimate the
                  dimensionality of the neural manifold. As every layer is a
                  bijective mapping, the network can describe the manifold
                  without losing information, a distinctive advantage of
                  NFs.

                  We validate our adaptation on artificial datasets of
                  varying complexity where the underlying dimensionality is
                  known. Our approach can reconstruct data using only a few
                  latent variables, and is more efficient than linear
                  methods, such as PCA. Following this approach, we identify
                  manifolds in electrophysiological recordings from macaque
                  V1 and V4 [4]. Our approach faithfully represents not only
                  the variance but also higher-order features, such as the
                  skewness and kurtosis of the data, using fewer dimensions
                  than PCA.

                  [1] J. Gallego et al., Neuron, 94, 5, 978-984, 2017.
                  [2] L. Dinh et al., ICLR, 2015.
                  [3] L. Dinh et al., ICLR, 2017.
                  [4] X. Chen et al., Sci. Data, 9, 1, 77, 2022.},
month = {Jun},
date = {2024-06-03},
organization = {International Conference on
Neuromorphic Computing and Engineering,
Aachen (Germany), 3 Jun 2024 - 6 Jun
2024},
subtyp = {After Call},
cin = {IAS-6},
cid = {I:(DE-Juel1)IAS-6-20130828},
  pnm          = {5232 - Computational Principles (POF4-523) / 5234 -
                  Emerging NC Architectures (POF4-523) / GRK 2416 - GRK 2416:
                  MultiSenses-MultiScales: New approaches to elucidate
                  neuronal multisensory integration (368482240) /
                  RenormalizedFlows - Transparent Deep Learning with
                  Renormalized Flows (BMBF-01IS19077A)},
pid = {G:(DE-HGF)POF4-5232 / G:(DE-HGF)POF4-5234 /
G:(GEPRIS)368482240 / G:(DE-Juel-1)BMBF-01IS19077A},
typ = {PUB:(DE-HGF)24},
url = {https://juser.fz-juelich.de/record/1041674},
}