% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
@INPROCEEDINGS{Bouss:1041677,
author       = {Bouss, Peter and Nestler, Sandra and Fischer, Kirsten and
                Merger, Claudia Lioba and René, Alexandre and Helias,
                Moritz},
title        = {{N}ormalizing flows for nonlinear dimensionality reduction
                of electrophysiological recordings},
reportid = {FZJ-2025-02382},
year         = {2025},
abstract     = {Even though the cortex has many active neurons, neuronal
                populations in different brain areas should dwell on a
                low-dimensional manifold [1]. Variants of principal
                component analysis (PCA) are used to estimate this manifold
                and its dimension. Although successful, these methods assume
                that the data is well described by a Gaussian distribution
                and ignore features like skewness and bimodality; therefore,
                they perform poorly as generative models. Normalizing Flows
                (NFs) allow us to learn neural activity statistics and to
                generate artificial samples [2,3]. These neural networks
                learn a dimension-preserving estimator of the data’s
                probability distribution. They are simpler than generative
                adversarial networks (GANs) and variational autoencoders
                (VAEs), since they learn a single bijective mapping and can
                compute the likelihood exactly thanks to the tractable
                Jacobian of each building block. NFs are trained to
                distinguish relevant (in-manifold) from noisy
                (out-of-manifold) dimensions. To do this, we break the
                original symmetry of the latent space by pushing maximal
                variance of the data into as few dimensions as possible: the
                same idea that underpins PCA, a linear model, adopted here
                for nonlinear mappings. These unique characteristics of NFs
                allow us to estimate the neural manifold’s dimension and to
                describe the underlying manifold without discarding any
                information. Our adaptation is validated on simulated
                datasets of varying complexity, created using a hidden
                manifold model with specified dimensions. Reconstructing the
                data from a few latent NF dimensions demonstrates our
                approach’s capability; in this setting, our nonlinear
                approaches outperform linear ones. Using the same technique,
                we identify manifolds in high-gamma EEG recordings. In the
                experiment of [4], 128 electrodes were recorded during four
                movement tasks. These data show a heavy-tailed distribution
                along some of the first principal components. NFs can learn
                such higher-order correlations, while linear models like PCA
                are limited to Gaussian statistics. We can also better match
                features to latent dimensions by flattening the latent
                space, so that fewer latent dimensions explain most of the
                data variance.
                References:
                [1] J. A. Gallego, M. G. Perich, L. E. Miller, and
                S. A. Solla, Neuron 94(5), 978-984 (2017).
                [2] L. Dinh, D. Krueger, and Y. Bengio, International
                Conference on Learning Representations (ICLR) (2015).
                [3] L. Dinh, J. Sohl-Dickstein, and S. Bengio, International
                Conference on Learning Representations (ICLR) (2017).
                [4] R. T. Schirrmeister, J. T. Springenberg,
                L. D. J. Fiederer, M. Glasstetter, K. Eggensperger,
                M. Tangermann, ... and T. Ball, Human Brain Mapping 38(11),
                5391-5420 (2017).},
month = {Jul},
date = {2025-07-17},
organization = {Computational Neuroscience Academy,
Krakow (Poland), 17 Jul 2025 - 23 Jul
2025},
subtyp = {After Call},
cin = {INM-6 / IAS-6},
cid = {I:(DE-Juel1)INM-6-20090406 / I:(DE-Juel1)IAS-6-20130828},
pnm = {5232 - Computational Principles (POF4-523) / 5234 -
Emerging NC Architectures (POF4-523) / GRK 2416 - GRK 2416:
MultiSenses-MultiScales: Neue Ansätze zur Aufklärung
neuronaler multisensorischer Integration (368482240) /
RenormalizedFlows - Transparent Deep Learning with
Renormalized Flows (BMBF-01IS19077A)},
pid = {G:(DE-HGF)POF4-5232 / G:(DE-HGF)POF4-5234 /
G:(GEPRIS)368482240 / G:(DE-Juel-1)BMBF-01IS19077A},
typ = {PUB:(DE-HGF)24},
url = {https://juser.fz-juelich.de/record/1041677},
}
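
% The abstract sketches the method at a high level: a bijective flow in the
% spirit of RealNVP [3], trained so that most of the data variance is pushed
% into a few leading latent dimensions. Below is a minimal PyTorch sketch of
% that idea. It is illustrative only, not the authors' implementation: the
% coupling architecture, the anisotropic base density used to break the
% latent symmetry, and all hyperparameters are assumptions. (BibTeX treats
% text outside @entries as a comment, so this sketch does not affect the
% bibliography.)

import torch
import torch.nn as nn

class AffineCoupling(nn.Module):
    # One RealNVP-style coupling block [3]: half the coordinates are
    # rescaled and shifted conditioned on the other half, so the
    # log-determinant of the Jacobian is just the sum of the log-scales.
    def __init__(self, dim, hidden=64):
        super().__init__()
        self.half = dim // 2
        self.net = nn.Sequential(
            nn.Linear(self.half, hidden), nn.ReLU(),
            nn.Linear(hidden, 2 * (dim - self.half)),
        )

    def forward(self, x):
        x1, x2 = x[:, :self.half], x[:, self.half:]
        s, t = self.net(x1).chunk(2, dim=1)
        s = torch.tanh(s)  # keep scales bounded for stable training
        return torch.cat([x1, x2 * torch.exp(s) + t], dim=1), s.sum(dim=1)

class Flow(nn.Module):
    # A stack of coupling blocks with fixed random permutations in between,
    # so that every coordinate is eventually transformed.
    def __init__(self, dim, n_blocks=4):
        super().__init__()
        self.blocks = nn.ModuleList([AffineCoupling(dim) for _ in range(n_blocks)])
        self.perms = [torch.randperm(dim) for _ in range(n_blocks)]

    def forward(self, x):
        logdet = torch.zeros(x.shape[0])
        for perm, block in zip(self.perms, self.blocks):
            x, ld = block(x[:, perm])
            logdet = logdet + ld
        return x, logdet

torch.manual_seed(0)
dim, batch = 16, 128
data = torch.randn(4096, dim) @ (0.3 * torch.randn(dim, dim))  # stand-in data
flow = Flow(dim)
opt = torch.optim.Adam(flow.parameters(), lr=1e-3)

# Assumed symmetry-breaking ingredient: an anisotropic Gaussian base density
# whose variances decay with the latent index. Maximizing likelihood then
# pushes data variance into the leading latent dimensions, a nonlinear
# analogue of PCA's variance ordering.
log_var = torch.linspace(0.0, -4.0, dim)

for step in range(1000):
    x = data[torch.randint(0, data.shape[0], (batch,))]
    z, logdet = flow(x)
    # Change of variables: -log p(x) = -log N(z; 0, diag(exp(log_var))) - log|det J|
    nll = 0.5 * (z**2 * torch.exp(-log_var) + log_var).sum(dim=1) - logdet
    loss = nll.mean()
    opt.zero_grad()
    loss.backward()
    opt.step()

# Latent dimensions whose variance collapses toward zero can be read off as
# out-of-manifold (noise) directions; the remaining ones span the estimated
# manifold and give a dimension estimate.
print(z.var(dim=0))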