% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
@INPROCEEDINGS{Bouss:1009719,
author = {Bouss, Peter and Nestler, Sandra and Fischer, Kirsten and
Merger, Claudia Lioba and Rene, Alexandre and Helias,
Moritz},
title = {{N}onlinear dimensionality reduction with normalizing flows
for analysis of electrophysiological recordings},
reportid = {FZJ-2023-02951},
year = {2023},
abstract = {Despite the large number of active neurons in the cortex,
the activity of neural populations in different brain
regions is expected to live on a low-dimensional manifold
[1]. Among the most common tools to estimate the mapping to
this manifold, along with its dimension, are variants of
principal component analysis. Although their success is
undisputed, these methods still have the disadvantage of
assuming that the data is well described by a Gaussian
distribution; any additional features such as skewness or
bimodality are neglected. Their performance when used as a
generative model is therefore often poor. To fully learn the
statistics of neural activity and to generate artificial
samples, we use Normalizing Flows (NFs) [2, 3]. These neural
networks learn a dimension-preserving estimator of the
probability distribution of the data (left part of Fig. 1).
They differ from generative adversarial networks (GANs) and
variational autoencoders (VAEs) in their simplicity (only
one bijective mapping is learned) and in their ability to
compute the likelihood exactly, owing to tractable Jacobians
at each building block. We adapt the training objective of
NFs to discriminate between relevant (in-manifold) and noise
(out-of-manifold) dimensions. To do this, we break the
original symmetry of the latent space by enforcing maximal
variance of the data to be encoded by as few dimensions as
possible (right part of Fig. 1): the same idea that
underlies PCA, a linear model, here adapted to nonlinear
mappings.
This allows us to estimate the dimensionality of the neural
manifold and even to describe the underlying manifold
without discarding any information, a unique feature of
NFs. We validate our adaptation on artificial datasets of
varying complexity, generated by a hidden manifold model in
which the underlying dimensionality is known.
We illustrate the power of our approach by reconstructing
data using only a few latent NF dimensions. In this setting,
we show the advantage of such a nonlinear approach over
linear methods. Following this approach, we identify
manifolds in EEG recordings from a dataset featuring high
gamma activity. As described in [4], these recordings are
obtained from 128 electrodes during four movement tasks.
When plotted along the first principal components obtained
by PCA, these data show a heavy-tailed distribution for some
PCs. While linear models such as PCA are limited to Gaussian
statistics and hence suboptimal in such a case, the
nonlinearity of NFs enables them to learn higher-order
correlations. Moreover, by flattening out the curvature in
latent space, we can better associate features with latent
dimensions. In particular, we now have a reduced set of
latent dimensions that together explain most of the data
variance.
References:
1. Gallego J, Perich M, Miller L, et al. Neural manifolds
for the control of movement. Neuron 2017, 94(5), 978-984.
2. Dinh L, Krueger D, Bengio Y. NICE: Non-linear Independent
Components Estimation. ICLR 2015.
3. Dinh L, Sohl-Dickstein J, Bengio S. Density estimation
using Real NVP. ICLR 2017.
4. Schirrmeister R, Springenberg J, Fiederer L, et al. Deep
learning with convolutional neural networks for EEG decoding
and visualization. Hum Brain Mapp 2017, 38(11), 5391-5420.},
month = {Jul},
date = {2023-07-15},
organization = {32nd Annual Computational Neuroscience
Meeting, Leipzig (Germany), 15 Jul 2023
- 19 Jul 2023},
subtyp = {After Call},
cin = {INM-6 / IAS-6 / INM-10},
cid = {I:(DE-Juel1)INM-6-20090406 / I:(DE-Juel1)IAS-6-20130828 /
I:(DE-Juel1)INM-10-20170113},
pnm = {5231 - Neuroscientific Foundations (POF4-523) / 5232 -
Computational Principles (POF4-523) / 5234 - Emerging NC
Architectures (POF4-523) / GRK 2416 - GRK 2416:
MultiSenses-MultiScales: New Approaches to Elucidate
Neuronal Multisensory Integration (368482240) /
RenormalizedFlows - Transparent Deep Learning with
Renormalized Flows (BMBF-01IS19077A)},
pid = {G:(DE-HGF)POF4-5231 / G:(DE-HGF)POF4-5232 /
G:(DE-HGF)POF4-5234 / G:(GEPRIS)368482240 /
G:(DE-Juel-1)BMBF-01IS19077A},
typ = {PUB:(DE-HGF)24},
doi = {10.34734/FZJ-2023-02951},
url = {https://juser.fz-juelich.de/record/1009719},
}
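% A minimal, hypothetical sketch (kept as a `%` comment so the record above
% stays valid BibTeX) of the symmetry-breaking objective the abstract
% describes: train a bijective flow by exact maximum likelihood, but fix the
% latent prior to a diagonal Gaussian with decaying per-dimension variances,
% so that most data variance is pushed into the first latent dimensions, as
% in PCA. The flow itself is stubbed out; the variance schedule and all
% names below are illustrative assumptions, not the authors' implementation.
%
%   import torch
%
%   def ordered_nll(z, log_det_j, sigma2):
%       # Exact NF loss: -log p(x) = -log N(z; 0, diag(sigma2)) - log|det J|
%       log_prior = -0.5 * (z**2 / sigma2
%                           + torch.log(2 * torch.pi * sigma2)).sum(dim=1)
%       return -(log_prior + log_det_j).mean()
%
%   d = 10                                    # latent (= data) dimension
%   sigma2 = torch.exp(-torch.arange(d, dtype=torch.float32))  # decaying
%   z = torch.randn(32, d)                    # stand-in for flow(x)
%   log_det_j = torch.zeros(32)               # stand-in for flow's log|det J|
%   loss = ordered_nll(z, log_det_j, sigma2)
%
% Because the prior variances decay across latent coordinates, minimizing
% this loss encourages the flow to encode high-variance structure in the
% first few dimensions, which is one way to realize the PCA-like ordering
% sketched in the abstract.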