% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
@INPROCEEDINGS{Bouss:1007805,
author = {Bouss, Peter and Nestler, Sandra and René, Alexandre and
          Helias, Moritz},
title = {{D}imensionality reduction with normalizing flows},
school = {RWTH Aachen},
reportid = {FZJ-2023-02199},
year = {2022},
note = {Copyright: © (2022) Bouss P, Nestler S, René A, Helias M},
abstract = {Despite the large number of active neurons in the cortex,
            the activity of neural populations in various brain regions
            is expected to live on a low-dimensional manifold [1]. Among
            the most common tools to estimate the mapping to this
            manifold, along with its dimension, are the many variants of
            principal component analysis [2]. Despite their apparent
            success, these procedures have the disadvantage that they
            capture only linear correlations and that they perform
            poorly as generative models. To fully learn the statistics
            of neural activity and to generate artificial samples, we
            make use of normalizing flows (NFs) [3, 4, 5]. These neural
            networks learn a dimension-preserving estimator of the data
            probability distribution. They stand out from generative
            adversarial networks (GANs) and variational autoencoders
            (VAEs) for their simplicity (only one invertible network is
            learned) and for their exact likelihood estimation, owing to
            tractable Jacobians at each building block. We aim to modify
            NFs such that they can discriminate relevant (in-manifold)
            from noise (out-of-manifold) dimensions. To this end, we
            penalize the participation of each single latent variable in
            the reconstruction of the data through the inverse mapping
            (following a different reasoning than [6]). We can thus not
            only estimate the dimensionality of the activity subspace
            but also describe the underlying manifold without discarding
            any information. We validate our modification on controlled
            data sets of varying complexity. In particular, we highlight
            differences between affine and additive coupling layers in
            normalizing flows [7] and show that the former lead to
            pathologies when the data topology is non-trivial or when
            the data set is composed of classes with different volumes.
            We further illustrate the power of our modified NFs by
            reconstructing data from only a few dimensions. Finally, we
            apply this technique to identify manifolds in EEG recordings
            from a data set showing high gamma activity (described in
            [8]), obtained from 128 electrodes during four different
            movement tasks.
            Acknowledgements: This project is funded by the Deutsche
            Forschungsgemeinschaft (DFG, German Research Foundation) -
            368482240/GRK2416; and by the German Federal Ministry for
            Education and Research (BMBF Grant 01IS19077A to Jülich).
            References:
            [1] Gao, P., Trautmann, E., Yu, B., Santhanam, G., Ryu, S.,
            Shenoy, K., \& Ganguli, S. (2017). A theory of multineuronal
            dimensionality, dynamics and measurement. bioRxiv, 214262.
            doi:10.1101/214262
            [2] Gallego, J. A., Perich, M. G., Miller, L. E., \& Solla,
            S. A. (2017). Neural manifolds for the control of movement.
            Neuron, 94(5), 978-984. doi:10.1016/j.neuron.2017.05.025
            [3] Dinh, L., Krueger, D., \& Bengio, Y. (2014). NICE:
            Non-linear independent components estimation. arXiv preprint
            arXiv:1410.8516. doi:10.48550/arXiv.1410.8516
            [4] Dinh, L., Sohl-Dickstein, J., \& Bengio, S. (2016).
            Density estimation using Real NVP. arXiv preprint
            arXiv:1605.08803. doi:10.48550/arXiv.1605.08803
            [5] Kingma, D. P., \& Dhariwal, P. (2018). Glow: Generative
            flow with invertible 1x1 convolutions. Advances in Neural
            Information Processing Systems, 31.
            [6] Cunningham, E., Cobb, A., \& Jha, S. (2022). Principal
            manifold flows. arXiv preprint arXiv:2202.07037.
            doi:10.48550/arXiv.2202.07037
            [7] Behrmann, J., Vicol, P., Wang, K. C., Grosse, R., \&
            Jacobsen, J. H. (2021). Understanding and mitigating
            exploding inverses in invertible neural networks. In
            International Conference on Artificial Intelligence and
            Statistics (pp. 1792-1800). PMLR.
            [8] Schirrmeister, R. T., Springenberg, J. T., Fiederer,
            L. D. J., Glasstetter, M., Eggensperger, K., Tangermann, M.,
            ... \& Ball, T. (2017). Deep learning with convolutional
            neural networks for EEG decoding and visualization. Human
            Brain Mapping, 38(11), 5391-5420. doi:10.1002/hbm.23730},
month = {Sep},
date = {2022-09-13},
organization = {Bernstein Conference, Berlin
(Germany), 13 Sep 2022 - 16 Sep 2022},
subtyp = {After Call},
keywords = {Computational Neuroscience (Other) / Data analysis, machine
learning, neuroinformatics (Other)},
cin = {INM-6 / IAS-6 / INM-10},
cid = {I:(DE-Juel1)INM-6-20090406 / I:(DE-Juel1)IAS-6-20130828 /
I:(DE-Juel1)INM-10-20170113},
pnm = {5231 - Neuroscientific Foundations (POF4-523) / 5232 -
Computational Principles (POF4-523) / GRK 2416 - GRK 2416:
MultiSenses-MultiScales: Neue Ansätze zur Aufklärung
neuronaler multisensorischer Integration (368482240) /
RenormalizedFlows - Transparent Deep Learning with
Renormalized Flows (BMBF-01IS19077A)},
pid = {G:(DE-HGF)POF4-5231 / G:(DE-HGF)POF4-5232 /
G:(GEPRIS)368482240 / G:(DE-Juel-1)BMBF-01IS19077A},
typ = {PUB:(DE-HGF)24},
doi = {10.12751/NNCN.BC2022.104},
url = {https://juser.fz-juelich.de/record/1007805},
}
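%
% The abstract above describes penalizing the participation of each single
% latent variable in the reconstruction through the flow's inverse mapping.
% What follows is a minimal illustrative sketch, not the authors'
% implementation: an additive-coupling (NICE-style, ref. [3]) flow in
% PyTorch, with an assumed penalty that zeroes one latent coordinate at a
% time and measures how much the reconstruction changes. The names
% (AdditiveCoupling, Flow, participation) and the exact penalty form are
% assumptions for illustration only. Kept as %-comments so this file stays
% a valid BibTeX database.
%
%   import torch
%   import torch.nn as nn
%
%   class AdditiveCoupling(nn.Module):
%       # NICE-style additive coupling: one half of the vector is shifted
%       # by a function of the other half. The Jacobian is triangular with
%       # unit diagonal, so log|det J| = 0 and the likelihood is tractable.
%       def __init__(self, dim, hidden=64, flip=False):
%           super().__init__()
%           assert dim % 2 == 0
%           self.flip = flip
%           self.t = nn.Sequential(nn.Linear(dim // 2, hidden), nn.ReLU(),
%                                  nn.Linear(hidden, dim // 2))
%
%       def forward(self, x):
%           a, b = x.chunk(2, dim=-1)
%           if self.flip:
%               a, b = b, a
%           b = b + self.t(a)
%           return torch.cat((b, a) if self.flip else (a, b), dim=-1)
%
%       def inverse(self, y):
%           a, b = y.chunk(2, dim=-1)
%           if self.flip:
%               a, b = b, a
%           b = b - self.t(a)
%           return torch.cat((b, a) if self.flip else (a, b), dim=-1)
%
%   class Flow(nn.Module):
%       def __init__(self, dim, n_layers=6):
%           super().__init__()
%           self.layers = nn.ModuleList(
%               AdditiveCoupling(dim, flip=bool(i % 2))
%               for i in range(n_layers))
%
%       def forward(self, x):
%           for layer in self.layers:
%               x = layer(x)
%           return x
%
%       def inverse(self, z):
%           for layer in reversed(self.layers):
%               z = layer.inverse(z)
%           return z
%
%   def participation(flow, z):
%       # Assumed proxy for "participation": zero one latent coordinate at
%       # a time and measure how much the reconstruction via the inverse
%       # mapping changes. Summing over coordinates acts like an L1
%       # penalty, favoring solutions where few latent directions matter.
%       x = flow.inverse(z)
%       pen = z.new_zeros(())
%       for k in range(z.shape[-1]):
%           zk = z.clone()
%           zk[:, k] = 0.0
%           pen = pen + (x - flow.inverse(zk)).pow(2).sum(-1).sqrt().mean()
%       return pen
%
%   def loss(flow, x, lam=0.1):
%       z = flow(x)
%       # Standard-normal NLL up to a constant; log|det J| = 0 for
%       # additive couplings, so the likelihood term is exact.
%       nll = 0.5 * (z ** 2).sum(-1).mean()
%       return nll + lam * participation(flow, z)
%
%   # Toy usage: a noisy circle (1-D manifold) embedded in 4 dimensions.
%   torch.manual_seed(0)
%   theta = torch.rand(256, 1) * 6.2832
%   x = torch.cat([theta.cos(), theta.sin(),
%                  0.01 * torch.randn(256, 2)], dim=-1)
%   flow = Flow(dim=4)
%   opt = torch.optim.Adam(flow.parameters(), lr=1e-3)
%   for _ in range(200):
%       opt.zero_grad()
%       loss(flow, x).backward()
%       opt.step()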