% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@INPROCEEDINGS{Bouss:1041678,
      author       = {Bouss, Peter and Nestler, Sandra and Fischer, Kirsten and
                      Merger, Claudia Lioba and René, Alexandre and Helias,
                      Moritz},
      title        = {{N}onlinear dimensionality reduction with normalizing flows
                      for analysis of electrophysiological recordings},
      reportid     = {FZJ-2025-02383},
      year         = {2023},
      abstract     = {Despite the large number of active neurons in the cortex,
                      the activity of neural populations in different brain
                      regions is expected to live on a low-dimensional manifold
                      [1]. Among the most common tools to estimate the mapping to
                      this manifold, along with its dimension, are variants of
                      principal component analysis. Although their success is
                      undisputed, these methods still have the disadvantage of
                      assuming that the data is well described by a Gaussian
                      distribution; any additional features such as skewness or
                      bimodality are neglected. Their performance when used as a
                      generative model is therefore often poor. To fully learn the
                      statistics of neural activity and to generate artificial
                      samples, we use Normalizing Flows (NFs) [2, 3]. These neural
                      networks learn a dimension-preserving estimator of the
                      probability distribution of the data (Fig. 1: Left-hand
                      side). They differ from generative adversarial networks
                      (GANs) and variational autoencoders (VAEs) by their
                      simplicity – only one bijective mapping is learned – and
                      by their ability to compute the likelihood exactly due to
                      tractable Jacobians at each building block. We adapt
                      the training objective of NFs to discriminate between
                      relevant (in-manifold) and noise (out-of-manifold)
                      dimensions. To do
                      this, we break the original symmetry of the latent space by
                      enforcing maximal variance of the data to be encoded by as
                      few dimensions as possible (Fig. 1: Right-hand side),
                      the same idea that underlies PCA, a linear model,
                      adapted here to nonlinear mappings. This allows us to
                      estimate the
                      dimensionality of the neural manifold and even to describe
                      the underlying manifold without discarding any information,
                      a unique feature of NFs. We validate our
                      adaptation on artificial datasets of varying complexity
                      generated by a hidden manifold model where the underlying
                      dimensionality is known. We illustrate the power of our
                      approach by reconstructing data using only a few latent NF
                      dimensions. In this setting, we show the advantage of such a
                      nonlinear approach over linear methods. Following this
                      approach, we identify manifolds in EEG recordings from a
                      dataset featuring high gamma activity. As described in [4],
                      these recordings are obtained from 128 electrodes during
                      four movement tasks. When projected onto the first
                      principal components obtained by PCA, the data show
                      heavy-tailed distributions along some PCs. While
                      linear models such as PCA
                      are limited to Gaussian statistics and hence suboptimal in
                      such a case, the nonlinearity of NFs enables them to learn
                      higher-order correlations. Moreover, by flattening out the
                      curvature in latent space, we can better associate features
                      with latent dimensions. In particular, we now obtain a
                      reduced set of latent dimensions that explains most of
                      the data variance.
                      References:
                      1. Gallego JA, Perich MG, Miller LE, Solla SA. Neural
                      manifolds for the control of movement. Neuron. 2017
                      Jun 7;94(5):978–84.
                      2. Dinh L, Krueger D, Bengio Y. NICE: Non-linear
                      independent components estimation. arXiv preprint
                      arXiv:1410.8516. 2014 Oct 30.
                      3. Dinh L, Sohl-Dickstein J, Bengio S. Density
                      estimation using real NVP. In: 5th Int. Conf. Learn.
                      Represent. (ICLR); 2017.
                      4. Schirrmeister RT, Springenberg JT, Fiederer LD,
                      Glasstetter M, Eggensperger K, Tangermann M, Hutter F,
                      Burgard W, Ball T. Deep learning with convolutional
                      neural networks for EEG decoding and visualization.
                      Human Brain Mapping. 2017 Nov;38(11):5391–420.},
      month        = {Jul},
      date         = {2023-07-15},
      organization = {32nd Annual Computational Neuroscience
                      Meeting, Leipzig (Germany), 15 Jul 2023
                      - 19 Jul 2023},
      subtyp       = {After Call},
      cin          = {INM-6 / IAS-6},
      cid          = {I:(DE-Juel1)INM-6-20090406 / I:(DE-Juel1)IAS-6-20130828},
      pnm          = {5232 - Computational Principles (POF4-523) / 5234 -
                      Emerging NC Architectures (POF4-523) / GRK 2416 - GRK 2416:
                      MultiSenses-MultiScales: Neue Ansätze zur Aufklärung
                      neuronaler multisensorischer Integration (368482240) /
                      RenormalizedFlows - Transparent Deep Learning with
                      Renormalized Flows (BMBF-01IS19077A)},
      pid          = {G:(DE-HGF)POF4-5232 / G:(DE-HGF)POF4-5234 /
                      G:(GEPRIS)368482240 / G:(DE-Juel-1)BMBF-01IS19077A},
      typ          = {PUB:(DE-HGF)24},
      url          = {https://juser.fz-juelich.de/record/1041678},
}
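
% ---------------------------------------------------------------------------
% Note on the method: the abstract's claim that NFs compute the likelihood
% exactly "due to tractable Jacobians at each building block" refers to the
% change-of-variables formula used in NICE and real NVP [2, 3]. The Python
% sketch below (kept as a BibTeX comment) is an illustration only, not the
% authors' implementation; the conditioner callables `shift` and `log_scale`
% are hypothetical stand-ins for the small neural networks used in practice.
%
%     import numpy as np
%
%     def affine_coupling_forward(x, shift, log_scale):
%         """One affine coupling block: the first half of x passes through
%         unchanged, the second half is scaled and shifted conditioned on
%         the first. The Jacobian is triangular, so its log-determinant is
%         simply the sum of the log-scales."""
%         d = x.shape[-1] // 2
%         x1, x2 = x[..., :d], x[..., d:]
%         s, t = log_scale(x1), shift(x1)
%         z2 = x2 * np.exp(s) + t
%         log_det = s.sum(axis=-1)          # tractable by construction
%         return np.concatenate([x1, z2], axis=-1), log_det
%
%     def log_likelihood(z, log_det):
%         """Exact log p(x) via change of variables, assuming a standard
%         normal base density on the latent variable z."""
%         k = z.shape[-1]
%         return -0.5 * (z ** 2).sum(axis=-1) \
%                - 0.5 * k * np.log(2.0 * np.pi) + log_det
%
% Stacking several such blocks, alternating which half is transformed, gives
% the bijective, dimension-preserving map the abstract describes: the exact
% likelihood is the base density evaluated at the latent code plus the
% accumulated log-determinants.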
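%
% The adapted training objective (maximal variance encoded by as few latent
% dimensions as possible) is not spelled out in the abstract. The following
% is a hedged sketch of one way such a PCA-like ordering could be imposed on
% top of the likelihood above; the linear weighting in
% `variance_ordering_penalty` and the coefficient `gamma` are assumptions,
% not the authors' formulation.
%
%     def variance_ordering_penalty(z, gamma=1.0):
%         """Penalize variance in trailing latent dimensions so that most of
%         the data variance is pushed into the leading ones, a nonlinear
%         analogue of PCA's component ordering. z is a (batch, dim) array of
%         latent codes produced by the flow."""
%         var = z.var(axis=0)                     # per-dimension variance
%         weights = np.arange(1, z.shape[1] + 1)  # heavier cost on later dims
%         return gamma * (weights * var).sum()
%
%     def training_objective(z, log_det, gamma=1.0):
%         """Negative log-likelihood plus the symmetry-breaking term."""
%         nll = -log_likelihood(z, log_det).mean()
%         return nll + variance_ordering_penalty(z, gamma)
%
% Latent dimensions whose variance is driven toward zero can then be read as
% out-of-manifold (noise) directions, while the number of remaining
% high-variance dimensions estimates the dimensionality of the neural
% manifold.
% ---------------------------------------------------------------------------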