% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@INPROCEEDINGS{Bouss:1041672,
      author       = {Bouss, Peter and Nestler, Sandra and Fischer, Kirsten and
                      Merger, Claudia Lioba and René, Alexandre and Helias,
                      Moritz},
      title        = {{A}ssessing {N}eural {M}anifold {P}roperties {W}ith
                      {A}dapted {N}ormalizing {F}lows},
      reportid     = {FZJ-2025-02377},
      year         = {2024},
      abstract     = {Despite the large number of active neurons in the cortex,
                      the activity of neuronal populations is expected to lie on a
                      low-dimensional manifold across different brain regions [1].
                      Variants of principal component analysis (PCA) are commonly
                      used to assess this manifold. However, these methods are
                      limited by the assumption that the data follows a Gaussian
                      distribution and neglect additional features such as the
                      curvature of the manifold. Hence, their performance as
                      generative models tends to be subpar. To construct a
                      generative model that entirely learns the statistics of
                      neural activity with no assumptions about its distribution,
                      we use Normalizing Flows (NFs) [2, 3]. These neural networks
                      learn an estimator of the probability distribution of the
                      data, based on a latent distribution of the same dimension.
                      Their simplicity and their ability to compute the exact
                      likelihood distinguish them from other generative
                      networks. Our adaptation of NFs focuses on distinguishing
                      between relevant (in manifold) and noise dimensions (out of
                      manifold). We achieve this by identifying principal axes in
                      the latent space. Similar to PCA, we order these axes by
                      their explanatory power, using reconstruction performance
                      instead of explained variance to identify and rank them.
                      This idea was also explored in [4]
                      with a different loss function. Our adaptation allows us to
                      investigate the behavior of the non-linear principal axes
                      and thus the geometry on which the data lie. For better
                      interpretability, this is done by approximating the network
                      as a quadratic mapping around the maximum-likelihood modes. We
                      validate our adaptation on artificial data sets of varying
                      complexity where the underlying dimensionality is known.
                      This shows that our approach is able to reconstruct data
                      with only a few latent variables. In this regard, it is more
                      efficient than PCA, in addition to achieving a higher
                      likelihood. We apply the method to electrophysiological
                      recordings of V1 and V4 in macaques [5], which have
                      previously been analyzed with a Gaussian Mixture Model [6].
                      We show that the data lie on a manifold that features two
                      distinct regions, each corresponding to one of the two
                      states, eyes-open and eyes-closed. The shape of the manifold
                      significantly deviates from a Gaussian distribution and thus
                      would not be recoverable with PCA. We further analyze how
                      the non-linear interaction between groups of neurons
                      contributes to the shape of the manifolds. Figure 1: We use
                      Normalizing Flows to learn the distribution of the data by
                      mapping it to a Gaussian distribution in latent space.
                      Thereby, we enforce an alignment of the latent dimensions
                      with the most informative non-linear axes.},
      month        = {Sep},
      date         = {2024-09-29},
      organization = {Bernstein Conference 2024, Frankfurt
                      (Germany), 29 Sep 2024 - 2 Oct 2024},
      subtyp       = {After Call},
      keywords     = {Computational Neuroscience (Other) / Data analysis, machine
                      learning and neuroinformatics (Other)},
      cin          = {IAS-6},
      cid          = {I:(DE-Juel1)IAS-6-20130828},
      pnm          = {5232 - Computational Principles (POF4-523) / 5234 -
                      Emerging NC Architectures (POF4-523) / GRK 2416 - GRK 2416:
                      MultiSenses-MultiScales: Neue Ansätze zur Aufklärung
                      neuronaler multisensorischer Integration (368482240) /
                      RenormalizedFlows - Transparent Deep Learning with
                      Renormalized Flows (BMBF-01IS19077A)},
      pid          = {G:(DE-HGF)POF4-5232 / G:(DE-HGF)POF4-5234 /
                      G:(GEPRIS)368482240 / G:(DE-Juel-1)BMBF-01IS19077A},
      typ          = {PUB:(DE-HGF)24},
      doi          = {10.12751/NNCN.BC2024.179},
      url          = {https://juser.fz-juelich.de/record/1041672},
}
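
% The abstract above describes ordering non-linear latent axes by reconstruction
% performance rather than explained variance. The following is a minimal,
% illustrative Python sketch of that ranking step only, assuming a toy invertible
% coupling map as a stand-in for a trained normalizing flow. It is not the
% authors' implementation; all names and parameter values below are hypothetical.
%
% import numpy as np
%
% rng = np.random.default_rng(0)
%
% # Toy data: a curved 1-D manifold embedded in 2-D, plus isotropic noise.
% t = rng.uniform(-2.0, 2.0, size=(1000, 1))
% x = np.hstack([t, 0.5 * t**2]) + 0.05 * rng.normal(size=(1000, 2))
%
% # Stand-in for a trained flow: one affine coupling layer with an exact inverse,
% #   z1 = x1,  z2 = x2 * exp(s(x1)) + tr(x1).
% w_s, b_s = 0.3, 0.1    # hypothetical "learned" scale-net parameters
% w_t, b_t = -0.8, 0.2   # hypothetical "learned" shift-net parameters
%
% def s(u):   # scale network (here a tanh of an affine map, as a stand-in)
%     return np.tanh(w_s * u + b_s)
%
% def tr(u):  # translation network
%     return w_t * u + b_t
%
% def forward(x):  # data -> latent
%     x1, x2 = x[:, :1], x[:, 1:]
%     return np.hstack([x1, x2 * np.exp(s(x1)) + tr(x1)])
%
% def inverse(z):  # latent -> data
%     z1, z2 = z[:, :1], z[:, 1:]
%     return np.hstack([z1, (z2 - tr(z1)) * np.exp(-s(z1))])
%
% # Rank latent axes by reconstruction performance: keep a single latent axis,
% # set the others to zero, map back to data space, and measure the error.
% z = forward(x)
% errors = []
% for k in range(z.shape[1]):
%     z_masked = np.zeros_like(z)
%     z_masked[:, k] = z[:, k]          # retain only axis k
%     errors.append(np.mean((x - inverse(z_masked)) ** 2))
%
% order = np.argsort(errors)            # most informative axis first
% print("latent axes ranked by reconstruction error:", order.tolist())
%
% With a trained flow in place of the toy coupling layer, the same loop would
% rank its non-linear latent axes by how much of the data they reconstruct.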