% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@INPROCEEDINGS{Gutzen:858244,
      author       = {Gutzen, Robin and Grün, Sonja and Denker, Michael},
      title        = {{Validation} {Methods} for {Neural} {Network}
                      {Simulations}},
      reportid     = {FZJ-2018-07144},
      year         = {2017},
      abstract     = {Neuroscience as an evolving field is in the quite rare
                      situation that the amount of models and theories about the
                      various functionalities of the brain is contrasted against a
                      constantly growing body of experimental evidence. In this
                      state of research, the role of neural network simulations to
                      link theory and data gains importance. There is a large
                      variety of simulators and simulator frameworks (e.g., NEST,
                      BRIAN, NEURON, SpikeNET) which may differ strongly in their
                      internal models used for computation and the implications
                      that come with it. Hence there is a high demand for a
                      thorough understanding of these simulation engines that are
                      used to generate simulated network activity data, in
                      particular with respect to their accuracy. For a proper
                      evaluation of simulations, new tools have to be developed in
                      order to perform such validations, in an accessible and
                      readily reproducible fashion. However, the comparison can not
                      simply be done in a spike-to-spike manner for a number of
                      reasons: neuronal spiking is stochastic, and competing
                      implementations of algorithms or differences in the
                      numerical processing may cause deviations in the precise
                      output of the simulations. Instead, the simulations have to
                      be evaluated in a statistical sense and yield quantifiable
                      measures to characterize significant identity or difference
                      of model and experiment or different models. Thus, we deal
                      with the question of how to properly validate neural network
                      simulations? As a test case, we chose the validation of a
                      neuronal network simulated on the neuromorphic hardware
                      SpiNNaker against the same simulation carried out using the
                      NEST simulator software as reference [1]. The NEST simulator
                      is an open source software project developed by the NEST
                      initiative (http://www.nest-initiative.org) and features
                      exact numerical integration of the dynamics. The SpiNNaker
                      system, located in Manchester, UK, is a neuromorphic
                      architecture consisting of millions of cores which can
                      perform efficient network simulations on a hardware level.
                      Since this operation mode is inherently different from
                      conventional software simulations and has some constrictions
                      regarding, e.g., the fine temporal resolution of spikes, the
                      validity of such simulations with respect to NEST is not
                      immediately given. The starting point of the validation of
                      SpiNNaker with NEST are the results of a model simulation of
                      the canonical microcircuit model [2] which was performed on
                      both platforms. The results are given in form of recorded
                      spiking activity. We concentrate on validating the results
                      by comparing measures describing the single neuron
                      statistics (firing rate, coefficient of variation of the
                      interspike intervals (CV)) as well as the correlation
                      structure in the simulated network as measured by the
                      pairwise correlation coefficients between all spike trains.
                      In a first approach, we chose to compare the outcomes in
                      form of the distributions of the measures and tested the
                      suitability of a variety of statistical two sample tests
                      (Kolmogorov-Smirnov-Distance, Mann-Whitney-U, and
                      Kullback-Leibler-Divergence) using the network simulations
                      and complemented by stochastic spike train simulations.
                      However, such an analysis of correlation coefficients alone
                      is not able to give insights about which neurons are
                      involved in the correlations and if there are higher order
                      correlations present. Therefore, we assess the correlation
                      structure using an eigenvalue decomposition of the
                      correlation matrix. We present an approach to use the
                      eigenvalue decomposition to reorder the neurons with respect
                      to their correlation strength to identify groups of highly
                      correlated neurons. The goal of the development and pooling
                      of these validation methods is to provide a flexible toolbox
                      which is not tailored towards one specific application but
                      may be used in a broad group of validation cases. In future
                      work, we aim at describing the dependence of the validation
                      approaches on the type of the network simulation, the number
                      of recorded neurons, the simulation duration, the features
                      of the model, the reference mode, and the scientific
                      question behind the analysis. References: [1] Senk, Johanna,
                      et al. “A Collaborative Simulation-Analysis Workflow for
                      Computational Neuroscience Using HPC.” Jülich Aachen
                      Research Alliance (JARA) High-Performance Computing
                      Symposium. Springer, Cham, 2016. [2] Potjans, Tobias C., and
                      Markus Diesmann. “The cell-type specific cortical
                      microcircuit: relating structure and activity in a
                      full-scale spiking network model.” Cerebral Cortex 24.3
                      (2014): 785-806.},
      month        = aug,
      date         = {2017-08-28},
      organization = {Data Science Summer School, Paris
                      (France), 28 Aug 2017 - 1 Sep 2017},
      subtyp       = {Other},
      cin          = {INM-6 / INM-10 / IAS-6},
      cid          = {I:(DE-Juel1)INM-6-20090406 / I:(DE-Juel1)INM-10-20170113 /
                      I:(DE-Juel1)IAS-6-20130828},
      pnm          = {574 - Theory, modelling and simulation (POF3-574) / 571 -
                      Connectivity and Activity (POF3-571)},
      pid          = {G:(DE-HGF)POF3-574 / G:(DE-HGF)POF3-571},
      typ          = {PUB:(DE-HGF)24},
      url          = {https://juser.fz-juelich.de/record/858244},
}