% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@ARTICLE{Yegenoglu:910130,
      author       = {Yegenoglu, Alper and Subramoney, Anand and Hater, Thorsten
                      and Jimenez-Romero, Cristian and Klijn, Wouter and Pérez
                      Martín, Aarón and van der Vlag, Michiel and Herty, Michael
                      and Morrison, Abigail and Diaz, Sandra},
      title        = {{E}xploring {P}arameter and {H}yper-{P}arameter {S}paces of
                      {N}euroscience {M}odels on {H}igh {P}erformance {C}omputers
                      {W}ith {L}earning to {L}earn},
      journal      = {Frontiers in Computational Neuroscience},
      volume       = {16},
      issn         = {1662-5188},
      address      = {Lausanne},
      publisher    = {Frontiers Research Foundation},
      reportid     = {FZJ-2022-03626},
      pages        = {885207},
      year         = {2022},
      abstract     = {Neuroscience models commonly have a high number of degrees
                      of freedom, and only specific regions within the parameter
                      space are able to produce dynamics of interest. This makes
                      developing tools and strategies to efficiently find these
                      regions highly important for advancing brain research.
                      Exploring the high-dimensional parameter space with
                      numerical simulations has become a frequently used
                      technique in recent years in many areas of computational
                      neuroscience. Today, high performance computing (HPC)
                      provides a powerful infrastructure to speed up explorations
                      and increase our general understanding of the behavior of a
                      model within a reasonable time. Learning to learn (L2L) is
                      a well-known concept in machine learning (ML) and a
                      specific method for acquiring constraints to improve
                      learning performance. The concept can be decomposed into a
                      two-loop optimization process in which the optimization
                      target can be any program, such as an artificial neural
                      network, a spiking network, a single-cell model, or a
                      whole-brain simulation. In this work, we present L2L as an
                      easy-to-use and flexible framework to perform parameter and
                      hyper-parameter space exploration of neuroscience models on
                      HPC infrastructure. The L2L framework is an implementation
                      of the learning-to-learn concept written in Python. This
                      open-source software allows several instances of an
                      optimization target to be executed with different
                      parameters in an embarrassingly parallel fashion on HPC.
                      L2L provides a set of built-in optimizer algorithms, which
                      make adaptive and efficient exploration of parameter spaces
                      possible. Unlike other optimization toolboxes, L2L provides
                      maximum flexibility in how the optimization target is
                      executed. In this paper, we show a variety of examples of
                      neuroscience models being optimized within the L2L
                      framework to perform different types of tasks. The tasks
                      used to illustrate the concept range from reproducing
                      empirical data to learning how to solve a problem in a
                      dynamic environment. We particularly focus on simulations
                      with models ranging from the single cell to the whole
                      brain, using a variety of simulation engines such as NEST,
                      Arbor, TVB, OpenAI Gym, and NetLogo.},
      cin          = {JSC / IAS-6 / INM-6},
      ddc          = {610},
      cid          = {I:(DE-Juel1)JSC-20090406 / I:(DE-Juel1)IAS-6-20130828 /
                      I:(DE-Juel1)INM-6-20090406},
      pnm          = {5111 - Domain-Specific Simulation $\&$ Data Life Cycle Labs
                      (SDLs) and Research Groups (POF4-511) / HBP SGA2 - Human
                      Brain Project Specific Grant Agreement 2 (785907) / HBP SGA3
                      - Human Brain Project Specific Grant Agreement 3 (945539) /
                      JL SMHB - Joint Lab Supercomputing and Modeling for the
                      Human Brain (JL SMHB-2021-2027) / SLNS - SimLab Neuroscience
                      (Helmholtz-SLNS) / ICEI - Interactive Computing
                      E-Infrastructure for the Human Brain Project (800858) / 5234
                      - Emerging NC Architectures (POF4-523)},
      pid          = {G:(DE-HGF)POF4-5111 / G:(EU-Grant)785907 /
                      G:(EU-Grant)945539 / G:(DE-Juel1)JL SMHB-2021-2027 /
                      G:(DE-Juel1)Helmholtz-SLNS / G:(EU-Grant)800858 /
                      G:(DE-HGF)POF4-5234},
      typ          = {PUB:(DE-HGF)16},
      pubmed       = {35720775},
      UT           = {WOS:000811824200001},
      doi          = {10.3389/fncom.2022.885207},
      url          = {https://juser.fz-juelich.de/record/910130},
}
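
% Note: the abstract above summarizes L2L as a two-loop optimization process in
% which an outer-loop optimizer proposes parameter sets and many independent
% instances of the optimization target are evaluated in an embarrassingly
% parallel fashion. The commented Python sketch below is purely illustrative of
% that idea; all names are hypothetical, and it is NOT the API of the L2L
% package. It is kept as a BibTeX comment so this file remains valid.
%
%   from concurrent.futures import ProcessPoolExecutor
%   import random
%
%   def fitness(params):
%       # Inner loop: run one instance of the optimization target (e.g. a
%       # simulated neuron model) with the given parameters and return a
%       # fitness score. A toy quadratic stands in for a real simulation.
%       return -sum((p - 0.5) ** 2 for p in params)
%
%   def propose(population, scores, pop_size):
%       # Outer loop: a very simple evolutionary update around the best
%       # candidate; in L2L, built-in optimizer algorithms play this role.
%       best = max(zip(scores, population))[1]
%       return [[b + random.gauss(0, 0.1) for b in best] for _ in range(pop_size)]
%
%   def explore(n_params=3, pop_size=8, generations=20):
%       population = [[random.random() for _ in range(n_params)]
%                     for _ in range(pop_size)]
%       best = None
%       for _ in range(generations):
%           # Instances are independent, so they can be evaluated in parallel
%           # (on HPC: one job or task per instance).
%           with ProcessPoolExecutor() as pool:
%               scores = list(pool.map(fitness, population))
%           gen_best = max(zip(scores, population))
%           best = gen_best if best is None or gen_best > best else best
%           population = propose(population, scores, pop_size)
%       return best
%
%   if __name__ == "__main__":
%       print(explore())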