% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
@article{vanderVlag:907606,
  author    = {van der Vlag, Michiel and Woodman, Marmaduke and Fousek, Jan and
               Diaz, Sandra and Pérez Martín, Aarón and Jirsa, Viktor and
               Morrison, Abigail},
  title     = {{RateML}: A Code Generation Tool for Brain Network Models},
  journal   = {Frontiers in Network Physiology},
  volume    = {2},
  issn      = {2674-0109},
  address   = {Lausanne},
  publisher = {Frontiers Media},
  reportid  = {FZJ-2022-02105},
  pages     = {826345},
  year      = {2022},
  abstract  = {Whole brain network models are now an established tool in
               scientific and clinical research, however their use in a
               larger workflow still adds significant informatics
               complexity. We propose a tool, RateML, that enables users to
               generate such models from a succinct declarative
               description, in which the mathematics of the model are
               described without specifying how their simulation should be
               implemented. RateML builds on NeuroML’s Low Entropy Model
               Specification (LEMS), an XML based language for specifying
               models of dynamical systems, allowing descriptions of neural
               mass and discretized neural field models, as implemented by
               the Virtual Brain (TVB) simulator: the end user describes
               their model’s mathematics once and generates and runs code
               for different languages, targeting both CPUs for fast single
               simulations and GPUs for parallel ensemble simulations. High
               performance parallel simulations are crucial for tuning many
               parameters of a model to empirical data such as functional
               magnetic resonance imaging (fMRI), with reasonable execution
               times on small or modest hardware resources. Specifically,
               while RateML can generate Python model code, it enables
               generation of Compute Unified Device Architecture C++ code
               for NVIDIA GPUs. When a CUDA implementation of a model is
               generated, a tailored model driver class is produced,
               enabling the user to tweak the driver by hand and perform
               the parameter sweep. The model and driver can be executed on
               any compute capable NVIDIA GPU with a high degree of
               parallelization, either locally or in a compute cluster
               environment. The results reported in this manuscript show
               that with the CUDA code generated by RateML, it is possible
               to explore thousands of parameter combinations with a single
               Graphics Processing Unit for different models, substantially
               reducing parameter exploration times and resource usage for
               the brain network models, in turn accelerating the research
               workflow itself. This provides a new tool to create
               efficient and broader parameter fitting workflows, support
               studies on larger cohorts, and derive more robust and
               statistically relevant conclusions about brain dynamics.},
  cin       = {JSC / INM-6 / IAS-6 / INM-10},
  ddc       = {610},
  cid       = {I:(DE-Juel1)JSC-20090406 / I:(DE-Juel1)INM-6-20090406 /
               I:(DE-Juel1)IAS-6-20130828 / I:(DE-Juel1)INM-10-20170113},
  pnm       = {5111 - Domain-Specific Simulation \& Data Life Cycle Labs
               (SDLs) and Research Groups (POF4-511) / SLNS - SimLab
               Neuroscience (Helmholtz-SLNS) / HDS LEE - Helmholtz School
               for Data Science in Life, Earth and Energy (HDS LEE)
               (HDS-LEE-20190612) / HBP SGA2 - Human Brain Project Specific
               Grant Agreement 2 (785907) / HBP SGA3 - Human Brain Project
               Specific Grant Agreement 3 (945539) / 5234 - Emerging NC
               Architectures (POF4-523)},
  pid       = {G:(DE-HGF)POF4-5111 / G:(DE-Juel1)Helmholtz-SLNS /
               G:(DE-Juel1)HDS-LEE-20190612 / G:(EU-Grant)785907 /
               G:(EU-Grant)945539 / G:(DE-HGF)POF4-5234},
  typ       = {PUB:(DE-HGF)16},
  pubmed    = {36926112},
  UT        = {WOS:001203807400001},
  doi       = {10.3389/fnetp.2022.826345},
  url       = {https://juser.fz-juelich.de/record/907606},
}