% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
@ARTICLE{Ippen:830181,
author = {Ippen, Tammo and Eppler, Jochen M. and Plesser, Hans E. and
Diesmann, Markus},
title = {{Constructing Neuronal Network Models in Massively
         Parallel Environments}},
journal = {Frontiers in Neuroinformatics},
volume = {11},
issn = {1662-5196},
address = {Lausanne},
publisher = {Frontiers Research Foundation},
reportid = {FZJ-2017-03757},
pages = {30},
year = {2017},
abstract = {Recent advances in the development of data structures to
represent spiking neuron network models enable us to exploit
the complete memory of petascale computers for a single
brain-scale network simulation. In this work, we investigate
how well we can exploit the computing power of such
supercomputers for the creation of neuronal networks. Using
an established benchmark, we divide the runtime of
simulation code into the phase of network construction and
the phase during which the dynamical state is advanced in
time. We find that on multi-core compute nodes network
creation scales well with process-parallel code but exhibits
prohibitively large memory consumption. Thread-parallel
network creation, in contrast, exhibits speedup only up to a
small number of threads but has little overhead in terms of
memory. We further observe that the algorithms creating
instances of model neurons and their connections scale well
for networks of ten thousand neurons, but do not show the
same speedup for networks of millions of neurons. Our work
uncovers that the lack of scaling of thread-parallel network
creation is due to inadequate memory allocation strategies
and demonstrates that thread-optimized memory allocators
recover excellent scaling. An analysis of the loop order
used for network construction reveals that more complex
tests on the locality of operations significantly improve
scaling and reduce runtime by allowing construction
algorithms to step through large networks more efficiently
than in existing code. The combination of these techniques
increases performance by an order of magnitude and harnesses
the increasingly parallel compute power of the compute nodes
in high-performance clusters and supercomputers.},
cin = {INM-6 / IAS-6 / INM-10 / JSC},
ddc = {610},
cid = {I:(DE-Juel1)INM-6-20090406 / I:(DE-Juel1)IAS-6-20130828 /
I:(DE-Juel1)INM-10-20170113 / I:(DE-Juel1)JSC-20090406},
pnm = {574 - Theory, modelling and simulation (POF3-574) / 511 -
Computational Science and Mathematical Methods (POF3-511) /
Brain-Scale Simulations (jinb33\_20121101) / SMHB -
Supercomputing and Modelling for the Human Brain
(HGF-SMHB-2013-2017) / HBP - The Human Brain Project
(604102) / HBP SGA1 - Human Brain Project Specific Grant
Agreement 1 (720270) / SLNS - SimLab Neuroscience
(Helmholtz-SLNS)},
pid = {G:(DE-HGF)POF3-574 / G:(DE-HGF)POF3-511 /
G:(DE-Juel1)jinb33\_20121101 /
G:(DE-Juel1)HGF-SMHB-2013-2017 / G:(EU-Grant)604102 /
G:(EU-Grant)720270 / G:(DE-Juel1)Helmholtz-SLNS},
typ = {PUB:(DE-HGF)16},
UT = {WOS:000401368100001},
pubmed = {pmid:28559808},
doi = {10.3389/fninf.2017.00030},
url = {https://juser.fz-juelich.de/record/830181},
}