% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
@inproceedings{Feld:892906,
  author        = {Feld, Christian and Geimer, Markus and Hermanns,
                   Marc-André and Saviankou, Pavel and Visser, Anke and Mohr,
                   Bernd},
  title         = {Detecting Disaster Before It Strikes: On the
                   Challenges of Automated Building and Testing in
                   {HPC} Environments},
  booktitle     = {Tools for High Performance Computing 2018 / 2019},
  editor        = {Mix, Hartmut},
  chapter       = {1},
  address       = {Cham},
  publisher     = {Springer International Publishing},
  reportid      = {FZJ-2021-02430},
  isbn          = {978-3-030-66057-4},
  pages         = {3--26},
  year          = {2021},
  comment       = {Tools for High Performance Computing 2018 / 2019 / Mix,
                   Hartmut (Editor) ; Cham : Springer International Publishing,
                   2021, Chapter 1 ; ISBN: 978-3-030-66056-7 ;
                   doi:10.1007/978-3-030-66057-4},
  abstract      = {Software reliability is one of the cornerstones of any
                   successful user experience. Software needs to build up the
                   users’ trust in its fitness for a specific purpose.
                   Software failures undermine this trust and add to user
                   frustration that will ultimately lead to a termination of
                   usage. Even beyond user expectations on the robustness of a
                   software package, today’s scientific software is more than
                   a temporary research prototype. It also forms the bedrock
                   for successful scientific research in the future. A
                   well-defined software engineering process that includes
                   automated builds and tests is a key enabler for keeping
                   software reliable in an agile scientific environment and
                   should be of vital interest for any scientific software
                   development team. While automated builds and deployment as
                   well as systematic software testing have become common
                   practice when developing software in industry, it is rarely
                   used for scientific software, including tools. Potential
                   reasons are that (1) in contrast to computer scientists,
                   domain scientists from other fields usually never get
                   exposed to such techniques during their training, (2)
                   building up the necessary infrastructures is often
                   considered overhead that distracts from the real science,
                   (3) interdisciplinary research teams are still rare, and (4)
                   high-performance computing systems and their programming
                   environments are less standardized, such that published
                   recipes can often not be applied without heavy modification.
                   In this work, we will present the various challenges we
                   encountered while setting up an automated building and
                   testing infrastructure for the Score-P, Scalasca, and Cube
                   projects. We will outline our current approaches,
                   alternatives that have been considered, and the remaining
                   open issues that still need to be addressed—to further
                   increase the software quality and thus, ultimately improve
                   user experience.},
  month         = sep,
  date          = {2018-09-17},
  organization  = {12th International Parallel Tools
                   Workshop, Stuttgart (Germany), 17 Sep
                   2018 - 18 Sep 2018},
  internal-note = {NOTE(review): month/date refer to the workshop (Sep 2018)
                   while year is the proceedings publication (2021); styles
                   rendering month+year would show "September 2021" — confirm
                   intent with the repository export},
  cin           = {JSC},
  cid           = {I:(DE-Juel1)JSC-20090406},
  pnm           = {511 - Enabling Computational- \& Data-Intensive Science
                   and Engineering (POF4-511) / ATMLPP - ATML Parallel
                   Performance (ATMLPP)},
  pid           = {G:(DE-HGF)POF4-511 / G:(DE-Juel-1)ATMLPP},
  typ           = {PUB:(DE-HGF)8 / PUB:(DE-HGF)7},
  doi           = {10.1007/978-3-030-66057-4_1},
  url           = {https://juser.fz-juelich.de/record/892906},
}