% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
@inproceedings{Alvarez:823864,
  author       = {Alvarez, Damian and O'Cais, Alan and Geimer, Markus and
                  Hoste, Kenneth},
  title        = {Scientific Software Management in Real Life: Deployment
                  of {EasyBuild} on a Large Scale System},
  booktitle    = {Proceedings of the Third International Workshop on {HPC}
                  User Support Tools},
  publisher    = {IEEE Press},
  address      = {Piscataway, NJ, USA},
  pages        = {31--40},
  month        = nov,
  year         = {2016},
  doi          = {10.1109/HUST.2016.009},
  url          = {https://juser.fz-juelich.de/record/823864},
  abstract     = {Managing scientific software stacks has traditionally been
                  a manual task that required a sizeable team with knowledge
                  about the specifics of building each application. Keeping
                  the software stack up to date also caused a significant
                  overhead for system administrators as well as support teams.
                  Furthermore, a flat module view and the manual creation of
                  modules by different members of the teams can end up
                  providing a confusing view of the installed software to end
                  users. In addition, on many HPC clusters the OS images have
                  to include auxiliary packages to support components of the
                  scientific software stack, potentially bloating the images
                  of the cluster nodes and restricting the installation of new
                  software to a designated maintenance window. To alleviate
                  this situation, tools like EasyBuild help to manage a large
                  number of scientific software packages in a structured way,
                  decoupling the scientific stack from the OS-provided
                  software and lowering the overall overhead of managing a
                  complex HPC software infrastructure. However, the relative
                  novelty of these tools and the variety of requirements from
                  both users and HPC sites means that such frameworks still
                  have to evolve and adapt to different environments. In this
                  paper, we report on how we deployed EasyBuild in a cluster
                  with 45K+ cores (JURECA). In particular, we discuss which
                  features were missing in order to meet our requirements, how
                  we implemented them, how the installation, upgrade, and
                  retirement of software is managed, and how this approach is
                  reused for other internal systems. Finally, we outline some
                  enhancements we would like to see implemented in our setup
                  and in EasyBuild in the future.},
  date         = {2016-11-13},
  organization = {Third International Workshop on HPC
                  User Support Tools, Salt Lake City
                  (USA), 13 Nov 2016 - 13 Nov 2016},
  comment      = {Proceedings of the Third International Workshop on HPC User
                  Support Tools},
  reportid     = {FZJ-2016-06504},
  cin          = {JSC},
  cid          = {I:(DE-Juel1)JSC-20090406},
  pnm          = {513 - Supercomputer Facility (POF3-513)},
  pid          = {G:(DE-HGF)POF3-513},
  typ          = {PUB:(DE-HGF)8 / PUB:(DE-HGF)7},
  internal-note = {Reviewed: pages normalised to en-dash range, month macro
                  used, single-letter title braces replaced by whole-word
                  protection on EasyBuild/HPC; repository-export fields
                  (reportid, cin, cid, pnm, pid, typ, comment, date,
                  organization) retained — ignored by standard styles.},
}