% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
@MISC{Breuer:1018710,
author = {Breuer, Thomas and Wellmann, Julia and Souza Mendes
Guimarães, Filipe and Himmels, Carina and Luehrs,
Sebastian},
      title        = {{JUBE} (v2.6.1)},
reportid = {FZJ-2023-04998},
year = {2023},
      abstract     = {Benchmarking a computer system usually involves numerous
                      tasks, including several runs of different applications.
                      Configuring, compiling, and running a benchmark suite on
                      several platforms, together with the accompanying tasks
                      of result verification and analysis, requires
                      considerable administrative effort and produces large
                      amounts of data that must be analysed and collected in a
                      central database. Without a benchmarking environment,
                      all these steps have to be performed by hand. For each
                      benchmark application, the benchmark data is written out
                      in a format that enables the benchmarker to extract the
                      desired information. This data can be parsed by
                      automatic pre- and post-processing scripts that extract
                      the relevant information and store it in condensed form
                      for manual interpretation. The JUBE workflow and
                      benchmarking environment provides a script-based
                      framework to easily create benchmark sets, run those
                      sets on different computer systems, and evaluate the
                      results. It is actively developed by the Jülich
                      Supercomputing Centre of Forschungszentrum Jülich,
                      Germany.},
cin = {JSC},
cid = {I:(DE-Juel1)JSC-20090406},
pnm = {5112 - Cross-Domain Algorithms, Tools, Methods Labs (ATMLs)
and Research Groups (POF4-511) / BMBF 01 1H1 6013, NRW 325
– 8.03 – 133340 - SiVeGCS (DB001492) / ATMLAO - ATML
Application Optimization and User Service Tools (ATMLAO)},
pid = {G:(DE-HGF)POF4-5112 / G:(DE-Juel-1)DB001492 /
G:(DE-Juel-1)ATMLAO},
typ = {PUB:(DE-HGF)33},
      doi          = {10.5281/zenodo.10228432},
url = {https://juser.fz-juelich.de/record/1018710},
}
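
% As a rough illustration of the script-based workflow described in the
% abstract above: a minimal JUBE 2 XML input file could look like the
% sketch below. The benchmark, parameter, and step names are made up for
% illustration; see the JUBE documentation for the authoritative format.
%
% <?xml version="1.0" encoding="UTF-8"?>
% <jube>
%   <benchmark name="hello_world" outpath="bench_run">
%     <!-- a parameter that the step below can substitute -->
%     <parameterset name="hello_parameter">
%       <parameter name="hello_str">Hello JUBE</parameter>
%     </parameterset>
%     <!-- a single step running one shell command -->
%     <step name="say_hello">
%       <use>hello_parameter</use>
%       <do>echo $hello_str</do>
%     </step>
%   </benchmark>
% </jube>
%
% Executed with "jube run hello.xml", JUBE creates a run directory under
% bench_run and executes each step; a real setup would add patternset,
% analyser, and result sections so that "jube analyse" and "jube result"
% can parse and tabulate the benchmark output.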