% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
@misc{Villamar:1052977,
      author = {Villamar, Jose and Kelbling, Matthias and Thober, Stephan},
      othercontributors = {More, Heather and Denker, Michael and Tetzlaff, Tom and
                           Senk, Johanna},
      % Brace whole words/phrases (not single letters) so styles that apply
      % sentence casing preserve the proper nouns without breaking kerning.
      title = {{Metadata Archivist} - {Proof of Concept}},
      reportid = {FZJ-2026-01327},
      year = {2025},
      abstract = {This archive contains an example implementation of a
                  knowledge generation workflow. Here the questions to answer
                  relate to benchmarking and validation of a given simulator
                  and model. In this workflow, 10 simulation runs of a model
                  are performed while collecting metadata. Then the generated
                  data is post processed to ensure an aggregation of
                  simulation statistics over the 10 different runs. These
                  statistics alongside system information are used to
                  structure the metadata. Both raw and post-processed, data
                  and metadata are stored in a MongoDB instance. This instance
                  can then be queried to plot the benchmarks and validation
                  results.},
      keywords = {Snakemake workflow (Other) / Metadata processing (Other) /
                  Simulation workflow (Other)},
      cin = {IAS-6},
      cid = {I:(DE-Juel1)IAS-6-20130828},
      pnm = {5231 - Neuroscientific Foundations (POF4-523) / 5232 -
             Computational Principles (POF4-523)},
      pid = {G:(DE-HGF)POF4-5231 / G:(DE-HGF)POF4-5232},
      typ = {PUB:(DE-HGF)33},
      % Bare DOI, lowercase (canonical form; DOI resolution is case-insensitive
      % but lowercase storage avoids duplicates across reference managers).
      doi = {10.5281/zenodo.15110701},
      url = {https://juser.fz-juelich.de/record/1052977},
}