% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
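%
% A minimal usage sketch, assuming this file is saved as "references.bib" and
% cited from a biblatex document compiled with biber (file and document names
% below are illustrative, not prescriptive):
%
%   \documentclass{article}
%   \usepackage[backend=biber]{biblatex}
%   \addbibresource{references.bib}
%   \begin{document}
%   The ACCENT protocol \autocite{Grewe:21140} defines eight evaluation steps.
%   \printbibliography
%   \end{document}
%
%   % compile with: pdflatex main && biber main && pdflatex main && pdflatex main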

@ARTICLE{Grewe:21140,
      author       = {Grewe, V. and Moussiopoulos, N. and Builtjes, P. and
                      Borrego, C. and Isaksen, I.S.A. and Volz-Thomas, A.},
      title        = {{T}he {ACCENT}-protocol: a framework for benchmarking and
                      model evaluation},
      journal      = {Geoscientific Model Development},
      volume       = {5},
      issn         = {1991-959X},
      address      = {Katlenburg-Lindau},
      publisher    = {Copernicus},
      reportid     = {PreJuSER-21140},
      pages        = {611--618},
      year         = {2012},
      note         = {This work was supported by the Network of Excellence ACCENT
                      and the DLR-project ESMVal.},
      abstract     = {We summarise results from a workshop on 'Model Benchmarking
                      and Quality Assurance' of the EU-Network of Excellence
                      ACCENT, including results from other activities (e.g. COST
                      Action 732) and publications. A formalised evaluation
                      protocol is presented, i.e. a generic formalism describing
                      the procedure of how to perform a model evaluation. This
                      includes eight steps and examples from global model
                      applications which are given for illustration. The first and
                      important step is concerning the purpose of the model
                      application, i.e. the addressed underlying scientific or
                      political question. We give examples to demonstrate that
                      there is no model evaluation per se, i.e. without a focused
                      purpose. Model evaluation is testing, whether a model is fit
                      for its purpose. The following steps are deduced from the
                      purpose and include model requirements, input data, key
                      processes and quantities, benchmark data, quality
                      indicators, sensitivities, as well as benchmarking and
                      grading. We define 'benchmarking' as the process of
                      comparing the model output against either observational data
                      or high fidelity model data, i.e. benchmark data. Special
                      focus is given to the uncertainties, e.g. in observational
                      data, which have the potential to lead to wrong conclusions
                      in the model evaluation if not considered carefully.},
      keywords     = {J (WoSType)},
      cin          = {IEK-8},
      ddc          = {910},
      cid          = {I:(DE-Juel1)IEK-8-20101013},
      pnm          = {Atmosphäre und Klima},
      pid          = {G:(DE-Juel1)FUEK491},
      shelfmark    = {Geosciences, Multidisciplinary},
      typ          = {PUB:(DE-HGF)16},
      UT           = {WOS:000304061200004},
      doi          = {10.5194/gmd-5-611-2012},
      url          = {https://juser.fz-juelich.de/record/21140},
}