% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
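%
% A minimal sketch of how this entry could be cited from a LaTeX document,
% assuming the biblatex package with the biber backend recommended above; the
% file name “references.bib” and the numeric citation style are assumptions,
% not part of this record:
%
%   \documentclass{article}
%   \usepackage[backend=biber, style=numeric]{biblatex}
%   \addbibresource{references.bib}  % this file, saved under an assumed name
%   \begin{document}
%   GPU programming-model compatibility is surveyed in \cite{Herten:1018971}.
%   \printbibliography
%   \end{document}
%
% A typical compilation sequence would be: pdflatex, biber, pdflatex.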

@INPROCEEDINGS{Herten:1018971,
      author       = {Herten, Andreas},
      title        = {{M}any {C}ores, {M}any {M}odels: {GPU} {P}rogramming
                      {M}odel vs. {V}endor {C}ompatibility {O}verview},
      publisher    = {ACM},
      address      = {New York, NY, USA},
      reportid     = {FZJ-2023-05040},
      pages        = {1019–1026},
      year         = {2023},
      note         = {arXiv: https://arxiv.org/abs/2309.05445; HTML version of
                      the table: https://x-dev.pages.jsc.fz-juelich.de/models/;
                      data repository: https://github.com/AndiH/gpu-lang-compat},
      booktitle    = {Proceedings of the SC '23 Workshops of The International
                      Conference on High Performance Computing, Network,
                      Storage, and Analysis},
      abstract     = {In recent history, GPUs have become a key driver of
                      compute performance in HPC. With the installation of the
                      Frontier supercomputer, they became the enablers of the
                      Exascale era; further large-scale installations are in
                      progress (Aurora, El Capitan, JUPITER). But the early
                      dominance of NVIDIA and its CUDA programming model has
                      changed: the current HPC GPU landscape features three
                      vendors (AMD, Intel, NVIDIA), each with native and
                      derived programming models. The choices are ample, but
                      not every model is supported on every platform,
                      especially if support for Fortran is needed; in
                      addition, some restrictions may apply. It is hard for
                      scientific programmers to navigate this abundance of
                      choices and limitations. This paper provides a guide by
                      matching the GPU platforms with their supported
                      programming models, presented in a concise table and
                      elaborated in detailed comments. An assessment is made
                      of the level of support of each model on each platform.},
      month        = {Nov},
      date         = {2023-11-12},
      organization = {SC-W 2023: Workshops of The International Conference on
                      High Performance Computing, Network, Storage, and
                      Analysis, Denver, CO (USA), 12 Nov 2023 - 17 Nov 2023},
      cin          = {JSC},
      cid          = {I:(DE-Juel1)JSC-20090406},
      pnm          = {5122 - Future Computing \& Big Data Systems (POF4-512) /
                      5112 - Cross-Domain Algorithms, Tools, Methods Labs (ATMLs)
                      and Research Groups (POF4-511) / ATML-X-DEV - ATML
                      Accelerating Devices (ATML-X-DEV)},
      pid          = {G:(DE-HGF)POF4-5122 / G:(DE-HGF)POF4-5112 /
                      G:(DE-Juel-1)ATML-X-DEV},
      typ          = {PUB:(DE-HGF)8 / PUB:(DE-HGF)7},
      doi          = {10.1145/3624062.3624178},
      url          = {https://juser.fz-juelich.de/record/1018971},
}