% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@INPROCEEDINGS{GarciadeGonzalo:1026685,
      author       = {Garcia de Gonzalo, Simon and Herten, Andreas and Hrywniak,
                      Markus and Kraus, Jiri and Oden, Lena},
      title        = {{E}fficient {D}istributed {GPU} {P}rogramming for
                      {E}xascale},
      reportid     = {FZJ-2024-03500},
      year         = {2023},
      abstract     = {Over the past years, GPUs have become ubiquitous in HPC
                      installations around the world, delivering the majority of
                      the performance of some of the largest supercomputers (e.g.
                      Summit, Sierra, JUWELS Booster). This trend continues in the
                      recently deployed and upcoming Pre-Exascale and Exascale
                      systems (LUMI, Leonardo; Frontier, Perlmutter): GPUs are
                      chosen as the core computing devices to enter this next era
                      of HPC. To take advantage of future GPU-accelerated systems
                      with tens of thousands of devices, application developers
                      need the proper skills and tools to understand,
                      manage, and optimize distributed GPU applications. In this
                      tutorial, participants will learn techniques to efficiently
                      program large-scale multi-GPU systems. While programming
                      multiple GPUs with MPI is explained in detail, advanced
                      tuning techniques and complementary programming models like
                      NCCL and NVSHMEM are also presented. Tools for analysis are
                      shown
                      and used to motivate and implement performance
                      optimizations. The tutorial teaches fundamental concepts
                      that apply to GPU-accelerated systems in general, taking the
                      NVIDIA platform as an example. It is a combination of
                      lectures and hands-on exercises, using one of Europe's
                      fastest supercomputers, JUWELS Booster, for interactive
                      learning and discovery.},
      month        = {Nov},
      date         = {2023-11-12},
      organization = {The International Conference for High
                      Performance Computing, Networking,
                      Storage, and Analysis 2023, Denver, CO
                      (USA), 12 Nov 2023 - 17 Nov 2023},
      subtyp       = {After Call},
      cin          = {JSC},
      cid          = {I:(DE-Juel1)JSC-20090406},
      pnm          = {5121 - Supercomputing \& Big Data Facilities (POF4-512) /
                      5122 - Future Computing \& Big Data Systems (POF4-512) /
                      5112 - Cross-Domain Algorithms, Tools, Methods Labs (ATMLs)
                      and Research Groups (POF4-511) / ATML-X-DEV - ATML
                      Accelerating Devices (ATML-X-DEV)},
      pid          = {G:(DE-HGF)POF4-5121 / G:(DE-HGF)POF4-5122 /
                      G:(DE-HGF)POF4-5112 / G:(DE-Juel-1)ATML-X-DEV},
      typ          = {PUB:(DE-HGF)6},
      doi          = {10.5281/ZENODO.10214076},
      url          = {https://juser.fz-juelich.de/record/1026685},
}