% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
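%
% As a minimal sketch of how this record can be consumed with biber (the file
% name “references.bib” and the surrounding document below are illustrative
% assumptions, not part of this record):
%
%   \documentclass{article}
%   \usepackage[backend=biber]{biblatex}
%   \addbibresource{references.bib}
%   \begin{document}
%   ICON-MSA is described by \cite{Bishnoi:1021120}.
%   \printbibliography
%   \end{document}
%
% Compile with pdflatex, run “biber” on the job name, then pdflatex again.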

@ARTICLE{Bishnoi:1021120,
      author       = {Bishnoi, Abhiraj and Stein, Olaf and Meyer, Catrin I. and
                      Redler, René and Eicker, Norbert and Haak, Helmuth and
                      Hoffmann, Lars and Klocke, Daniel and Kornblueh, Luis and
                      Suarez, Estela},
      title        = {{E}arth system modeling on modular supercomputing
                      architecture: coupled atmosphere–ocean simulations with
                      {ICON} 2.6.6-rc},
      journal      = {Geoscientific Model Development},
      volume       = {17},
      number       = {1},
      issn         = {1991-959X},
      address      = {Katlenburg-Lindau},
      publisher    = {Copernicus},
      reportid     = {FZJ-2024-00574},
      pages        = {261--273},
      year         = {2024},
      abstract     = {The confrontation of complex Earth system model (ESM) codes
                      with novel supercomputing architectures poses challenges to
                      efficient modeling and job submission strategies. The
                      modular setup of these models naturally fits a modular
                      supercomputing architecture (MSA), which tightly integrates
                      heterogeneous hardware resources into a larger and more
                      flexible high-performance computing (HPC) system. While
                      parts of the ESM codes can easily take advantage of the
                      increased parallelism and communication capabilities of
                      modern GPUs, others lag behind due to long development
                      cycles or are better suited to run on classical CPUs due to
                      their communication and memory usage patterns. To better
                      cope with these imbalances in the development of the
                      model components, we performed benchmark campaigns on the
                      Jülich Wizard for European Leadership Science (JUWELS)
                      modular HPC system. We enabled the weather and climate model
                      Icosahedral Nonhydrostatic (ICON) to run in a coupled
                      atmosphere–ocean setup, where the ocean and the model I/O
                      run on the CPU Cluster, while the atmosphere is
                      simulated simultaneously on the GPUs of JUWELS Booster
                      (ICON-MSA). Both atmosphere and ocean run globally
                      at a resolution of 5 km. In our test case, an optimal
                      configuration in terms of model performance (core hours per
                      simulation day) was found for the combination of 84 GPU
                      nodes on the JUWELS Booster module to simulate the
                      atmosphere and 80 CPU nodes on the JUWELS Cluster module, of
                      which 63 nodes were used for the ocean simulation and the
                      remaining 17 nodes were reserved for I/O. With this
                      configuration, the waiting times of the coupler were
                      minimized. Compared to a simulation performed on CPUs only,
                      the MSA approach reduces energy consumption by 45\,\% with
                      comparable runtimes. ICON-MSA is able to scale up to a
                      significant portion of the JUWELS system, making best use of
                      the available computing resources. A maximum throughput of
                      170 simulation days per day (SDPD) was achieved when running
                      ICON on 335 JUWELS Booster nodes and 268 Cluster nodes.},
      cin          = {JSC},
      ddc          = {550},
      cid          = {I:(DE-Juel1)JSC-20090406},
      pnm          = {5111 - Domain-Specific Simulation \& Data Life Cycle Labs
                      (SDLs) and Research Groups (POF4-511) / 5122 - Future
                      Computing \& Big Data Systems (POF4-512) / AIDAS - Joint
                      Virtual Laboratory for AI, Data Analytics and Scalable
                      Simulation (aidas\_20200731)},
      pid          = {G:(DE-HGF)POF4-5111 / G:(DE-HGF)POF4-5122 /
                      G:(DE-Juel-1)aidas\_20200731},
      typ          = {PUB:(DE-HGF)16},
      UT           = {WOS:001166577100001},
      doi          = {10.5194/gmd-17-261-2024},
      url          = {https://juser.fz-juelich.de/record/1021120},
}