% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@INPROCEEDINGS{Huber:874426,
      author       = {Huber, Markus and Kohl, Nils and Leleux, Philippe and
                      Rüde, Ulrich and Thönnes, Dominik and Wohlmuth, Barbara},
      title        = {{Massively Parallel Multigrid with Direct Coarse
                      Grid Solvers}},
      volume       = {50},
      address      = {Jülich},
      publisher    = {Forschungszentrum Jülich GmbH Zentralbibliothek, Verlag},
      reportid     = {FZJ-2020-01436},
      series       = {Publication Series of the John von Neumann Institute for
                      Computing (NIC) NIC Series},
      pages        = {335--344},
      year         = {2020},
      comment      = {NIC Symposium 2020},
      booktitle    = {NIC Symposium 2020},
      abstract     = {Multigrid methods play an important role in the
                      numerical approximation of partial differential
                      equations. As long as only a moderate number of
                      processors is used, many alternatives can serve as
                      solvers for the coarsest grid. However, as the number
                      of processors increases, standard coarsening stops
                      while the problem is still large, and the
                      communication overhead for solving the corresponding
                      coarsest grid problem may dominate. In this case, the
                      coarsest grid must be agglomerated onto only a subset
                      of the processors. This article studies the use of
                      sparse direct methods for solving the coarsest grid
                      problem as it arises in a multigrid hierarchy. We use
                      a Stokes-type model as a test case and solve algebraic
                      saddle point systems with up to $O(10^{11})$ degrees
                      of freedom on a current petascale supercomputer. We
                      compare the sparse direct solver with a preconditioned
                      minimal residual iteration and show that the sparse
                      direct method can exhibit better parallel
                      efficiency.},
      month        = {Feb},
      date         = {2020-02-27},
      organization = {NIC Symposium 2020, Jülich (Germany),
                      27 Feb 2020 - 28 Feb 2020},
      cin          = {NIC},
      cid          = {I:(DE-Juel1)NIC-20090406},
      pnm          = {899 - ohne Topic (POF3-899)},
      pid          = {G:(DE-HGF)POF3-899},
      typ          = {PUB:(DE-HGF)8 / PUB:(DE-HGF)7},
      url          = {https://juser.fz-juelich.de/record/874426},
}
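
% A minimal usage sketch for this entry, assuming the biblatex package with
% the biber backend recommended in the header comment above, and a
% hypothetical bibliography file name "references.bib" containing the record
% Huber:874426. This is only an illustration, not part of the exported record.
%
%   \documentclass{article}
%   \usepackage[backend=biber]{biblatex}
%   \addbibresource{references.bib}
%   \begin{document}
%   Massively parallel multigrid with direct coarse grid solvers is
%   discussed in \cite{Huber:874426}.
%   \printbibliography
%   \end{document}
%
% Compile with pdflatex, then biber, then pdflatex again (rerun pdflatex if
% citations are still reported as undefined).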