% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@ARTICLE{Herbst:485,
      author       = {Herbst, M. and Gottschalk, S. and Reissel, M. and
                      Hardelauf, H. and Kasteel, R. and Javaux, M. and
                      Vanderborght, J. and Vereecken, H.},
      title        = {{O}n preconditioning for a parallel solution of the
                      {R}ichards equation},
      journal      = {Computers \& Geosciences},
      volume       = {34},
      issn         = {0098-3004},
      address      = {Amsterdam [etc.]},
      publisher    = {Elsevier Science},
      reportid     = {PreJuSER-485},
      pages        = {1958--1963},
      year         = {2008},
      note         = {We gratefully acknowledge the John von Neumann Institute
                      for Computing (NIC), Jülich, for providing the opportunity
                      to use their parallel computing resources.},
      abstract     = {In this paper, we present a class of preconditioning
                      methods for a parallel solution of the three-dimensional
                      Richards equation. The preconditioning methods Jacobi
                      scaling, block-Jacobi, incomplete lower-upper, incomplete
                      Cholesky and algebraic multigrid were applied in combination
                      with a parallel conjugate gradient solver and tested for
                      robustness and convergence using two model scenarios. The
                      first scenario was an infiltration into initially dry, sandy
                      soil discretised into 500,000 nodes. The second scenario
                      comprised spatially distributed soil properties using
                      275,706 numerical nodes and atmospheric boundary conditions.
                      Computational results showed a high efficiency of the
                      nonlinear parallel solution procedure for both scenarios
                      using up to 64 processors. Using 32 processors for the first
                      scenario reduced the wall clock time to slightly more than
                      $1\%$ of the single-processor run. For scenario 2, the use of
                      64 processors reduced the wall clock time to slightly more
                      than $20\%$ of the 8-processor wall clock time. The
                      difference in the efficiency of the various preconditioning
                      methods is moderate but not negligible. The use of the
                      multigrid preconditioning algorithm is recommended, since on
                      average it performed best for both scenarios. © 2008
                      Elsevier Ltd. All rights reserved.},
      keywords     = {J (WoSType)},
      cin          = {ICG-4 / JARA-SIM},
      ddc          = {550},
      cid          = {I:(DE-Juel1)VDB793 / I:(DE-Juel1)VDB1045},
      pnm          = {Terrestrische Umwelt},
      pid          = {G:(DE-Juel1)FUEK407},
      shelfmark    = {Computer Science, Interdisciplinary Applications /
                      Geosciences, Multidisciplinary},
      typ          = {PUB:(DE-HGF)16},
      UT           = {WOS:000261632000030},
      doi          = {10.1016/j.cageo.2008.02.020},
      url          = {https://juser.fz-juelich.de/record/485},
}
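
% The abstract above pairs several preconditioners (Jacobi scaling,
% block-Jacobi, incomplete LU, incomplete Cholesky, algebraic multigrid) with a
% parallel conjugate gradient solver. As an illustration only -- a minimal,
% serial sketch and not the authors' implementation -- the snippet below shows
% Jacobi-preconditioned CG in Python/NumPy. The function name jacobi_pcg and
% the dense-matrix interface are assumptions; the paper's solver is parallel
% and works on large sparse systems. BibTeX ignores text outside @-entries, so
% this block is inert; it uses .dot() rather than the matmul operator so that
% no stray at-sign is mistaken for the start of a new entry.
import numpy as np

def jacobi_pcg(A, b, tol=1e-8, max_iter=1000):
    # Solve A x = b for a symmetric positive definite A using the conjugate
    # gradient method with Jacobi (diagonal) preconditioning, M = diag(A).
    x = np.zeros_like(b, dtype=float)
    r = b - A.dot(x)                  # initial residual
    m_inv = 1.0 / np.diag(A)          # apply M^{-1} by elementwise scaling
    z = m_inv * r                     # preconditioned residual
    p = z.copy()                      # initial search direction
    rz = r.dot(z)
    for _ in range(max_iter):
        Ap = A.dot(p)
        alpha = rz / p.dot(Ap)        # step length along p
        x = x + alpha * p
        r = r - alpha * Ap
        if np.linalg.norm(r) <= tol * np.linalg.norm(b):
            break                     # relative residual below tolerance
        z = m_inv * r
        rz_new = r.dot(z)
        p = z + (rz_new / rz) * p     # next conjugate search direction
        rz = rz_new
    return x

# Hypothetical usage on a small SPD system, for illustration only:
#   A = np.array([[4.0, 1.0], [1.0, 3.0]])
#   b = np.array([1.0, 2.0])
#   x = jacobi_pcg(A, b)              # close to np.linalg.solve(A, b)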