% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
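%
% As a minimal usage sketch (assuming a biblatex setup; the file name
% "references.bib" below is a placeholder for this file), the entry can be
% consumed with the biber backend like so:
%
%   \documentclass{article}
%   % (on older pdfLaTeX installations, add \usepackage[utf8]{inputenc})
%   \usepackage[backend=biber]{biblatex}
%   \addbibresource{references.bib}
%   \begin{document}
%   Gaussian processes can be optimized globally~\cite{Schweidtmann:877529}.
%   \printbibliography
%   \end{document}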

@ARTICLE{Schweidtmann:877529,
      author       = {Schweidtmann, Artur M. and Bongartz, Dominik and Grothe,
                      Daniel and Kerkenhoff, Tim and Lin, Xiaopeng and Najman,
                      Jaromil and Mitsos, Alexander},
      title        = {{G}lobal {O}ptimization of {G}aussian {P}rocesses},
      reportid     = {FZJ-2020-02265},
      year         = {2020},
      abstract     = {Gaussian processes~(Kriging) are interpolating data-driven
                      models that are frequently applied in various disciplines.
                      Often, Gaussian processes are trained on datasets and are
                      subsequently embedded as surrogate models in optimization
                      problems. These optimization problems are nonconvex and
global optimization is desired. However, the previous
                      literature has reported computational burdens that limit
                      deterministic global optimization to Gaussian processes
                      trained on only a few data points. We propose a
                      reduced-space formulation for
                      deterministic global optimization with trained Gaussian
                      processes embedded. For optimization, the branch-and-bound
                      solver branches only on the degrees of freedom and McCormick
                      relaxations are propagated through explicit Gaussian process
                      models. The approach also leads to significantly smaller and
                      computationally cheaper subproblems for lower and upper
                      bounding. To further accelerate convergence, we derive
envelopes of common Gaussian process covariance functions
                      and tight
                      relaxations of acquisition functions used in Bayesian
                      optimization including expected improvement, probability of
                      improvement, and lower confidence bound. In total, we reduce
                      computational time by orders of magnitude compared to
                      state-of-the-art methods, thus overcoming previous
                      computational burdens. We demonstrate the performance and
                      scaling of the proposed method and apply it to Bayesian
                      optimization with global optimization of the acquisition
                      function and chance-constrained programming. The Gaussian
                      process models, acquisition functions, and training scripts
are available open-source within the ``MeLOn - Machine
                      Learning Models for Optimization''
                      toolbox~(https://git.rwth-aachen.de/avt.svt/public/MeLOn).},
      cin          = {IEK-10},
      cid          = {I:(DE-Juel1)IEK-10-20170217},
      pnm          = {899 - ohne Topic (POF3-899)},
      pid          = {G:(DE-HGF)POF3-899},
      typ          = {PUB:(DE-HGF)25},
      eprint       = {2005.10902},
      howpublished = {arXiv:2005.10902},
      archivePrefix = {arXiv},
      SLACcitation = {$\%\%CITATION$ = $arXiv:2005.10902;\%\%$},
      url          = {https://juser.fz-juelich.de/record/877529},
}
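
% For reference, the acquisition functions named in the abstract have these
% standard textbook definitions (a sketch for minimization, not taken from
% the record itself): with GP posterior mean \mu(x), standard deviation
% \sigma(x), incumbent best f^*, and z = (f^* - \mu(x)) / \sigma(x),
%
%   EI(x)  = (f^* - \mu(x)) \Phi(z) + \sigma(x) \varphi(z)   % expected improvement
%   PI(x)  = \Phi(z)                                         % probability of improvement
%   LCB(x) = \mu(x) - \kappa \sigma(x)                       % lower confidence bound
%
% where \Phi and \varphi denote the standard normal CDF and PDF, and
% \kappa > 0 controls the exploration-exploitation trade-off.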