% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@inproceedings{Gtz:902212,
      author       = {Götz, Markus and Debus, Charlotte and Coquelin, Daniel and
                      Krajsek, Kai and Comito, Claudia and Knechtges, Philipp and
                      Hagemeier, Björn and Tarnawa, Michael and Hanselmann, Simon
                      and Siggel, Martin and Basermann, Achim and Streit, Achim},
      title        = {{HeAT} -- a Distributed and {GPU}-accelerated Tensor
                      Framework for Data Analytics},
      booktitle    = {2020 {IEEE} International Conference on Big Data (Big
                      Data)},
      publisher    = {IEEE},
      reportid     = {FZJ-2021-04100},
      isbn         = {978-1-7281-6251-5},
      pages        = {276--287},
      year         = {2020},
      month        = dec,
      abstract     = {To cope with the rapid growth in available data, the
                      efficiency of data analysis and machine learning libraries
                      has recently received increased attention. Although great
                      advancements have been made in traditional array-based
                      computations, most are limited by the resources available on
                      a single computation node. Consequently, novel approaches
                      must be made to exploit distributed resources, e.g.
                      distributed memory architectures. To this end, we introduce
                      HeAT, an array-based numerical programming framework for
                      large-scale parallel processing with an easy-to-use
                      NumPy-like API. HeAT utilizes PyTorch as a node-local eager
                      execution engine and distributes the workload on arbitrarily
                      large high-performance computing systems via MPI. It
                      provides both low-level array computations, as well as
                      assorted higher-level algorithms. With HeAT, it is possible
                      for a NumPy user to take full advantage of their available
                      resources, significantly lowering the barrier to distributed
                      data analysis. When compared to similar frameworks, HeAT
                      achieves speedups of up to two orders of magnitude.},
      date         = {2020-12-10},
      organization = {2020 IEEE International Conference on
                      Big Data (Big Data), Atlanta (GA), 10
                      Dec 2020 - 13 Dec 2020},
      cin          = {JSC},
      cid          = {I:(DE-Juel1)JSC-20090406},
      pnm          = {5112 - Cross-Domain Algorithms, Tools, Methods Labs (ATMLs)
                      and Research Groups (POF4-511) / HAF - Helmholtz Analytics
                      Framework (ZT-I-0003) / SLNS - SimLab Neuroscience
                      (Helmholtz-SLNS)},
      pid          = {G:(DE-HGF)POF4-5112 / G:(DE-HGF)ZT-I-0003 /
                      G:(DE-Juel1)Helmholtz-SLNS},
      typ          = {PUB:(DE-HGF)8 / PUB:(DE-HGF)7},
      UT           = {WOS:000662554700042},
      doi          = {10.1109/BigData50022.2020.9378050},
      url          = {https://juser.fz-juelich.de/record/902212},
}