% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@MISC{Comito:1019994,
      author       = {Comito, Claudia and Coquelin, Daniel and Tarnawa, Michael
                      and Götz, Markus and Blind, Lena and Debus, Charlie and
                      Hagemeier, Björn and Krajsek, Kai and Ohm, Jakob and Hoppe,
                      Fabian and von der Lehr, Fabrice and Neo, Sun Han and
                      Zutshi, Arnav and Bourgart, Benjamin and Siggel, Martin and
                      Ashwath, V. A. and Schmitz, Simon and Gutiérrez Hermosillo
                      Muriedas, Juan Pedro and Spataro, Luca and Markgraf,
                      Sebastian and Shah, Pratham and Suraj, Sai and Schlimbach,
                      Frank and Glock, Philipp and Kunjadiya, Dhruv and
                      Ishaan-Chandak},
      title        = {helmholtz-analytics/heat: {S}calable {SVD}, {GS}o{C}'22
                      contributions, {D}ocker image, {P}y{T}orch 2 support, {AMD}
                      {GPU}s acceleration (v1.3.0); 1.3.0},
      reportid     = {FZJ-2023-05809},
      year         = {2023},
      abstract     = {This release includes many important updates (see below).
                      We would particularly like to thank our enthusiastic
                      GSoC2022 / tentative GSoC2023 contributors @Mystic-Slice,
                      @neosunhan, @Sai-Suraj-27, @shahpratham, @AsRaNi1 and
                      @Ishaan-Chandak 🙏🏼 Thank you so much!
                      Highlights:
                      #1155 Support PyTorch 2.0.1 (by @ClaudiaComito);
                      #1152 Support AMD GPUs (by @mtar);
                      #1126 Distributed hierarchical SVD (by @mrfh92);
                      #1028 Introducing the sparse module: Distributed
                      Compressed Sparse Row Matrix (by @Mystic-Slice);
                      performance improvements:
                      #1125 distributed heat.reshape() speed-up (by
                      @ClaudiaComito),
                      #1141 heat.pow() speed-up when exponent is int (by
                      @ClaudiaComito, @coquelin77),
                      #1119 heat.array() default to copy=None (i.e., copy only
                      if necessary) (by @ClaudiaComito, @neosunhan);
                      #970 Dockerfile and accompanying documentation (by
                      @bhagemeier).
                      Changelog:
                      Array-API compliance / Interoperability:
                      #1154 Introduce \texttt{DNDarray.\_\_array\_\_()} method
                      for interoperability with numpy, xarray (by
                      @ClaudiaComito);
                      #1147 Adopt NEP29, drop support for PyTorch 1.7 and
                      Python 3.6 (by @mtar);
                      #1119 ht.array() default to copy=None (i.e., copy only if
                      necessary) (by @ClaudiaComito);
                      #1020 Implement \texttt{broadcast\_arrays},
                      \texttt{broadcast\_to} (by @neosunhan);
                      #1008 API: Rename keepdim kwarg to keepdims (by
                      @neosunhan);
                      #788 Interface for DPPY interoperability (by @coquelin77,
                      @fschlimb).
                      New Features:
                      #1126 Distributed hierarchical SVD (by @mrfh92);
                      #1020 Implement \texttt{broadcast\_arrays},
                      \texttt{broadcast\_to} (by @neosunhan);
                      #983 Signal processing: fully distributed 1D convolution
                      (by @shahpratham);
                      #1063 add eq to Device (by @mtar).
                      Bug Fixes:
                      #1141 heat.pow() speed-up when exponent is int (by
                      @ClaudiaComito);
                      #1136 Fixed PyTorch version check in sparse module (by
                      @Mystic-Slice);
                      #1098 Validates number of dimensions in input to
                      \texttt{ht.sparse.sparse\_csr\_matrix} (by
                      @Ishaan-Chandak);
                      #1095 Convolve with distributed kernel on multiple GPUs
                      (by @shahpratham);
                      #1094 Fix division precision error in random module (by
                      @Mystic-Slice);
                      #1075 Fixed initialization of DNDarrays communicator in
                      some routines (by @AsRaNi1);
                      #1066 Verify input object type and layout + Supporting
                      tests (by @Mystic-Slice);
                      #1037 Distributed weighted average() along tuple of axes:
                      shape of weights to match shape of input (by
                      @Mystic-Slice).
                      Benchmarking:
                      #1137 Continuous Benchmarking of runtime (by
                      @JuanPedroGHM).
                      Documentation:
                      #1150 Refactoring for efficiency and readability (by
                      @Sai-Suraj-27);
                      #1130 Reintroduce Quick Start (by @ClaudiaComito);
                      #1079 A better README file (by @Sai-Suraj-27).
                      Linear Algebra:
                      #1126, #1160 Distributed hierarchical SVD (by @mrfh92,
                      @ClaudiaComito).
                      Contributors: @AsRaNi1, @ClaudiaComito, @Ishaan-Chandak,
                      @JuanPedroGHM, @Mystic-Slice, @Sai-Suraj-27, @bhagemeier,
                      @coquelin77, @mrfh92, @mtar, @neosunhan, @shahpratham},
      cin          = {JSC},
      cid          = {I:(DE-Juel1)JSC-20090406},
      pnm          = {5111 - Domain-Specific Simulation \& Data Life Cycle Labs
                      (SDLs) and Research Groups (POF4-511)},
      pid          = {G:(DE-HGF)POF4-5111},
      typ          = {PUB:(DE-HGF)33},
      doi          = {10.5281/ZENODO.8060498},
      url          = {https://juser.fz-juelich.de/record/1019994},
}
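
% The abstract above cites two interoperability changes: the new
% DNDarray.__array__() method (#1154) and ht.array() defaulting to copy=None
% (#1119).  Below is a minimal, hedged Python sketch of how they might be
% exercised; names not cited in the abstract (ht.arange, ht.float32) are
% assumptions about Heat's NumPy-like API, so check the v1.3.0 docs if they
% differ.
%
%   import numpy as np
%   import heat as ht
%
%   x = ht.arange(6, dtype=ht.float32)  # a DNDarray (not split across processes)
%   y = np.asarray(x)                   # np.asarray() can consume the DNDarray
%                                       # via its __array__() method
%   print(type(y).__name__, y)          # expected: a NumPy ndarray of the data
%
%   a = np.ones((2, 3), dtype=np.float32)
%   b = ht.array(a)                     # with the copy=None default, the input
%                                       # is copied only if a copy is required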
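
% PR #1020 in the notes above implements broadcast_arrays and broadcast_to.
% A short, hedged usage sketch follows; the top-level ht.* namespace is assumed
% to mirror NumPy's, as is the DNDarray.reshape() call, so consult the v1.3.0
% documentation if the names differ.
%
%   import heat as ht
%
%   row = ht.arange(3)                   # shape (3,)
%   col = ht.arange(2).reshape((2, 1))   # shape (2, 1)
%
%   tiled = ht.broadcast_to(row, (2, 3))      # broadcast a 1-D array to (2, 3)
%   a, b = ht.broadcast_arrays(row, col)      # broadcast both operands to (2, 3)
%   print(tiled.shape, a.shape, b.shape)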