% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
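%
% For illustration, a minimal driver document that consumes this file via
% biblatex with the biber backend (a hedged sketch; the file names main.tex
% and references.bib are assumptions, not part of this record):
%
%   \documentclass{article}
%   \usepackage[utf8]{inputenc} % default since LaTeX 2018; harmless otherwise
%   \usepackage[backend=biber]{biblatex}
%   \addbibresource{references.bib} % this file, saved under an assumed name
%   \begin{document}
%   Explainability in AI remains contested~\cite{Raz:1023670}.
%   \printbibliography
%   \end{document}
%
% Typical compile sequence: pdflatex main && biber main && pdflatex main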

@ARTICLE{Raz:1023670,
      author       = {Raz, Aviad and Heinrichs, Bert and Avnoon, Netta and Eyal,
                      Gil and Inbar, Yael},
      title        = {{P}rediction and explainability in {AI}: {S}triking a new
                      balance?},
      journal      = {Big Data \& Society},
      volume       = {11},
      number       = {1},
      issn         = {2053-9517},
      address      = {München},
      publisher    = {GBI-Genios Deutsche Wirtschaftsdatenbank GmbH},
      reportid     = {FZJ-2024-01746},
      pages        = {20539517241235871},
      year         = {2024},
      abstract     = {The debate regarding prediction and explainability in
                      artificial intelligence (AI) centers around the trade-off
                       between achieving accurate, high-performing models and the
                       ability to understand and interpret the decision-making
                      process of those models. In recent years, this debate has
                      gained significant attention due to the increasing adoption
                      of AI systems in various domains, including healthcare,
                      finance, and criminal justice. While prediction and
                      explainability are desirable goals in principle, the recent
                       spread of high-accuracy yet opaque machine learning (ML)
                      algorithms has highlighted the trade-off between the two,
                       marking this debate as an interdisciplinary,
                       interprofessional arena for negotiating expertise. There is
                       no longer agreement about what the “default” balance of
                       prediction and explainability should be, with various
                       positions reflecting claims to professional
                      jurisdiction. Overall, there appears to be a growing schism
                      between the regulatory and ethics-based call for
                      explainability as a condition for trustworthy AI, and how it
                      is being designed, assimilated, and negotiated. The impetus
                      for writing this commentary comes from recent suggestions
                      that explainability is overrated, including the argument
                      that explainability is not guaranteed in human healthcare
                      experts either. To shed light on this debate, its premises,
                      and its recent twists, we provide an overview of key
                      arguments representing different frames, focusing on AI in
                      healthcare.},
      cin          = {INM-7},
      ddc          = {004},
      cid          = {I:(DE-Juel1)INM-7-20090406},
      pnm          = {5255 - Neuroethics and Ethics of Information (POF4-525)},
      pid          = {G:(DE-HGF)POF4-5255},
      typ          = {PUB:(DE-HGF)16},
      UT           = {WOS:001175848600001},
      doi          = {10.1177/20539517241235871},
      url          = {https://juser.fz-juelich.de/record/1023670},
}