% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@article{Rathkopf:1005478,
      author       = {Rathkopf, Charles and Heinrichs, Bert},
      title        = {Learning to Live with Strange Error: Beyond
                      Trustworthiness in Artificial Intelligence Ethics},
      journal      = {Cambridge Quarterly of Healthcare Ethics},
      issn         = {0963-1801},
      publisher    = {Cambridge University Press},
      reportid     = {FZJ-2023-01494},
      pages        = {1--13},
      year         = {2023},
      abstract     = {Position papers on artificial intelligence (AI) ethics are
                      often framed as attempts to work out technical and
                      regulatory strategies for attaining what is commonly called
                      trustworthy AI. In such papers, the technical and regulatory
                      strategies are frequently analyzed in detail, but the
                      concept of trustworthy AI is not. As a result, it remains
                      unclear. This paper lays out a variety of possible
                      interpretations of the concept and concludes that none of
                      them is appropriate. The central problem is that, by framing
                      the ethics of AI in terms of trustworthiness, we reinforce
                      unjustified anthropocentric assumptions that stand in the
                      way of clear analysis. Furthermore, even if we insist on a
                      purely epistemic interpretation of the concept, according to
                      which trustworthiness just means measurable reliability, it
                      turns out that the analysis will, nevertheless, suffer from
                      a subtle form of anthropocentrism. The paper goes on to
                      develop the concept of strange error, which serves both to
                      sharpen the initial diagnosis of the inadequacy of
                      trustworthy AI and to articulate the novel epistemological
                      situation created by the use of AI. The paper concludes with
                      a discussion of how strange error puts pressure on standard
                      practices of assessing moral culpability, particularly in
                      the context of medicine.},
      note         = {Published online ahead of volume assignment (FirstView)},
      cin          = {INM-7},
      ddc          = {610},
      cid          = {I:(DE-Juel1)INM-7-20090406},
      pnm          = {5255 - Neuroethics and Ethics of Information (POF4-525)},
      pid          = {G:(DE-HGF)POF4-5255},
      typ          = {PUB:(DE-HGF)16},
      pubmed       = {36621773},
      UT           = {WOS:000911204600001},
      doi          = {10.1017/S0963180122000688},
      url          = {https://juser.fz-juelich.de/record/1005478},
}