% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@ARTICLE{Heinrichs:1007861,
      author       = {Heinrichs, Jan-Hendrik},
      title        = {{H}ammer or {M}easuring {T}ape? {A}rtificial {I}ntelligence
                      and {J}ustice in {H}ealthcare},
      journal      = {Cambridge Quarterly of Healthcare Ethics},
      volume       = {16},
      issn         = {0963-1801},
      address      = {Getzville, NY},
      publisher    = {HeinOnline},
      reportid     = {FZJ-2023-02223},
      pages        = {1--12},
      year         = {2023},
      abstract     = {Artificial intelligence (AI) is a powerful tool for several
                      healthcare tasks. AI tools are suited to optimize predictive
                      models in medicine. Ethical debates about AI’s extension
                      of the predictive power of medical models suggest a need to
                      adapt core principles of medical ethics. This article
                      demonstrates that a popular interpretation of the principle
                      of justice in healthcare needs amendment given the effect of
                      AI on decision-making. The procedural approach to justice,
                      exemplified by Norman Daniels and James Sabin’s
                      accountability for reasonableness conception, needs
                      amendment because, as research into algorithmic fairness
                      shows, it is insufficiently sensitive to differential
                      effects of seemingly just principles on different groups of
                      people. The same line of research generates methods to
                      quantify differential effects and make them amenable to
                      correction. Thus, what is needed to improve the principle of
                      justice is a combination of procedures for selecting just
                      criteria and principles and the use of algorithmic tools to
                      measure the real impact these criteria and principles have.
                      In this article, the author shows that algorithmic tools do
                      not merely raise issues of justice but can also be used in
                      their mitigation by informing us about the real effects
                      certain distributional principles and criteria would
                      create.},
      cin          = {INM-7},
      ddc          = {610},
      cid          = {I:(DE-Juel1)INM-7-20090406},
      pnm          = {5255 - Neuroethics and Ethics of Information (POF4-525)},
      pid          = {G:(DE-HGF)POF4-5255},
      typ          = {PUB:(DE-HGF)16},
      pubmed       = {37190871},
      UT           = {WOS:001010547100001},
      doi          = {10.1017/S0963180123000257},
      url          = {https://juser.fz-juelich.de/record/1007861},
}