% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@ARTICLE{Heinrichs:902025,
      author       = {Heinrichs, Jan-Hendrik},
      title        = {{W}hy {D}igital {A}ssistants {N}eed {Y}our {I}nformation to
                      {S}upport {Y}our {A}utonomy},
      journal      = {Philosophy \& Technology},
      volume       = {34},
      issn         = {2210-5433},
      address      = {Heidelberg},
      publisher    = {Springer},
      reportid     = {FZJ-2021-03983},
      pages        = {1687–1705},
      year         = {2021},
      abstract     = {This article investigates how human life is conceptualized
                      in the design and use of digital assistants and how this
                      conceptualization feeds back into the life really lived. It
                      suggests that a specific way of conceptualizing human life
                      — namely as a set of tasks to be optimized — is
                      responsible for the much-criticized information hunger of
                      these digital assistants. The data collection of digital
                      assistants not only raises several privacy issues but also
                      holds the potential to improve people’s degree of
                      self-determination, because the optimization model of daily
                      activity is genuinely suited to a certain mode of
                      self-determination, namely the explicit and reflective
                      setting, pursuing, and monitoring of goals. Furthermore,
                      optimization systems’ need for generation and analysis of
                      data overcomes one of the core weaknesses in human
                      capacities for self-determination, namely problems with
                      objective and quantitative self-assessment. It will be
                      argued that critiques according to which digital assistants
                      threaten to reduce their users’ autonomy tend to ignore
                      that the risks to autonomy are derivative of potential gains
                      in autonomy. These critiques overemphasize a success
                      conception of autonomy. Counter to this
                      conception, being autonomous does not require a choice
                      environment that exclusively supports a person’s
                      “true” preferences, but rather the opportunity to engage
                      with
                      external influences, supportive as well as adverse. In
                      conclusion, it will be argued that ethical evaluations of
                      digital assistants should consider potential gains as well
                      as potential risks for autonomy caused by the use of digital
                      assistants.},
      cin          = {INM-8},
      ddc          = {500},
      cid          = {I:(DE-Juel1)INM-8-20090406},
      pnm          = {5255 - Neuroethics and Ethics of Information (POF4-525)},
      pid          = {G:(DE-HGF)POF4-5255},
      typ          = {PUB:(DE-HGF)16},
      doi          = {10.1007/s13347-021-00481-4},
      url          = {https://juser.fz-juelich.de/record/902025},
}