% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@INPROCEEDINGS{Rathkopf:1049026,
      author       = {Rathkopf, Charles},
      title        = {{S}hallow {B}elief in {LLM}s},
      reportid     = {FZJ-2025-05121},
      year         = {2025},
      abstract     = {Do large language models have beliefs? Interpretationist
                      theories hold that belief attribution depends on predictive
                      utility rather than on internal representational format.
                      Because LLMs display impressive linguistic fluency, a
                      straightforward interpretationist view seems to imply that
                      they are doxastic equivalents of humans. This paper argues
                      that this implication is mistaken. I separate two questions.
                      First, do propositional-attitude (PA) models predict LLM
                      behavior better than non-PA alternatives? Second, do PA
                      models yield similar predictive utility for LLMs and for
                      humans? LLMs meet the first condition: PA models outperform
                      n-gram baselines. However, PA models achieve much lower
                      predictive utility for LLMs than for humans. This deficit
                      arises from architectural constraints that prevent LLMs from
                      reconciling contradictions across context boundaries. This
                      limitation produces a form of indeterminacy that is largely
                      absent in human belief. Although humans also face
                      indeterminacy, they possess mechanisms such as embodied
                      action, long-term memory, and continual learning that
                      mitigate it over time. LLMs lack these mechanisms. Parallel
                      considerations apply to desire ascription, which undermines
                      attempts to locate an asymmetry between belief and desire in
                      LLMs. The paper develops a predictive-profile framework that
                      captures this reduced utility as a form of shallow belief.
                      The framework preserves the quasi-rational character of LLMs
                      while avoiding both eliminativism and overattribution.},
      month        = {Dec},
      date         = {2025-12-04},
      organization = {Berlin Philosophy of AI Group, Berlin
                      (Germany), 4 Dec 2025},
      subtyp       = {Other},
      cin          = {INM-7},
      cid          = {I:(DE-Juel1)INM-7-20090406},
      pnm          = {5255 - Neuroethics and Ethics of Information (POF4-525)},
      pid          = {G:(DE-HGF)POF4-5255},
      typ          = {PUB:(DE-HGF)31},
      url          = {https://juser.fz-juelich.de/record/1049026},
}