% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
%
% NOTE: reportid/subtyp/cin/cid/pnm/pid/typ are repository-specific (JuSER)
% fields; standard styles silently ignore them.
@inproceedings{Rathkopf:1005477,
  author       = {Rathkopf, Charles},
  title        = {Do {Large Language Models} Understand Meaning?},
  reportid     = {FZJ-2023-01493},
  year         = {2023},
  abstract     = {It is curiously difficult to articulate the capacities of
                  large language models without getting yourself into
                  philosophically controversial terrain. In this talk I
                  explain why. The talk has three parts. In the first, I give
                  a sketch of how large language models are built, with
                  particular attention to the way words are represented as
                  vector quantities. In the second, I describe the various
                  ways in which the capacities of language models have been
                  tested empirically. In the third, I provide the main
                  philosophical argument. I argue that, in order to understand
                  what large language models are, we must reject the seemingly
                  innocent metaphysical principle that everything in the world
                  either has a mind or it does not.},
  month        = jan,
  date         = {2023-01-13},
  organization = {Kimball Union Academy, online event (USA), 13 Jan 2023},
  subtyp       = {Other},
  cin          = {INM-7},
  cid          = {I:(DE-Juel1)INM-7-20090406},
  pnm          = {5255 - Neuroethics and Ethics of Information (POF4-525)},
  pid          = {G:(DE-HGF)POF4-5255},
  typ          = {PUB:(DE-HGF)31},
  url          = {https://juser.fz-juelich.de/record/1005477},
}