% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
% NOTE(review): the nonstandard fields (reportid, cin, cid, pnm, pid, typ, ddc)
% are repository-internal metadata from the JuSer/FZ-Jülich export; standard
% BibTeX styles silently ignore them, so they are kept as-is.
@article{Zafar:1031282,
  author    = {Zafar, Mandy},
  title     = {Normativity and {AI} Moral Agency},
  journal   = {AI and Ethics},
  volume    = {5},
  issn      = {2730-5953},
  address   = {Cham},
  publisher = {Springer},
  reportid  = {FZJ-2024-05644},
  pages     = {2605--2622},
  year      = {2025},
  abstract  = {The meanings of the concepts of moral agency in application to AI technologies differ vastly from the ones we use for human agents. Minimal definitions of AI moral agency are often connected with other normative agency-related concepts, such as rationality or intelligence, autonomy, or responsibility. This paper discusses the problematic application of minimal concepts of moral agency to AI. I explore why any comprehensive account of AI moral agency has to consider the interconnections to other normative agency-related concepts and beware of four basic detrimental mistakes in the current debate. The results of the analysis are: (1) speaking about AI agency may lead to serious demarcation problems and confusing assumptions about the abilities and prospects of AI technologies; (2) the talk of AI moral agency is based on confusing assumptions and turns out to be senseless in the current prevalent versions. As one possible solution, I propose to replace the concept of AI agency with the concept of AI automated performance (AIAP).},
  keywords  = {AI agency; AI moral agency; Artificial moral agents; Philosophy of artificial intelligence},
  cin       = {INM-7},
  ddc       = {300},
  cid       = {I:(DE-Juel1)INM-7-20090406},
  pnm       = {5254 - Neuroscientific Data Analytics and AI (POF4-525) / 5255 - Neuroethics and Ethics of Information (POF4-525)},
  pid       = {G:(DE-HGF)POF4-5254 / G:(DE-HGF)POF4-5255},
  typ       = {PUB:(DE-HGF)16},
  doi       = {10.1007/s43681-024-00566-8},
  url       = {https://juser.fz-juelich.de/record/1031282},
}