% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@techreport{Aach:1052307,
      author       = {Aach, Marcel and Adolph, Laurenz and Baumann, Stefan and
                      Benassou, Sabrina and Bernhard, David and Bernhard,
                      Sebastian and Bouzidi, Mohamed-Khalil and Braun, Markus and
                      Brox, Thomas and Burdorf, Sven and Dauner, Daniel and
                      de Graaff, Thies and Doğan, Samed and Derajic, Bojan and
                      Flohr, Fabian and Galesso, Silvio and Ganzer, Malte and
                      Gottschalk, Hanno and Heid, Dominik and Heue, Falk and Hu,
                      Tao and Hubschneider, Christian and Kästingschäfer, Marius
                      and Keser, Mert and Khongsab, Peerayut and Kowol, Kamil and
                      Krause, Felix and Kromm, Edward and Lucente, Giovanni and
                      Lukin, Artem and Mariani, Annajoyce and Mittal, Sudhanshu
                      and Mousakhan, Arian and Mualla, Firas and Molin, Adam and
                      Neuhöfer, Jonas and Niemeijer, Joshua and Ojeda Bernal,
                      Christian and Ommer, Björn and Ourania Tze, Christina and
                      Peyinghaus, Sven and Piecha, Pascal and Prestel, Ulrich and
                      Ramazzina, Andrea and Reichardt, Jörg and Rist, Christoph
                      and Ritter, Werner and Rochau, Dennis and Savarino, Fabrizio
                      and Schenkel, Philipp and Schlauch, Christian and Schmidt,
                      Julian and Taş, Ömer Şahin and Thiel, Laurenz and
                      Vivekanandan, Abhishek and Walz, Stefanie and Wang, Jiangtao
                      and Wang, Zhaoze and Wiederer, Julian and Winter, Katharina
                      and Wunderlich, Carolin and Yadav, Harsh and Yao, Yue and
                      Yang, Yutong},
      title        = {{Automotive} {Foundation} {Models}: {Fundamentals},
                      {Training} {Concepts} and {Applications}},
      institution  = {Forschungszentrum Jülich, Jülich Supercomputing Centre},
      type         = {Whitepaper},
      number       = {1},
      reportid     = {FZJ-2026-00918, Whitepaper 1},
      pages        = {177},
      year         = {2025},
      abstract     = {Generative AI has gained wide attention through large-scale
                      models such as DALL-E 3, StableDiffusion, and GPT-4,
                      creating new opportunities for innovation while raising
                      important ethical questions. At the core of these
                      advancements are foundation models: massive neural
                      networks pretrained on extensive datasets and designed to
                      adapt across diverse tasks and data modalities. Their
                      ability to unify camera, LiDAR, radar, and other sensor
                      data has significant potential for enhancing perception,
                      prediction, and planning in autonomous driving. This
                      report discusses the fundamental concepts underlying
                      foundation models, including their architectural
                      structures, training regimens, and interpretability
                      considerations. It examines current literature for
                      foundation models and their application and use in the
                      domain of autonomous driving. Techniques for generative
                      sensor data synthesis are outlined, demonstrating how
                      artificial datasets can replicate complex driving
                      environments and reduce the expense of collecting
                      real-world samples. Methods for extending these generative
                      approaches to sequential or video data are also
                      highlighted, enabling realistic motion forecasting and
                      scenario simulation. Finally, the report explores how
                      abstract representations, such as semantic maps or
                      symbolic data, can increase explainability and
                      computational efficiency when applied to autonomous
                      driving. This whitepaper aims to provide both foundational
                      understanding and practical guidance of the current
                      state-of-the-art for leveraging generative AI and
                      foundation models in the field of autonomous driving.},
      cin          = {JSC},
      cid          = {I:(DE-Juel1)JSC-20090406},
      pnm          = {5111 - Domain-Specific Simulation \& Data Life Cycle Labs
                      (SDLs) and Research Groups (POF4-511) / 5112 - Cross-Domain
                      Algorithms, Tools, Methods Labs (ATMLs) and Research Groups
                      (POF4-511) / nxtAIM - nxtAIM – NXT GEN AI Methods
                      (19A23014l) / SDL Fluids \& Solids Engineering},
      pid          = {G:(DE-HGF)POF4-5111 / G:(DE-HGF)POF4-5112 /
                      G:(BMWK)19A23014l / G:(DE-Juel-1)SDLFSE},
      typ          = {PUB:(DE-HGF)29},
      url          = {https://juser.fz-juelich.de/record/1052307},
}