% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
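%
% Example usage (a minimal sketch, not part of the original record: the file
% name "references.bib" and the document class are illustrative assumptions):
%
%   \documentclass{article}
%   \usepackage[backend=biber]{biblatex}  % biber handles UTF-8 entries like this one
%   \addbibresource{references.bib}       % the .bib file containing this record
%   \begin{document}
%   Gain-cell-based analog in-memory attention \cite{Leroux:1038046}.
%   \printbibliography
%   \end{document}
%
% Compile with: pdflatex, then biber, then pdflatex again.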
@ARTICLE{Leroux:1038046,
author = {Leroux, Nathan and Manea, Paul and Sudarshan, Chirag and
Finkbeiner, Jan Robert and Siegel, Sebastian and Strachan,
John Paul and Neftci, Emre},
title = {{Analog In-Memory Computing Attention Mechanism for Fast
          and Energy-Efficient Large Language Models}},
reportid = {FZJ-2025-01095},
year = {2024},
abstract = {Transformer neural networks, driven by self-attention
mechanisms, are core components of foundational and Large
Language Models. In generative transformers, self-attention
uses cache memory to store token projections, avoiding
recomputation at each time step. However, GPU-stored
projections must be loaded into SRAM for each new generation
step, causing latency and energy bottlenecks for long
sequences. In this work, we propose a fast and
energy-efficient hardware implementation of self-attention
using analog in-memory computing based on gain cell
memories. Volatile gain cell memories can be efficiently
written to store new tokens during sequence generation,
while performing analog signed weight multiplications to
compute the dot-products required for self-attention. We
implement Sliding Window Attention, which keeps memory of a
finite set of past steps. A charge-to-pulse converter for
array readout eliminates the need for analog-to-digital
conversion between self-attention stages. Using a
co-designed initialization algorithm to adapt pre-trained
weights to gain cell non-idealities, we achieve NLP
performance comparable to ChatGPT-2 with minimal training
iterations, despite hardware constraints. Our end-to-end
hardware design includes digital controls, and we estimate
its area, latency, and energy. The system reduces attention
latency by up to two orders of magnitude and energy
consumption by up to five orders of magnitude compared to
GPUs, marking a significant step
toward ultra-fast, low-power sequence generation in Large
Language Models.},
cin = {PGI-15 / PGI-14},
cid = {I:(DE-Juel1)PGI-15-20210701 / I:(DE-Juel1)PGI-14-20210412},
pnm = {5234 - Emerging NC Architectures (POF4-523) / BMBF 16ME0404
- Verbundprojekt: Neuro-inspirierte Technologien der
künstlichen Intelligenz für die Elektronik der Zukunft -
NEUROTEC II - (BMBF-16ME0404) / BMBF 16ME0400 -
Verbundprojekt: Neuro-inspirierte Technologien der
künstlichen Intelligenz für die Elektronik der Zukunft -
NEUROTEC II - (16ME0400) / BMBF 03ZU1106CA - NeuroSys:
Algorithm-Hardware Co-Design (Projekt C) - A (03ZU1106CA) /
BMBF 03ZU1106CB - NeuroSys: Algorithm-Hardware Co-Design
(Projekt C) - B (BMBF-03ZU1106CB)},
pid = {G:(DE-HGF)POF4-5234 / G:(DE-82)BMBF-16ME0404 /
G:(BMBF)16ME0400 / G:(BMBF)03ZU1106CA /
G:(DE-Juel1)BMBF-03ZU1106CB},
typ = {PUB:(DE-HGF)25},
doi = {10.34734/FZJ-2025-01095},
url = {https://juser.fz-juelich.de/record/1038046},
}
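%
% For context on the dot-products mentioned in the abstract, a generic
% sliding-window self-attention formulation (standard notation, not taken
% from this record): at generation step $t$, with the last $W$ cached key
% and value projections $K \in \mathbb{R}^{W \times d_k}$,
% $V \in \mathbb{R}^{W \times d_v}$ and current query $q_t$,
%
%   $o_t = \mathrm{softmax}\!\left( q_t K^{\top} / \sqrt{d_k} \right) V$,
%
% which is the signed multiply-accumulate workload the gain cell arrays
% are described as computing in analog.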