% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
@ARTICLE{Bhattacharya:1038334,
author = {Bhattacharya, Tinish and Hutchinson, George H. and
Pedretti, Giacomo and Sheng, Xia and Ignowski, Jim and
Van Vaerenbergh, Thomas and Beausoleil, Ray and Strachan,
John Paul and Strukov, Dmitri B.},
title = {{C}omputing high-degree polynomial gradients in memory},
journal = {Nature Communications},
volume = {15},
number = {1},
issn = {2041-1723},
address = {London},
publisher = {Springer Nature},
reportid = {FZJ-2025-01332},
pages = {8211},
year = {2024},
abstract = {Specialized function gradient computing hardware could
greatly improve the performance of state-of-the-art
optimization algorithms. Prior work on such hardware,
performed in the context of Ising Machines and related
concepts, is limited to quadratic polynomials and is not
scalable to commonly used higher-order functions. Here, we
propose an approach for massively parallel gradient
calculations of high-degree polynomials, which is conducive
to efficient mixed-signal in-memory computing circuit
implementations and whose area scales proportionally with
the product of the number of variables and terms in the
function and is, most importantly, independent of its degree.
Two flavors of such an approach are proposed. The first is
limited to binary-variable polynomials typical in
combinatorial optimization problems, while the second type
is broader at the cost of a more complex periphery. To
validate the former approach, we experimentally demonstrate
solving a small-scale third-order Boolean satisfiability
problem based on integrated metal-oxide memristor crossbar
circuits, with a competitive heuristic algorithm. Simulation
results for larger-scale, more practical problems show
orders of magnitude improvements in area, speed and energy
efficiency compared to the state of the art. We discuss how
our work could enable even higher-performance systems after
co-designing algorithms to exploit massively parallel
gradient computation.},
cin = {PGI-14},
ddc = {500},
cid = {I:(DE-Juel1)PGI-14-20210412},
pnm = {5234 - Emerging NC Architectures (POF4-523) / 5232 -
Computational Principles (POF4-523)},
pid = {G:(DE-HGF)POF4-5234 / G:(DE-HGF)POF4-5232},
typ = {PUB:(DE-HGF)16},
pubmed = {39294142},
UT = {WOS:001315990000007},
doi = {10.1038/s41467-024-52488-y},
url = {https://juser.fz-juelich.de/record/1038334},
}