% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
@article{Porrmann:902918,
  author    = {Porrmann, Florian and Pilz, Sarah and Stella, Alessandra
               and Kleinjohann, Alexander and Denker, Michael and
               Hagemeyer, Jens and Rückert, Ulrich},
  title     = {Acceleration of the {SPADE} Method Using a
               Custom-Tailored {FP-Growth} Implementation},
  journal   = {Frontiers in Neuroinformatics},
  volume    = {15},
  issn      = {1662-5196},
  address   = {Lausanne},
  publisher = {Frontiers Research Foundation},
  reportid  = {FZJ-2021-04673},
  pages     = {723406},
  year      = {2021},
  abstract  = {The SPADE (spatio-temporal Spike PAttern Detection and
               Evaluation) method was developed to find reoccurring
               spatio-temporal patterns in neuronal spike activity
               (parallel spike trains). However, depending on the number of
               spike trains and the length of recording, this method can
               exhibit long runtimes. Based on a realistic benchmark data
               set, we identified that the combination of pattern mining
               (using the FP-Growth algorithm) and the result filtering
               account for 85--90\% of the method's total runtime.
               Therefore, in this paper, we propose a customized FP-Growth
               implementation tailored to the requirements of SPADE, which
               significantly accelerates pattern mining and result
               filtering. Our version allows for parallel and distributed
               execution, and due to the improvements made, an execution on
               heterogeneous and low-power embedded devices is now also
               possible. The implementation has been evaluated using a
               traditional workstation based on an Intel Broadwell Xeon
               E5-1650 v4 as a baseline. Furthermore, the heterogeneous
               microserver platform RECS|Box has been used for evaluating
               the implementation on two HiSilicon Hi1616 (Kunpeng 916), an
               Intel Coffee Lake-ER Xeon E-2276ME, an Intel Broadwell Xeon
               D-D1577, and three NVIDIA Tegra devices (Jetson AGX Xavier,
               Jetson Xavier NX, and Jetson TX2). Depending on the
               platform, our implementation is between 27 and 200 times
               faster than the original implementation. At the same time,
               the energy consumption was reduced by up to two orders of
               magnitude.},
  cin       = {INM-6 / INM-10 / IAS-6},
  ddc       = {610},
  cid       = {I:(DE-Juel1)INM-6-20090406 / I:(DE-Juel1)INM-10-20170113 /
               I:(DE-Juel1)IAS-6-20130828},
  pnm       = {5231 - Neuroscientific Foundations (POF4-523) / 5235 -
               Digitization of Neuroscience and User-Community Building
               (POF4-523) / VEDLIoT - Very Efficient Deep Learning in IOT
               (957197) / LEGaTO - Low Energy Toolset for Heterogeneous
               Computing (780681) / HBP SGA3 - Human Brain Project Specific
               Grant Agreement 3 (945539) / HAF - Helmholtz Analytics
               Framework (ZT-I-0003) / Brain-Scale Simulations
               (jinb33\_20191101)},
  pid       = {G:(DE-HGF)POF4-5231 / G:(DE-HGF)POF4-5235 /
               G:(EU-Grant)957197 / G:(EU-Grant)780681 / G:(EU-Grant)945539
               / G:(DE-HGF)ZT-I-0003 / G:(DE-Juel1)jinb33\_20191101},
  typ       = {PUB:(DE-HGF)16},
  pubmed    = {34603002},
  UT        = {WOS:000702048700001},
  doi       = {10.3389/fninf.2021.723406},
  url       = {https://juser.fz-juelich.de/record/902918},
}