% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
@ARTICLE{Yeung:907986,
author = {Yeung, Andy Wai Kan and More, Shammi and Wu, Jianxiao and
Eickhoff, Simon},
title = {{R}eporting details of neuroimaging studies on individual
traits prediction: {A} literature survey},
journal = {NeuroImage},
volume = {256},
issn = {1053-8119},
address = {Orlando, Fla.},
publisher = {Academic Press},
reportid = {FZJ-2022-02310},
pages = {119275},
year = {2022},
abstract = {Using machine-learning tools to predict individual
phenotypes from neuroimaging data is one of the most
promising and hence dynamic fields in systems neuroscience.
Here, we perform a literature survey of the rapidly growing
work on phenotype prediction in healthy subjects or the
general population to sketch out the current state and
ongoing developments in terms of data, analysis methods and
reporting. Excluding papers on age prediction and clinical
applications, which form a distinct literature, we
identified a total of 108 papers published since 2007. In
these, memory, fluid intelligence and attention were the most
common phenotypes to be predicted, which resonates with the
observation that roughly a quarter of the papers used data
from the Human Connectome Project, while roughly another
half recruited their own cohorts. Sample size (in terms of
training and external test sets) and prediction accuracy
(from internal and external validation, respectively) did
not show significant temporal trends. Prediction accuracy was
negatively correlated with sample size of the training set,
but not the external test set. While known to be optimistic,
leave-one-out cross-validation (LOO CV) was the prevalent
strategy for model validation (n = 48). Meanwhile, 27
studies used external validation with an external test set.
Both numbers showed no significant temporal trends. The most
popular learning algorithm was connectome-based predictive
modeling introduced by the Yale team. Other common learning
algorithms were linear regression, relevance vector
regression (RVR), support vector regression (SVR), least
absolute shrinkage and selection operator (LASSO), and
elastic net. Meanwhile, the amount of data from
self-recruiting studies (but not studies using open, shared
datasets) was positively correlated with internal validation
prediction accuracy. At the same time, self-recruiting
studies also reported a significantly higher internal
validation prediction accuracy than those using open, shared
datasets. Data type and participant age did not
significantly influence prediction accuracy. Confound
control also did not influence prediction accuracy after
adjusting for other factors. To conclude, most of the current
literature is probably quite optimistic with internal
validation using LOO CV. More efforts should be made to
encourage the use of external validation with external test
sets to further improve generalizability of the
models.},
keywords = {Individual trait; Neuroimaging; Prediction; Predictive
modeling; Survey},
cin = {INM-7},
ddc = {610},
cid = {I:(DE-Juel1)INM-7-20090406},
pnm = {5252 - Brain Dysfunction and Plasticity (POF4-525) / DFG
project 432015680 - Automatisierte Gehirnalterung-Vorhersage
und deren Interpretation},
pid = {G:(DE-HGF)POF4-5252 / G:(GEPRIS)432015680},
typ = {PUB:(DE-HGF)16},
pubmed = {pmid:35513295},
UT = {WOS:000830858700007},
doi = {10.1016/j.neuroimage.2022.119275},
url = {https://juser.fz-juelich.de/record/907986},
}