% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
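%
% A minimal sketch of how this entry might be used with biblatex and biber
% (the file name "references.bib" and the document body are assumptions, not
% part of this record):
%
%   \documentclass{article}
%   \usepackage[backend=biber]{biblatex}
%   \addbibresource{references.bib}
%   \begin{document}
%   Auditory fMRI dataset \cite{Kuo:866006}.
%   \printbibliography
%   \end{document}
%
% Compile with pdflatex, then biber, then pdflatex again; with classic BibTeX,
% the 8-bit-aware "bibtex8" binary can be substituted for "bibtex".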
@ARTICLE{Kuo:866006,
author = {Kuo, Po-Chih and Tseng, Yi-Li and Zilles, Karl and Suen,
Summit and Eickhoff, Simon B. and Lee, Juin-Der and Cheng,
Philip E. and Liou, Michelle},
title = {{L}istening to real-world sounds: f{MRI} data for analyzing
connectivity networks},
journal = {Data in Brief},
volume = {26},
issn = {2352-3409},
      address      = {Amsterdam [et al.]},
publisher = {Elsevier},
reportid = {FZJ-2019-05266},
pages = {104411},
year = {2019},
      note         = {We are indebted to the Research Center for Mind, Brain \&
                      Learning at the National Chengchi University for their great
                      assistance during fMRI data acquisition. This research was
supported by grants MOST-105-2410-H-001-036 and
MOST-106-2410-H-001-026 from the Ministry of Science and
Technology, Taiwan, and by funding from the European
Union’s Horizon 2020 Research and Innovation Programme
under Grant Agreement No. 785907 (HBP SGA2).},
abstract = {There is a growing interest in functional magnetic
resonance imaging (fMRI) studies on connectivity networks in
the brain when subjects are under exposure to natural
sensory stimulation. Because of a complicated coupling
between spontaneous and evoked brain activity under
real-world stimulation, there is no critical mapping between
the experimental inputs and corresponding brain responses.
The dataset contains auditory fMRI scans and T1-weighted
anatomical scans acquired under eyes-closed and eyes-open
                      conditions. Within each scanning condition, the subject was
                      presented with 12 different sound clips, including human voices
                      followed by animal vocalizations. The dataset is intended for
                      assessing brain dynamics and connectivity networks
                      under natural sound stimulation; it also allows for
empirical investigation of changes in fMRI responses between
                      eyes-closed and eyes-open conditions, between animal
                      vocalizations and human voices, and among the 12
                      different sound clips during auditory stimulation. The
dataset is a supplement to the research findings in the
paper “Brain dynamics and connectivity networks under
natural auditory stimulation” published in NeuroImage.},
cin = {INM-7 / INM-1},
ddc = {570},
cid = {I:(DE-Juel1)INM-7-20090406 / I:(DE-Juel1)INM-1-20090406},
pnm = {571 - Connectivity and Activity (POF3-571) / HBP SGA2 -
Human Brain Project Specific Grant Agreement 2 (785907)},
pid = {G:(DE-HGF)POF3-571 / G:(EU-Grant)785907},
typ = {PUB:(DE-HGF)16},
pubmed = {pmid:31646154},
UT = {WOS:000495079400133},
doi = {10.1016/j.dib.2019.104411},
url = {https://juser.fz-juelich.de/record/866006},
}