% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@INPROCEEDINGS{Li:908663,
      author       = {Li, Jingwei},
      title        = {{C}ross-ethnicity/race generalization failure of
                      {RSFC}-based behavioral prediction and potential downstream
                      consequences},
      reportid     = {FZJ-2022-02752},
      year         = {2022},
      abstract     = {Algorithmic biases that favor majority populations pose a
                       key challenge to the application of machine learning for
                       precision medicine. In neuroimaging, there is growing
                       interest in the prediction of behavioral phenotypes based
                       on resting-state functional connectivity (RSFC). In that
                       context, predictive models are typically built by
                       capitalizing on large cohorts with mixed ethnic groups, in
                       which the proportions of certain groups, e.g. African
                       Americans (AA), are limited. Here, we investigated the
                       cross-ethnicity/race generalizability of the current,
                       field-standard behavioral prediction approach using two
                       large-scale public datasets from the United States.
                       Specifically, we observed larger prediction errors in AA
                       than in white Americans (WA) for most behavioral measures
                       in both the Human Connectome Project (HCP) and the
                       Adolescent Brain Cognitive Development (ABCD) data. This
                       prediction bias towards WA corresponded to more WA-like
                       brain-behavior association patterns learned by the models.
                       Looking into the direction of prediction errors, concerns
                       arise if the machine-learning prediction results were used
                       uncritically, in particular for the diagnosis of mental
                       disorders. For example, social support measures were more
                       overpredicted for AA than WA, whereas social distress
                       measures such as Perceived Rejection were more
                       underpredicted for AA than WA. Furthermore, African
                       American pre-adolescent participants suffered from more
                       overpredicted social problems, rule-breaking, and
                       aggressive behaviors compared to white participants.
                       Effects of the training populations were also studied by
                       comparing predictive models trained specifically on AA,
                       specifically on WA, or on a mixture of AA and WA of equal
                       size. Although training specifically on AA slightly helped
                       to reduce the biases against AA, most behavioral measures
                       still exhibited larger prediction errors in AA than in WA.
                       Other possible sources of the biases, such as neuroimaging
                       preprocessing (e.g., brain templates and functional
                       atlases) and the design of behavioral measures, need to be
                       examined in the future.},
      month        = {Jun},
      date         = {2022-06-19},
      organization = {Organization for Human Brain Mapping,
                       Glasgow, Scotland (UK), 19 Jun 2022 -
                       23 Jun 2022},
      subtyp       = {After Call},
      cin          = {INM-7},
      cid          = {I:(DE-Juel1)INM-7-20090406},
      pnm          = {5254 - Neuroscientific Data Analytics and AI (POF4-525)},
      pid          = {G:(DE-HGF)POF4-5254},
      typ          = {PUB:(DE-HGF)6},
      url          = {https://juser.fz-juelich.de/record/908663},
}