% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@ARTICLE{Steinborn:841403,
      author       = {Steinborn, Michael B. and Langner, Robert and Flehmig,
                      Hagen C. and Huestegge, Lynn},
      title        = {{Methodology of Performance Scoring in the d2
                       Sustained-Attention Test: Cumulative-Reliability
                       Functions and Practical Guidelines}},
      journal      = {Psychological Assessment},
      note         = {Advance online publication, April 13, 2017},
      issn         = {1939-134X},
      address      = {Arlington, Va.},
      publisher    = {American Psychological Association},
      reportid     = {FZJ-2017-08478},
      year         = {2017},
      abstract     = {We provide a psychometric analysis of commonly used
                      performance indices of the d2 sustained-attention test
                      and, based on this research, give methodological
                      guidelines and recommendations. We examined experimental
                      effects of repeated testing on performance speed and
                      accuracy (omission and commission errors), and further
                      evaluated aspects of test reliability by means of
                      cumulative-reliability function (CRF) analysis. These
                      aspects were also examined for a number of alternative
                      (yet commonly used) scoring techniques and evaluation
                      methods. Results indicate that performance is sensitive
                      to change, both differentially within (time-on-task) and
                      between (test-retest) sessions. These effects did not
                      severely affect test reliability: perfect score
                      reliability was observed for measures of speed (and was
                      preserved even with half the test length), whereas
                      variability and error scores were more problematic in
                      this respect. Notably, these limitations hold
                      particularly for commission errors and less so for
                      omission errors. Our recommendations to researchers and
                      practitioners are that (a) only the speed score (and the
                      error-corrected speed score) is eligible for highly
                      reliable assessment, that (b) error scores might be used
                      as a secondary measure (e.g., to check for aberrant
                      behavior), that (c) variability scores are best not used
                      at all, and that (d) given the exceptional reliability of
                      performance speed, test length may be reduced by up to
                      $50\%$, if necessary for time-economic reasons, to serve
                      purposes of population screening and field assessment.},
      cin          = {INM-7},
      ddc          = {150},
      cid          = {I:(DE-Juel1)INM-7-20090406},
      pnm          = {574 - Theory, modelling and simulation (POF3-574)},
      pid          = {G:(DE-HGF)POF3-574},
      typ          = {PUB:(DE-HGF)16},
      pubmed       = {pmid:28406669},
      UT           = {WOS:000430001800005},
      doi          = {10.1037/pas0000482},
      url          = {https://juser.fz-juelich.de/record/841403},
}
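
% Note on the abstract's test-length claim: the finding that halving the test
% preserves the reliability of the speed score is the kind of projection that
% is typically made with the Spearman-Brown prophecy formula. The entry itself
% does not state which formula underlies the CRF analysis, so the following is
% only an illustrative sketch under that assumption, using a hypothetical
% full-length reliability of 0.98:
%
%   \rho^{*} = \frac{k \rho}{1 + (k - 1)\rho}
%
% where \rho is the reliability of the full-length test and k is the factor by
% which test length is changed. Halving the test (k = 0.5) with \rho = 0.98
% gives \rho^{*} = 0.49 / 0.51 \approx 0.96, consistent with the abstract's
% statement that speed-score reliability is preserved at half the test length.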