% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@ARTICLE{Hamdan:1023472,
      author       = {Hamdan, Sami and More, Shammi and Sasse, Leonard and
                      Komeyer, Vera and Patil, Kaustubh and Raimondo, Federico},
      title        = {{J}ulearn: an easy-to-use library for leakage-free
                      evaluation and inspection of {ML} models},
      journal      = {GigaByte},
      issn         = {2709-4715},
      address      = {[Place of publication not identified]},
      publisher    = {GigaScience Press},
      reportid     = {FZJ-2024-01705},
      year         = {2024},
      note         = {This work was partly supported by the Helmholtz-AI project
                      DeGen (ZT-I-PF-5-078), the Helmholtz Portfolio Theme
                      “Supercomputing and Modeling for the Human Brain”, the
                      Deutsche Forschungsgemeinschaft (DFG, German Research
                      Foundation), project PA 3634/1-1 and project-ID
                      431549029–SFB 1451 project B05, the Helmholtz Imaging
                      Platform and eBRAIN Health (HORIZON-INFRA-2021-TECH-01).},
      abstract     = {The fast-paced development of machine learning (ML) and its
                      increasing adoption in research challenge researchers
                      without extensive training in ML. In neuroscience, ML can
                      help understand brain-behavior relationships, diagnose
                      diseases and develop biomarkers using data from sources like
                      magnetic resonance imaging and electroencephalography.
                      Primarily, ML builds models to make accurate predictions on
                      unseen data. Researchers evaluate models' performance and
                      generalizability using techniques such as cross-validation
                      (CV). However, choosing a CV scheme and evaluating an ML
                      pipeline are challenging and, if done improperly, can lead to
                      overestimated results and incorrect interpretations. Here,
                      we created julearn, an open-source Python library allowing
                      researchers to design and evaluate complex ML pipelines
                      without encountering common pitfalls. We present the
                      rationale behind julearn’s design and its core features,
                      and showcase three examples of previously published
                      research projects. Julearn simplifies access to ML by
                      providing an easy-to-use environment. With its design,
                      unique features, simple interface, and practical
                      documentation, it serves as a useful Python-based library
                      for research projects.},
      cin          = {INM-7},
      cid          = {I:(DE-Juel1)INM-7-20090406},
      pnm          = {5253 - Neuroimaging (POF4-525) / 5251 - Multilevel Brain
                      Organization and Variability (POF4-525) / SFB 1451 B05 -
                      Single-case predictions of motor abilities in healthy
                      individuals and patients with motor disorders (B05)
                      (458640473)},
      pid          = {G:(DE-HGF)POF4-5253 / G:(DE-HGF)POF4-5251 /
                      G:(GEPRIS)458640473},
      typ          = {PUB:(DE-HGF)16},
      doi          = {10.46471/gigabyte.113},
      url          = {https://juser.fz-juelich.de/record/1023472},
}