% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
@INPROCEEDINGS{Cao:1049627,
author = {Cao, Zhuo and Zhao, Xuan and Krieger, Lena and Scharr,
Hanno and Assent, Ira},
title = {{L}eap{F}actual: {R}eliable {V}isual {C}ounterfactual
{E}xplanation {U}sing {C}onditional {F}low {M}atching},
reportid = {FZJ-2025-05415},
year = {2025},
abstract = {The growing integration of machine learning (ML) and
artificial intelligence (AI) models into high-stakes domains
such as healthcare and scientific research calls for models
                 that are not only accurate but also interpretable. Among
                 existing explainability methods, counterfactual explanations
offer interpretability by identifying minimal changes to
inputs that would alter a model's prediction, thus providing
deeper insights. However, current counterfactual generation
                 methods suffer from critical limitations, including vanishing
                 gradients, discontinuous latent spaces, and an overreliance
on the alignment between learned and true decision
boundaries. To overcome these limitations, we propose
LeapFactual, a novel counterfactual explanation algorithm
based on conditional flow matching. LeapFactual generates
reliable and informative counterfactuals, even when true and
learned decision boundaries diverge. Following a
model-agnostic approach, LeapFactual is not limited to
models with differentiable loss functions. It can even
handle human-in-the-loop systems, expanding the scope of
counterfactual explanations to domains that require the
participation of human annotators, such as citizen science.
We provide extensive experiments on benchmark and real-world
datasets showing that LeapFactual generates accurate and
in-distribution counterfactual explanations that offer
                 actionable insights. We observe, for instance, that reliable
                 counterfactual samples whose labels align with the ground
                 truth can be used as additional training data to further
                 improve the model. The proposed method is broadly
applicable and enhances both scientific knowledge discovery
and non-expert interpretability.},
month = {Dec},
date = {2025-12-01},
  organization = {The Thirty-Ninth Annual Conference on
                  Neural Information Processing Systems
                  (NeurIPS 2025), San Diego (USA),
                  1 Dec 2025 - 7 Dec 2025},
subtyp = {Invited},
cin = {IAS-8},
cid = {I:(DE-Juel1)IAS-8-20210421},
pnm = {5254 - Neuroscientific Data Analytics and AI (POF4-525) /
5112 - Cross-Domain Algorithms, Tools, Methods Labs (ATMLs)
and Research Groups (POF4-511)},
pid = {G:(DE-HGF)POF4-5254 / G:(DE-HGF)POF4-5112},
typ = {PUB:(DE-HGF)24},
doi = {10.34734/FZJ-2025-05415},
url = {https://juser.fz-juelich.de/record/1049627},
}
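% Note: the abstract names conditional flow matching as the generative
% backbone of LeapFactual. Below is a minimal, self-contained Python sketch
% of generic class-conditional flow matching only (training objective plus
% Euler-step sampling), NOT the LeapFactual algorithm from the paper; all
% names (VelocityNet, cfm_loss, sample) and hyperparameters are illustrative
% assumptions. BibTeX ignores text outside @ entries, so this block does not
% affect parsing of the record above.

import torch
import torch.nn as nn

class VelocityNet(nn.Module):
    """Class-conditional velocity field v(x_t, t, c) (illustrative)."""
    def __init__(self, dim, n_classes, hidden=256):
        super().__init__()
        self.embed = nn.Embedding(n_classes, hidden)
        self.net = nn.Sequential(
            nn.Linear(dim + 1 + hidden, hidden), nn.SiLU(),
            nn.Linear(hidden, hidden), nn.SiLU(),
            nn.Linear(hidden, dim),
        )

    def forward(self, x_t, t, c):
        # Concatenate sample, scalar time, and class embedding.
        return self.net(torch.cat([x_t, t, self.embed(c)], dim=-1))

def cfm_loss(model, x1, c):
    """Conditional flow matching objective: regress the straight-line
    velocity (x1 - x0) along the interpolation path x_t."""
    x0 = torch.randn_like(x1)          # noise endpoint
    t = torch.rand(x1.size(0), 1)      # uniform time in [0, 1]
    x_t = (1.0 - t) * x0 + t * x1      # linear probability path
    target_v = x1 - x0                 # constant velocity of that path
    return ((model(x_t, t, c) - target_v) ** 2).mean()

def sample(model, n, dim, target_class, steps=50):
    """Euler-integrate the learned flow from noise toward target_class."""
    with torch.no_grad():
        x = torch.randn(n, dim)
        c = torch.full((n,), target_class, dtype=torch.long)
        dt = 1.0 / steps
        for i in range(steps):
            t = torch.full((n, 1), i * dt)
            x = x + dt * model(x, t, c)
    return x

% Usage (illustrative): train by minimizing cfm_loss over (x1, c) batches
% drawn from the data, then call sample(model, n, dim, target_class) to draw
% class-conditional samples; how counterfactuals are obtained from such a
% flow is specific to the paper and not reproduced here.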