% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@INPROCEEDINGS{Zhao:1049199,
      author       = {Zhao, Xuan and Krieger, Lena and Cao, Zhuo and Bangun, Arya
                      and Scharr, Hanno and Assent, Ira},
      title        = {{P}robabilistic {F}ramework for {R}obustness of
                      {C}ounterfactual {E}xplanations {U}nder {D}ata {S}hifts},
      reportid     = {FZJ-2025-05281},
      year         = {2025},
      abstract     = {Counterfactual explanations (CEs) are a powerful method for
                      interpreting machine learning models, but CEs may become
                      invalid when the model is updated due to distribution
                      shifts in the underlying data. Existing approaches to
                      robust CEs often impose explicit bounds on model
                      parameters to ensure stability, but such bounds can be
                      difficult to estimate and overly restrictive in practice.
                      In this work, we propose a data shift-driven
                      probabilistic framework for robust counterfactual
                      explanations with plausible data shift modeling via a
                      Wasserstein ball. We formalize a linearized Wasserstein
                      perturbation scheme that captures realistic
                      distributional changes and enables Monte Carlo estimation
                      of CE robustness probabilities with domain-specific data
                      shift tolerances. Theoretical analysis reveals that our
                      framework is equivalent in spirit to model parameter
                      bounding approaches but offers greater flexibility and
                      avoids the need to estimate maximal model parameter
                      shifts. Experiments on real-world datasets demonstrate
                      that the proposed method maintains high robustness of CEs
                      under plausible distribution shifts, outperforming
                      conventional parameter-bounding techniques in both
                      validity and proximity costs.},
      month        = {Dec},
      date         = {2025-12-02},
      organization = {NeurIPS 2025 - Reliable ML Workshop,
                      San Diego (USA), 2 Dec 2025 - 8 Dec
                      2025},
      subtyp       = {Other},
      cin          = {IAS-8},
      cid          = {I:(DE-Juel1)IAS-8-20210421},
      pnm          = {5112 - Cross-Domain Algorithms, Tools, Methods Labs (ATMLs)
                      and Research Groups (POF4-511)},
      pid          = {G:(DE-HGF)POF4-5112},
      typ          = {PUB:(DE-HGF)6},
      doi          = {10.34734/FZJ-2025-05281},
      url          = {https://juser.fz-juelich.de/record/1049199},
}