% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
@INPROCEEDINGS{Glass:1018411,
author = {Glass, Torben and Schiffer, Christian and Amunts, Katrin
and Dickscheid, Timo},
title = {{O}n {U}ncertainty-aware {D}eep {L}earning for
{C}ytoarchitecture {C}lassification},
reportid = {FZJ-2023-04792},
year = {2023},
abstract = {High-resolution light-microscopic scans of histological
brain sections allow identifying cytoarchitectonic areas.
They are defined by the local characteristics of
microstructural organization, which encompasses the size,
type, shape, and distribution of neurons, as well as their
distinct laminar and columnar organization. As established
brain mapping methods relying on statistical image analysis
cannot feasibly handle the large size of high-resolution
datasets acquired by high-throughput microscopic scanners,
recent research has focused on the development of automated
cytoarchitecture classification methods based on deep
learning. While the performance of these deep learning
methods has steadily increased in recent years, they are
unable to provide reliable estimates of prediction
uncertainty. In particular, the softmax outputs of
classification networks are generally not well suited to
estimate a model's uncertainty. The lack of well-calibrated
uncertainty estimates makes the interpretation of
predictions challenging, in particular when dealing with
out-of-distribution data. To address this, we studied the
behavior of a state-of-the-art deep neural network for
cytoarchitecture classification with respect to its
uncertainty awareness. We compared it to two methods for
uncertainty quantification: Dropout variational inference
(DVI), which quantifies uncertainty based on the variance of
multiple predictions acquired with inference-time dropout,
and evidential deep learning (EDL), which is explicitly
trained to output an informative uncertainty score. We applied
both methods to in-distribution test data and
out-of-distribution data from a brain not seen during
training, and compared the models based on calibration
metrics, uncertainty scores, and prediction entropy. Our
experiments revealed that the baseline model is generally
overconfident, an often-reported behavior of neural networks
that manifests as high prediction probabilities even for
incorrectly classified samples. We observed similar behavior
for out-of-distribution samples from a brain not seen during
training, where the model was unable to express its
inability to make accurate predictions. In comparison to the
baseline, both DVI and EDL resulted in considerably more
plausible uncertainty measures. For example, we observed
that the uncertainty scores obtained from models trained
with EDL indicate high certainty in regions with highly
distinct cytoarchitectonic properties, including the primary
visual and motor cortex. While EDL outputs a single
normalized uncertainty score per sample, DVI provides
class-level uncertainty estimates based on per-class
variance. This allows us to obtain localized uncertainty
measures for specific brain regions. For example, we
observed a low-certainty ribbon for the primary visual
cortex at the transition between primary and secondary
visual cortex, indicating cytoarchitectonic ambiguities at
the boundary between the two regions. These ambiguities could
be linked to the complex border phenomena that are
characteristic of this region, the so-called border tuft and
fringe area. Our study revealed that predictions of existing
models for cytoarchitecture classification are not well
calibrated and lack the ability to express uncertainty. The
investigated methods address these issues, providing
complementary ways to assess uncertainty and improve
model calibration. Future research will focus on the
refinement of the training strategy and the involved
hyperparameters. Finally, we plan to exploit the obtained
uncertainty measures to identify high-certainty predictions
for self-training approaches, which we expect to improve
classification performance.},
month = {Oct},
date = {2023-10-04},
organization = {7th BigBrain Workshop, Reykjavík
(Iceland), 4 Oct 2023 - 6 Oct 2023},
subtyp = {After Call},
cin = {INM-1},
cid = {I:(DE-Juel1)INM-1-20090406},
pnm = {5251 - Multilevel Brain Organization and Variability
(POF4-525) / 5254 - Neuroscientific Data Analytics and AI
(POF4-525) / HIBALL - Helmholtz International BigBrain
Analytics and Learning Laboratory (HIBALL) (InterLabs-0015)
/ Helmholtz AI - Helmholtz Artificial Intelligence
Coordination Unit – Local Unit FZJ (E.40401.62)},
pid = {G:(DE-HGF)POF4-5251 / G:(DE-HGF)POF4-5254 /
G:(DE-HGF)InterLabs-0015 / G:(DE-Juel-1)E.40401.62},
typ = {PUB:(DE-HGF)24},
url = {https://juser.fz-juelich.de/record/1018411},
}
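% The abstract describes dropout variational inference (DVI) as quantifying
% uncertainty from the variance of multiple predictions obtained with
% inference-time dropout. The following is a minimal, generic PyTorch sketch of
% that idea, kept as a comment so the .bib file remains valid; the function
% names, the number of stochastic passes, and the use of predictive entropy as
% a summary statistic are illustrative assumptions, not the authors'
% implementation.
%
% import torch
% import torch.nn as nn
% import torch.nn.functional as F
%
% def enable_dropout(model: nn.Module) -> None:
%     """Keep dropout layers active while the rest of the model stays in eval mode."""
%     for module in model.modules():
%         if isinstance(module, nn.Dropout):
%             module.train()
%
% @torch.no_grad()
% def mc_dropout_predict(model: nn.Module, x: torch.Tensor, n_samples: int = 20):
%     """Run n_samples stochastic forward passes and summarize them.
%
%     Returns the mean class probabilities, the per-class variance
%     (class-level uncertainty), and the predictive entropy per input.
%     """
%     model.eval()
%     enable_dropout(model)
%     probs = torch.stack([F.softmax(model(x), dim=-1) for _ in range(n_samples)])
%     mean_probs = probs.mean(dim=0)    # (batch, classes)
%     per_class_var = probs.var(dim=0)  # class-level uncertainty per sample
%     entropy = -(mean_probs * torch.log(mean_probs + 1e-12)).sum(dim=-1)
%     return mean_probs, per_class_var, entropy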
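% The abstract also notes that evidential deep learning (EDL) outputs a single
% normalized uncertainty score per sample. A common Dirichlet-evidence
% parameterization (evidence from a non-negative transform of the logits,
% u = K / sum(alpha)) is sketched below for illustration; the ReLU-based
% evidence function is an assumption and may differ from the trained model
% used in the study.
%
% def edl_uncertainty(logits: torch.Tensor) -> torch.Tensor:
%     """Single normalized uncertainty score per sample, u = K / sum(alpha)."""
%     evidence = F.relu(logits)   # non-negative evidence per class (assumed transform)
%     alpha = evidence + 1.0      # Dirichlet concentration parameters
%     num_classes = logits.shape[-1]
%     return num_classes / alpha.sum(dim=-1)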