% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
@ARTICLE{Strohmer:141504,
author = {Strohmer, Sven and Reckfort, Julia and Dohmen, Melanie and
Huynh, Anh Minh and Axer, Markus},
title = {{R}elating {P}olarized {L}ight {I}maging {D}ata {A}cross
{S}cales},
journal = {Frontiers in Neuroinformatics},
volume = {7},
issn = {1662-5196},
address = {Lausanne},
publisher = {Frontiers Research Foundation},
reportid = {FZJ-2013-06672},
pages = {3},
year = {2013},
abstract = {Polarized light imaging (PLI) (Axer et al. (2011a,b))
enables scanning of individual histological human brain
sections with two independent setups: a large-area
polarimeter (LAP; object space resolution, referred to
simply as “resolution” in the remainder of this abstract:
64 × 64 μm²/px) and a polarizing microscope
(PM, resolution: 1.6 × 1.6 μm²/px). While PM images are
of high resolution (HR) and contain complex information, the
LAP provides low resolution (LR), overview-like data. The
information contained in an LR image is a mixture of the
information in its HR counterpart (Koenderink (1984)). Each
resolution yields valuable information, which multiplies when
the two are combined. Image registration algorithms, for
example, handle multiple resolutions (1) in the case of several
modalities, by using special metrics, and (2) in multi-resolution
approaches (e.g. Trottenberg et al. (2001)) to increase the
stability of the optimization process of automatic image
registration. In the latter case, the data is coarsened
synthetically. Our goal is to directly relate measured HR to
LR data of the same object, avoiding artificial intermediate
steps. All images show the average light intensity that is
transmitted through a thin brain slice (Axer et al.
(2011a,b)), and depict a region from the human occipital
pole. The images were manually segmented and smoothed by a
Gaussian kernel suitable for noise reduction and adapted to
each resolution. We selected octave 2 at LR and octave 7 at
HR for SURF extraction (Bay et al. (2006)), where one octave
denotes a decrease in resolution by a factor of 2. Features
with corresponding scales were matched with FLANN (Muja and
Lowe (2009)). Homography estimation from the resulting
feature point pairs used RANSAC (Fischler and Bolles
(1981)). The homography and a linear interpolation scheme
were applied to transfer information from LR to HR and vice
versa. Localization of the HR ROI in the LR ROI is plausible
(figure 1(B)), while localization in the LAP image fails,
because the matched feature point positions in HR and LR do
not correspond. Numerical and feature point matching
inaccuracies become evident in figure 1(C). The experiments
were performed with one HR ROI (figure 1(A)), one LAP ROI
(figure 1(B)) and one LAP image. We plan to improve the
algorithm and to obtain complete HR data sets for further
exploration of the method’s performance. Figure 1. This
figure shows input data and results of the experiment. The
arrows indicate the flow of information and the color by
which it is displayed at its destination. Subfigure (A)
shows the down-scaled PM ROI (original size: 20604 px ×
17157 px). (B) shows the up-scaled LAP ROI (original size:
916 px × 510 px) with estimated PM ROI location (green
frame). Note that only part of the HR ROI is contained in
the LR ROI. Also, most of the fine white structures depicted
in (A) vanished due to the low resolution of (B). (C) shows
the down-scaled overlay image (original size: 20604 px ×
17157 px) of LR data (enclosed in the green frame in (B))
transferred to HR versus PM ROI data of (A), where HR data
is labeled green and transferred LR data is labeled red. HR
data and transferred LR data were normalized. Numerical and
feature point matching inaccuracies become evident. Also,
displacement and distortion compared to HR data are visible.},
month = {Sep},
date = {2013-09-02},
organization = {Imaging the brain at different scales:
How to integrate multi-scale structural
information?, Antwerp (Belgium), 2 Sep
2013 - 6 Sep 2013},
cin = {JSC / INM-1 / JARA-HPC},
ddc = {610},
cid = {I:(DE-Juel1)JSC-20090406 / I:(DE-Juel1)INM-1-20090406 /
$I:(DE-82)080012_20140620$},
pnm = {411 - Computational Science and Mathematical Methods
(POF2-411) / SMHB - Supercomputing and Modelling for the
Human Brain (HGF-SMHB-2013-2017) / SLNS - SimLab
Neuroscience (Helmholtz-SLNS)},
pid = {G:(DE-HGF)POF2-411 / G:(DE-Juel1)HGF-SMHB-2013-2017 /
G:(DE-Juel1)Helmholtz-SLNS},
typ = {PUB:(DE-HGF)16},
doi = {10.3389/conf.fninf.2013.10.00029},
url = {https://juser.fz-juelich.de/record/141504},
}
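% Note: the abstract above describes a concrete registration pipeline
% (SURF extraction at selected octaves, FLANN descriptor matching,
% RANSAC homography estimation, and linear-interpolation transfer
% between LR and HR data). The following is a minimal, hypothetical
% Python/OpenCV sketch of such a pipeline, kept as a comment so the
% BibTeX file stays valid. It is not the authors' code; the octave
% filtering, SURF parameters, ratio-test threshold, and RANSAC
% reprojection threshold are assumptions made for illustration only.
%
%   import cv2                     # requires opencv-contrib-python (SURF)
%   import numpy as np
%
%   def estimate_homography(lr_img, hr_img, lr_octave=2, hr_octave=7):
%       """Estimate the LR->HR homography from octave-filtered SURF features."""
%       surf = cv2.xfeatures2d.SURF_create(hessianThreshold=400)
%
%       def features(img, octave):
%           # Keep only keypoints from the requested octave; this is an
%           # assumed approximation of the abstract's octave selection.
%           kps, des = surf.detectAndCompute(img, None)
%           keep = [i for i, kp in enumerate(kps) if kp.octave == octave]
%           return [kps[i] for i in keep], des[keep]
%
%       kp_lr, des_lr = features(lr_img, lr_octave)
%       kp_hr, des_hr = features(hr_img, hr_octave)
%
%       # FLANN matching (kd-tree index) with Lowe's ratio test.
%       flann = cv2.FlannBasedMatcher(dict(algorithm=1, trees=5),
%                                     dict(checks=50))
%       matches = flann.knnMatch(des_lr, des_hr, k=2)
%       good = [p[0] for p in matches
%               if len(p) == 2 and p[0].distance < 0.7 * p[1].distance]
%
%       src = np.float32([kp_lr[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
%       dst = np.float32([kp_hr[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
%
%       # Robust homography estimation with RANSAC.
%       H, _ = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)
%       return H
%
%   def transfer_lr_to_hr(lr_img, H, hr_shape):
%       # Warp LR data into HR coordinates with linear interpolation.
%       h, w = hr_shape[:2]
%       return cv2.warpPerspective(lr_img, H, (w, h), flags=cv2.INTER_LINEAR)
%
% Under these assumptions, mapping the HR ROI corners through
% np.linalg.inv(H) with cv2.perspectiveTransform would give the estimated
% location of the HR ROI within the LR image (cf. the green frame in
% figure 1(B)).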