% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@INPROCEEDINGS{Oberstra:1043525,
      author       = {Oberstraß, Alexander and Vaca Cerda, Esteban Alejandro and
                      Upschulte, Eric and Niu, Meiqi and Palomero-Gallagher,
                      Nicola and Grässel, David and Schiffer, Christian and Axer,
                      Markus and Amunts, Katrin and Dickscheid, Timo},
      title        = {{I}mage-to-{I}mage {T}ranslation for {V}irtual {C}resyl
                      {V}iolet {S}taining {F}rom 3{D} {P}olarized {L}ight
                      {I}maging},
      reportid     = {FZJ-2025-02901},
      year         = {2025},
      abstract     = {Characterizing the structure of cortical networks in the
                      brain requires complementary imaging techniques and the
                      integration of different aspects such as fiber and cell body
                      distributions. Ideally, different methods are applied to the
                      same tissue for direct comparison. 3D polarized light
                      imaging (3D-PLI) visualizes nerve fibers in brain tissue at
                      high resolution based on optical properties alone. This
                      enables subsequent staining of the same tissue for cell
                      bodies after 3D-PLI measurement. However, this process is
                      time-consuming, technically challenging, and requires
                      nonlinear cross-modal registration to obtain pixel
                      correspondence. Here we investigate image-to-image
                      translation methods to predict the results of cell body
                      staining directly from 3D-PLI, using generative adversarial
                      networks (GANs) and neural style transfer (NST). We use 11
                      coronal sections of a vervet monkey brain for training, each
                      imaged with 3D-PLI and subsequently stained with Cresyl
                      violet for cell bodies. Since pixel-accurate registration of
                      entire sections may be difficult and error-prone, we
                      introduce an online registration head to linearly align
                      model predictions for local image patches to the
                      post-staining during network training. This exploits the
                      fact that local deformations can be approximated by a linear
                      model when a coarse pre-registration is available. The
                      online alignment improves with predictions during training
                      and ultimately converges to an accurate registration. We use
                      a Fourier-based registration approach that is
                      computationally efficient and GPU-parallelizable. We quantify
                      model performance by comparing the predicted virtual
                      staining to post-staining after 3D-PLI measurement. Our best
                      model localizes the majority of larger cell instances (>100
                      µm² in-plane) segmented by a contour proposal network
                      (CPN) with an F1 score of 63.1. The proposed online
                      registration head significantly improves the performance of
                      all investigated models, increasing F1 scores from 40.6 to
                      63.1 for NST and from 22.2 to 50.3 for a GAN. The applied
                      virtual staining enables automatic localization of larger
                      cell instances in unstained 3D-PLI images. Since the model
                      predictions are pixel-aligned with 3D-PLI, they enable joint
                      analysis of fiber tracts and cell bodies and may also serve
                      as targets for registration of real post-staining. Future
                      work will extend the training data to include more sections,
                      brains and species, with potential applications to other
                      imaging modalities.},
      month         = {Jun},
      date          = {2025-06-03},
      organization  = {Helmholtz AI Conference 2025,
                       Karlsruhe (Germany), 3 Jun 2025 - 5 Jun
                       2025},
      subtyp        = {After Call},
      cin          = {INM-1},
      cid          = {I:(DE-Juel1)INM-1-20090406},
      pnm          = {5254 - Neuroscientific Data Analytics and AI (POF4-525) /
                      HIBALL - Helmholtz International BigBrain Analytics and
                      Learning Laboratory (HIBALL) (InterLabs-0015) / Helmholtz AI
                      - Helmholtz Artificial Intelligence Coordination Unit –
                      Local Unit FZJ (E.40401.62) / EBRAINS 2.0 - EBRAINS 2.0: A
                      Research Infrastructure to Advance Neuroscience and Brain
                      Health (101147319) / DFG project G:(GEPRIS)313856816 - SPP
                      2041: Computational Connectomics (313856816) / 3D-MMA -
                      Gradienten der Verteilung multipler Transmitterrezeptoren in
                      der Hirnrinde als Grundlage verteilter kognitiver,
                      sensorischer und motorischer Funktionen. (01GQ1902)},
      pid          = {G:(DE-HGF)POF4-5254 / G:(DE-HGF)InterLabs-0015 /
                      G:(DE-Juel-1)E.40401.62 / G:(EU-Grant)101147319 /
                      G:(GEPRIS)313856816 / G:(BMBF)01GQ1902},
      typ          = {PUB:(DE-HGF)24},
      url          = {https://juser.fz-juelich.de/record/1043525},
}
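
% The abstract above mentions a Fourier-based registration approach used by the
% online registration head to align predicted patches to the post-staining.
% The snippet below is a minimal phase-correlation sketch in Python/NumPy,
% assuming a single-channel patch and a pure translation per patch; the actual
% head may estimate a full linear (affine) transform and run batched on the GPU.
% All function and variable names are illustrative, not taken from the authors' code.

import numpy as np


def phase_correlation_shift(pred_patch: np.ndarray, stained_patch: np.ndarray):
    """Estimate the (dy, dx) translation that aligns `pred_patch` to
    `stained_patch` via phase correlation (peak of the normalized
    cross-power spectrum)."""
    F_pred = np.fft.fft2(pred_patch)
    F_stained = np.fft.fft2(stained_patch)

    # Normalized cross-power spectrum; the epsilon guards against division by zero.
    cross_power = F_stained * np.conj(F_pred)
    cross_power /= np.abs(cross_power) + 1e-8

    # The inverse FFT of the cross-power spectrum peaks at the relative translation.
    correlation = np.fft.ifft2(cross_power).real
    peak = np.unravel_index(np.argmax(correlation), correlation.shape)

    # Map peaks beyond half the patch size to negative shifts.
    shifts = np.array(peak, dtype=float)
    shape = np.array(pred_patch.shape, dtype=float)
    shifts[shifts > shape / 2] -= shape[shifts > shape / 2]
    return tuple(shifts)


if __name__ == "__main__":
    # Toy check: a random "post-staining" patch and a rolled copy as the "prediction".
    rng = np.random.default_rng(0)
    stained = rng.random((128, 128))
    pred = np.roll(stained, shift=(5, -3), axis=(0, 1))
    print(phase_correlation_shift(pred, stained))  # -> approximately (-5.0, 3.0)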