% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

% Master's thesis record (JuSER export; reportid/cin/cid/pnm/pid/typ are
% repository-specific fields that standard styles ignore).
@MASTERSTHESIS{Wang:904999,
      author       = {Wang, Qin},
      title        = {Deep learning for segmentation of {3D-PLI} images},
      school       = {RWTH Aachen},
      type         = {Masterarbeit},
      reportid     = {FZJ-2022-00310},
      pages        = {60},
      year         = {2021},
      note         = {Masterarbeit, RWTH Aachen, 2021},
      abstract     = {3D polarized light imaging (3D-PLI) technology is a
                      neuroimaging technique used to capture high-resolution
                      images of thinly sliced segments of brains. Polarizing
                      microscope (PM) images are captured using 3D-PLI technology
                      to create three-dimensional brain models. Before
                      construction, we need to discriminate brain tissue from the
                      background in PM images through image segmentation. Labeling
                      PM images is time consuming because of their ultra-high
                      resolutions. Consequently, we cannot employ supervised
                      learning for PM image segmentation because it requires a
                      large amount of data for training. Recently, self-supervised
                      learning was proposed to alleviate the drawback of
                      insufficiently-labeled data by utilizing unlabeled data.
                      Self-supervised learning is a means for pretraining
                      neural networks to extract image features without labeled
                      data, and then fine-tunes supervised learning networks. It
                      is possible to solve the insufficient labeled PM images
                      problem. In self-supervised learning, the tasks that we use
                      for pre-training are known as the “upstream tasks”. And
                      the tasks that we use for fine-tuning are known as the
                      “downstream tasks”. In this thesis, we explore different
                      self-supervised learning approaches and make quantitative
                      comparisons. Before the self-supervised learning, we begin
                      by presenting the k-means-based image clustering method in
                      which deep neural networks are employed for feature vector
                      extraction. In this way, the clustering method can be used
                      to identify similar images, avoiding the need to manually
                      annotate similar images. Furthermore, to address the lack of
                      training data and make full use of the unlabeled dataset, we
                      implement a couple of self-supervised learning methods and
                      compare the Dice coefficient metric to the baseline model.
                      The self-supervised learning methods we present have two
                      parts. The first one is pretext supervised learning, whereby
                      we describe several upstream tasks, rotation, jigsaw, and
                      inpainting, for example, and experiments on a Pascal VOC
                      dataset and PM image dataset. A contrastive learning method
                      is presented in the second part, in which ablation
                      experiments are conducted for evaluation.},
      cin          = {JSC},
      cid          = {I:(DE-Juel1)JSC-20090406},
      pnm          = {5111 - Domain-Specific Simulation \& Data Life Cycle Labs
                      (SDLs) and Research Groups (POF4-511) / SLNS - SimLab
                      Neuroscience (Helmholtz-SLNS)},
      pid          = {G:(DE-HGF)POF4-5111 / G:(DE-Juel1)Helmholtz-SLNS},
      typ          = {PUB:(DE-HGF)19},
      url          = {https://juser.fz-juelich.de/record/904999},
}