% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@ARTICLE{Sovetkin:904117,
      author       = {Sovetkin, Evgenii and Achterberg, Elbert Jan and Weber,
                      Thomas and Pieters, Bart E.},
      title        = {{Encoder–Decoder Semantic Segmentation Models for
                      Electroluminescence Images of Thin-Film Photovoltaic
                      Modules}},
      journal      = {IEEE Journal of Photovoltaics},
      volume       = {11},
      number       = {2},
      issn         = {2156-3381},
      address      = {New York, NY},
      publisher    = {IEEE},
      reportid     = {FZJ-2021-05687},
      pages        = {444--452},
      year         = {2021},
      abstract     = {We consider a series of image segmentation methods based on
                      deep neural networks to perform semantic segmentation of
                      electroluminescence (EL) images of thin-film modules. We use
                      an encoder-decoder deep neural network architecture. The
                      framework is general and can easily be extended to other
                      types of images (e.g., thermography) or solar cell
                      technologies (e.g., crystalline silicon modules). The
                      networks are trained and tested on a sample of images from a
                      database of 6000 EL images of copper indium gallium
                      diselenide thin-film modules. We selected two types of
                      features to extract: shunts and so-called “droplets,” the
                      latter being frequently observed in the image set. Several
                      models are tested using various combinations of
                      encoder-decoder layers, and a procedure is proposed to
                      select the best model. We show exemplary results obtained
                      with the selected model. Furthermore, we applied the best
                      model to the full set of 6000 images and demonstrate that
                      automated segmentation of EL images can reveal many subtle
                      features that cannot be inferred from studying a small
                      sample of images. We believe these features can contribute
                      to process optimization and quality control.},
      cin          = {IEK-5},
      ddc          = {530},
      cid          = {I:(DE-Juel1)IEK-5-20101013},
      pnm          = {1215 - Simulations, Theory, Optics, and Analytics (STOA)
                      (POF4-121)},
      pid          = {G:(DE-HGF)POF4-1215},
      typ          = {PUB:(DE-HGF)16},
      UT           = {WOS:000621413300027},
      doi          = {10.1109/JPHOTOV.2020.3041240},
      url          = {https://juser.fz-juelich.de/record/904117},
}