% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@ARTICLE{Alia:1025235,
      author       = {Alia, Ahmed and Maree, Mohammed and Chraibi, Mohcine and
                      Seyfried, Armin},
      title        = {{A} novel {V}oronoi-based convolutional neural network
                      framework for pushing person detection in crowd videos},
      journal      = {Complex \& Intelligent Systems},
      issn         = {2199-4536},
      address      = {Switzerland},
      publisher    = {Springer Nature},
      reportid     = {FZJ-2024-02803},
      pages        = {27},
      year         = {2024},
      abstract     = {Analyzing the microscopic dynamics of pushing behavior
                      within crowds can offer valuable insights into crowd
                      patterns and interactions. By identifying instances of
                      pushing in crowd videos, a deeper understanding of when,
                      where, and why such behavior occurs can be achieved. This
                      knowledge is crucial to creating more effective crowd
                      management strategies, optimizing crowd flow, and
                      enhancing overall crowd experiences. However, manually
                      identifying pushing behavior at the microscopic level is
                      challenging, and the existing automatic approaches cannot
                      detect such microscopic behavior. Thus, this article
                      introduces a novel automatic framework for identifying
                      pushing in videos of crowds on a microscopic level. The
                      framework comprises two main components: (i) feature
                      extraction and (ii) video detection. In the feature
                      extraction component, a new Voronoi-based method is
                      developed for determining the local regions associated
                      with each person in the input video. Subsequently, these
                      regions are fed into the EfficientNetV1B0 convolutional
                      neural network to extract the deep features of each
                      person over time. In the second component, a combination
                      of a fully connected layer with a Sigmoid activation
                      function is employed to analyze these deep features and
                      annotate the individuals involved in pushing within the
                      video. The framework is trained and evaluated on a new
                      dataset created using six real-world experiments,
                      including their corresponding ground truths. The
                      experimental findings demonstrate that the proposed
                      framework outperforms state-of-the-art approaches, as
                      well as seven baseline methods used for comparative
                      analysis.},
      cin          = {IAS-7},
      ddc          = {004},
      cid          = {I:(DE-Juel1)IAS-7-20180321},
      pnm          = {5111 - Domain-Specific Simulation \& Data Life Cycle
                      Labs (SDLs) and Research Groups (POF4-511) /
                      Pilotprojekt zur Entwicklung eines
                      palästinensisch-deutschen Forschungs- und
                      Promotionsprogramms 'Palestinian-German Science Bridge'
                      (01DH16027) / DFG project 491111487 -
                      Open-Access-Publikationskosten / 2022 - 2024 /
                      Forschungszentrum Jülich (OAPKFZJ) (491111487)},
      pid          = {G:(DE-HGF)POF4-5111 / G:(BMBF)01DH16027 /
                      G:(GEPRIS)491111487},
      typ          = {PUB:(DE-HGF)16},
      UT           = {WOS:001220476300001},
      doi          = {10.1007/s40747-024-01422-2},
      url          = {https://juser.fz-juelich.de/record/1025235},
}