% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
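%
% As a minimal sketch (not part of the record itself) of one way to consume this
% file with full UTF-8 support, a biblatex/biber setup could look like the
% following; the file name "references.bib" is an assumption, and the document
% is compiled with pdflatex -> biber -> pdflatex:
%
%   \documentclass{article}
%   \usepackage[utf8]{inputenc}            % default on recent pdfLaTeX; omit for LuaLaTeX/XeLaTeX
%   \usepackage[backend=biber]{biblatex}   % biber handles UTF-8 .bib entries like the one below
%   \addbibresource{references.bib}        % assumed name of this .bib file
%   \begin{document}
%   Early pushing detection at event entrances is studied by \textcite{Alia:1007508}.
%   \printbibliography
%   \end{document}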

@ARTICLE{Alia:1007508,
      author       = {Alia, Ahmed and Maree, Mohammed and Chraibi, Mohcine and
                      Toma, Anas and Seyfried, Armin},
      title        = {{A Cloud-Based Deep Learning Framework for Early
                      Detection of Pushing at Crowded Event Entrances}},
      journal      = {IEEE Access},
      volume       = {11},
      issn         = {2169-3536},
      address      = {New York, NY},
      publisher    = {IEEE},
      reportid     = {FZJ-2023-02091},
      pages        = {45936--45949},
      year         = {2023},
      abstract     = {Crowding at the entrances of large events may lead to
                      critical and life-threatening situations, particularly when
                      people start pushing each other to reach the event faster.
                      Automatic and timely identification of pushing behavior
                      would help organizers and security forces to intervene early
                      and mitigate dangerous situations. In this paper, we propose
                      a cloud-based deep learning framework for automatic early
                      detection of pushing in crowded event entrances. The
                      proposed framework initially modifies and trains the
                      EfficientNetV2B0 Convolutional Neural Network model.
                      Subsequently, it integrates the adapted model with an
                      accurate and fast pre-trained deep optical flow model with
                      the color wheel method to analyze video streams and identify
                      pushing patches in real-time. Moreover, the framework uses
                      live capturing technology and a cloud-based environment to
                      collect video streams of crowds in real-time and provide
                      early-stage results. A novel dataset is generated based on
                      five real-world experiments and their associated ground
                      truth data to train the adapted EfficientNetV2B0 model. The
                      experimental setups simulated a crowded event entrance,
                      while the ground truths for each video experiment were
                      generated manually by social psychologists. Several
                      experiments on the videos and the generated dataset are
                      carried out to evaluate the accuracy and annotation delay
                      time of the proposed framework. The experimental results
                      show that the proposed framework identified pushing
                      behaviors with an accuracy rate of $87\%$ within a
                      reasonable delay time.},
      cin          = {IAS-7},
      ddc          = {621.3},
      cid          = {I:(DE-Juel1)IAS-7-20180321},
      pnm          = {5111 - Domain-Specific Simulation \& Data Life Cycle Labs
                      (SDLs) and Research Groups (POF4-511) / Pilotprojekt zur
                      Entwicklung eines palästinensisch-deutschen Forschungs- und
                      Promotionsprogramms 'Palestinian-German Science Bridge'
                      (01DH16027)},
      pid          = {G:(DE-HGF)POF4-5111 / G:(BMBF)01DH16027},
      typ          = {PUB:(DE-HGF)16},
      UT           = {WOS:000991619600001},
      doi          = {10.1109/ACCESS.2023.3273770},
      url          = {https://juser.fz-juelich.de/record/1007508},
}