@inproceedings{lunajimenez:26008:sign-lang:lrec,
  author    = {Luna-Jimenez, Cristina and Eing, Lennart and Esteban Romero, Sergio and Schneeberger, Tanja and Gebhard, Patrick and Nunnari, Fabrizio and Andr{\'e}, Elisabeth},
  title     = {Emotion Recognition in {German} {Sign} {Language} with Facial Action Units},
  pages     = {297--305},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26008.html},
  abstract  = {Emotion recognition research in sign languages is still in its infancy. Even today, there is a lack of knowledge about appropriate annotation guidelines and about the impact that facial expressions, body postures, and head positions have on recognizing emotions while signing, given that sign language encompasses manual and non-manual cues with linguistic purposes. In this article, we present an acquisition protocol to record acted emotions in German Sign Language under four scenarios (High-Valence and High-Arousal, High-Valence and Low-Arousal, Low-Valence and High-Arousal, and Low-Valence and Low-Arousal). The goal is to provide a reference dataset for exploring the use of machine learning techniques for the automated classification of emotions in sign language utterances. As a baseline reference, we trained static models with features extracted from facial muscle activations. The best model, a random forest trained on statistics extracted from Action Units, achieved an accuracy of 68.84{\%} and an F1 score of 67.96{\%}. These results highlight the importance of facial expressions in sign language, not only for carrying linguistic information but also for transmitting emotions. Results also indicate challenges in detecting emotions in the High-Valence and Low-Arousal scenario, suggesting future lines of investigation.}
}

@inproceedings{lunajimenez:26011:sign-lang:lrec,
  author    = {Luna-Jimenez, Cristina and Eing, Lennart and Withanage Don, Daksitha and Gonz{\'a}lez, Marco and Nunnari, Fabrizio and Perniss, Pamela and Gebhard, Patrick and Andr{\'e}, Elisabeth},
  title     = {{DGS-BIGEKO}: A Dataset for Hypothetical Emergency Scenarios in {German} {Sign} {Language}},
  pages     = {306--314},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26011.html},
  abstract  = {In this article, we describe DGS-BIGEKO, a sign language dataset containing a conversation in a crisis scenario, signed by a professional interpreter in German Sign Language (DGS). The dataset comprises 14 sentences with common questions and answers from emergency call protocols, translated into DGS. It also contains signs for a further 108 concepts relevant to emergency call scenarios. The dataset is intended to support research in sign language linguistics and sign language machine translation by providing resources in a highly specific domain for which no previous DGS resources are available. The dataset is freely available for research purposes at the following address: https://doi.org/10.5281/zenodo.18458557}
}

@inproceedings{renner:26009:sign-lang:lrec,
  author    = {Renner, Fabian and Withanage Don, Daksitha and Andr{\'e}, Elisabeth and Luna-Jimenez, Cristina},
  title     = {Effect of Data Augmentation with Multi-View Perspectives of Signers on the {DGS-Fabeln-1} Dataset},
  pages     = {429--437},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26009.html},
  abstract  = {Sign languages constitute the principal form of communication for deaf communities across the globe. Nevertheless, the development of reliable Continuous Sign Language Translation (CSLT) systems is constrained by the lack of sufficient data and of models able to handle spatio-temporal information. In this article, we explore the effect of adding multi-view perspectives of the signer to the training set as data augmentation, using the UniSign framework on the DGS-Fabeln-1 dataset. Our results reveal that increasing the dataset size and using multiple camera perspectives significantly improve performance, with the best configuration achieving a BLEU-4 score of 4.20{\%}. These results provide a competitive baseline for the DGS-Fabeln-1 dataset and guidance for further optimization of CSLT systems.}
}

