@comment{NOTE(review): per the sign-lang bibliography convention, `address` holds the
  conference venue rather than the publisher's city, and `day`/`maintitle` are
  non-standard fields silently ignored by classic BibTeX styles (used by biblatex
  and the workshop's own style) -- kept as-is deliberately.}
@inproceedings{lunajimenez:26011:sign-lang:lrec,
  author    = {Luna-Jimenez, Cristina and Eing, Lennart and Withanage Don, Daksitha and Gonz{\'a}lez, Marco and Nunnari, Fabrizio and Perniss, Pamela and Gebhard, Patrick and Andr{\'e}, Elisabeth},
  title     = {{DGS-BIGEKO}: A Dataset for Hypothetical Emergency Scenarios in {German} {Sign} {Language}},
  pages     = {306--314},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {European Language Resources Association (ELRA)},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26011.html},
  abstract  = {In this article, we describe DGS-BIGEKO, a sign language dataset containing a conversation in a crisis scenario signed by a professional interpreter in German Sign Language (DGS). The dataset comprises 14 sentences with common questions and answers from protocols occurring in emergency call scenarios translated into DGS. Additionally, the dataset contains signs for an additional 108 concepts that are relevant to emergency call scenarios. The dataset is intended to support research in sign language linguistics and sign language machine translation by providing resources in a very specific domain, where no previous resources are available in DGS. The dataset is freely available for research purposes at the following address: https://doi.org/10.5281/zenodo.18458557},
}

@comment{NOTE(review): per the sign-lang bibliography convention, `address` holds the
  conference venue rather than the publisher's city, and `day`/`maintitle` are
  non-standard fields silently ignored by classic BibTeX styles (used by biblatex
  and the workshop's own style) -- kept as-is deliberately.}
@inproceedings{renner:26009:sign-lang:lrec,
  author    = {Renner, Fabian and Withanage Don, Daksitha and Andr{\'e}, Elisabeth and Luna-Jimenez, Cristina},
  title     = {Effect of Data Augmentation with Multi-View Perspectives of Signers on the {DGS-Fabeln-1} Dataset},
  pages     = {429--437},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {European Language Resources Association (ELRA)},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26009.html},
  abstract  = {Sign languages constitute the principal form of communication for deaf communities across the globe. Nevertheless, the development of reliable Continuous Sign Language Translation (CSLT) systems is constrained by the lack of sufficient data and models able to handle spatio-temporal information. In this article, we explore the effect of adding multiview perspectives of the signer to the training set as data augmentation using the UniSign framework for the DGS-Fabeln-1 dataset. Our results reveal that increasing dataset size and using multiple camera perspectives significantly improve performance, with the best configurations achieving BLEU-4 scores of 4.20{\%}. These results provide a competitive baseline for the DGS-Fabeln-1 dataset and guidance for further optimizations of CSLT systems.},
}

