@proceedings{lrec:sign-lang:26,
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  title     = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/2026.signlang-1.pdf}
}

@inproceedings{barbera:26041:sign-lang:lrec,
  author    = {Barber{\`a}, Gemma and Broto Clemente, In{\'e}s and Vinaixa Rosell{\'o}, Xavier and Cassany Viladomat, Roger},
  title     = {Capturing Methodology for Generating Synthetic and {3D} Training Data in {Catalan} {Sign} {Language} ({LSC}): The Case of Verbal Agreement},
  pages     = {1--9},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26041.html},
  abstract  = {This paper proposes a hybrid methodology to generate high-quality synthetic data. Unlike other approaches based purely on generative Artificial Intelligence, which may suffer from hallucinations or inconsistent movements, this project uses 3D biomechanics and kinematics algorithms that enforce the anatomical constraints of the human body to ensure physically plausible movements. The aim of this research is to demonstrate that it is possible to synthetically expand the dataset. In particular, this paper focuses on verb agreement, a grammatical domain which is known for its morphological and articulatory complexity. By concentrating on the possible configurations of the movements in signing space when expressing different person agreeing verbal forms, we aim to capture real movements to extract physical parameters and apply them as logical rules ---similar to those of a video game engine--- to automatically synthesize thousands of new conjugations from infinitives with complete anatomical precision. Beyond spatial conjugation, the methodology further augments data through procedural variation of prosody and body morphology.}
}

@inproceedings{bassomadjoukeng:26037:sign-lang:lrec,
  author    = {Basso Madjoukeng, Ariel and Poitier, Pierre and Kenmogne, Belise Edith and Couplet, Adelaide and Leleu, Margaux and Fr{\'e}nay, Beno{\^i}t},
  title     = {Leveraging Unannotated Sign Language Data via a Robust Data Augmentation Method for Contrastive Representation Learning},
  pages     = {10--16},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26037.html},
  abstract  = {Contrastive learning is a deep learning paradigm that allows the learning of useful representations without annotations. In many fields, including sign language recognition (SLR), contrastive approaches have proven to be very effective for developing pretrained models. To learn representations, they generate augmented variants of an instance through augmentation techniques and then maximize their similarities. The quality of the learned representations is strongly correlated with the augmentations used during training. In several fields, specialized augmentations have been developed and adopted. However, in SLR, we observed two trends: contrastive-based SLR approaches often rely on augmentations that are not realistic for the application (e.g., vertical flip, excessive rotations); specialized augmentation methods lack robustness. Hence, when they are used as a starting point for contrastive algorithms, the learned representations are often irrelevant, and sometimes sensitive. These issues considerably affect the accuracy of SLR models on downstream tasks. In response, this paper proposes a robust augmentation method specially designed for contrastive approaches applied to SLR. The results show an improvement in accuracy during linear evaluation and semi-supervised learning with only 30{\%} of annotations.}
}

@inproceedings{battisti:26034:sign-lang:lrec,
  author    = {Battisti, Alessia and Tissi, Katja and Sidler-Miserez, Sandra and Ebling, Sarah},
  title     = {The {SMILE} Continuous {DSGS} Corpus: A Resource for Longitudinal Exploration of Continuous {Swiss} {German} {Sign} {Language}},
  pages     = {17--30},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26034.html},
  abstract  = {This paper presents the SMILE Continuous DSGS Corpus, a longitudinal dataset that allows for investigating how hearing adults acquire Swiss German Sign Language as a second language. It includes recordings of sign language learners and native signer controls collected at four points over a period of 18 months and annotated for manual and non-manual components, errors, and sentence-level acceptability. The resource provides high-quality, synchronized video suitable for both linguistic and automatic sign language processing research, for example, supporting studies of interlanguage development and training of automatic sign language recognition models. We present here an exploratory analysis of the learner subcorpus using Bayesian mixed-effects modeling. The corpus and accompanying annotations are available for research purposes under a Creative Commons license (CC BY-NC-SA 4.0).}
}

@inproceedings{boddu:26033:sign-lang:lrec,
  author    = {Boddu, Raviteja and Vieira Leite, Guilherme and Lopes da Silva, Joed and Benetti, {\^A}ngelo and Barbieri, Isabela and de Melo Afonso, Nat{\'a}lia and Santos, Thyago and Pedrini, H{\'e}lio and Ven{\^a}ncio Barbosa, Felipe and De Martino, Jos{\'e} Mario and Georges, Munir and Zimmer, Alessandro},
  title     = {The In-Car Sign Language Corpus ({ICSL}): A Multi-Modal Resource for Constrained-Space Sign Language Recognition},
  pages     = {31--41},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26033.html},
  abstract  = {This paper addresses the challenges of using sign language within shared mobility services, such as taxis, carpools, or ride-sharing platforms. The use of sign language recognition (SLR) in real-world, confined environments, specifically vehicle interiors, remains largely unexplored. To motivate research in this area, we present the In-Car Sign Language (ICSL) dataset for Brazilian Sign Language (Libras), with the long-term goal of improving public transport accessibility for the Deaf and Hard-of-Hearing community. The dataset consists of: (1) high-precision laboratory motion capture (MoCap) data to establish an idealized linguistic baseline and (2) real-world multi-modal in-car recordings captured using a 2D camera and 3D Time-of-Flight sensors. The dataset provides a basis for comparative analyses between synthesized signing avatar animations and recorded videos of real signing interpreters, which enable future research into robust "in-the-wild" SLR models and domain adaptation. We describe in detail the use cases, the setup, the data collection protocol, and the metadata structure of the corpus. In total, we recorded a multimodal dataset exceeding 1.5 million frames, comprising the synchronized multimodal streams described above, featuring Libras users across various in-car scenarios. The corpus is provided with gloss annotation of lexical signs and non-lexical sign language elements, specially designed to support the training and evaluation of deep neural networks for constrained-space recognition. In-vehicle signing offers a technically significant example of a constrained, occluded, and non-frontal environment. While recognizing the diverse communication strategies already employed by the Deaf community, identifying automotive-specific limitations provides a useful stepping stone for research into enhancing in-car accessibility and passenger quality of life.}
}

@inproceedings{borstell:26004:sign-lang:lrec,
  author    = {B{\"o}rstell, Carl},
  title     = {Seeing Who Is Signing and With Which Hand},
  pages     = {42--50},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26004.html},
  abstract  = {This study is a computer vision analysis of 4.5 hours of video data from 40 signers in the Swedish Sign Language Corpus, aiming to evaluate the reliability of classifying 1) who the main signer is at any given time during dyadic conversation, and 2) the dominant hand (i.e., handedness) of each signer. First, the distance moved by the hands of each signer is used to compare the manual activity between a) the two signers to determine whose hands are more active, and b) the hands of each signer to determine which hand is more likely to be dominant. Second, the height of the hands is used to compare their prominence in signing space between a) the two signers to determine whose hands are more prominent, and b) the hands of each signer to determine which hand is more likely to be dominant. The results show that while both distance and height approaches can reliably classify -- individually or combined -- the main signer in any segment of a conversation, the height approach is better at determining the overall handedness (right- or left-dominant) of signers. For the handedness classification, the optimal method turns out to be a two-step approach, first classifying the main signer per segment, then using only signer-relevant segments to classify handedness.}
}

@inproceedings{brown:26026:sign-lang:lrec,
  author    = {Brown, Matt and Ranum, Oline and Fish, Edward and Proctor, Heidi and Woll, Bencie and Bowden, Richard and Cormier, Kearsy},
  title     = {{SignGPT} and the Visual Language Toolkit},
  pages     = {51--60},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26026.html},
  abstract  = {SignGPT's Visual Language Toolkit (VLTK) aims to remove fundamental barriers to large scale sign language modelling by developing data-driven, linguistically grounded methods for continuous sign language recognition. We first identify fundamental issues around the ecological validity of potential data sources (e.g. broadcast media with interpreted signing or captions, scraping of social media). We contrast these with the currently highly resource-intensive development of curated sign language corpora based on linguistic principles. The VLTK addresses this scarcity of high quality sign language data by providing semi-automated glossing and other recognition tools, driving large scale corpus expansion without sacrificing linguistic principles. Unlike prior systems that rely on sparse glossing, the project integrates dense temporal annotation, non-manual and non-lexical feature tracking, and transformer-based architectures to capture the multimodal and spatial structure of signing. By aligning machine vision innovation with linguistic insights and community-embedded evaluation, SignGPT establishes a foundation for robust and extensible sign language models.}
}

@inproceedings{bulla:26021:sign-lang:lrec,
  author    = {Bulla, Jan and Kimmelman, Vadim},
  title     = {Processing Kinematics of Nonmanual Markers in R},
  pages     = {61--70},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26021.html},
  abstract  = {Nonmanual markers, such as head and eyebrow movements, eye blinks, and mouth shapes, are an important part of natural languages, both spoken and signed. Recent developments in computer vision have made it possible to extract facial and body landmark positions, as well as head-rotation measures, from 2D video recordings, which can be further processed to analyse the kinematics of nonmanual articulators. In this paper, we present an R-based workflow for processing raw outputs of computer vision toolkits with the goal of producing reliable and interpretable kinematic measurements of nonmanual articulators.}
}

@inproceedings{chan:26055:sign-lang:lrec,
  author    = {Chan, Frederick and Levow, Gina-Anne and Cheng, Qi},
  title     = {A Small Model for Big Articulators: Sign Language Detection With a Tiny Machine Learning Model},
  pages     = {71--79},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26055.html},
  abstract  = {This paper introduces a small (1,013 parameter) machine learning model for sign language detection in videos of isolated American Sign Language (ASL) signs. Our model aims to alleviate the time-consuming nature of producing sign clips for psycholinguistic study stimuli, sign dictionaries, and sign databases. Given a video where the signer starts from a resting position, signs a sign, and returns to the resting position for an arbitrary number of repetitions, the model detects frames in which signing occurs that can be used to segment video into clips of individual signs. We train and evaluate our model on data with precise coding of signing onset and offset from ASL-LEX 2.0, so that our model's annotations are suitable for psycholinguistics research. The model works on both real signs and pseudosigns, two types of stimuli needed for certain psycholinguistic studies. Our model's small size compared to the state-of-the-art (100K parameters or more) enables quick, bulk processing even on resource-constrained hardware. It achieves this by computing Instantaneous Visual Change (IVC), a 1D measure of changes in brightness in the input video, extracting features from the IVC-over-time signal with a convolution, and classifying the video frames as signing or non-signing with three neural layers.}
}

@inproceedings{czehmann:26064:sign-lang:lrec,
  author    = {Czehmann, Vera and Yazdani, Shakib and Hamidullah, Yasser and Nunnari, Fabrizio and Avramidis, Eleftherios},
  title     = {"A Sacred Bird Called the Phoenix". Auditing the most-used Parallel Corpus for {German} {Sign} {Language} Recognition and Translation},
  pages     = {80--92},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26064.html},
  abstract  = {This paper presents an empirical audit of the widely used RWTH-PHOENIX-2014T corpus, examining its suitability as a benchmark for sign language recognition and translation. Through human annotation of the training set and extensive sign-to-text back translation of the test set, we provide detailed statistics that indicate substantial quality issues, including information loss and lexical errors. Automatic scores comparing human sign-to-text back translations to the original speech-transcribed references are remarkably low, suggesting strong translationese effects and substantial paraphrasing, revealing limitations of lexical metrics in adequately scoring translation quality. Replacing the original speech-transcribed references with human sign-to-text back translations while scoring existing sign language translation systems reveals the lack of robustness of system evaluation with lexical metrics against this test set. Our findings highlight risks associated with relying on this corpus for model evaluation and call for more rigorous, linguistically grounded evaluation practices in sign language technology research. The back-translated test set and error annotations are made publicly available.}
}

@inproceedings{dai:26006:sign-lang:lrec,
  author    = {Dai, Zixuan and Sako, Shinji},
  title     = {Diffusion-Based {3D} Sign Language Motion Anonymization: A Feasibility Study on Balancing Identity Confusion and Semantic Preservation},
  pages     = {93--99},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26006.html},
  abstract  = {Sign language motions contain individual-specific kinematic features. As the engineering applications of sign language become more widespread, privacy protection of sign language data has emerged as a new challenge. This paper proposes a diffusion model-based approach for sign language motion anonymization. The proposed framework combines conditional diffusion processes with adversarial training to transform identity features while preserving semantic information. For the design and preliminary validation of the proposed model, we conduct a proof-of-concept experiment using a subset of 22 signers from the ASL100 dataset of WLASL, which demonstrates the feasibility of the proposed approach for sign language anonymization.}
}

@inproceedings{devos:26002:sign-lang:lrec,
  author    = {De Vos, Liesbet and Meurant, Laurence and Van Eecke, Paul and Beuls, Katrien},
  title     = {{GeoQuery-LSFB}: A {French} {Belgian} {Sign} {Language} Corpus with Procedural Semantic Annotations},
  pages     = {100--112},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26002.html},
  abstract  = {Procedural semantic representations describe the meaning of natural language expressions in terms of computer programs that can be evaluated against images, databases, knowledge graphs or other external resources. While resources annotated with procedural semantic representations already exist for a variety of spoken languages, such resources are still lacking entirely for signed languages. In this paper, we introduce GeoQuery-LSFB as a signed language extension to the multilingual GeoQuery corpus. Concretely, we have complemented each procedural semantic annotation from the original corpus with a corresponding French Belgian Sign Language (LSFB) expression that was phonetically transcribed from video recordings following the HamNoSys convention and annotated with French ID-glosses. The GeoQuery-LSFB corpus constitutes a new resource for a low-resource language and offers for the first time the possibility to study, from an onomasiological perspective, a signed language alongside a diverse variety of spoken languages.}
}

@inproceedings{dimou:26029:sign-lang:lrec,
  author    = {Dimou, Athanasia-Lida and Goulas, Theodoros and Tsatali, Marianna and Ntova, Tarsita and Hoffmann-Lamplmair, Doris and Fotinea, Stavroula-Evita and Efthimiou, Eleni and Teichmann, Birgit and Tsolaki, Magda and Atkinson, Joanna and Woll, Bencie},
  title     = {The De-Sign Platform: An Online Psychometric Tool for Dementia Screening of Deaf Older Adults in two Sign Languages, {GSL} and {{\"O}GS}},
  pages     = {113--119},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26029.html},
  abstract  = {This article presents the De-Sign platform, a web-based psychometric tool specifically designed for screening dementia in Deaf older adults (50+) who use Austrian Sign Language and Greek Sign Language, hereinafter {\"O}GS and GSL, respectively. The limited access to dementia services for these populations is primarily attributed to a scarcity of healthcare professionals fluent in sign language. Hence, enhancing access to relevant diagnostic services has become a priority. Currently, there is a significant lack of screening tools specifically developed to identify early signs of dementia that are compatible with national sign languages. To address this issue, the De-Sign Erasmus+ (2022-2025) project has employed suitable psychometric instruments that are adapted to the cultural contexts and linguistic norms of Deaf communities in Austria and Greece. The only existing Cognitive Screening Test (CST) for British Sign Language (BSL), used for diagnosing dementia in Deaf older adults, was initially adapted from English by Atkinson et al. (2015). The De-Sign platform hosts a cognitive screening test in {\"O}GS and GSL. Both were linguistically and culturally adapted from the BSL-CST test, providing two web-based versions of a psychometric tool that enables dementia screening within these populations.}
}

@inproceedings{duppen:26049:sign-lang:lrec,
  author    = {Duppen, Yves A. and De Sisto, Mirella and Mavridou, Ifigeneia and Brown, Phillip and Lepp, Lisa and Shterionov, Dimitar},
  title     = {Feature Analysis of {MoCap} Data for Optimised Sign Language Processing},
  pages     = {120--128},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26049.html},
  abstract  = {Despite the rapid advances in AI and its impact on machine translation (MT), when it comes to sign language (SL) processing and MT, there is a big bottleneck -- the lack of substantial quantities of quality signed data suitable for developing SLMT models. Marker-based motion capturing (MoCap) is a technique for tracing and recording the body movements (including hands and fingers) in 3D space with high precision and has been widely used in SL research. MoCap data is of high representative accuracy, making it very suitable for analysing movement patterns and articulatory features. However, it is also very complex -- a recording of a single sign may contain more than 240 entries over 156 features, making it difficult to process. In this paper, we analyse MoCap data aiming to understand which captured features are of high importance. Subsequently, we optimise the MoCap data representation, reducing the number of features, and assess how this feature-reduced data impacts the sign classification task. We organise MoCap features based on their importance and show how models trained on feature-reduced representations outperform those developed on the complete feature set.}
}

@inproceedings{fabre:26016:sign-lang:lrec,
  author    = {Fabre, Diandra and Lascar, Julie and Halbout, Julie and Vartampetian, Markarit},
  title     = {Leveraging Text-side Augmentation For Sign Language Translation},
  pages     = {129--139},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26016.html},
  abstract  = {Sign language translation faces significant challenges due to the scarcity of annotated data and the inherent complexity of sign languages. This paper presents a method to improve sign-to-text translation models by augmenting data on the text side. We conduct experiments using two state-of-the-art models on two publicly available datasets: PHOENIX-2014T for German Sign Language and Mediapi-RGB for French Sign Language. Our main contributions are: (1) augmenting the training sets of both datasets on the text side using a generative model, (2) evaluating the impact of paraphrasing on BLEU and BLEURT scores, and (3) analyzing the impact of paraphrasing on translation outputs. We observed a significant improvement in translation for both languages. This suggests that adding variability to the training dataset through paraphrasing can lead to better generalization of the models. These results are comparable to state-of-the-art methods that use more complex approaches, such as Visual-Language fine-tuning, to improve translation.}
}

@inproceedings{fernandezsoneira:26048:sign-lang:lrec,
  author    = {Fern{\'a}ndez Soneira, Ana and Bao-Fente, Mar{\'i}a C. and Gonz{\'a}lez-Montesino, Rayco H. and B{\'a}ez Montero, Inmaculada C.},
  title     = {The Construction of the {CORALSE} Corpus, Now and Beyond: A Tool for Documenting {Spanish} {Sign} {Language}},
  pages     = {140--147},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26048.html},
  abstract  = {The main objective of this paper is to present the experience of building the CORALSE corpus and to discuss the challenges that arise when attempting to provide a comprehensive description of a sign language. To this end, we address the following questions, drawing on the data obtained in the completed phases of the CORALSE project as well as on the foundational principles guiding the project's third phase. THE CORALSE CORPUS TODAY: How have we developed a linguistic corpus of sign language? What steps have we taken in developing the CORALSE corpus? Which informants have we recorded and what criteria have guided their selection? THE CORALSE CORPUS IN THE FUTURE: Which (native) languages do we prioritise when selecting informants? How do the perspectives of reference signers, interpreters, educators, and psycholinguists contribute to a more complete understanding of a sign language? Corpus linguistics is understood as a set of methodologies designed to study language through collections of digitised texts. Its development over recent decades---initially driven by advances in computing and, subsequently, by the emergence of the internet---represents one of the most significant transformations in contemporary linguistic research. The projects CORALSE: Annotated Inter-university Corpus of Spanish Sign Language and Textual Typology, Registers and Styles in Spanish Sign Language: New Data for the Expansion of the CORALSE Corpus adopt a corpus linguistics approach to collect, analyse and describe a representative sample of Spanish Sign Language (LSE). We also reflect on the types of linguistic data that are truly necessary to document the actual use of Spanish Sign Language.}
}

@inproceedings{ferrara:26005:sign-lang:lrec,
  author    = {Ferrara, Lindsay},
  title     = {The Community and Ethics Shaping the {Norwegian} {Sign} {Language} Corpus},
  pages     = {148--154},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26005.html},
  abstract  = {Recently, the Norwegian Sign Language Corpus has been published, and it includes language data from over 100 signers from around Norway. Collecting and building such multimodal signed language corpora have important implications for both research and deaf communities. However, consideration is needed to protect the personal nature of signed language data, while also making a long-term resource that is as accessible as possible to various community, research, and professional stakeholders. In addition, the potential exploitation of corpus resources by commercial and other interests, which are not necessarily aligned with the deaf community itself, must also be deliberated. Here, these seemingly opposing issues and the ethics that surround them are discussed. Current best practices in Open Science (including FAIR and CARE data principles), along with ethical discussions raised by scholars working, for example, in Deaf Studies, are shown to be important in navigating this complex research data landscape.}
}

@inproceedings{fiedler:26038:sign-lang:lrec,
  author    = {Fiedler, Anike and Schulder, Marc and Bleicken, Julian and Herrmann, Annika},
  title     = {Generations in the {DGS} {Corpus}: Evolving Outreach Activities and Cross-Generational Stories on Social Media in a Long-Term Corpus Project},
  pages     = {155--163},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26038.html},
  abstract  = {Social media has become a powerful tool for research projects, community outreach, science communication, and participant recruitment. Due to its differences from traditional media and presentation modes, it places a particular focus on producing very concise content that is entertaining and accessible while staying informative. In this paper, we describe how the long-term project DGS-Korpus, creators of a corpus and dictionary of German Sign Language, evolved its outreach strategies over time. One unique aspect of its unusually long project run-time of nineteen years is that it has involved several cases of multiple family members participating in the project at different points in time, resulting in cross-generational participation. The paper describes how the project's social media campaign uses these cross-generational connections to illustrate important aspects of the project, such as its relevance for cultural heritage and language identity, the different ways that members of the German deaf community were and are involved in the project, and its relevance to interpersonal connections.}
}

@inproceedings{filhol:26045:sign-lang:lrec,
  author    = {Filhol, Michael and Martinod, Emmanuella},
  title     = {Formalising Sign Language Depiction, Characterising Categories and Measuring Iconicity with {AZee}},
  pages     = {164--173},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26045.html},
  abstract  = {This paper deals with depiction in (French) Sign Language, the formal account AZee can provide, and how it compares to, validates, or simplifies the linguistic notions of classifiers and iconic structures. It reports on a partial encoding work on "Mocap1", a corpus with a high density of depicting structures, following the same method that led to the first AZee reference corpus "40 br{\`e}ves". The approach does not postulate classifiers or iconic structures as entities separate from lexical signs, and nonetheless manages to model the corpus data. We discuss the entailed possibility to rediscover some of the useful categories and, if so, define them from AZee's premises. We also show how a formal metric can be specified to measure iconicity in signed data. While this paper is of linguistic interest as it compares to existing theories, it also provides a concrete step to covering depicting discourse with AZee, thereby enabling automatic SL animation of depiction.}
}

@inproceedings{delagarza:26039:sign-lang:lrec,
  author    = {de la Garza, Lorena and Halbout, Julie and Lascar, Julie and Martinez, Niels and Curiel, Arturo and Gouiff{\`e}s, Mich{\`e}le and Braffort, Annelies},
  title     = {Extracting Signs from Weakly Aligned Sign Language Corpora: A Study on {LSF} and {LSM}},
  pages     = {174--183},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26039.html},
  abstract  = {This paper presents a framework for the automatic annotation of sign language data across different recording conditions, including original and interpreted content. The proposed approach integrates weak alignment, sign segmentation, and multiple instance learning with a contrastive loss. The resulting annotations are subsequently refined and filtered to enhance their reliability. Our method was applied to two historically related sign languages, French Sign Language (LSF) and Mexican Sign Language (LSM). This led to the creation of two signaries, comprising approximately 2k categories in LSF (25k occurrences) and 41 categories in LSM (1k occurrences). Both resources provide valuable support for future research in artificial intelligence and linguistics, particularly for comparative analyses between the two languages. A seminal analysis is presented as part of this paper.}
}

@inproceedings{gibet:26046:sign-lang:lrec,
  author    = {Gibet, Sylvie and Reverdy, Cl{\'e}ment and Marteau, Pierre-Fran{\c c}ois},
  title     = {An Annotation Formalism for a {French--LSF} Bilingual Corpus Supporting Sign Language Generation},
  pages     = {184--192},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26046.html},
  abstract  = {This paper introduces an annotation formalism for bilingual corpora of written French and French Sign Language (LSF), based on a manually-produced, expert transcription of LSF video data. The formalism captures the grammatical specificities of LSF, including spatial and iconic mechanisms, while explicitly encoding features that support motor programs for animated signing avatars. We propose a parameterized gloss-based approach, called PGloss-LSF, which integrates syntactic and semantic structures alongside motion features critical for accurate sign synthesis. We illustrate the framework with examples drawn from our bilingual corpus. The annotation process is incremental, ensuring internal consistency and computational tractability through a two-step evaluation: a qualitative assessment aligning generated signs with the annotation language, and a quantitative evaluation via automatic translation using large language models. By bridging the linguistic specificities of sign language with the computational requirements of sign synthesis, this work advances the integration of sign language corpora into multilingual resources and contributes to the standardization of sign language technologies.}
}

@inproceedings{gren:26013:sign-lang:lrec,
  author    = {Gren, Gustaf and Riemer Kankkonen, Nikolaus},
  title     = {A Pose-Based Pipeline for Annotation of Headshakes in Sign Language Corpora},
  pages     = {193--202},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26013.html},
  abstract  = {This paper introduces a pose-based pipeline designed to support scalable annotation of headshakes in sign language corpora. Motivated by the scarcity of annotated datasets and the need for quantitative typological research, the study evaluates whether automated detection can reduce human annotation effort. The system operates on yaw trajectories extracted with MediaPipe Holistic and uses sliding-window segmentation with neural sequence models (LSTM/CNN) to surface candidate segments for review. Training and evaluation are conducted on a subset of the German Sign Language (DGS) Corpus annotated to target grammatical headshakes functioning as negation rather than for every instance of headshakes. On the DGS dataset, the best-performing LSTM model achieves an F2-score of 0.45 and a recall of 0.63. Despite the narrow annotation scope, the pipeline reduces the search space: annotators need to review only 13{\%} of frames to recover 87{\%} of labeled instances. Error analysis indicates that many false positives correspond to plausible head movements excluded by the annotation criteria. A pilot transfer to Swedish Sign Language shows reduced effectiveness without adaptation, underscoring the need for alignment in cross-lingual transfer scenarios.}
}

@inproceedings{halbout:26028:sign-lang:lrec,
  author    = {Halbout, Julie and Braffort, Annelies and Gouiff{\`e}s, Mich{\`e}le and Fabre, Diandra and Lascar, Julie},
  title     = {Learning to Spot Signs from Named Entities. A Study on {French} {Sign} {Language}},
  pages     = {203--211},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26028.html},
  abstract  = {French Sign Language (LSF) is a low-resourced language, with few available corpora, most of which are only partially annotated. Previous work on other sign languages has explored automatic sign annotation using subtitles as weak supervision, existing signaries, or mouthing cues. This paper focuses on the corpus Matignon-LSF, first by leveraging lexical token spotting and then by studying named entities (locations, companies, persons). Accounting for named entities enables the automatic detection of 30{\%} to 100{\%} more signs per class and improves the spotting of rare signs. In addition, this work provides insights into the signing of named entities and contributes resources for improving LSF-to-French translation models.}
}

@inproceedings{imashev:26040:sign-lang:lrec,
  author    = {Imashev, Alfarabi and Alizadeh, Tohid},
  title     = {The Iterative Development and Evaluation Framework for Kazakh-Russian Signing Avatars Targeted to Native Deaf Signers},
  pages     = {212--225},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26040.html},
  abstract  = {Existing research predominantly focuses on already well-researched sign languages. However, the most extensive studies of sign language in Kazakhstan, which adhere to international standards, started about a decade ago. Native deaf signers in Kazakhstan can often suffer from insufficient educational opportunities, which may also result in limited reading proficiency. Sometimes, deaf signers can recognize letters and read words, but they may not fully understand the overall concept and need to break it down into a sequence of simpler ideas to comprehend it better. Consequently, signing avatars have the potential to interpret internet statements, movie subtitles, or YouTube videos, and this sign language production may increase accessibility and improve communication between deaf and hearing individuals, as well as between humans and avatars. An equally critical challenge is how to develop a tool that will help deaf signers evaluate the performance, appearance, and naturalness of signing avatars without relying on written text across all sign languages, particularly in underserved communities. This paper outlines the iterative development of the Kazakh-Russian Sign Language interpreting avatar, ongoing improvements to the evaluation instrument, and a comparative analysis of this instrument with another evaluation method designed to attain the same objective.}
}

@inproceedings{inan:26015:sign-lang:lrec,
  author    = {Inan, Mert and Imai, Saki and Marshall, Anna and Karel, Tessa and Alikhani, Malihe},
  title     = {Movement Coherence in High Visual Load Environments: Implications for Attention in Mixed-Hearing Classes},
  pages     = {226--238},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26015.html},
  abstract  = {Signed interpretation in movement-based instruction creates high visual load environments in which spoken language, sign language, and physical demonstration compete for the same perceptual channel. We present a participatory multimodal observational study of mixed-hearing movement and mindfulness classes in which Deaf, Hard of Hearing, and hearing participants practice together. Based on synchronized video recordings and instructor interviews, we examine how alignment across demonstration, signed instruction, and bodily execution is achieved and restored in real time. Drawing on theories of grounding, repair, and sign language interaction, we conceptualize movement coherence as alignment across these parallel streams and describe how breakdowns trigger observable attention shifts and distributed repair across participants, interpreters, and instructors. Across sessions, we identify recurrent coordination strategies including peer checking, freeze and scan, interpreter repositioning, tactile cueing, and pacing adjustment. Our findings provide an empirically grounded account of grounding under attentional constraint in inclusive embodied settings, with implications for sign language interpretation, multimodal discourse, and the design of accessible movement instruction. This paper includes deidentified materials derived from recorded sessions, including selected keyframes, structured interactional annotations, and anonymized instructor and participant survey responses.}
}

@inproceedings{khan:26047:sign-lang:lrec,
  author    = {Khan, Sarmad and McLoughlin, Simon and Murtagh, Irene},
  title     = {A Comparative Analysis of Traditional and Contemporary Visual Features for Computational Annotation of {Irish} {Sign} {Language}},
  pages     = {239--247},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26047.html},
  abstract  = {Automatic annotation of sign language data is critical for advancing linguistic research and developing sign language technologies, yet it remains a major bottleneck due to the inherently motion-based and multi-modal nature of signing. Irish Sign Language, like many sign languages, presents challenges for computational annotation and sign language processing due to limited annotated corpora and the inherent difficulty of reliably annotating movement, trajectories, and coarticulation across manual and non-manual articulators. This paper presents an automated computational framework for gloss-level annotation support in Irish Sign Language, designed to assist scalable corpus annotation by learning motion-related cues directly from sign language videos. Using ELAN-aligned segments from the Signs of Ireland Corpus, we compare contemporary self-supervised visual representations with traditional pose-based features derived from explicit skeletal tracking, evaluating three feature configurations: DINOv2, MediaPipe, and multi-modal fusion. Our results show that self-supervised visual embeddings achieve the highest average accuracy (86.12{\%}), outperforming both multi-modal fusion (84.28{\%}) and pose-based representations (76.74{\%}). This indicates that recent visual models can implicitly encode linguistically relevant motion information, including articulator movement and transitional dynamics, reducing the need for explicit landmark extraction in practical annotation pipelines. Overall, this work provides empirical guidance and a deployable computational framework to support computational annotation and enrichment of sign language corpora.}
}

@inproceedings{khristoforova:26050:sign-lang:lrec,
  author    = {Khristoforova, Evgeniia and Poryadin, Roman},
  title     = {{HeSLEx}: A Novel Online Questionnaire for Heritage Sign Language Research},
  pages     = {248--255},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26050.html},
  abstract  = {In this paper, we present Heritage Sign Language Experience (HeSLEx), a novel online sociolinguistic questionnaire adapted from the heritage spoken language survey HeLEx (Tomi{\'c} et al., 2023) to provide a standardized community profile prior to data collection. Heritage sign languages are minority sign languages used by Deaf signers in migration contexts and thus offer a unique window on bilingualism in the visual modality. HeSLEx is designed to be visual-first: most content is delivered as videos featuring signing in Russian Sign Language (RSL) by a community member in a JavaScript/jsPsych interface. To accommodate heterogeneous RSL comprehension, each video includes optional Russian and German text hidden behind a "Show text" button. HeSLEx adds sign-specific modules, including participant and parental hearing status; modality-appropriate proficiency ratings (signing/comprehension for RSL and German Sign Language; reading/writing for Russian and German); educational histories and language(s) of instruction; interactional contexts central to Deaf life (including Deaf clubs); and Deaf-centered identity and language-attitude measures. Many items use slider scales to yield continuous predictors. The tool is designed to be adaptable to other sign language pairs in the framework of heritage language research and beyond.}
}

@inproceedings{klezovich:26051:sign-lang:lrec,
  author    = {Klezovich, Anna and Mesch, Johanna and Henter, Gustav Eje and Beskow, Jonas},
  title     = {Comparison of Low Bitrate Quantizers for Encoding {Swedish} {Sign} {Language}},
  pages     = {256--261},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26051.html},
  abstract  = {This paper investigates the bitrate--distortion trade-off of different discrete representations for Swedish Sign Language (STS) using the STS Mocap v1 motion capture dataset. We compare the K-Means algorithm with the Residual Vector Quantized Variational Autoencoder (RQ-VAE) to determine how efficiently each method preserves salient motion information at low bitrates. The results show that RQ-VAE consistently achieves lower reconstruction error than K-Means at matching bitrates, particularly for body motion, and better preserves the signing space volume. We further demonstrate that quantized representations can serve as conditioning for a flow-matching generative model, producing plausible but still imperfect sign sequences at low bitrates. These findings highlight the advantages of vector quantized models for efficient sign language motion encoding.}
}

@inproceedings{kopf:26030:sign-lang:lrec,
  author    = {Kopf, Maria and Konrad, Reiner and Langer, Gabriele and Schulder, Marc and K{\"o}nig, Lutz},
  title     = {Exploring Aspects of Spontaneous Signing in the {DGS} {Corpus}},
  pages     = {262--274},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26030.html},
  abstract  = {Most use of sign language is spontaneous, unplanned, embedded in a one-to-one situation and transient. General sign language corpora aim at such naturalistic data. Thus it can be expected that they include phenomena of spontaneous language similar to the ones described for spontaneous speech in vocal languages: that is, (dis)fluencies such as pauses, hesitations, errors, false starts and repairs as well as discourse markers. In this paper we explore which of the known phenomena of spontaneous language from previous research on vocal and sign languages could be identified in the DGS Corpus using the annotations at hand. We describe our search strategies, consider additional annotation tiers for spontaneous language, and provide examples for the phenomena identified.}
}

@inproceedings{lepp:26032:sign-lang:lrec,
  author    = {Lepp, Lisa and De Sisto, Mirella and Shterionov, Dimitar},
  title     = {Two-Handed Signs and Handedness: Phonological Implications for Sign Language Structure},
  pages     = {275--286},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26032.html},
  abstract  = {Handedness ---the use of one versus two hands in sign production--- has traditionally been discussed in relation to dominance and symmetry conditions, yet it remains underrepresented in formal phonological models of sign languages. This paper argues that handedness constitutes a core phonological parameter that directly influences the structure and interaction of movement, handshape, location, and orientation. Building on hierarchical and dependency-based approaches, we propose an adapted phonological dependency model that explicitly integrates handedness in the representation of manual articulators. In one-handed signs, features are specified for a single active hand. In two-handed signs, feature distribution is constrained by symmetry and dominance conditions, which regulate whether the hands must share features or may differ in a structurally restricted way. This structural encoding accounts for variation phenomena such as weak add, weak prop, and weak drop as constrained adjustments within the phonological system. From a technical perspective, this refinement suggests more formal restrictiveness and empirical discriminability within the feature geometries, reduced representational ambiguity, and improved empirical testability across theoretical, corpus-based, and computational implementations, strengthening the interface between phonological theory and sign language technology.}
}

@inproceedings{loy:26043:sign-lang:lrec,
  author    = {Loy, Lisa and Morgan, Hope E.},
  title     = {{HNS2CF}: A Mapping Tool from {HamNoSys} to {SL} {CatForm}},
  pages     = {287--296},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26043.html},
  abstract  = {Over the past six decades, a variety of systems have been developed for representing sign language forms, from Stokoe Notation (Stokoe, 1960) to SignWriting (Sutton, 1999) and lexical database schemas. Each was designed with specific goals and applications, leading to a fragmented landscape of representations. To enable greater interoperability and data sharing among sign language users and researchers, we propose a robust approach to translating between notation systems. As a first step in this direction, we introduce a formal mapping framework between HamNoSys and the SL CatForm coding schema, describe its implementation, and present empirical evidence of its performance. An extensive evaluation of mapping mismatches revealed improvements to the mapping logic needed to further advance the HNS2CF mapping tool. However, the initial version of the system already achieves an overall accuracy of 76.7{\%} and an in-depth analysis reveals that many apparent mismatches stem from annotator disagreement rather than mapping errors, indicating that the tool's actual accuracy is even higher. These results demonstrate the feasibility and promise of establishing mapping mechanisms across sign representation systems.}
}

@inproceedings{lunajimenez:26008:sign-lang:lrec,
  author    = {Luna-Jimenez, Cristina and Eing, Lennart and Esteban Romero, Sergio and Schneeberger, Tanja and Gebhard, Patrick and Nunnari, Fabrizio and Andr{\'e}, Elisabeth},
  title     = {Emotion Recognition in {German} {Sign} {Language} with Facial Action Units},
  pages     = {297--305},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26008.html},
  abstract  = {Emotion Recognition research in Sign Languages is still in its infancy. There is still a lack of knowledge about appropriate annotation guidelines and about the impact that facial expressions, body postures and head positions have on recognizing emotions while signing, given that sign language encompasses manual and non-manual cues with linguistic purposes. In this article, we present an acquisition protocol to record acted emotions in German Sign Language under four scenarios (High-Valence and High-Arousal, High-Valence and Low-Arousal, Low-Valence and High-Arousal, and Low-Valence and Low-Arousal). The goal is to provide a reference dataset to explore the use of machine learning techniques for an automated classification of emotions in sign language utterances. As a baseline reference, we trained static models with features extracted from the facial muscle activations. The best model achieved an accuracy of 68.84{\%} and an F1 of 67.96{\%} with a random forest trained on the statistics extracted from Action Units. These results highlight the importance of facial expression in sign language, not only for carrying linguistic information but also for transmitting emotions. Results also indicate challenges in detecting emotions in the High-Valence and Low-Arousal scenario, which suggests lines for future investigation.}
}

@inproceedings{lunajimenez:26011:sign-lang:lrec,
  author    = {Luna-Jimenez, Cristina and Eing, Lennart and Withanage Don, Daksitha and Gonz{\'a}lez, Marco and Nunnari, Fabrizio and Perniss, Pamela and Gebhard, Patrick and Andr{\'e}, Elisabeth},
  title     = {{DGS-BIGEKO}: A Dataset for Hypothetical Emergency Scenarios in {German} {Sign} {Language}},
  pages     = {306--314},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26011.html},
  abstract  = {In this article, we describe DGS-BIGEKO, a sign language dataset containing a conversation in a crisis scenario signed by a professional interpreter in German Sign Language (DGS). The dataset comprises 14 sentences with common questions and answers from protocols occurring in emergency call scenarios translated into DGS. Additionally, the dataset contains signs for an additional 108 concepts that are relevant to emergency call scenarios. The dataset is intended to support research in sign language linguistics and sign language machine translation by providing resources in a very specific domain, where no previous resources are available in DGS. The dataset is freely available for research purposes at the following address: https://doi.org/10.5281/zenodo.18458557}
}

@inproceedings{maina:26067:sign-lang:lrec,
  author    = {Maina, Ezekiel and Wanzare, Lilian and Obuhuma, James},
  title     = {Perceptual Validation of {3D} Pose-Guided Sign Language Synthesis},
  pages     = {315--323},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26067.html},
  abstract  = {Sign language corpora face a structural tension between open-access requirements and the irreducible biometric identity embedded in visual, gestural data. While 3D pose estimation enables signer-agnostic abstraction, the representational adequacy of pose-based modeling for preserving linguistic structure remains underexplored. This paper introduces a perceptually-grounded kinematic modeling framework that formalizes 3D landmark sequences as an intermediate linguistic representation and validates their adequacy through avatar-mediated synthesis and large-scale human evaluation. Using 30370 gloss-level Kenyan Sign Language (KSL) segments derived from the AI4KSL corpus, we construct normalized 3D motion trajectories via MediaPipe Holistic. These trajectories are retargeted to parameterized avatars through a constrained kinematic mapping that preserves non-manual marker geometry and articulatory timing. We define a dual evaluation paradigm combining geometric fidelity metrics (PCK=92.7{\%}, OKS=0.88, PCP=91.5{\%}, PDJ>85.3{\%}) with perceptual constructs measured across a statistically powered Deaf participant cohort (N=384). Results demonstrate a strong predictive relationship between structural joint precision and perceived gesture clarity (r=0.76, p<.01), suggesting that linguistic adequacy is partially recoverable from normalized kinematic structure. Furthermore, representational diversity in avatar instantiation significantly increases perceived inclusivity without degrading intelligibility. These findings establish pose-based motion abstraction not merely as an anonymization technique but as a viable corpus-level modeling layer for ethically sustainable language in motion.}
}

@inproceedings{malaia:26027:sign-lang:lrec,
  author    = {Malaia, Evie A. and Krebs, Julia and Harbour, Eric and Martetschl{\"a}ger, Julia and Schwameder, Hermann and Roehm, Dietmar and Wilbur, Ronnie B.},
  title     = {The Displacement-Velocity Dissociation in Sign Language Learning: Kinematic Signatures of Event Structure in Novice {{\"O}GS} Signers},
  pages     = {324--332},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26027.html},
  abstract  = {This study investigates how adult learners acquire linguistically contrastive movement patterns in Austrian Sign Language ({\"O}GS), focusing on the telic/atelic distinction predicted by the Event Visibility Hypothesis. Telic verbs (bounded events) are produced by proficient Deaf signers with shorter duration and temporally precise, low-entropy velocity profiles, whereas atelic verbs (unbounded processes) show more continuous motion. Using 3D motion capture (300 Hz), we compared 8 novice learners (6--12 weeks of instruction) with 6 proficient Deaf signers across 71 verbs. Linear mixed-effects models revealed a dissociation between gross movement patterning and fine-grained velocity profile structure in learner productions. Learners correctly reproduced the proportional path-length contrast between telic and atelic verbs, replicating the gross spatial distinction of proficient signers. However, temporal marking of the telic/atelic contrast was underproduced: learners showed a significantly smaller duration difference between verb types than proficient signers, while total path length did not differ significantly between verb types or groups. Temporal control showed significant between-group differences: learners exhibited elevated sample entropy, with non-proficient velocity profiles within individual sign productions, though spatial consistency across trials (STI) was comparable to that of proficient signers. Peak velocity did not differ between groups, suggesting that learners can reach target speeds but cannot yet modulate temporal structure reliably. These findings support distinct learning trajectories for gross movement patterning and fine-grained motion complexity, and demonstrate that velocity profile structure within signs constitutes a core linguistic target in sign language learning.}
}

@inproceedings{marrocu:26025:sign-lang:lrec,
  author    = {Marrocu, Maria Grazia},
  title     = {{CEFR-Based} Assessment in Sign Languages: The Case of {LSE} and Perspectives for {LIS}},
  pages     = {333--340},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26025.html},
  abstract  = {This study examines the application of the Common European Framework of Reference for Languages (CEFR) to Spanish Sign Language (LSE) in a university context, with reference to the Italian situation (Council of Europe, 2020). In Spain, CEFR descriptors are already integrated into academic programmes for the assessment of LSE, whereas in Italy the context remains uneven due to the lack of shared criteria for the teaching and assessment of Italian Sign Language (LIS). The research project, conducted jointly by Ca' Foscari University of Venice and Rey Juan Carlos University of Madrid, adopts a longitudinal and comparative design focusing on the first three CEFR proficiency levels (A1, A2, B1) of LSE among L2M2 learners (second language second modality). A mixed-methods approach combining classroom observations, self-assessment instruments, and standardised assessment rubrics is used to analyse the alignment between students' self-assessments and instructors' external evaluations, with particular attention to linguistic and metacognitive awareness. The findings show increasing accuracy in self-assessment as proficiency develops, alongside recurring issues such as the overestimation of receptive skills and the underestimation of productive competence. These results highlight the need for targeted assessment interventions and contribute to the development of CEFR-consistent evaluation practices for sign languages.}
}

@inproceedings{morgan:26042:sign-lang:lrec,
  author    = {Morgan, Hope E. and Isard, Amy and Dang, Anh},
  title     = {Improving phonological distance measures for signs: the {CatFormCompare} tool},
  pages     = {341--350},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26042.html},
  abstract  = {This paper describes the CatFormCompare tool, designed to enable the comparison of phonological content between pairs of signs, especially in larger datasets. With this tool and a schema for coding categorical form (the SL CatForm coding schema), a pipeline is created that allows a feedback mechanism for advancing research---specifically by directly addressing one of the hard problems in sign language phonology: how to extract true minimal pairs from datasets coded for categorical form? Solving this problem would simultaneously improve phonological distance measurements for sign languages because it would mean that the units for measuring distance are grounded in the linguistic structure of the language and not simply a by-product of the coding system. Here we report on the tool and the first evaluation of its functioning.}
}

@inproceedings{mostowski:26053:sign-lang:lrec,
  author    = {Mostowski, Piotr and Kuder, Anna and W{\'o}jcicka, Joanna},
  title     = {Assisting Corpus Annotation: Automatic {BIO-Tagging} of Clause-Like Units in {Polish} {Sign} {Language}. A Pilot Study on Corpus Data},
  pages     = {351--360},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26053.html},
  abstract  = {The creation of large-scale sign language corpora is often bottlenecked by the labour-intensive process of multi-layered annotation that requires manual analysis. One of the annotation steps is the challenging and time-consuming task of segmenting continuous signing into clause-like units (CLUs). In this paper, we propose an automated segmentation framework for Polish Sign Language (PJM) designed to support manual annotation. To detect sentence boundaries, we adapt the Multi-Stage Temporal Convolutional Network (MS-TCN) architecture, enhanced with a Channel Attention mechanism, to effectively fuse multimodal skeleton features (hands, body, and face) extracted via MediaPipe. We evaluate the model on a diverse subset of the PJM Corpus (40 video files, 25 signers), containing nearly 16,000 manually annotated clauses prior to the start of this study. The proposed method achieves a Segmental F1-score of 75.43{\%} at IoU = 0.10 and 57.52{\%} at IoU = 0.50, demonstrating a strong capability in localising sentence boundaries. Furthermore, ablation studies reveal that fusing manual kinematics with non-manual prosodic cues (face) yields a significant performance gain (+13.6 pp) over unimodal baselines, empirically confirming the linguistic necessity of incorporating both manual and non-manual articulators in the process of sentence delimitation. The solution offers a viable means for reducing CLU annotation time by automatically generating high-quality clause boundary proposals.}
}

@inproceedings{murtagh:26031:sign-lang:lrec,
  author    = {Murtagh, Irene and Schulder, Marc and Herrmann, Annika and Paulus, Liona and Bleicken, Julian and Blekos, Kostas and Konstantakopoulos, Athanasios and Antzakas, Klimis and Kosmopoulos, Dimitrios and Valls, Eva and Marques, Ricardo and Blat, Josep and Karampidis, Konstantinos and Elsendoorn, Ben},
  title     = {Introducing {VISTA-SL}: A Multilingual e-Learning Platform for Deaf and Hearing Learners of Sign Languages},
  pages     = {361--370},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26031.html},
  abstract  = {This article introduces the VISTA-SL project, which aims to create an integrated e-learning platform for four European sign languages: German Sign Language, Greek Sign Language, Irish Sign Language, and Dutch Sign Language. Designed as a complement to face-to-face classes, the VISTA-SL platform will combine expertise in sign language education and education technologies to provide an adaptive and interactive learning environment suitable for deaf, hard of hearing and hearing users seeking to learn a sign language, whether it constitutes their first language or not. Building on a co-ordinated curriculum that covers vocabulary, grammar and Deaf culture materials, the platform will provide video material presented by deaf L1 signers, together with games and gamification features to motivate learning, while also providing several assistive technologies. By leveraging cutting edge language processing and computer vision approaches, the platform will provide augmented reality feedback, 3D avatars and an LLM-based virtual instructor, as part of the learning environment. VISTA-SL is developed in collaboration with end-user focus groups, comprising deaf, hard of hearing and hearing individuals. This will serve to ensure that the educational platform aligns with the expectations and needs of its intended users.}
}

@inproceedings{obrien:26035:sign-lang:lrec,
  author    = {O'Brien, Catherine and Sant, Gerard and M{\"u}ller, Mathias and Ebling, Sarah},
  title     = {Evaluation of Pose Estimation Systems for Sign Language Translation},
  pages     = {371--386},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26035.html},
  abstract  = {Many sign language translation (SLT) systems operate on pose sequences instead of raw video to reduce input dimensionality, improve portability, and partially anonymize signers. The choice of pose estimator is often treated as an implementation detail, with systems defaulting to widely available tools such as MediaPipe Holistic or OpenPose. We present a systematic comparison of pose estimators for pose-based SLT, covering widely used baselines (MediaPipe Holistic, OpenPose) and newer whole-body/high-capacity models (MMPose WholeBody, OpenPifPaf, AlphaPose, SDPose, Sapiens, SMPLest-X). We quantify downstream impact by training a controlled SLT pipeline on RWTH-PHOENIX-Weather 2014 where only the pose representation varies, evaluating with BLEU and BLEURT. To contextualize translation outcomes, we analyze temporal stability, missing hand keypoints, and robustness to occlusion using higher-resolution videos from the Signsuisse dataset. SDPose and Sapiens achieve the best translation performance (BLEU ~11.5), outperforming the common MediaPipe baseline (BLEU ~10). In occlusion cases, Sapiens is correct in all tested instances (15/15), while OpenPifPaf fails in nearly all (1/15) and also yields the weakest translation scores. Estimators that frequently leave out hand keypoints are associated with lower BLEU/BLEURT. We release code that not only reproduces our experiments but also considerably lowers the barrier for other researchers to use alternative pose estimators.}
}

@inproceedings{okrouhlikova:26017:sign-lang:lrec,
  author    = {Okrouhl{\'i}kov{\'a}, Lenka},
  title     = {Designing a Data Model for a Diachronic Sign Language Database: A Case Study of Nineteenth-Century Bohemian Sources},
  pages     = {387--397},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26017.html},
  abstract  = {Diachronic research on sign languages is limited by the fragmentary and heterogeneous nature of historical documentation. Eighteenth- and nineteenth-century printed texts and manuscripts contain valuable lexical data, but their descriptions vary in precision, terminology, and representational conventions. This paper proposes a structured data model for a diachronic sign language database designed to systematise such archival materials. The proposed model adopts a multi-layered architecture that separates primary evidence from analytical interpretation, distinguishes attested from inferred sign parameters, applies graded confidence levels, and encodes structural, iconic, and metaphorical properties in parallel layers. Detailed source metadata ensures traceability and explicit representation of uncertainty. The model is illustrated through sign attestations drawn from nineteenth century Bohemian sources. The case study demonstrates that even fragmentary records, most commonly documented in dictionaries and pedagogical materials through written descriptions or illustrations, can be systematically represented within a unified data model suitable for structured comparison and diachronic analysis. The proposed model may also provide a methodological basis for comparable work on other European sign languages.}
}

@inproceedings{orazumbekov:26060:sign-lang:lrec,
  author    = {Orazumbekov, Batyrbek and Bayanov, Daniyal and Kaltay, Aruzhan and Sandygulova, Anara},
  title     = {A Video-Based Reverse Dictionary for Sign Language Using Gesture Similarity},
  pages     = {398--407},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26060.html},
  abstract  = {Sign language recognition systems are usually modeled as classification systems that map gesture videos to pre-defined glosses. But these systems do not allow similarity searches, where a user can search for similar gestures without knowing the corresponding gloss. This paper presents a pose-based video-to-video search framework for isolated signs, which acts as a reverse gesture dictionary. The system employs keypoints on the skeletal structure instead of RGB images. Two architectures are proposed for modeling temporal information: an encoder with self-attention in a Transformer architecture and a Spatial-Temporal Graph Convolutional Network (ST-GCN). The embedding space is optimized using metric learning objectives, including supervised contrastive learning and ArcFace angular margin loss. The performance of the retrieval system is evaluated on the WLASL dataset using ranking metrics like Recall@K and mean Average Precision (mAP). Experiments reveal that the temporal modeling using the Transformer architecture is an improvement over the graph-based modeling approach in the low-shot learning scenario. The attention-based temporal pooling approach further enhances the ranking quality, with the best-performing model achieving an mAP of 0.237 on the WLASL validation set. Cross-dataset evaluation on a 226-label AUTSL dataset reveals non-trivial generalization performance on the unseen dataset, despite training only on the WLASL dataset.}
}

@inproceedings{othamar:26059:sign-lang:lrec,
  author    = {Othamar, Elisabeth and Scherrer, Yves},
  title     = {{Norwegian} {Sign} {Language}: Overview of Resources and Experiments with Automatic {SignWriting} Transcription},
  pages     = {408--418},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26059.html},
  abstract  = {Norwegian Sign Language (NTS) remains an under-resourced sign language despite its official recognition in Norway since 2022. The limited availability of structured, reusable, and publicly accessible datasets continues to hinder both linguistic research and the development of sign language technologies such as recognition and translation systems. This paper presents an overview of existing datasets and potential data sources for NTS, categorizing them by accessibility, format, and suitability for computational research. We further discuss legal, ethical, and practical considerations related to data reuse, including copyright and privacy constraints. In addition, we report on a series of pilot experiments exploring alternative data acquisition strategies, including dictionary videos, SignWriting resources, and broadcast news material. These preliminary experiments explore whether automatic SignWriting transcription can serve as an intermediate representation for NTS, and examine its potential role in sign identification within continuous signing. The aim of this work is both to document ongoing efforts and to support future initiatives toward the sustainable development of NTS resources.}
}

@inproceedings{poitier:26022:sign-lang:lrec,
  author    = {Poitier, Pierre and Fink, J{\'e}r{\^o}me and Basso Madjoukeng, Ariel and Couplet, Adelaide and Leleu, Margaux and Fr{\'e}nay, Beno{\^i}t},
  title     = {Long-Term Sign Language Data Crowdsourcing Through Collaborative Lexicons},
  pages     = {419--428},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26022.html},
  abstract  = {While there exists a multitude of different sign languages (SLs) across the world, Deaf communities often lack the digital tools required to document and process their languages. In this work, we introduce Mot-Signe (MOSI), an application designed in close collaboration with actors from the French Belgian Deaf community. Our tool enables users to search for French Belgian Sign Language (LSFB) translations or to propose new ones by recording signs themselves. This crowdsourcing approach facilitates the collection of SL data in the wild, enriching the available documentation on LSFB and proposing an innovative response to the data scarcity issue inherent to sign language processing. To evaluate the sustainability of this community-driven data collection, a longitudinal user study was conducted. Following its public release, MOSI demonstrated significant real-world adoption, enabling the collection of over 3,000 distinct LSFB signs. Notably, MOSI captures highly valuable linguistic variations and specialized vocabulary often absent from traditional corpora.}
}

@inproceedings{renner:26009:sign-lang:lrec,
  author    = {Renner, Fabian and Withanage Don, Daksitha and Andr{\'e}, Elisabeth and Luna-Jimenez, Cristina},
  title     = {Effect of Data Augmentation with Multi-View Perspectives of Signers on the {DGS-Fabeln-1} Dataset},
  pages     = {429--437},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26009.html},
  abstract  = {Sign languages constitute the principal form of communication for deaf communities across the globe. Nevertheless, the development of reliable Continuous Sign Language Translation (CSLT) systems is constrained by the lack of sufficient data and models able to handle spatio-temporal information. In this article, we explore the effect of adding multiview perspectives of the signer to the training set as data augmentation using the UniSign framework for the DGS-Fabeln-1 dataset. Our results reveal that increasing dataset size and using multiple camera perspectives significantly improve performance, with the best configurations achieving BLEU-4 scores of 4.20{\%}. These results provide a competitive baseline for the DGS-Fabeln-1 dataset and guidance for further optimizations of CSLT systems.}
}

@inproceedings{sazonov:26056:sign-lang:lrec,
  author    = {Sazonov, Dmitriy and Gurbuz, Sevgi and Malaia, Evie A. and Martetschl{\"a}ger, Julia and Schwameder, Hermann and Roehm, Dietmar and Wilbur, Ronnie B.},
  title     = {Lost in Expression: Diagnosing Systemic Challenges with Non-Manual Generalization in Sign Language Understanding Tasks},
  pages     = {438--449},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26056.html},
  abstract  = {Incorporation of non-manual information is one of the most challenging aspects of Sign Language Understanding (SLU), as these features contribute to the semantic, syntactic, and pragmatic structure of signed communication and are critical to compositional meaning at the sign, phrase and sentence level. Despite their key linguistic role, non-manuals are often an afterthought in SLU model and dataset design, with many recent models still neglecting to implement non-manual analysis or evaluate how articulators beyond the hands contribute to the model prediction. In this work, we identify and analyze the challenges relating to recognition of non-manuals and generalization of their linguistic roles encountered by SLU models, offering new explanations for failures to properly model non-manual behavior. We perform a case study on the subtasks of Continuous Sign Language Recognition and Sign Language Translation by applying the Uni-Sign model to Isharah-1000, a Saudi Sign Language dataset. Using controlled partitioning and feature attribution, we further analyze model behavior and failure cases. With this work we hope to set the stage for the creation of diagnostic frameworks for generalization of non-manuals.}
}

@inproceedings{schiefner:26019:sign-lang:lrec,
  author    = {Schiefner, Annika and Otterspeer, Gom{\`e}r and S{\"u}mer, Beyza and Roelofsen, Floris},
  title     = {The {SignBeach} Dataset of {Dutch} {Sign} {Language} ({NGT}) signs},
  pages     = {450--458},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26019.html},
  abstract  = {This paper presents the SignBeach dataset, including 1401 lexical signs from Dutch Sign Language (NGT). The items in this dataset represent everyday vocabulary appropriate for primary school children and are part of a larger research project, investigating sign learning in a digital environment. Each sign is presented by four deaf signers in a controlled studio environment. For each item, high quality video recordings are available from five synchronised cameras, providing rich multi-view visual input suitable for linguistic analysis and the development of computer vision pipelines. In addition, we provide three types of computational derivatives: keypoint estimates using MediaPipe, handshape estimates using HaMeR, and 3D body reconstructions using SAM 3D Body. Signs are aligned with lexical entries in the NGT Signbank to provide interoperability of the database with other NGT resources. We outline the construction of the dataset and provide information on opportunities for reuse, for example in the context of psycholinguistic studies or in the context of sign language technology. All materials are available for non-commercial reuse under a CC BY-NC 4.0 license.}
}

@inproceedings{susman:26023:sign-lang:lrec,
  author    = {Susman, Margaux and Miquel Blasco, Carla and Bulla, Jan},
  title     = {Comparing Computer Vision Instruments for Eye Blink Analysis},
  pages     = {459--467},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26023.html},
  abstract  = {We compared four tools for analyzing blink velocity and amplitude, examining how MediaPipe, OpenFace, InsightFace, and 3DDFA compare in terms of blink analysis. Building on previous findings that different tools yield different results (Kuznetsova and Kimmelman, 2024), we explored their fixed-effect estimates across linguistic versus non-linguistic blinks, within non-linguistic blinks (eye watering blinks versus gaze-direction-change blinks), and within linguistic blinks (prosodic/turn-taking blinks, sign-aligned/list-marking blinks and backchanneling blinks), while controlling for head pose (Pitch, Roll, Yaw). Using mixed-effects linear models on annotated French Sign Language data, we found tool-specific patterns: consistent negative effects for InsightFace and MediaPipe, but positive effects for 3DDFA. In addition, the influence of head pose varied across models (Pitch is strongly positive in MediaPipe but negative in InsightFace and some 3DDFA models; Roll and Yaw also switch importance across tools). These discrepancies highlight methodological biases that can distort linguistic interpretations.}
}

@inproceedings{vandendriessche:26012:sign-lang:lrec,
  author    = {Vandendriessche, Toon and De Coster, Mathieu and Dambre, Joni},
  title     = {Grounding Sign Language Representation Learning in Phonology},
  pages     = {468--476},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26012.html},
  abstract  = {Sign language recognition systems are commonly trained using gloss-level supervision, treating signs as holistic lexical units. While effective for classification, such approaches entangle sub-lexical structure and fail to capture the phonological parameters that govern sign formation, limiting interpretability, robustness, and cross-lingual transfer. In this work, we propose a phonologically informed representation learning architecture that explicitly structures the latent space according to linguistic principles. Grounded in the Dependency Model -- a phonological model used to describe Flemish Sign Language (VGT) -- our hierarchical architecture disentangles parameter-specific subspaces for handshape and location and is trained with multi-label phoneme supervision. To evaluate whether phonological information is directly encoded in the geometry of the embedding space, we introduce a non-parametric probing method that measures neighbourhood consistency across increasing scales. We show that conventional gloss-based networks achieve reasonable performance only for very small neighbourhoods, reflecting incidental visual similarity. In contrast, our disentangled representations maintain stable performance for larger neighbourhoods. This behaviour indicates that phonological structure is preserved across broader regions of the space, yielding more coherent and robust embeddings. Together, our results show that explicit phonological supervision -- and crucially, disentangled representation learning -- provides a principled foundation for interpretable and transferable sign language representations.}
}

@inproceedings{vandenitte:26018:sign-lang:lrec,
  author    = {Vandenitte, S{\'e}bastien and Hern{\'a}ndez, Doris and Ker{\"a}nen, Jarkko and Jantunen, Tommi and Puupponen, Anna},
  title     = {Towards Integrating Pose Estimation with Neuroimaging for the Analysis of Signed Language Video Stimuli},
  pages     = {477--483},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26018.html},
  abstract  = {We present our project revisiting the video stimuli of an EEG study in Finnish Sign Language to ask whether kinematic properties of the videos impacted their processing by study participants. For each stimulus, an average measure of brain responses across participants is computed. To analyse movement properties in the video stimuli, we rely on MediaPipe for pose estimation. We subsequently report on our project to perform an exploratory analysis of the kinematic properties of the videos which may affect their processing. We focus on several landmarks: the signer's right and left wrists, nose, and upper torso. Our goal is to obtain a kinematic profile of each stimulus video using several average kinematic variables: velocity and acceleration for all selected landmarks, distance between the wrists, and surface covered by the triangular area defined by the left hand, the right hand, and the nose. We conclude by discussing the potential benefits and limitations of this methodological approach.}
}

@inproceedings{wahl:26036:sign-lang:lrec,
  author    = {W{\"a}hl, Sabrina},
  title     = {{KWIC} view on Constructed Action ({CA}) and its Collocates in {German} {Sign} {Language} ({DGS}) -- Possibilities and Limitations},
  pages     = {484--490},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26036.html},
  abstract  = {Constructed action (CA) is a phenomenon that is used in signed discourse to show the actions of a referent (cf. Cormier et al., 2015; for DGS, cf. Fischer and Kollien, 2010). To achieve this, the signer adopts the role of the referent. Most studies use retellings as their data base (e.g. Herrmann and Pendzich, 2018; Cormier et al., 2015). Consequently, there is less research on CA and its use in data that is not influenced by stimuli. Though there is a considerable number of studies on CA, the phenomenon is still not well understood. One possible way to understand this multifaceted phenomenon better is to analyse collocations in conversations. In spoken language lexicography concordance lines -- also known as keyword in context (KWIC) -- have proven to be a useful tool in the analysis of collocations. The data used in this study are Free conversations in the Public DGS Corpus. This paper explores the possibilities and limitations of concordance lines as a tool to analyse collocational behaviour of CA. It also presents preliminary results regarding CA and its collocates, which may be explored further in the future.}
}

@inproceedings{wang:26054:sign-lang:lrec,
  author    = {Wang, Zirui and Bono, Mayumi},
  title     = {Beyond {BLEU}: Linguistic Invisibility and Interactional Repair Sequence in End-to-End Sign Language Translation},
  pages     = {491--500},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26054.html},
  abstract  = {Recent advances in end-to-end sign language translation (SLT) have achieved benchmark performance, yet little is known about whether these systems preserve the multi-channel linguistic structures that are essential for real-world communication. We argue that current optimization and evaluation practices create a form of linguistic invisibility, where interactionally decisive non-manual signals (NMS) are systematically underrepresented despite high translation scores. To empirically examine this issue, we analyze an interactional repair sequence from a Japanese Sign Language (JSL) conversational corpus as a diagnostic probe. Combining qualitative interactional analysis with kinematic measurements, we demonstrate a consistent manual--mouth decoupling pattern in which semantic resolution is carried primarily by mouthing while manual articulation remains largely constant. We show that such cross-channel contrast is unlikely to be preserved under current end-to-end training objectives that prioritize global motion similarity. Based on these findings, we argue that progress in SLT should be evaluated not only by sequence-level accuracy but also by the preservation of linguistically contrastive structures, motivating the development of diagnostic, multi-channel evaluation protocols for future SLT benchmarks. We therefore propose incorporating multi-channel diagnostic evaluation sets and decoupling-sensitive metrics into future SLT benchmarking frameworks, providing a pathway toward models that achieve both high performance and linguistic structural visibility.}
}

@inproceedings{zhao:26014:sign-lang:lrec,
  author    = {Zhao, Mingyu and Yang, Zhanfu and Zhou, Yang and Xia, Zhaoyang and Jin, Can and He, Xiaoxiao and Lin, Shuhang and Neidle, Carol and Metaxas, Dimitris},
  title     = {Continuous Sign Language Recognition using Multimodal Input and Handshape-aware Boundary Detection},
  pages     = {501--512},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26014.html},
  abstract  = {This paper employs a multimodal approach for continuous sign recognition by first using ML for detecting the start and end frames of signs in videos of American Sign Language (ASL) sentences, and then by recognizing the segmented signs. For improved robustness, we use 3D skeletal features extracted from sign language videos to take into account the convergence of sign properties and their dynamics that tend to cluster at sign boundaries. Another focus of this paper is the incorporation of information from 3D hand configuration for boundary detection. To detect handshapes normally expected at the beginning and end of signs, we pretrain a handshape classifier for detection of 87 linguistically defined canonical handshape categories using a dataset that we created by integrating and normalizing several existing datasets. A multimodal fusion module is then used to unify the pretrained sign video segmentation framework and handshape classification models. Finally, the estimated boundaries are used for sign recognition, where the recognition model is trained on a large database containing both citation-form isolated signs and signs pre-segmented (based on manual annotations) from continuous signing---as such signs often differ a bit in certain respects. We evaluate our method on the ASLLRP corpus and demonstrate significant improvements over previous work.}
}

@inproceedings{imai-etal-2026-shape:lrec,
  author    = {Imai, Saki and Kezar, Lee and Aichler, Laurel and Inan, Mert and Walker, Erin and Wooten, Alicia and Quandt, Lorna Cobban and Alikhani, Malihe},
  title     = {How Pragmatics Shape Articulation: A Computational Case Study in {STEM} {ASL} Discourse},
  pages     = {8476--8490},
  editor    = {Piperidis, Stelios and Bel, N{\'u}ria and van den Heuvel, Henk and Ide, Nancy and Krek, Simon and Toral, Antonio},
  booktitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {11--16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-49-4},
  language  = {english},
  url       = {https://lrec.elra.info/lrec2026-main-669},
  doi       = {10.63317/2wjnaaabgz4d},
  abstract  = {Most state-of-the-art sign language models are trained on interpreter or isolated vocabulary data, which overlooks the variability that characterizes natural dialogue. However, human communication dynamically adapts to contexts and interlocutors through spatiotemporal changes and articulation style. This specifically manifests itself in educational settings, where novel vocabularies are used by teachers and students. To address this gap, we collect a motion capture dataset of American Sign Language (ASL) STEM (Science, Technology, Engineering, and Mathematics) dialogue that enables quantitative comparison between dyadic interactive signing, solo signed lecture, and interpreted articles. Using continuous kinematic features, we disentangle dialogue-specific entrainment from individual effort reduction and show spatiotemporal changes across repeated mentions of STEM terms. On average, dialogue signs are 24.6{\%}-44.6{\%} shorter in duration than the isolated signs, and show significant reductions absent in monologue contexts. Finally, we evaluate sign embedding models on their ability to recognize STEM signs and approximate how entrained the participants become over time. Our study bridges linguistic analysis and computational modeling to understand how pragmatics shape sign articulation and its representation in sign language technologies.}
}

@inproceedings{maximo-chiruzzo-2026-poses:lrec,
  author    = {M{\'a}ximo, Santiago and Chiruzzo, Luis},
  title     = {Generating Sign Language Poses from {HamNoSys} and Natural Language Descriptions},
  pages     = {9358--9367},
  editor    = {Piperidis, Stelios and Bel, N{\'u}ria and van den Heuvel, Henk and Ide, Nancy and Krek, Simon and Toral, Antonio},
  booktitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {11--16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-49-4},
  language  = {english},
  url       = {https://lrec.elra.info/lrec2026-main-735},
  doi       = {10.63317/466di7tv7dpd},
  abstract  = {One of the steps involved in the process of sign language generation is generating a sequence of poses that represent the signs. This paper presents a method for using textual information to improve the translation of signs in HamNoSys format into sequences of poses. The method comprises a description generator that translates HamNoSys into a textual description, an LLM fine-tuned to the task of predicting a pose sequence from a HamNoSys description, and a VQ-VAE network that encodes and decodes pose sequences as a list of discrete symbols. Our experiments found that even using simple dictionary descriptions of HamNoSys, it is possible to improve the predictions of pose sequences by leveraging the information from a pretrained LLM.}
}

@inproceedings{phuangchoke-polprasert-2026-codebook:lrec,
  author    = {Phuangchoke, Ninlawat and Polprasert, Chantri},
  title     = {Bridging Text-to-Sign Translation via Codebook-Oriented Pretraining},
  pages     = {9504--9513},
  editor    = {Piperidis, Stelios and Bel, N{\'u}ria and van den Heuvel, Henk and Ide, Nancy and Krek, Simon and Toral, Antonio},
  booktitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {11--16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-49-4},
  language  = {english},
  url       = {https://lrec.elra.info/lrec2026-main-746},
  doi       = {10.63317/2s9976y7ibcu},
  abstract  = {Sign Language Production (SLP), the automatic translation from spoken to sign languages, faces several challenges due to the intricate mapping between linguistic semantics and the spatial--temporal motion domain. Existing SLP methods employing a transformer model with a Vector Quantization (VQ) method exhibit poor translation performance due to weak semantic alignment between the codebook and the text representation. In this work, we propose a novel text-to-sign translation based on model pretraining, which enhances semantic alignment by inheriting codebook-oriented prior knowledge from masked self-supervised models. Our approach involves two stages: (i) transforming sign language into discrete values by employing VQ with masked self-attention learning to create pre-tasks that bridge the semantic gap between text and codebook representations, (ii) constructing an end-to-end architecture with an encoder-decoder-like structure that inherits the parameters of the model from the first stage. The integration of these designs forms a robust sign language representation and significantly improves the translation model, which surpasses prior baselines.}
}

@inproceedings{inoue-etal-2026-continuity:lrec,
  author    = {Inoue, Jundai and Hara, Daisuke and Miwa, Makoto},
  title     = {A Resource and Evaluation Method for Phonological Continuity in {Japanese} {Sign} {Language}},
  pages     = {9514--9524},
  editor    = {Piperidis, Stelios and Bel, N{\'u}ria and van den Heuvel, Henk and Ide, Nancy and Krek, Simon and Toral, Antonio},
  booktitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {11--16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-49-4},
  language  = {english},
  url       = {https://lrec.elra.info/lrec2026-main-747},
  doi       = {10.63317/4p22nojyxbxa},
  abstract  = {Computational models for sign language processing often represent phonological components as categories. This approach, however, does not adequately capture the continuous nature of sign articulation, obscuring nuanced phonetic variation. Furthermore, the field has lacked resources and standardized methods to evaluate a model's ability to represent this continuity. In this work, we address these limitations. First, we introduce the JSL Ordered Triplet Dataset, a new manually-annotated resource designed to benchmark the modeling of gradual phonological progressions in Japanese Sign Language. Second, we propose a learning framework that reframes the task from classification to ranking, using Positive-Unlabeled (PU) learning to optimize the Area Under the ROC Curve (AUC). Our intrinsic evaluation on the new dataset shows that the learned continuous embeddings significantly outperform a cross-entropy baseline in ordering intermediate forms, improving the average accuracy on the continuity ranking task across phonological components from 81.52{\%} to 91.71{\%}. These embeddings also maintain strong discriminative power for standard component classification. This work provides the community with a valuable resource and a method for learning and evaluating more linguistically-grounded representations of sign language.}
}

@inproceedings{nunnari-etal-2026-fairy:lrec,
  author    = {Nunnari, Fabrizio and Jain, Siddhant and Gebhard, Patrick},
  title     = {Sentiment Analysis of {German} {Sign} {Language} Fairy Tales},
  pages     = {9525--9534},
  editor    = {Piperidis, Stelios and Bel, N{\'u}ria and van den Heuvel, Henk and Ide, Nancy and Krek, Simon and Toral, Antonio},
  booktitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {11--16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-49-4},
  language  = {english},
  url       = {https://lrec.elra.info/lrec2026-main-748},
  doi       = {10.63317/3cyfzw6vs9oe},
  abstract  = {We present a dataset and a model for sentiment analysis of German sign language (DGS) fairy tales. First, we perform sentiment analysis for three levels of valence (negative, neutral, positive) on German fairy tales text segments using four large language models (LLMs) and majority voting, reaching an inter-annotator agreement of 0.781 Krippendorff's alpha. Second, we extract face and body motion features from each corresponding DGS video segment using MediaPipe. Finally, we train an explainable model (based on XGBoost) to predict negative, neutral or positive sentiment from video features. Results show an average balanced accuracy of 0.631. A thorough analysis of the most important features reveals that, in addition to eyebrow and mouth motion on the face, the motion of the hips, elbows, and shoulders also contributes considerably to the discrimination of the conveyed sentiment, indicating an equal importance of face and body for sentiment communication in sign language.}
}

@inproceedings{yazdani-etal-2026-critical:lrec,
  author    = {Yazdani, Shakib and Hamidullah, Yasser and Espa{\~n}a-Bonet, Cristina and Avramidis, Eleftherios and van Genabith, Josef},
  title     = {A Critical Study of Automatic Evaluation in Sign Language Translation},
  pages     = {9535--9548},
  editor    = {Piperidis, Stelios and Bel, N{\'u}ria and van den Heuvel, Henk and Ide, Nancy and Krek, Simon and Toral, Antonio},
  booktitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {11--16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-49-4},
  language  = {english},
  url       = {https://lrec.elra.info/lrec2026-main-749},
  doi       = {10.63317/4n2sooe4fb2i},
  abstract  = {Automatic evaluation metrics are crucial for advancing sign language translation (SLT). Current SLT evaluation metrics, such as BLEU and ROUGE, are only text-based, and it remains unclear to what extent text-based metrics can reliably capture the quality of SLT outputs. To address this gap, we investigate the limitations of text-based SLT evaluation metrics by analyzing six metrics, including BLEU, chrF, and ROUGE, as well as BLEURT on the one hand, and large language model (LLM)-based evaluators such as G-Eval and GEMBA zero-shot direct assessment on the other hand. Specifically, we assess the consistency and robustness of these metrics under three controlled conditions: paraphrasing, hallucinations in model outputs, and variations in sentence length. Our analysis highlights the limitations of lexical overlap metrics and demonstrates that while LLM-based evaluators better capture semantic equivalence often missed by conventional metrics, they can also exhibit bias toward LLM-paraphrased translations. Moreover, although all metrics are able to detect hallucinations, BLEU tends to be overly sensitive, whereas BLEURT and LLM-based evaluators are comparatively lenient toward subtle cases. This motivates the need for multimodal evaluation frameworks that extend beyond text-based metrics to enable a more holistic assessment of SLT outputs.}
}

@inproceedings{klezovich-etal-2026-enough:lrec,
  author    = {Klezovich, Anna and Mesch, Johanna and Henter, Gustav Eje and Beskow, Jonas},
  title     = {How Much Data Is Enough Data? A New Motion Capture Corpus for Probabilistic Sign Language Generation},
  pages     = {9549--9558},
  editor    = {Piperidis, Stelios and Bel, N{\'u}ria and van den Heuvel, Henk and Ide, Nancy and Krek, Simon and Toral, Antonio},
  booktitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {11--16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-49-4},
  language  = {english},
  url       = {https://lrec.elra.info/lrec2026-main-750},
  doi       = {10.63317/5pmyrs7f9o33},
  abstract  = {We present a new 4.1-hour-long, high-quality motion capture sign language dataset for Swedish Sign Language --- STS Mocap v1. The dataset consists of high-quality multimodal data: body tracked with markers, fingers tracked with Manus Quantum Metagloves, face tracked with the iPhone LiveLink app in MetaHuman Animator mode, and corresponding textual sentence translations into spoken Swedish. With the help of this dataset, we show that four hours of motion capture data is enough for generative modeling of sign language conditioned on 2D pose. In comparison, training the same flow-matching model on only 30 minutes of this data, which is a common size for sign language motion capture datasets, shows a significant degradation in the quality of the synthesized data.}
}

@inproceedings{sevilla-lahozbengoechea-2026-multiband:lrec,
  author    = {Sevilla, Antonio F. G. and Lahoz-Bengoechea, Jos{\'e} Mar{\'i}a},
  title     = {Decomposing Sign Language Movements: A Multi-Band Visualization Method for Articulatory Analysis},
  pages     = {9559--9568},
  editor    = {Piperidis, Stelios and Bel, N{\'u}ria and van den Heuvel, Henk and Ide, Nancy and Krek, Simon and Toral, Antonio},
  booktitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {11--16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-49-4},
  language  = {english},
  url       = {https://lrec.elra.info/lrec2026-main-751},
  doi       = {10.63317/32sdurbs4fio},
  abstract  = {Understanding the structure of sign language movements requires methods that can isolate and analyze the hierarchical and simultaneous nature of sign articulation. We present a method for tracking and visualizing sign language movements that progressively isolates dependent movements within the articulatory chain: hand rotation from arm displacement and finger movement from hand movement. Using MediaPipe hand tracking on ordinary 2D video, we decompose motion into separate gestural components and compute velocity and direction for each articulator. We present these movement channels in a time-aligned multi-band visualization that reveals temporal structure, bimanual synchronization patterns, and the coordination of different articulatory components. An interactive web-based viewer synchronizes the visualization with video, enabling researchers to efficiently explore movement patterns and their relationship to signing. We demonstrate the method with examples from isolated signs and continuous signing, showing how it reveals patterns that are difficult to observe in raw video, including bimanual coordination, internal movements, and the distinction between linguistic and non-linguistic segments. This approach provides accessible tools for empirical investigation of rhythmic and prosodic patterns in sign languages.}
}

@inproceedings{kozhirbayev-imashev-2026-llm:lrec,
  author    = {Kozhirbayev, Zhanibek and Imashev, Alfarabi},
  title     = {Evaluating Large Language Models for Text-to-Gloss Translation in {Kazakh-Russian} {Sign} {Language}: A Pilot Study},
  pages     = {9964--9972},
  editor    = {Piperidis, Stelios and Bel, N{\'u}ria and van den Heuvel, Henk and Ide, Nancy and Krek, Simon and Toral, Antonio},
  booktitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {11--16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-49-4},
  language  = {english},
  url       = {https://lrec.elra.info/lrec2026-main-781},
  doi       = {10.63317/2ikts3xaqget},
  abstract  = {Conceptual glossing involves a systematic linguistic transformation in which models must preserve meaning, grammatical integrity, and punctuation while converting natural language into a more structured form. The purpose of this study is to assess the accuracy and dependability of glosses produced by these models by juxtaposing them with human-annotated standards, investigating whether the models maintain essential linguistic characteristics. By identifying the strengths and weaknesses of each model, we aim to determine which architectures are most suitable for structured language tasks such as glossing. This may reduce the manual labor required for linguistic annotation by experts while maintaining high-quality outcomes, and may help deaf signers with weak reading skills by rendering written paragraphs as glosses that are more comprehensible and natural-looking to them. Text-to-gloss translation converts written or spoken language into sign language glosses, enhancing accessibility for the Deaf and Hard of Hearing (DHH) community. This pilot study evaluates four large language models (LLMs), GPT-4-turbo, Grok 3, Deepseek-V3, and Gemini 2.0 Flash, on generating conceptual glosses in Kazakh-Russian Sign Language (K-RSL), a still under-resourced sign language. Using a dataset of 250 Russian sentences with expert-annotated K-RSL glosses, we assess performance across METEOR, BLEU, BERTScore, and WER. Results show Deepseek-V3 excels on complex texts (METEOR: 0.426 for K-RSL word order, 0.377 for fairytale paragraphs), while Gemini 2.0 Flash performs strongly on short sentences (METEOR: 0.602). These findings demonstrate LLMs' potential to automate gloss production, reducing manual annotation and aiding DHH individuals with reading comprehension. Challenges include K-RSL's unique grammar and limited datasets. This is the first study to apply LLMs to K-RSL glossing and to examine the potential efficacy of autonomous gloss production.}
}

@inproceedings{saha-etal-2026-banglasl:lrec,
  author    = {Saha, Neelavro and Shahriyar, Rafi and Roudra, Nafis Ashraf and Sakib, Saadman and Rasel, Annajiat Alim},
  title     = {Introducing a {Bangla} {Sentence--Gloss} Pair Dataset for {Bangla} Sign Language Translation and Research},
  pages     = {10457--10466},
  editor    = {Piperidis, Stelios and Bel, N{\'u}ria and van den Heuvel, Henk and Ide, Nancy and Krek, Simon and Toral, Antonio},
  booktitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {11--16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-49-4},
  language  = {english},
  url       = {https://lrec.elra.info/lrec2026-main-820},
  doi       = {10.63317/38qenrwzegr9},
  abstract  = {Bangla Sign Language (BdSL) translation represents a low-resource NLP task due to the lack of large-scale datasets that address sentence-level translation. Correspondingly, existing research in this field has been limited to word- and alphabet-level detection. In this work, we introduce Bangla-SGP, a novel parallel dataset consisting of 1,000 human-annotated sentence--gloss pairs, which we augmented with around 3,000 synthetically generated pairs using syntactic and morphological rules through a rule-based Retrieval-Augmented Generation (RAG) pipeline. The gloss sequences of the spoken Bangla sentences are made up of individual glosses, which are Bangla-sign-supported words and serve as an intermediate representation for continuous signing. Our dataset consists of 1,000 high-quality Bangla sentences, each manually annotated with a gloss sequence by a professional signer. The augmentation process incorporates rule-based linguistic strategies and prompt engineering techniques that we adopted by critically analyzing our human-annotated sentence--gloss pairs and by working closely with our professional signer. Furthermore, we fine-tune several transformer-based models, such as mBart50, Google mT5, and GPT4.1-nano, and evaluate their sentence-to-gloss translation performance using BLEU scores; based on these metrics, we compare the models' gloss-translation consistency across our dataset and the RWTH-PHOENIX-2014T benchmark.}
}

