@inproceedings{halbout:24024:sign-lang:lrec,
  author    = {Halbout, Julie and Fabre, Diandra and Ouakrim, Yanis and Lascar, Julie and Braffort, Annelies and Gouiff{\`e}s, Mich{\`e}le and Beautemps, Denis},
  title     = {{Matignon-LSF}: A Large Corpus of Interpreted {French} {Sign} {Language}},
  pages     = {202--208},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC-COLING} 2024 11th Workshop on the Representation and Processing of Sign Languages: Evaluation of Sign Language Resources},
  maintitle = {2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation ({LREC-COLING} 2024)},
  publisher = {{ELRA Language Resources Association (ELRA) and the International Committee on Computational Linguistics (ICCL)}},
  address   = {Torino, Italy},
  day       = {25},
  month     = may,
  year      = {2024},
  isbn      = {978-2-493814-30-2},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/24024.html},
  abstract  = {In this paper we present Matignon-LSF, the first dataset of interpreted French Sign Language (LSF) and one of the largest LSF dataset available for research to date. This is a dataset of live interpreted LSF during public speeches by the French government. The dataset comprises 39 hours of LSF videos with French language audio and corresponding subtitles. In addition to this data, we offer pre-computed video features (I3D). We provide a detailed analysis of the proposed dataset as well as some experimental results to demonstrate the interest of this novel dataset.}
}

@inproceedings{martinod:24038:sign-lang:lrec,
  author    = {Martinod, Emmanuella and Filhol, Michael},
  title     = {Formal Representation of Interrogation in {French} {Sign} {Language}},
  booktitle = {Proceedings of the {LREC-COLING} 2024 11th Workshop on the Representation and Processing of Sign Languages: Evaluation of Sign Language Resources},
  maintitle = {2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation ({LREC-COLING} 2024)},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  pages     = {307--315},
  publisher = {{ELRA Language Resources Association (ELRA) and the International Committee on Computational Linguistics (ICCL)}},
  address   = {Torino, Italy},
  year      = {2024},
  month     = may,
  day       = {25},
  isbn      = {978-2-493814-30-2},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/24038.html},
  language  = {english},
  abstract  = {This paper concerns the marking of interrogation in French Sign Language (LSF). Early work on Sign Languages (SLs) underlined the role of non-manual elements in the production of interrogatives. Studies often point to the role of eyebrows depending on the type of question: eyebrows would usually be raised for the production of yes/no questions, while they would be lowered for other types of questions. For LSF, previous studies seem to validate this contrast. We tested this thoroughly in the framework of AZee, a formal approach to SL modeling based on the identification of linguistic associations between forms and identified meanings, called production rules. We present our methodology to extract AZee production rules, consisting of data searches alternating form and meaning criteria gradually converging to strong associations, ultimately leading to production rules. Our results (i) show no link between raised or lowered eyebrows and a specific type of question, (ii) highlight instead the role of another non-manual marker: the advancement of the chin. However, since eyebrows remain frequently involved in the analyzed questions (all types included), we intend to further focus on the potential role of the signer's expectations while formulating his request.},
}

@inproceedings{sharma:24041:sign-lang:lrec,
  author    = {Sharma, Paritosh and Challant, Camille and Filhol, Michael},
  title     = {Facial Expressions for Sign Language Synthesis Using {FACSHuman} and {AZee}},
  pages     = {334--340},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC-COLING} 2024 11th Workshop on the Representation and Processing of Sign Languages: Evaluation of Sign Language Resources},
  maintitle = {2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation ({LREC-COLING} 2024)},
  publisher = {{ELRA Language Resources Association (ELRA) and the International Committee on Computational Linguistics (ICCL)}},
  address   = {Torino, Italy},
  day       = {25},
  month     = may,
  year      = {2024},
  isbn      = {978-2-493814-30-2},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/24041.html},
  abstract  = {This paper presents an approach to synthesising facial expressions on signing avatars. We implement those generated by a recently proposed set of rules formalised in the AZee framework for LSF. Our methodology combines computer vision, linguistic insights, and morph target animation to address the challenges posed by the synthesis of nuanced facial expressions, which are pivotal for conveying emotions and grammatical cues in sign language. By implementing a set of universally applicable morphs and incorporating these advancements into our animation system, we aim to improve the realism and expressiveness of signing avatars. Our findings suggest an enhancement in the synthesis of non-manual signals, which extends to multiple avatars. This work opens new avenues for future research, including the exploration of more sophisticated facial modelling techniques and the potential integration of facial motion capture data to refine the animation of facial expressions further.}
}

@inproceedings{martinod:22014:sign-lang:lrec,
  author    = {Martinod, Emmanuella and Danet, Claire and Filhol, Michael},
  title     = {Two New {AZee} Production Rules Refining Multiplicity in {French} {Sign} {Language}},
  pages     = {132--138},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Kristoffersen, Jette and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2022} 10th Workshop on the Representation and Processing of Sign Languages: Multilingual Sign Language Resources},
  maintitle = {13th International Conference on Language Resources and Evaluation ({LREC} 2022)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Marseille, France},
  day       = {25},
  month     = jun,
  year      = {2022},
  isbn      = {979-10-95546-86-3},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/22014.html},
  abstract  = {This paper is a contribution to sign language (SL) modeling. We focus on the hitherto imprecise notion of ``Multiplicity'', assumed to express plurality in French Sign Language (LSF), using AZee approach. AZee is a linguistic and formal approach to modeling LSF. It takes into account the linguistic properties and specificities of LSF while respecting constraints linked to a modeling process. We present the methodology to extract AZee production rules. Based on the analysis of strong form-meaning associations in SL data (elicited image descriptions and short news), we identified two production rules structuring the expression of multiplicity in LSF. We explain how these newly extracted production rules are different from existing ones. Our goal is to refine the AZee approach to allow the coverage of a growing part of LSF. This work could lead to an improvement in SL synthesis and SL automatic translation.}
}

@inproceedings{sharma:70018:sltat:lrec,
  author    = {Sharma, Paritosh and Filhol, Michael},
  title     = {Multi-track Bottom-Up Synthesis from Non-Flattened {AZee} Scores},
  booktitle = {Proceedings of the 7th International Workshop on Sign Language Translation and Avatar Technology: The Junction of the Visual and the Textual: Challenges and Perspectives},
  maintitle = {13th International Conference on Language Resources and Evaluation ({LREC} 2022)},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and McDonald, John C. and Shterionov, Dimitar and Wolfe, Rosalee},
  pages     = {103--108},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Marseille, France},
  year      = {2022},
  month     = jun,
  day       = {24},
  isbn      = {979-10-95546-82-5},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/2022.sltat-1.16.html},
  language  = {english},
  abstract  = {We present an algorithm to improve the pre-existing bottom-up animation system for AZee descriptions to synthesize sign language utterances. Our algorithm allows us to synthesize AZee descriptions by preserving the dynamics of underlying blocks. This bottom-up approach aims to deliver procedurally generated animations capable of generating any sign language utterance if an equivalent AZee description exists. The proposed algorithm is built upon the modules of an open-source animation toolkit and takes advantage of the integrated inverse kinematics solver and a non-linear editor.},
}

