This paper presents an approach to synthesising facial expressions on signing avatars. We implement the expressions generated by a recently proposed set of rules formalised in the AZee framework for LSF (French Sign Language). Our methodology combines computer vision, linguistic insights, and morph target animation to address the challenges of synthesising nuanced facial expressions, which are pivotal for conveying both emotions and grammatical cues in sign language. By implementing a set of morphs applicable across avatars and integrating them into our animation system, we aim to improve the realism and expressiveness of signing avatars. Our findings suggest an improvement in the synthesis of non-manual signals, one that carries over to multiple avatars. This work opens avenues for future research, including more sophisticated facial modelling techniques and the integration of facial motion capture data to further refine the animation of facial expressions.
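The abstract names morph target animation as the mechanism behind the synthesised expressions. Below is a minimal Python sketch of that general technique, not the paper's code: the function name, the FACS-style morph labels, and the use of NumPy are illustrative assumptions. (FACSHuman does expose FACS action units as morph targets in MakeHuman; how the paper drives them from AZee rules is not shown here.)

# A minimal sketch of morph target (blend shape) blending, the animation
# technique named in the abstract. All names (apply_morphs, the AU labels,
# the placeholder geometry) are illustrative assumptions, not the paper's API.
import numpy as np

def apply_morphs(base: np.ndarray, targets: dict, weights: dict) -> np.ndarray:
    """Blend FACS-style morph targets onto a base mesh.

    base    -- (N, 3) array of rest-pose vertex positions
    targets -- morph name -> (N, 3) array of fully-applied vertex positions
    weights -- morph name -> activation weight in [0, 1]
    """
    result = base.copy()
    for name, w in weights.items():
        # Each morph contributes its displacement from the rest pose,
        # scaled by its (clamped) activation weight.
        result += np.clip(w, 0.0, 1.0) * (targets[name] - base)
    return result

# Example: raise both brows (AU1 + AU2) at 70% activation.
rng = np.random.default_rng(0)
base_mesh = rng.standard_normal((100, 3))      # placeholder geometry
morphs = {"AU1_inner_brow_raiser": base_mesh + 0.10,
          "AU2_outer_brow_raiser": base_mesh + 0.05}
blended = apply_morphs(base_mesh, morphs,
                       {"AU1_inner_brow_raiser": 0.7,
                        "AU2_outer_brow_raiser": 0.7})

Because the blend is linear in the weights, morphs defined once per action unit compose freely, which is one plausible reading of the abstract's claim that a single set of morphs extends to multiple avatars.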
@inproceedings{sharma:24041:sign-lang:lrec,
author = {Sharma, Paritosh and Challant, Camille and Filhol, Michael},
title = {Facial Expressions for Sign Language Synthesis using {FACSHuman} and {AZee}},
pages = {334--340},
editor = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
booktitle = {Proceedings of the {LREC-COLING} 2024 11th Workshop on the Representation and Processing of Sign Languages: Evaluation of Sign Language Resources},
maintitle = {2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation ({LREC-COLING} 2024)},
publisher = {{ELRA Language Resources Association (ELRA) and the International Committee on Computational Linguistics (ICCL)}},
address = {Torino, Italy},
day = {25},
month = may,
year = {2024},
isbn = {978-2-493814-30-2},
language = {english},
url = {https://www.sign-lang.uni-hamburg.de/lrec/pub/24041.pdf}
}