Sign language is more than just moving the fingers or hands; it is a visual language in which non-manual gestures play a very important role. Recently, a growing body of research has paid increasing attention to developing signing avatars endowed with a set of facial expressions in order to reproduce the actual functioning of sign language and to gain wider acceptance among deaf users. In this paper, we propose an effective method for generating facial expressions for signing avatars based on a physics-based muscle model. The main focus of our work is to automate the mapping of the muscles onto the face model at the correct anatomical positions, as well as the detection of the jaw region, using a small set of MPEG-4 Feature Points of the given mesh.
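The abstract describes a physics-based muscle model whose attachments are placed automatically from a few MPEG-4 Feature Points. As a rough illustration only (the paper itself gives no code), the sketch below shows how a Waters-style linear muscle might be anchored between two feature points and used to deform nearby mesh vertices; all feature-point labels, coordinates, and falloff choices are assumptions, not the authors' implementation.

import numpy as np

# Hypothetical MPEG-4 feature point coordinates (FP labels and values are
# illustrative placeholders, not taken from the paper); in practice they
# would be read from the annotated avatar mesh.
feature_points = {
    "8.3": np.array([-30.0, -40.0, 10.0]),   # left mouth corner
    "5.3": np.array([-45.0,  -5.0,  5.0]),   # left cheek-bone region
}

def place_linear_muscle(fp_head, fp_tail, fps):
    """Derive the muscle attachment (head, on the 'bone') and insertion
    (tail, in the skin) from a pair of feature points."""
    return fps[fp_head].copy(), fps[fp_tail].copy()

def linear_muscle_deform(vertices, head, tail, contraction, fall_end=None):
    """Simplified Waters-style linear muscle: vertices inside the zone of
    influence are pulled toward the head with a cosine radial falloff
    (the angular falloff of the full model is omitted for brevity)."""
    fall_end = fall_end if fall_end is not None else np.linalg.norm(tail - head)
    out = vertices.astype(float).copy()
    for i, v in enumerate(vertices):
        d = np.linalg.norm(v - head)
        if d == 0.0 or d >= fall_end:
            continue                                   # outside the influence zone
        r = np.cos(d / fall_end * np.pi / 2.0)         # radial falloff in [0, 1]
        out[i] = v + contraction * r * (head - v) / d  # pull toward the attachment
    return out

# Usage: a zygomatic-like muscle pulling the mouth-corner region toward the cheek.
head, tail = place_linear_muscle("5.3", "8.3", feature_points)
verts = np.array([feature_points["8.3"], [-28.0, -38.0, 9.0]])
print(linear_muscle_deform(verts, head, tail, contraction=3.0))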
@inproceedings{bouzid:14008:sign-lang:lrec,
author = {Bouzid, Yosra},
title = {Synthesizing facial expressions for sign language avatars},
pages = {11--18},
editor = {Crasborn, Onno and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Kristoffersen, Jette and Mesch, Johanna},
booktitle = {Proceedings of the {LREC2014} 6th Workshop on the Representation and Processing of Sign Languages: Beyond the Manual Channel},
maintitle = {9th International Conference on Language Resources and Evaluation ({LREC} 2014)},
publisher = {{European Language Resources Association (ELRA)}},
address = {Reykjavik, Iceland},
day = {31},
month = may,
year = {2014},
language = {english},
url = {https://www.sign-lang.uni-hamburg.de/lrec/pub/14008.pdf}
}