@inproceedings{sharma:24041:sign-lang:lrec,
  author    = {Sharma, Paritosh and Challant, Camille and Filhol, Michael},
  title     = {Facial Expressions for Sign Language Synthesis using {FACSHuman} and {AZee}},
  pages     = {334--340},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC-COLING} 2024 11th Workshop on the Representation and Processing of Sign Languages: Evaluation of Sign Language Resources},
  maintitle = {2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation ({LREC-COLING} 2024)},
  publisher = {{ELRA Language Resources Association (ELRA) and the International Committee on Computational Linguistics (ICCL)}},
  address   = {Torino, Italy},
  day       = {25},
  month     = may,
  year      = {2024},
  isbn      = {978-2-493814-30-2},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/24041.html},
  abstract  = {This paper presents an approach to synthesising facial expressions on signing avatars. We implement the expressions generated by a recently proposed set of rules formalised in the AZee framework for French Sign Language (LSF). Our methodology combines computer vision, linguistic insights, and morph target animation to address the challenges posed by the synthesis of nuanced facial expressions, which are pivotal for conveying emotions and grammatical cues in sign language. By implementing a set of universally applicable morphs and incorporating these advancements into our animation system, we aim to improve the realism and expressiveness of signing avatars. Our findings suggest an enhancement in the synthesis of non-manual signals that extends to multiple avatars. This work opens new avenues for future research, including the exploration of more sophisticated facial modelling techniques and the potential integration of facial motion capture data to further refine the animation of facial expressions.}
}

@inproceedings{sharma:70018:sltat:lrec,
  author    = {Sharma, Paritosh and Filhol, Michael},
  title     = {Multi-track Bottom-Up Synthesis from Non-Flattened {AZee} Scores},
  pages     = {103--108},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and McDonald, John C. and Shterionov, Dimitar and Wolfe, Rosalee},
  booktitle = {Proceedings of the 7th International Workshop on Sign Language Translation and Avatar Technology: The Junction of the Visual and the Textual: Challenges and Perspectives},
  maintitle = {13th International Conference on Language Resources and Evaluation ({LREC} 2022)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Marseille, France},
  day       = {24},
  month     = jun,
  year      = {2022},
  isbn      = {979-10-95546-82-5},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/2022.sltat-1.16.html},
  abstract  = {We present an algorithm that improves the pre-existing bottom-up animation system for synthesizing sign language utterances from AZee descriptions. Our algorithm synthesizes AZee descriptions while preserving the dynamics of their underlying blocks. This bottom-up approach aims to procedurally generate an animation for any sign language utterance for which an equivalent AZee description exists. The proposed algorithm is built upon the modules of an open-source animation toolkit and takes advantage of its integrated inverse kinematics solver and non-linear editor.}
}
