We present an algorithm to improve the pre-existing bottom-up animation system for AZee descriptions to synthesize sign language utterances. Our algorithm allows us to synthesize animations from AZee descriptions while preserving the dynamics of the underlying blocks. This bottom-up approach aims to deliver procedurally generated animations capable of generating any sign language utterance if an equivalent AZee description exists. The proposed algorithm is built upon the modules of an open-source animation toolkit and takes advantage of the integrated inverse kinematics solver and a non-linear editor.
@inproceedings{sharma:70018:sltat:lrec,
  author    = {Sharma, Paritosh and Filhol, Michael},
  title     = {Multi-track Bottom-Up Synthesis from Non-Flattened {AZee} Scores},
  pages     = {103--108},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and McDonald, John C. and Shterionov, Dimitar and Wolfe, Rosalee},
  booktitle = {Proceedings of the 7th International Workshop on Sign Language Translation and Avatar Technology: The Junction of the Visual and the Textual: Challenges and Perspectives},
  maintitle = {13th International Conference on Language Resources and Evaluation ({LREC} 2022)},
  publisher = {European Language Resources Association (ELRA)},
  address   = {Marseille, France},
  day       = {24},
  month     = jun,
  year      = {2022},
  isbn      = {979-10-95546-82-5},
  language  = {english},
  url       = {http://www.lrec-conf.org/proceedings/lrec2022/workshops/sltat/pdf/2022.sltat-1.16},
}