@inproceedings{duarte:10020:sign-lang:lrec,
  author    = {Duarte, Kyle and Gibet, Sylvie},
  title     = {Corpus Design for Signing Avatars},
  pages     = {73--75},
  editor    = {Dreuw, Philippe and Efthimiou, Eleni and Hanke, Thomas and Johnston, Trevor and Mart{\'i}nez Ruiz, Gregorio and Schembri, Adam},
  booktitle = {Proceedings of the {LREC2010} 4th Workshop on the Representation and Processing of Sign Languages: Corpora and Sign Language Technologies},
  maintitle = {7th International Conference on Language Resources and Evaluation ({LREC} 2010)},
  publisher = {European Language Resources Association (ELRA)},
  address   = {Valletta, Malta},
  day       = {22--23},
  month     = may,
  year      = {2010},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/10020.html},
  abstract  = {The SignCom project uses motion capture (mocap) data to animate a virtual French Sign Language (LSF) signer. An important part of any signing avatar project is to ensure that a computer animation engine has a large quantity of interesting and on-topic signs from which to build novel signing sequences. In this article, we detail the process of selecting an adequate range of signs and situations to be included in our corpus: from controlling discourse topic to including signs that can accept modified movements or handshapes, we describe how an avatar corpus has a different motivation than traditional signed language corpora.}
}

@inproceedings{duarte-gibet-2010-heterogeneous:lrec,
  author    = {Duarte, Kyle and Gibet, Sylvie},
  title     = {Heterogeneous Data Sources for Signed Language Analysis and Synthesis: The {SignCom} Project},
  pages     = {461--468},
  editor    = {Calzolari, Nicoletta and Choukri, Khalid and Maegaard, Bente and Mariani, Joseph and Odijk, Jan and Piperidis, Stelios and Rosner, Mike and Tapias, Daniel},
  booktitle = {Proceedings of the 7th International Conference on Language Resources and Evaluation ({LREC} 2010)},
  publisher = {European Language Resources Association (ELRA)},
  address   = {Valletta, Malta},
  day       = {17--23},
  month     = may,
  year      = {2010},
  isbn      = {978-2-9517408-6-0},
  language  = {english},
  url       = {https://aclanthology.org/L10-1289},
  abstract  = {This paper describes how heterogeneous data sources captured in the SignCom project may be used for the analysis and synthesis of French Sign Language (LSF) utterances. The captured data combine video data and multimodal motion capture (mocap) data, including body and hand movements as well as facial expressions. These data are pre-processed, synchronized, and enriched by text annotations of signed language elicitation sessions. The addition of mocap data to traditional data structures provides additional phonetic data to linguists who desire to better understand the various parts of signs (handshape, movement, orientation, etc.) to very exacting levels, as well as their interactions and relative timings. We show how the phonologies of hand configurations and articulator movements may be studied using signal processing and statistical analysis tools to highlight regularities or temporal schemata between the different modalities. Finally, mocap data allows us to replay signs using a computer animation engine, specifically editing and rearranging movements and configurations in order to create novel utterances.}
}

