The goal of our research is to identify computational models of the referential use of signing space and of spatially inflected verb forms for use in American Sign Language (ASL) animations that support accessibility applications for deaf users. This paper describes our collection and annotation of an ASL motion-capture corpus to be analyzed for our research. A study was conducted to compare alternative prompting strategies for eliciting single-signer multi-sentential ASL discourse that maximizes the use of pronominal spatial reference yet minimizes the use of classifier predicates, spatially complex ASL phenomena that are not the focus of our current research.
@inproceedings{huenerfauth:10030:sign-lang:lrec,
  author    = {Huenerfauth, Matt and Lu, Pengfei},
  title     = {Eliciting Spatial Reference for a Motion-Capture Corpus of {American Sign Language} Discourse},
  pages     = {121--124},
  editor    = {Dreuw, Philippe and Efthimiou, Eleni and Hanke, Thomas and Johnston, Trevor and Mart{\'i}nez Ruiz, Gregorio and Schembri, Adam},
  booktitle = {Proceedings of the {LREC2010} 4th Workshop on the Representation and Processing of Sign Languages: Corpora and Sign Language Technologies},
  maintitle = {7th International Conference on Language Resources and Evaluation ({LREC} 2010)},
  publisher = {European Language Resources Association (ELRA)},
  address   = {Valletta, Malta},
  day       = {22--23},
  month     = may,
  year      = {2010},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/10030.pdf},
}