@inproceedings{dreuw:10001:sign-lang:lrec,
  author    = {Dreuw, Philippe and Forster, Jens and Gweth, Yannick and Stein, Daniel and Ney, Hermann and Mart{\'i}nez Ruiz, Gregorio and Verges Llahi, Jaume and Crasborn, Onno and Ormel, Ellen and Du, Wei and Hoyoux, Thomas and Piater, Justus and Moya Lazaro, Jos{\'e} Miguel and Wheatley, Mark},
  title     = {{SignSpeak} -- Understanding, Recognition, and Translation of Sign Languages},
  pages     = {65--72},
  editor    = {Dreuw, Philippe and Efthimiou, Eleni and Hanke, Thomas and Johnston, Trevor and Mart{\'i}nez Ruiz, Gregorio and Schembri, Adam},
  booktitle = {Proceedings of the {LREC2010} 4th Workshop on the Representation and Processing of Sign Languages: Corpora and Sign Language Technologies},
  maintitle = {7th International Conference on Language Resources and Evaluation ({LREC} 2010)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Valletta, Malta},
  day       = {22--23},
  month     = may,
  year      = {2010},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/10001.html},
  abstract  = {The SignSpeak project will be the first step to approach sign language recognition and translation at a scientific level already reached in similar research fields such as automatic speech recognition or statistical machine translation of spoken languages. Deaf communities revolve around sign languages as they are their natural means of communication. Although deaf, hard of hearing and hearing signers can communicate without problems amongst themselves, there is a serious challenge for the deaf community in trying to integrate into educational, social and work environments. The overall goal of SignSpeak is to develop a new vision-based technology for recognizing and translating continuous sign language to text. New knowledge about the nature of sign language structure from the perspective of machine recognition of continuous sign language will allow a subsequent breakthrough in the development of a new vision-based technology for continuous sign language recognition and translation. Existing and new publicly available corpora will be used to evaluate the research progress throughout the whole project.}
}

@inproceedings{piater:10052:sign-lang:lrec,
  author    = {Piater, Justus and Hoyoux, Thomas and Du, Wei},
  title     = {Video Analysis for Continuous Sign Language Recognition},
  pages     = {192--195},
  editor    = {Dreuw, Philippe and Efthimiou, Eleni and Hanke, Thomas and Johnston, Trevor and Mart{\'i}nez Ruiz, Gregorio and Schembri, Adam},
  booktitle = {Proceedings of the {LREC2010} 4th Workshop on the Representation and Processing of Sign Languages: Corpora and Sign Language Technologies},
  maintitle = {7th International Conference on Language Resources and Evaluation ({LREC} 2010)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Valletta, Malta},
  day       = {22--23},
  month     = may,
  year      = {2010},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/10052.html},
  abstract  = {The recognition of continuous, natural signing is very challenging due to the multimodal nature of the visual cues (fingers, lips, facial expressions, body pose, etc.), as well as technical limitations such as spatial and temporal resolution and unreliable depth cues.  On the other hand, signing gestures are designed to be robustly discernible. We therefore argue in favor of an integrative approach to sign language recognition that aims to extract sufficient aggregate information for robust sign language recognition, even if many of the individual cues are unreliable.  Our strategy to implement such an integrated system currently rests on two modules, for which we will show initial results. The first module uses active appearance models for detailed face tracking, allowing the quantification of facial expressions such as mouth and eye aperture and eyebrow raise. The second module is dedicated to hand tracking using color and appearance.  A third module will be concerned with tracking upper-body articulated pose, linking the face to the hands for increased overall robustness.}
}

@inproceedings{forster-etal-2012-rwth:lrec,
  author    = {Forster, Jens and Schmidt, Christoph and Hoyoux, Thomas and Koller, Oscar and Zelle, Uwe and Piater, Justus and Ney, Hermann},
  title     = {{RWTH}-{PHOENIX}-{Weather}: A Large Vocabulary Sign Language Recognition and Translation Corpus},
  pages     = {3785--3789},
  editor    = {Calzolari, Nicoletta and Choukri, Khalid and Declerck, Thierry and Do{\u g}an, Mehmet U{\u g}ur and Maegaard, Bente and Mariani, Joseph and Moreno, Asuncion and Odijk, Jan and Piperidis, Stelios},
  booktitle = {Proceedings of the 8th International Conference on Language Resources and Evaluation ({LREC} 2012)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Istanbul, Turkey},
  day       = {21--27},
  month     = may,
  year      = {2012},
  isbn      = {978-2-9517408-7-7},
  language  = {english},
  url       = {https://aclanthology.org/L12-1503},
  abstract  = {This paper introduces the RWTH-PHOENIX-Weather corpus, a video-based, large vocabulary corpus of German Sign Language suitable for statistical sign language recognition and translation. In contrast to most available sign language data collections, the RWTH-PHOENIX-Weather corpus has not been recorded for linguistic research but for the use in statistical pattern recognition. The corpus contains weather forecasts recorded from German public TV which are manually annotated using glosses distinguishing sign variants, and time boundaries have been marked on the sentence and the gloss level. Further, the spoken German weather forecast has been transcribed in a semi-automatic fashion using a state-of-the-art automatic speech recognition system. Moreover, an additional translation of the glosses into spoken German has been created to capture allowable translation variability. In addition to the corpus, experimental baseline results for hand and head tracking, statistical sign language recognition and translation are presented.}
}

@inproceedings{dreuw-etal-2010-signspeak:lrec,
  author    = {Dreuw, Philippe and Ney, Hermann and Mart{\'i}nez Ruiz, Gregorio and Crasborn, Onno and Piater, Justus and Moya Lazaro, Jos{\'e} Miguel and Wheatley, Mark},
  title     = {The {SignSpeak} Project -- Bridging the Gap Between Signers and Speakers},
  pages     = {476--481},
  editor    = {Calzolari, Nicoletta and Choukri, Khalid and Maegaard, Bente and Mariani, Joseph and Odijk, Jan and Piperidis, Stelios and Rosner, Mike and Tapias, Daniel},
  booktitle = {Proceedings of the 7th International Conference on Language Resources and Evaluation ({LREC} 2010)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Valletta, Malta},
  day       = {17--23},
  month     = may,
  year      = {2010},
  isbn      = {978-2-9517408-6-0},
  language  = {english},
  url       = {https://aclanthology.org/L10-1238},
  abstract  = {The SignSpeak project will be the first step to approach sign language recognition and translation at a scientific level already reached in similar research fields such as automatic speech recognition or statistical machine translation of spoken languages. Deaf communities revolve around sign languages as they are their natural means of communication. Although deaf, hard of hearing and hearing signers can communicate without problems amongst themselves, there is a serious challenge for the deaf community in trying to integrate into educational, social and work environments. The overall goal of SignSpeak is to develop a new vision-based technology for recognizing and translating continuous sign language to text. New knowledge about the nature of sign language structure from the perspective of machine recognition of continuous sign language will allow a subsequent breakthrough in the development of a new vision-based technology for continuous sign language recognition and translation. Existing and new publicly available corpora will be used to evaluate the research progress throughout the whole project.}
}

