@inproceedings{wolfe:18023:sign-lang:lrec,
  author    = {Wolfe, Rosalee and Hanke, Thomas and Langer, Gabriele and Jahn, Elena and Worseck, Satu and Bleicken, Julian and McDonald, John C. and Johnson, Sarah},
  title     = {Exploring Localization for Mouthings in Sign Language Avatars},
  editor    = {Bono, Mayumi and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Kristoffersen, Jette and Mesch, Johanna and Osugi, Yutaka},
  booktitle = {Proceedings of the {LREC2018} 8th Workshop on the Representation and Processing of Sign Languages: Involving the Language Community},
  maintitle = {11th International Conference on Language Resources and Evaluation ({LREC} 2018)},
  pages     = {207--212},
  publisher = {European Language Resources Association (ELRA)},
  address   = {Miyazaki, Japan},
  day       = {12},
  month     = may,
  year      = {2018},
  isbn      = {979-10-95546-01-6},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/18023.html},
  abstract  = {According to the World Wide Web Consortium (W3C), localization is ``the adaptation of a product, application or document content to meet the language, cultural and other requirements of a specific target market''. One requirement necessary for localizing a sign language avatar is creating a capability to produce convincing mouthing. For purposes of this inquiry we make a distinction between mouthings and mouth gesture. The term `mouthings' refers to mouth movements derived from words of a spoken language whereas `mouth gesture' refers to mouth movements not derived from a spoken language.  This effort focuses on the former. The prevalence of mouthings varies across different sign languages and individual signers. Although mouthings occur regularly in most sign languages, their significance and status have been a matter of sometimes heated discussions among sign linguists. However, no matter the theoretical viewpoint one takes on the issue of mouthing, one must acknowledge that for most if not all sign languages, mouthings do occur. If an avatar purports to fully and naturally express any sign language, it must have the capacity to express all aspects of the language, which likely will include mouthings.  Although most avatar systems were created for hearing communities, several technologies have emerged to improve speech recognition for those who are hard-of-hearing or who find themselves in noisy environments.  These were not satisfactory for Deaf communities as they did not portray sign language.  Initial efforts to incorporate mouthing in sign language avatars utilized a mouth picture or viseme for each letter of the International Phonetic Alphabet (IPA), but were hampered by a reliance on blend shapes.  Muscle-based avatars have the advantage of avoiding the limitations of blend shapes. This paper reports on a first step to identify the requirements for extending a muscle-based avatar to incorporate mouthings in multiple sign languages.},
}

@inproceedings{hanke:12028:sign-lang:lrec,
  author    = {Hanke, Thomas and Matthes, Silke and Regen, Anja and Worseck, Satu},
  title     = {Where Does a Sign Start and End? Segmentation of Continuous Signing},
  editor    = {Crasborn, Onno and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2012} 5th Workshop on the Representation and Processing of Sign Languages: Interactions between Corpus and Lexicon},
  maintitle = {8th International Conference on Language Resources and Evaluation ({LREC} 2012)},
  pages     = {69--74},
  publisher = {European Language Resources Association (ELRA)},
  address   = {Istanbul, Turkey},
  day       = {27},
  month     = may,
  year      = {2012},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/12028.html},
  abstract  = {There are two basic approaches how to segment continuous signing into individual signs:\begin{itemize}\item A sign starts where the preceding one ends (i.e. fluent signing means there are no gaps between signs)\item Transitional movements between signs do not count as part of either sign. Therefore, usually there are gaps between two signs during which the articulators move from the end of one sign to the beginning of the next.\end{itemize}Both approaches have their pros and cons. However, in the context of the DGS Corpus and the Dicta-Sign project the second approach offers advantages for the subsequent processing. Here we investigate how sensitive this approach is with respect to higher video frame rates.},
}

@inproceedings{matthes:12016:sign-lang:lrec,
  author    = {Matthes, Silke and Hanke, Thomas and Regen, Anja and Storz, Jakob and Worseck, Satu and Efthimiou, Eleni and Dimou, Athanasia-Lida and Braffort, Annelies and Glauert, John and Safar, Eva},
  title     = {{Dicta-Sign} -- Building a Multilingual Sign Language Corpus},
  editor    = {Crasborn, Onno and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2012} 5th Workshop on the Representation and Processing of Sign Languages: Interactions between Corpus and Lexicon},
  maintitle = {8th International Conference on Language Resources and Evaluation ({LREC} 2012)},
  pages     = {117--122},
  publisher = {European Language Resources Association (ELRA)},
  address   = {Istanbul, Turkey},
  day       = {27},
  month     = may,
  year      = {2012},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/12016.html},
  abstract  = {This paper presents the multilingual corpus of four European sign languages compiled in the framework of the Dicta-Sign project. Dicta-Sign researched ways to enable communication between Deaf individuals through the development of human-computer interfaces (HCI) for Deaf users, by means of sign language. Sign language resources were compiled to inform progress in the other research areas within the project, especially video recognition of signs, sign-to-sign translation, linguistic modelling, and sign generation. The aim for the corpus data collection was to achieve as high a level of naturalness as possible with semi-spontaneous utterances under lab conditions. At the same time the elicited data were supposed to be semantically close enough to be comparable both across individual informants and for all four sign languages. The sign language data were annotated using iLex and are now made available via a web portal that allows for different access options to the data.},
}

