% Workshop paper: SLTAT 7 (Sign Language Translation and Avatar Technology) at LREC 2022, Marseille.
% NOTE(review): `maintitle`, `day`, and `language` are not standard BibTeX fields; standard .bst
% styles silently ignore unknown fields, so they are harmless house-convention metadata
% (used consistently across this file). ISBN here is the workshop proceedings volume's.
@inproceedings{vangemert:70017:sltat:lrec,
  author    = {Van Gemert, Britt and Cokart, Richard and Esselink, Lyke and De Meulder, Maartje and Sijm, Nienke and Roelofsen, Floris},
  title     = {First Steps Towards a Signing Avatar for Railway Travel Announcements in the {Netherlands}},
  pages     = {109--116},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and McDonald, John C. and Shterionov, Dimitar and Wolfe, Rosalee},
  booktitle = {Proceedings of the 7th International Workshop on Sign Language Translation and Avatar Technology: The Junction of the Visual and the Textual: Challenges and Perspectives},
  maintitle = {13th International Conference on Language Resources and Evaluation ({LREC} 2022)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Marseille, France},
  day       = {24},
  month     = jun,
  year      = {2022},
  isbn      = {979-10-95546-82-5},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/2022.sltat-1.17.html},
  abstract  = {This paper presents first steps towards a sign language avatar for communicating railway travel announcements in Dutch Sign Language. Taking an interdisciplinary approach, it demonstrates effective ways to employ co-design and focus group methods in the context of developing sign language technology, and presents several concrete findings and results obtained through co-design and focus group sessions which have not only led to improvements of our own prototype but may also inform the development of signing avatars for other languages and in other application domains.}
}

% Workshop paper: 9th Workshop on the Representation and Processing of Sign Languages at LREC 2020.
% NOTE(review): capitalized "Van Gemert"-style particles elsewhere in this file and the braced
% {Video-to-HamNoSys} here follow the source venue's own casing; the brace protects the
% acronym compound from .bst title down-casing, as intended.
@inproceedings{skobov:20001:sign-lang:lrec,
  author    = {Skobov, Victor and Lepage, Yves},
  title     = {{Video-to-HamNoSys} Automated Annotation System},
  pages     = {209--216},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2020} 9th Workshop on the Representation and Processing of Sign Languages: Sign Language Resources in the Service of the Language Community, Technological Challenges and Application Perspectives},
  maintitle = {12th International Conference on Language Resources and Evaluation ({LREC} 2020)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Marseille, France},
  day       = {16},
  month     = may,
  year      = {2020},
  isbn      = {979-10-95546-54-2},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/20001.html},
  abstract  = {The Hamburg Notation System (HamNoSys) was developed for movement annotation of any sign language (SL) and can be used to produce signing animations for a virtual avatar with the JASigning platform. This provides the potential to use HamNoSys, i.e., strings of characters, as a representation of an SL corpus instead of video material. Processing strings of characters instead of images can significantly contribute to sign language research. However, the complexity of HamNoSys makes it difficult to annotate without a lot of time and effort. Therefore annotation has to be automatized. This work proposes a conceptually new approach to this problem. It includes a new tree representation of the HamNoSys grammar that serves as a basis for the generation of grammatical training data and classification of complex movements using machine learning. Our automatic annotation system relies on HamNoSys grammar structure and can potentially be used on already existing SL corpora. It is retrainable for specific settings such as camera angles, speed, and gestures. Our approach is conceptually different from other SL recognition solutions and offers a developed methodology for future research.}
}

% Workshop paper: 5th Workshop on the Representation and Processing of Sign Languages at LREC 2012.
% NOTE(review): {\c c} in "Fran{\c c}ois" is a valid BibTeX special character (brace group whose
% first token is a control sequence), so sorting/labeling handle it correctly.
% NOTE(review): no ISBN recorded for this 2012 volume, unlike the 2020/2022 entries above —
% TODO confirm whether the proceedings volume has one.
@inproceedings{efthimiou:12025:sign-lang:lrec,
  author    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Glauert, John and Bowden, Richard and Braffort, Annelies and Collet, Christophe and Maragos, Petros and Lefebvre-Albaret, Fran{\c c}ois},
  title     = {Sign Language technologies and resources of the {Dicta-Sign} project},
  pages     = {37--44},
  editor    = {Crasborn, Onno and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2012} 5th Workshop on the Representation and Processing of Sign Languages: Interactions between Corpus and Lexicon},
  maintitle = {8th International Conference on Language Resources and Evaluation ({LREC} 2012)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Istanbul, Turkey},
  day       = {27},
  month     = may,
  year      = {2012},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/12025.html},
  abstract  = {Here we present the outcomes of Dicta-Sign FP7-ICT project. Dicta-Sign researched ways to enable communication between Deaf individuals through the development of human-computer interfaces (HCI) for Deaf users, by means of Sign Language. It has researched and developed recognition and synthesis engines for sign languages (SLs) that have brought sign recognition and generation technologies significantly closer to authentic signing. In this context, Dicta-Sign has developed several technologies demonstrated via a sign language aware Web 2.0, combining work from the fields of sign language recognition, sign language animation via avatars and sign language resources and language models development, with the goal of allowing Deaf users to make, edit, and review avatar-based sign language contributions online, similar to the way people nowadays make text-based contributions on the Web.}
}

% Workshop paper: 4th Workshop on the Representation and Processing of Sign Languages at LREC 2010.
% FIX(review): the abstract's enumeration "four European sign languages: Greek. British, ..." had a
% period after "Greek" where the list (four items) requires a comma — corrected to "Greek, British,".
% NOTE(review): \par and \begin{itemize} inside the abstract are raw LaTeX rendered when the
% abstract is typeset; left intact.
@inproceedings{efthimiou:10027:sign-lang:lrec,
  author    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Glauert, John and Bowden, Richard and Braffort, Annelies and Collet, Christophe and Maragos, Petros and Goudenove, Fran{\c c}ois},
  title     = {{DICTA-SIGN}: Sign Language Recognition, Generation and Modelling with application in Deaf Communication},
  pages     = {80--83},
  editor    = {Dreuw, Philippe and Efthimiou, Eleni and Hanke, Thomas and Johnston, Trevor and Mart{\'i}nez Ruiz, Gregorio and Schembri, Adam},
  booktitle = {Proceedings of the {LREC2010} 4th Workshop on the Representation and Processing of Sign Languages: Corpora and Sign Language Technologies},
  maintitle = {7th International Conference on Language Resources and Evaluation ({LREC} 2010)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Valletta, Malta},
  day       = {22--23},
  month     = may,
  year      = {2010},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/10027.html},
  abstract  = {Here we present the components and objectives of the EU funded project DICTA-SIGN. Dicta-Sign (http://www.dictasign.eu) is a three-year research project that involves the Institute for Language and Speech Processing, the University of Hamburg, the University of East Anglia, the University of Surrey, LIMSI/CNRS, the Universit{\'e} Paul Sabatier, the National Technical University of Athens, and WebSourd. It aims to improve the state of web-based communication for Deaf people by allowing the use of sign language in various human-computer interaction scenarios. It researches and develops recognition and synthesis engines for signed languages, aiming at a level of detail necessary for recognizing and generating authentic signing. In this context, Dicta-Sign aims at developing several technologies demonstrated via a sign language-aware Web 2.0. 
\par
Dicta-Sign supports four European sign languages: Greek, British, German, and French Sign Language and differs from previous work in that it aims to integrate tightly recognition, animation, and machine translation. All these components are informed by appropriate linguistic models from the ground up, including phonology, grammar, and non-manual features. 
\par
Expected outputs of the project include:\begin{itemize}\item A parallel multi-lingual corpus for four national sign languages - German, British, French and Greek (DGS, BSL, LSF and GSL respectively),\item A substantial multilingual dictionary of at least 1000 signs for each represented sign language,\item A continuous sign language recognition system that achieves significant improvement in terms of coverage and accuracy of sign recognition in comparison with current technology; furthermore this system will research the novel directions of multimodal sign fusion and signer adaptation,\item A language generation and synthesis component, covering in detail the role of manual, non-manual and placement within signing space,\item Annotation tools which incorporate these technologies providing access to the corpus and whose long term utility can be judged by the up-take by other sign language researchers,\item Three bidirectional integrated prototype systems which show the utility of the system components beyond the annotation tools application,\item A showcase demonstrator which exhibits how integration of the different components can support user communication needs.\end{itemize}}
}

% Workshop paper: 4th Workshop on the Representation and Processing of Sign Languages at LREC 2010.
% NOTE(review): double space after "JASigning." in the abstract is harmless — TeX collapses
% consecutive spaces when typesetting.
@inproceedings{elliott:10035:sign-lang:lrec,
  author    = {Elliott, Ralph and Bueno, Javier and Kennaway, Richard and Glauert, John},
  title     = {Towards the Integration of Synthetic {SL} Animation with Avatars into Corpus Annotation Tools},
  pages     = {84--87},
  editor    = {Dreuw, Philippe and Efthimiou, Eleni and Hanke, Thomas and Johnston, Trevor and Mart{\'i}nez Ruiz, Gregorio and Schembri, Adam},
  booktitle = {Proceedings of the {LREC2010} 4th Workshop on the Representation and Processing of Sign Languages: Corpora and Sign Language Technologies},
  maintitle = {7th International Conference on Language Resources and Evaluation ({LREC} 2010)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Valletta, Malta},
  day       = {22--23},
  month     = may,
  year      = {2010},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/10035.html},
  abstract  = {We outline the main features of our synthetic virtual human sign language system, JASigning.  We describe how we have extended its input notation, SiGML, to allow explicit control of performance time, and we describe our initial steps on the path to integrating virtual human sign language performance into annotation tools, where it may be compared with video depicting the corresponding real human performance.}
}

% Workshop paper: 4th Workshop on the Representation and Processing of Sign Languages at LREC 2010.
% NOTE(review): the multi-paragraph abstract (separated by \par) appears to be the authors'
% extended abstract verbatim, including non-native phrasings ("in contrary to", "been performed");
% left untouched deliberately — abstracts are quoted source material, not editorial text.
@inproceedings{goulas:10008:sign-lang:lrec,
  author    = {Goulas, Theodoros and Fotinea, Stavroula-Evita and Efthimiou, Eleni and Pissaris, Michalis},
  title     = {{SiS-Builder}: A Sign Synthesis Support Tool},
  pages     = {102--105},
  editor    = {Dreuw, Philippe and Efthimiou, Eleni and Hanke, Thomas and Johnston, Trevor and Mart{\'i}nez Ruiz, Gregorio and Schembri, Adam},
  booktitle = {Proceedings of the {LREC2010} 4th Workshop on the Representation and Processing of Sign Languages: Corpora and Sign Language Technologies},
  maintitle = {7th International Conference on Language Resources and Evaluation ({LREC} 2010)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Valletta, Malta},
  day       = {22--23},
  month     = may,
  year      = {2010},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/10008.html},
  abstract  = {Here we will present research work performed in the framework of the DICTA-SIGN project, closely related to Sign Language Synthesis and Animation and especially intended to cover for the need of creating lexical resources and evaluating them when performed by a signing avatar. Along these lines, a tool has been created to automatically generate SiGML transcriptions of any given HamNoSys string, as well as the relevant transcription file, by providing the HamNoSys characters of a sign.
\par
Users are allowed to create a phrase of up to 4 sign units, by introducing the corresponding HamNoSys notations in a sequence of appropriate fields. The related xml script is then automatically created, allowing also for on demand storage on the server.
\par
The here reported tool is web based, accessible by everyone, and allows users to interact with it without any special installations on the client side. Users may register, but this is not mandatory for use of the tool. Registered members can save their created scripts on the server in contrary to the non registered ones. Online and offline manuals are available to the users as well as a FAQ facility. 
\par
As regards further tool functionalities, users are also enabled to see the HamNoSys notation of a sign chosen from a validated list of lemmas or by entering the raw xml script in the proper field. Users are able to switch between SiGML data and HamNoSys notations on an instant by just selecting the wished function. This way, it is possible to test/ see the results of creation of a lexical item, either by consulting the HamNoSys sequence, for those familiar with the HamNoSys syntax, or by animating the results through the avatar with the use of the SiGML script.
\par
Users can create HamNoSys sequences by choosing the proper selection on the menu. This is a new feature, enhancing the tool's functionalities, added -upon completion of an evaluation phase- to allow users to create HamNoSys strings on line and then proceed with automatic creation of the corresponding SiGML scripts.
\par
Furthermore, along with the HamNoSys manual characters, users may add non manual characters to the creation of the SiGML script. If needed, users may add more than one movement of a particular body part, i.e. head or shoulders, to make animation look closer to natural signing. 
\par
The final step is creation of the script. The user is then able to copy and paste the script to the avatar page to visualise the results of the created sequence, the latter been performed by the avatar.
\par
Registered users are able to maintain/modify the data created by them. 
\par
The tool is based on open source internet technologies for easy access and compatibility. Technologies that have been used are mostly "php" and "java script".}
}

% Workshop paper: 4th Workshop on the Representation and Processing of Sign Languages at LREC 2010.
% Shares editor/booktitle/venue fields with the other 2010 entries in this file; kept inline
% rather than via crossref/@string, matching the file's existing convention.
@inproceedings{jennings:10011:sign-lang:lrec,
  author    = {Jennings, Vince and Elliott, Ralph and Kennaway, Richard and Glauert, John},
  title     = {Requirements for a Signing Avatar},
  pages     = {133--136},
  editor    = {Dreuw, Philippe and Efthimiou, Eleni and Hanke, Thomas and Johnston, Trevor and Mart{\'i}nez Ruiz, Gregorio and Schembri, Adam},
  booktitle = {Proceedings of the {LREC2010} 4th Workshop on the Representation and Processing of Sign Languages: Corpora and Sign Language Technologies},
  maintitle = {7th International Conference on Language Resources and Evaluation ({LREC} 2010)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Valletta, Malta},
  day       = {22--23},
  month     = may,
  year      = {2010},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/10011.html},
  abstract  = {We present the technical specification for an avatar that is compliant with Animgen, the synthetic signing engine used at the University of East Anglia for generating deaf signing animations. The specification will include both the basic definition required for any standard animating avatar, and the additional parameters that Animgen requires to generate signing. Avatars compatible with Animgen are created using the ARPToolkit, an application developed at UEA that has a plug-in architecture for tools that are used for rigging an avatar mesh for animation. The toolkit also generates the additional data needed by Animgen for each avatar.}
}

% Workshop paper: 4th Workshop on the Representation and Processing of Sign Languages at LREC 2010.
% FIX(review): the abstract contained "GP{\'i}s office" — classic mojibake: a right single quote
% (U+2019, 0x92 in Windows-1252) misread as Mac Roman yields "í", which was then LaTeX-escaped
% as {\'i}. Restored to the plain apostrophe: "GP's office".
@inproceedings{morrissey:10032:sign-lang:lrec,
  author    = {Morrissey, Sara and Somers, Harold and Smith, Robert and Gilchrist, Shane and Dandapat, Sandipan},
  title     = {Building Sign Language Corpora for Use in Machine Translation},
  pages     = {172--177},
  editor    = {Dreuw, Philippe and Efthimiou, Eleni and Hanke, Thomas and Johnston, Trevor and Mart{\'i}nez Ruiz, Gregorio and Schembri, Adam},
  booktitle = {Proceedings of the {LREC2010} 4th Workshop on the Representation and Processing of Sign Languages: Corpora and Sign Language Technologies},
  maintitle = {7th International Conference on Language Resources and Evaluation ({LREC} 2010)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Valletta, Malta},
  day       = {22--23},
  month     = may,
  year      = {2010},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/10032.html},
  abstract  = {In recent years data-driven methods of machine translation (MT) have overtaken rule-based approaches as the predominant means of automatically translating between languages. A pre-requisite for such an approach is a parallel corpus of the source and target languages. Technological developments in sign language (SL) capturing, analysis and processing tools now mean that SL corpora are becoming increasingly available. With transcription and language analysis tools being mainly designed and used for linguistic purposes, we describe the process of creating a multimedia parallel corpus specifically for the purposes of English to Irish Sign Language (ISL) MT. As part of our larger project on localisation, our research is focussed on developing assistive technology for patients with limited English in the domain of healthcare. 
\par
Focussing on the first point of contact a patient has with a GP's office, the medical secretary, we sought to develop a corpus from the dialogue between the two parties when scheduling an appointment. Throughout the development process we have created one parallel corpus in six different modalities from this initial dialogue, namely English speech, English text, ISL videos, Bangla text, HamNoSys transcription and SiGML code. In this paper we discuss the multi-stage process of the development of this parallel corpus as individual and interdependent entities, both for our own MT purposes and their usefulness in the wider MT and SL research domains.}
}

