@inproceedings{mcdonald:24018:sign-lang:lrec,
  author    = {McDonald, John C. and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Wolfe, Rosalee},
  title     = {Multilingual Synthesis of Depictions through Structured Descriptions of Sign: An Initial Case Study},
  pages     = {153--162},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC-COLING} 2024 11th Workshop on the Representation and Processing of Sign Languages: Evaluation of Sign Language Resources},
  maintitle = {2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation ({LREC-COLING} 2024)},
  publisher = {{ELRA Language Resources Association (ELRA) and the International Committee on Computational Linguistics (ICCL)}},
  address   = {Torino, Italy},
  day       = {25},
  month     = may,
  year      = {2024},
  isbn      = {978-2-493814-30-2},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/24018.html},
  abstract  = {Sign language synthesis systems must contend with an enormous variety of possible target languages across the world, and in many locations, such as Europe, the number of sign languages that can be found in a relatively limited geographical area can be surprising. For such a synthesis system to be widely useful, it must not be limited to only one target language. This presents challenges both for the linguistic models and the animation systems that drive these displays. This paper presents a case study for animating discourse in three target languages, French, Greek and German, generated directly from the same base linguistic description. The case study exploits non-lexical constructs in sign, which are more common among sign languages, while providing a first step for synthesizing those aspects that are different. Further, it suggests a possible path forward to exploring whether linguistic structures in one sign language can be exploited in other sign languages, which might be particularly helpful in under-resourced languages.}
}

@inproceedings{picron:24021:sign-lang:lrec,
  author    = {Picron, Frankie and Van Landuyt, Davy and Omardeen, Rehana and Efthimiou, Eleni and Wolfe, Rosalee and Fotinea, Stavroula-Evita and Goulas, Theodoros and Tismer, Christian and Kopf, Maria and Hanke, Thomas},
  title     = {The {EASIER} Mobile Application and Avatar End-User Evaluation Methodology},
  pages     = {175--180},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC-COLING} 2024 11th Workshop on the Representation and Processing of Sign Languages: Evaluation of Sign Language Resources},
  maintitle = {2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation ({LREC-COLING} 2024)},
  publisher = {{ELRA Language Resources Association (ELRA) and the International Committee on Computational Linguistics (ICCL)}},
  address   = {Torino, Italy},
  day       = {25},
  month     = may,
  year      = {2024},
  isbn      = {978-2-493814-30-2},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/24021.html},
  abstract  = {Here we report on the methodological approach adopted for the end-user evaluation studies carried out during the lifecycle of the EASIER project, focusing on the project's mobile app and avatar technologies. Evaluation was performed in two cycles and involved both deaf signers' and hearing sign language (SL) experts' groups from five SLs to provide user feedback, which served as a reference to base the next development steps of the respective EASIER components. With this goal in mind, priorities were (i) to exploit information gathered via focus group discussions after (ii) presenting evaluators with the technological components and related questionnaires fully accessible to signers to maximize feedback and underline the importance of user involvement in the development of the technology.}
}

@inproceedings{schulder:24034:sign-lang:lrec,
  author    = {Schulder, Marc and Bigeard, Sam and Kopf, Maria and Hanke, Thomas and Kuder, Anna and W{\'o}jcicka, Joanna and Mesch, Johanna and Bj{\"o}rkstrand, Thomas and Vacalopoulou, Anna and Vasilaki, Kyriaki and Goulas, Theodoros and Fotinea, Stavroula-Evita and Efthimiou, Eleni},
  title     = {Signs and Synonymity: Continuing Development of the {Multilingual} {Sign} {Language} {Wordnet}},
  pages     = {281--291},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC-COLING} 2024 11th Workshop on the Representation and Processing of Sign Languages: Evaluation of Sign Language Resources},
  maintitle = {2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation ({LREC-COLING} 2024)},
  publisher = {{ELRA Language Resources Association (ELRA) and the International Committee on Computational Linguistics (ICCL)}},
  address   = {Torino, Italy},
  day       = {25},
  month     = may,
  year      = {2024},
  isbn      = {978-2-493814-30-2},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/24034.html},
  abstract  = {The Multilingual Sign Language Wordnet is the first publicly available wordnet resource for sign languages. It is a growing multilingual resource providing data for eight sign languages to date. During the initial phase of its creation, the focus lay on producing the infrastructure to support various languages and to produce initial sets of content for them. This article represents the start of the second phase, in which the focus is moved to establishing overlapping coverage across the different sign languages. Building on the data produced so far, a new feature to assist annotation is introduced which leverages established partial synonymy between signs (inter- and cross-lingually) to discover likely additional synonymies. Other improvements to the annotation interface and workflow build directly on the experiences from the first phase. Working with the updated annotation interface, new data is produced for Polish Sign Language, Greek Sign Language and Swedish Sign Language.}
}

@inproceedings{gavrilescu:24037:sign-lang:lrec,
  author    = {Gavrilescu, Robert and Geraci, Carlo and Mesch, Johanna},
  title     = {Content Questions in Sign Language -- From theory to language description via corpus, experiments, and fieldwork},
  pages     = {298--306},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC-COLING} 2024 11th Workshop on the Representation and Processing of Sign Languages: Evaluation of Sign Language Resources},
  maintitle = {2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation ({LREC-COLING} 2024)},
  publisher = {{ELRA Language Resources Association (ELRA) and the International Committee on Computational Linguistics (ICCL)}},
  address   = {Torino, Italy},
  day       = {25},
  month     = may,
  year      = {2024},
  isbn      = {978-2-493814-30-2},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/24037.html},
  abstract  = {The theory of language structure informs us about what we should expect when we want to investigate a certain construction. However, reality is often richer than what theories predict. In this study, we start from a theoretically informed set of hypotheses about the structure of wh-questions in sign language, we test them using a sign language corpus, a designed production experiment, and structured fieldwork in three sign languages, Swedish, Greek and French Sign Languages. The results will inform us on what type of contribution each research method can provide to reach accurate language descriptions.}
}

@inproceedings{bigeard:22036:sign-lang:lrec,
  author    = {Bigeard, Sam and Schulder, Marc and Kopf, Maria and Hanke, Thomas and Vasilaki, Kyriaki and Vacalopoulou, Anna and Goulas, Theodoros and Dimou, Athanasia-Lida and Fotinea, Stavroula-Evita and Efthimiou, Eleni},
  title     = {Introducing Sign Languages to a Multilingual Wordnet: Bootstrapping Corpora and Lexical Resources of {Greek} {Sign} {Language} and {German} {Sign} {Language}},
  pages     = {9--15},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Kristoffersen, Jette and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2022} 10th Workshop on the Representation and Processing of Sign Languages: Multilingual Sign Language Resources},
  maintitle = {13th International Conference on Language Resources and Evaluation ({LREC} 2022)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Marseille, France},
  day       = {25},
  month     = jun,
  year      = {2022},
  isbn      = {979-10-95546-86-3},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/22036.html},
  abstract  = {Wordnets have been a popular lexical resource type for many years. Their sense-based representation of lexical items and numerous relation structures have been used for a variety of computational and linguistic applications. The inclusion of different wordnets into multilingual wordnet networks has further extended their use into the realm of cross-lingual research. Wordnets have been released for many spoken languages. Research has also been carried out into the creation of wordnets for several sign languages, but none have yet resulted in publicly available datasets. This article presents our own efforts towards an inclusion of sign languages in a multilingual wordnet, starting with Greek Sign Language (GSL) and German Sign Language (DGS). Based on differences in available language resources between GSL and DGS, we trial two workflows with different coverage priorities. We also explore how synergies between both workflows can be leveraged and how future work on additional sign languages could profit from building on existing sign language wordnet data. The results of our work are made publicly available.}
}

@inproceedings{dimou:70021:sltat:lrec,
  author    = {Dimou, Athanasia-Lida and Papavassiliou, Vassilis and McDonald, John C. and Goulas, Theodoros and Vasilaki, Kyriaki and Vacalopoulou, Anna and Fotinea, Stavroula-Evita and Efthimiou, Eleni and Wolfe, Rosalee},
  title     = {Signing Avatar Performance Evaluation within {EASIER} Project},
  pages     = {39--44},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and McDonald, John C. and Shterionov, Dimitar and Wolfe, Rosalee},
  booktitle = {Proceedings of the 7th International Workshop on Sign Language Translation and Avatar Technology: The Junction of the Visual and the Textual: Challenges and Perspectives},
  maintitle = {13th International Conference on Language Resources and Evaluation ({LREC} 2022)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Marseille, France},
  day       = {24},
  month     = jun,
  year      = {2022},
  isbn      = {979-10-95546-82-5},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/2022.sltat-1.6.html},
  abstract  = {The direct involvement of deaf users in the development and evaluation of signing avatars is imperative to achieve legibility and raise trust among synthetic signing technology consumers. A paradigm of constructive cooperation between researchers and the deaf community is the EASIER project, where user-driven design and technology development have already started producing results. One major goal of the project is the direct involvement of sign language (SL) users at every stage of development of the project's signing avatar. As developers wished to consider every parameter of SL articulation including affect and prosody in developing the EASIER SL representation engine, it was necessary to develop a steady communication channel with a wide public of SL users who may act as evaluators and can provide guidance throughout research steps, both during the project's end-user evaluation cycles and beyond. To this end, we have developed a questionnaire-based methodology, which enables researchers to reach signers of different SL communities on-line and collect their guidance and preferences on all aspects of SL avatar animation that are under study. In this paper, we report on the methodology behind the application of the EASIER evaluation framework for end-user guidance in signing avatar development as it is planned to address signers of four SLs -- Greek Sign Language (GSL), French Sign Language (LSF), German Sign Language (DGS) and Swiss German Sign Language (DSGS) -- during the first project evaluation cycle. We also briefly report on some interesting findings from the pilot implementation of the questionnaire with content from the Greek Sign Language (GSL).}
}

@inproceedings{papadimitriou:70026:sltat:lrec,
  author    = {Papadimitriou, Katerina and Potamianos, Gerasimos and Sapountzaki, Galini and Goulas, Theodoros and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Maragos, Petros},
  title     = {{Greek} {Sign} {Language} Recognition for the {SL-ReDu} Learning Platform},
  pages     = {79--84},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and McDonald, John C. and Shterionov, Dimitar and Wolfe, Rosalee},
  booktitle = {Proceedings of the 7th International Workshop on Sign Language Translation and Avatar Technology: The Junction of the Visual and the Textual: Challenges and Perspectives},
  maintitle = {13th International Conference on Language Resources and Evaluation ({LREC} 2022)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Marseille, France},
  day       = {24},
  month     = jun,
  year      = {2022},
  isbn      = {979-10-95546-82-5},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/2022.sltat-1.12.html},
  abstract  = {There has been increasing interest lately in developing education tools for sign language (SL) learning that enable self-assessment and objective evaluation of learners' SL productions, assisting both students and their instructors. Crucially, such tools require the automatic recognition of SL videos, while operating in a signer-independent fashion and under realistic recording conditions. Here, we present an early version of a Greek Sign Language (GSL) recognizer that satisfies the above requirements, and integrate it within the SL-ReDu learning platform that constitutes a first in GSL with recognition functionality. We develop the recognition module incorporating state-of-the-art deep-learning based visual detection, feature extraction, and classification, designing it to accommodate a medium-size vocabulary of isolated signs and continuously fingerspelled letter sequences. We train the module on a specifically recorded GSL corpus of multiple signers by a web-cam in non-studio conditions, and conduct both multi-signer and signer-independent recognition experiments, reporting high accuracies. Finally, we let student users evaluate the learning platform during GSL production exercises, reporting very satisfactory objective and subjective assessments based on recognition performance and collected questionnaires, respectively.}
}

@inproceedings{wolfe:70005:sltat:lrec,
  author    = {Wolfe, Rosalee and McDonald, John C. and Johnson, Ronan and Sturr, Ben and Klinghoffer, Syd and Bonzani, Anthony and Alexander, Andrew and Barnekow, Nicole},
  title     = {Supporting Mouthing in Signed Languages: New innovations and a proposal for future corpus building},
  pages     = {125--130},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and McDonald, John C. and Shterionov, Dimitar and Wolfe, Rosalee},
  booktitle = {Proceedings of the 7th International Workshop on Sign Language Translation and Avatar Technology: The Junction of the Visual and the Textual: Challenges and Perspectives},
  maintitle = {13th International Conference on Language Resources and Evaluation ({LREC} 2022)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Marseille, France},
  day       = {24},
  month     = jun,
  year      = {2022},
  isbn      = {979-10-95546-82-5},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/2022.sltat-1.19.html},
  abstract  = {A recurring concern, oft repeated, regarding the quality of signing avatars is the lack of proper facial movements, particularly in actions that involve mouthing. An analysis uncovered three challenges contributing to the problem. The first is a difficulty in devising an algorithmic strategy for generating mouthing due to the rich variety of mouthings in sign language. For example, part or all of a spoken word may be mouthed depending on the sign language, the syllabic structure of the mouthed word, as well as the register of address and discourse setting. The second challenge was technological. Previous efforts to create avatar mouthing have failed to model the timing present in mouthing or have failed to properly model the mouth's appearance. The third challenge is one of usability. Previous editing systems, when they existed, were time-consuming to use. This paper describes efforts to improve avatar mouthing by addressing these challenges, resulting in a new approach for mouthing animation. The paper concludes by proposing an experiment in corpus building using the new approach.}
}

@inproceedings{koulierakis:20035:sign-lang:lrec,
  author    = {Koulierakis, Ioannis and Siolas, Georgios and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Stafylopatis, Andreas-Georgios},
  title     = {Recognition of Static Features in Sign Language Using Key-Points},
  pages     = {123--126},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2020} 9th Workshop on the Representation and Processing of Sign Languages: Sign Language Resources in the Service of the Language Community, Technological Challenges and Application Perspectives},
  maintitle = {12th International Conference on Language Resources and Evaluation ({LREC} 2020)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Marseille, France},
  day       = {16},
  month     = may,
  year      = {2020},
  isbn      = {979-10-95546-54-2},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/20035.html},
  abstract  = {In this paper we report on a research effort focusing on recognition of static features of sign formation in single sign videos. Three sequential models have been developed for handshape, palm orientation and location of sign formation respectively, which make use of key-points extracted via OpenPose software. The models have been applied to a Danish and a Greek Sign Language dataset, providing results around 96{\%}. Moreover, during the reported research, a method has been developed for identifying the time-frame of real signing in the video, which allows to ignore transition frames during sign recognition processing.}
}

@inproceedings{efthimiou:18046:sign-lang:lrec,
  author    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Kakoulidis, Panos and Goulas, Theodoros},
  title     = {Terminology Enrichment through Crowd Sourcing at {PYLES} Platform},
  pages     = {33--38},
  editor    = {Bono, Mayumi and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Kristoffersen, Jette and Mesch, Johanna and Osugi, Yutaka},
  booktitle = {Proceedings of the {LREC2018} 8th Workshop on the Representation and Processing of Sign Languages: Involving the Language Community},
  maintitle = {11th International Conference on Language Resources and Evaluation ({LREC} 2018)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Miyazaki, Japan},
  day       = {12},
  month     = may,
  year      = {2018},
  isbn      = {979-10-95546-01-6},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/18046.html},
  abstract  = {The Information System PYLES is a management system for on-line lessons, designed to support accessible asynchronous e-learning, addressing learning needs of students with various communication capabilities and needs at the Technological Educational Institute of Athens (TEI-A). It, thus, exploits both up-to-date assistive technology software and content in various forms.  This platform has been used as the basis for the development of an active repository of multimodal educational resources, also incorporating a terminology lexicon for the Greek Sign Language (GSL) and a general purpose dictionary of GSL. The platform provides advanced customization options according to user needs but also a collaborative environment for the support of teaching and learning processes.  The information system (http://eclassamea.teiath.gr/) is built on the open code platform `Open eClass' (http://www.openeclass.org/), a free e-learning platform that it actually enriches with tools and functionalities which allow extended accessibility regarding both the environment and the educational content.  Regarding customization to serve GSL signers' needs, the platform incorporates: -         Selected lesson presentations in GSL on the basis of deaf students' preferences regarding the curriculum offers -         An on-line dictionary of general purpose lemma list -         An on-line terminology glossary -         Administrative form related information in GSL  Following the Open eClass pattern, three basic user roles are supported: (i) student, (ii) instructor, and (iii) administrator. However, the platform also supports special intermediary roles such as ``administrator assistant'', ``user administrator'', ``group leader'' and ``visitor''.  These roles serve among other functionalities, the options available for lexical material enrichment through crowd sourcing.  
The GSL terminology environment allows for the creation of different glossaries directly by their users, where GSL signers are invited to upload their suggestions for various terms under specific quality control conditions.  Authorized users may enter new terminology items including the term definition and various supporting multimedia material (icons, video, text etc), while they can modify or completely delete entries. Furthermore, they can validate terms suggested by non authorized users to make them visible to the whole user community. Terminology enrichment actions incorporate: 1.          New lemma or new sense entry 2.          Modification of a lemma or a sense 3.          Validation of a proposed lemma sense 4.          Communication or hiding of a lemma sense or a lemma description 5.          Linking of a lemma with a lemma in a different language (Greek and/or English)}
}

@inproceedings{efthimiou:18043:sign-lang:lrec,
  author    = {Efthimiou, Eleni and Vasilaki, Kyriaki and Fotinea, Stavroula-Evita and Vacalopoulou, Anna and Goulas, Theodoros and Dimou, Athanasia-Lida},
  title     = {The {POLYTROPON} Parallel Corpus},
  pages     = {39--44},
  editor    = {Bono, Mayumi and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Kristoffersen, Jette and Mesch, Johanna and Osugi, Yutaka},
  booktitle = {Proceedings of the {LREC2018} 8th Workshop on the Representation and Processing of Sign Languages: Involving the Language Community},
  maintitle = {11th International Conference on Language Resources and Evaluation ({LREC} 2018)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Miyazaki, Japan},
  day       = {12},
  month     = may,
  year      = {2018},
  isbn      = {979-10-95546-01-6},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/18043.html},
  abstract  = {Here we present the POLYTROPON parallel corpus for the language pair Greek Sign Language (GSL) -- Greek, which is created and annotated aiming to serve as a golden corpus available to the community of SL technologies for experimentation with various approaches to SL processing, focusing on machine learning for SL recognition, machine translation (MT) and information retrieval. The corpus volume incorporates 3653 clauses in three repetitions each, captured in front view by means of one HD and one kinect camera. Corpus annotation has allowed to extract initial features sets with the aim to reach a GSL level of abstraction close to the one currently available for Greek language representations, exploiting the inherent characteristics of the language in view of applying initial deep learning experiments on GSL data, where both words and signs may be represented as vectors of characteristics which allow dependency tree structure representations of input text and signed clauses as those created by the use of Tree Editor TrEd 2.0.}
}

@inproceedings{kimmelman-etal-2018-ipsl:lrec,
  author    = {Kimmelman, Vadim and Klezovich, Anna and Moroz, George},
  title     = {{IPSL}: A Database of Iconicity Patterns in Sign Languages. Creation and Use},
  pages     = {4230--4234},
  editor    = {Calzolari, Nicoletta and Choukri, Khalid and Cieri, Christopher and Declerck, Thierry and Goggi, Sara and Hasida, Koiti and Isahara, Hitoshi and Maegaard, Bente and Mariani, Joseph and Mazo, H{\'e}l{\`e}ne and Moreno, Asuncion and Odijk, Jan and Piperidis, Stelios and Tokunaga, Takenobu},
  booktitle = {11th International Conference on Language Resources and Evaluation ({LREC} 2018)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Miyazaki, Japan},
  day       = {7--12},
  month     = may,
  year      = {2018},
  isbn      = {979-10-95546-00-9},
  language  = {english},
  url       = {https://aclanthology.org/L18-1667}
}

@inproceedings{efthimiou:16003:sign-lang:lrec,
  author    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Dimou, Athanasia-Lida and Goulas, Theodoros and Karioris, Panagiotis and Vasilaki, Kyriaki and Vacalopoulou, Anna and Pissaris, Michalis},
  title     = {From a Sign Lexical Database to an {SL} Golden Corpus -- the {POLYTROPON} {SL} Resource},
  pages     = {63--68},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2016} 7th Workshop on the Representation and Processing of Sign Languages: Corpus Mining},
  maintitle = {10th International Conference on Language Resources and Evaluation ({LREC} 2016)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Portoro{\v z}, Slovenia},
  day       = {28},
  month     = may,
  year      = {2016},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/16003.html},
  abstract  = {The POLYTROPON lexicon resource is being created in an attempt i) to gather and recapture already available lexical resources of Greek Sign Language (GSL) in an up-to-date homogeneous manner, ii) to enrich these resources with new lemmas, and iii) to end up with a multipurpose-multiuse resource which can be equally exploited in end user oriented educational/communication services and in supporting various SL technologies. The database that hosts the newly acquired resource, incorporates various SL oriented fields of information, including information on compounding, GSL synonyms, classifier qualities, lemma related senses, semantic groupings etc, and also lemma coding for their manual and non-manual articulation activity. It also provides linking of GSL and Modern Greek equivalent(s) lemma pairs to serve bilingual use purposes. A by-product of considerable value is the parallel corpus which derived from the GSL examples of use accompanying each lemma entry in the dictionary and their translations into Modern Greek. The annotation of the corpus for the entailed signs and assignment of respective glosses in combination with data capturing by both HD and Kinect cameras in three repetitions, allowed for the creation of a golden parallel corpus available to the community of SL technologies for experimentation with various approaches to SL recognition, MT and information retrieval.}
}

@inproceedings{fotinea-etal-2016-multimodal:lrec,
  author    = {Fotinea, Stavroula-Evita and Efthimiou, Eleni and Koutsombogera, Maria and Dimou, Athanasia-Lida and Goulas, Theodoros and Vasilaki, Kyriaki},
  title     = {Multimodal Resources for Human-Robot Communication Modelling},
  pages     = {3455--3460},
  editor    = {Calzolari, Nicoletta and Choukri, Khalid and Declerck, Thierry and Goggi, Sara and Grobelnik, Marko and Maegaard, Bente and Mariani, Joseph and Mazo, H{\'e}l{\`e}ne and Moreno, Asuncion and Odijk, Jan and Piperidis, Stelios},
  booktitle = {10th International Conference on Language Resources and Evaluation ({LREC} 2016)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Portoro{\v z}, Slovenia},
  day       = {23--28},
  month     = may,
  year      = {2016},
  isbn      = {978-2-9517408-9-1},
  language  = {english},
  url       = {https://aclanthology.org/L16-1551},
  abstract  = {This paper reports on work related to the modelling of Human-Robot Communication on the basis of multimodal and multisensory human behaviour analysis. A primary focus in this framework of analysis is the definition of semantics of human actions in interaction, their capture and their representation in terms of behavioural patterns that, in turn, feed a multimodal human-robot communication system. Semantic analysis encompasses both oral and sign languages, as well as both verbal and non-verbal communicative signals to achieve an effective, natural interaction between elderly users with slight walking and cognitive inability and an assistive robotic platform.}
}

@inproceedings{dimou:14022:sign-lang:lrec,
  author    = {Dimou, Athanasia-Lida and Goulas, Theodoros and Efthimiou, Eleni and Fotinea, Stavroula-Evita},
  title     = {Creation of a multipurpose sign language lexical resource: The {GSL} lexicon database},
  pages     = {37--42},
  editor    = {Crasborn, Onno and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2014} 6th Workshop on the Representation and Processing of Sign Languages: Beyond the Manual Channel},
  maintitle = {9th International Conference on Language Resources and Evaluation ({LREC} 2014)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Reykjavik, Iceland},
  day       = {31},
  month     = may,
  year      = {2014},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/14022.html},
  abstract  = {The GSL lexicon database is the first extensive database of Greek Sign Language (GSL) signs, created on the basis of knowledge derived from the linguistic analysis of natural signers' data. It incorporates a lemma list that currently includes approximately 6,000 entries and is intended to reach a total number of 10,000 entries within the next two years. The design of the database allows for classification of signs on the basis of their articulation features as regards both manual and non-manual elements. The adopted information management schema accompanying each entry provides for retrieval according to a variety of linguistic properties. In parallel, annotation of the full set of sign articulation features feeds more natural performance of synthetic signing engines and more effective treatment of sign language (SL) data in the framework of sign recognition and natural language processing.}
}

@inproceedings{dimou:12018:sign-lang:lrec,
  author    = {Dimou, Athanasia-Lida and Pitsikalis, Vassilis and Goulas, Theodoros and Theodorakis, Stavros and Karioris, Panagiotis and Pissaris, Michalis and Fotinea, Stavroula-Evita and Efthimiou, Eleni and Maragos, Petros},
  title     = {A {GSL} continuous phrase corpus: Design and acquisition},
  pages     = {23--26},
  editor    = {Crasborn, Onno and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2012} 5th Workshop on the Representation and Processing of Sign Languages: Interactions between Corpus and Lexicon},
  maintitle = {8th International Conference on Language Resources and Evaluation ({LREC} 2012)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Istanbul, Turkey},
  day       = {27},
  month     = may,
  year      = {2012},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/12018.html},
  abstract  = {The corpus presented in this article is composed of a limited number of Greek Sign Language (GSL) sentences and was created in order to provide additional data to the already obtained corpus during the first year of the Dicta-Sign project (Matthes et al., 2010). More specifically this corpus intended to serve as the ground upon which a significant part of the recognition process would be tested and evaluated, more precisely, the continuous sign language recognition algorithms developed in the project.
\par
Given the targeted nature of this corpus we present here the constraints as well as the procedure followed in order to obtain it.
\par
The procedure followed for the creation of this corpus, consists of its linguistic design and validation, the studio and hardware acquisition configuration, the implementation and supervision of the acquisition itself and the post-processing and annotation of the obtained data in order to release the set of usable annotated resources. The specific GSL phrase corpus forms the basis for machine learning and training to serve experimentation in the domain of continuous sign language processing and recognition.}
}

@inproceedings{efthimiou:12025:sign-lang:lrec,
  author    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Glauert, John and Bowden, Richard and Braffort, Annelies and Collet, Christophe and Maragos, Petros and Lefebvre-Albaret, Fran{\c c}ois},
  title     = {Sign Language technologies and resources of the {Dicta-Sign} project},
  pages     = {37--44},
  editor    = {Crasborn, Onno and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2012} 5th Workshop on the Representation and Processing of Sign Languages: Interactions between Corpus and Lexicon},
  maintitle = {8th International Conference on Language Resources and Evaluation ({LREC} 2012)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Istanbul, Turkey},
  day       = {27},
  month     = may,
  year      = {2012},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/12025.html},
  abstract  = {Here we present the outcomes of Dicta-Sign FP7-ICT project. Dicta-Sign researched ways to enable communication between Deaf individuals through the development of human-computer interfaces (HCI) for Deaf users, by means of Sign Language. It has researched and developed recognition and synthesis engines for sign languages (SLs) that have brought sign recognition and generation technologies significantly closer to authentic signing. In this context, Dicta-Sign has developed several technologies demonstrated via a sign language aware Web 2.0, combining work from the fields of sign language recognition, sign language animation via avatars and sign language resources and language models development, with the goal of allowing Deaf users to make, edit, and review avatar-based sign language contributions online, similar to the way people nowadays make text-based contributions on the Web.}
}

@inproceedings{filhol:12024:sign-lang:lrec,
  author    = {Filhol, Michael and Braffort, Annelies},
  title     = {A Study on Qualification/Naming Structures in Sign Languages},
  pages     = {63--66},
  editor    = {Crasborn, Onno and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2012} 5th Workshop on the Representation and Processing of Sign Languages: Interactions between Corpus and Lexicon},
  maintitle = {8th International Conference on Language Resources and Evaluation ({LREC} 2012)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Istanbul, Turkey},
  day       = {27},
  month     = may,
  year      = {2012},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/12024.html},
  abstract  = {In the prospect of animating virtual signers, this article addresses the issue of representing Sign, in particular on levels not restricted to the language lexicon. In order to choose and design a suitable model, we illustrate the main steps of our corpus-based methodology for linguistic structure identification and formal description with the example of a specific structure we have named ``qualification/naming''. We also discuss its similarity and difference with other Sign properties described in the literature such as compound signs. Consequently we explain our choice for a description model that does not separate lexicon and grammar in two disjoint levels for virtual signer input.}
}

@inproceedings{matthes:12016:sign-lang:lrec,
  author    = {Matthes, Silke and Hanke, Thomas and Regen, Anja and Storz, Jakob and Worseck, Satu and Efthimiou, Eleni and Dimou, Athanasia-Lida and Braffort, Annelies and Glauert, John and Safar, Eva},
  title     = {{Dicta-Sign} -- Building a Multilingual Sign Language Corpus},
  pages     = {117--122},
  editor    = {Crasborn, Onno and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2012} 5th Workshop on the Representation and Processing of Sign Languages: Interactions between Corpus and Lexicon},
  maintitle = {8th International Conference on Language Resources and Evaluation ({LREC} 2012)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Istanbul, Turkey},
  day       = {27},
  month     = may,
  year      = {2012},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/12016.html},
  abstract  = {This paper presents the multilingual corpus of four European sign languages compiled in the framework of the Dicta-Sign project. Dicta-Sign researched ways to enable communication between Deaf individuals through the development of human-computer interfaces (HCI) for Deaf users, by means of sign language. Sign language resources were compiled to inform progress in the other research areas within the project, especially video recognition of signs, sign-to-sign translation, linguistic modelling, and sign generation. The aim for the corpus data collection was to achieve as high a level of naturalness as possible with semi-spontaneous utterances under lab conditions. At the same time the elicited data were supposed to be semantically close enough to be comparable both across individual informants and for all four sign languages. The sign language data were annotated using iLex and are now made available via a web portal that allows for different access options to the data.}
}

@inproceedings{efthimiou:10005:sign-lang:lrec,
  author    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Dimou, Athanasia-Lida and Kalimeris, Constandinos},
  title     = {Towards decoding Classifier function in {GSL}},
  pages     = {76--79},
  editor    = {Dreuw, Philippe and Efthimiou, Eleni and Hanke, Thomas and Johnston, Trevor and Mart{\'i}nez Ruiz, Gregorio and Schembri, Adam},
  booktitle = {Proceedings of the {LREC2010} 4th Workshop on the Representation and Processing of Sign Languages: Corpora and Sign Language Technologies},
  maintitle = {7th International Conference on Language Resources and Evaluation ({LREC} 2010)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Valletta, Malta},
  day       = {22--23},
  month     = may,
  year      = {2010},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/10005.html},
  abstract  = {Here we will present work based on a corpus specially designed and elicited in order to provide data for the study of Classifier function in Greek Sign Language (GSL).
\par
Data elicitation was based on presentation to informants of a series of stimuli which lead to utterances entailing the set of Classifier functions met in the language.
\par
The whole set of video recorded data were annotated in order to provide an appropriate corpus for the investigation of Classifier instantiations. Annotation work was complemented by the use of a search tool external to the ELAN environment, which was developed in order to handle the whole of annotated material. This search tool allows to create a data base of annotated video clips, by exploiting the set of classification features used to annotate the video recorded data. Among the attribute-value pairs forming the complete set of annotation features used, the following tiers of annotation were adopted: a) "Discourse Unit", indicating the sentence or utterance in which a classifier is met, b) "CP{\_}{\_}{\_}{\_}" for marking the maximal classifier predicate, c) "CP{\_}GLOSS" to describe the semantic content of classifiers, and d) "HS" with font symbols as values for specifying the handshape or handshapes involved in signing. These tiers provide the necessary information to group pieces of data as to the different classifiers and classifier functions met in GSL, i.e. [Discourse Unit: various types of tables], [CP{\_}MAX: round tables of different sizes], [CP{\_}GLOSS: ROUND, FLAT, SIZE], [HS: D, L, B...].
\par
Theoretical analysis of the so created linguistic data supports formulation of a proposal for Classifier behaviour which differentiates among three distinguished major grammar functions. The key property that allows for a principled account of Classifier behaviour is that Classifiers are semantic markers which create semantic classes of objects recognised to share a set of common semantic features.
\par
In this line, we will argue that Classifiers are morphemes articulated according to SL phonology primes. According to our proposal GSL utilises Classifier morphemes in three distinct ways:\begin{enumerate}\item To create new lexicon items: Classifier affixation adds specific semantic properties to an entity, making it part of the semantic class this specific Classifier identifies.\item To add qualitative/quantitative values: Classifiers function as modifiers adding qualitative/quantitative values to syntactic heads or maximal phrases.\item To serve co-indexing: In sign utterances, Classifiers may be used as pronominal elements, where co-indexing obligatorily involves an expanded set of agreement features which, apart from the standard features "Number" and "Gender", also includes the feature "Semantic Class".\end{enumerate}}
}

@inproceedings{efthimiou:10027:sign-lang:lrec,
  author    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Glauert, John and Bowden, Richard and Braffort, Annelies and Collet, Christophe and Maragos, Petros and Goudenove, Fran{\c c}ois},
  title     = {{DICTA-SIGN}: Sign Language Recognition, Generation and Modelling with application in Deaf Communication},
  pages     = {80--83},
  editor    = {Dreuw, Philippe and Efthimiou, Eleni and Hanke, Thomas and Johnston, Trevor and Mart{\'i}nez Ruiz, Gregorio and Schembri, Adam},
  booktitle = {Proceedings of the {LREC2010} 4th Workshop on the Representation and Processing of Sign Languages: Corpora and Sign Language Technologies},
  maintitle = {7th International Conference on Language Resources and Evaluation ({LREC} 2010)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Valletta, Malta},
  day       = {22--23},
  month     = may,
  year      = {2010},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/10027.html},
  abstract  = {Here we present the components and objectives of the EU funded project DICTA-SIGN. Dicta-Sign (http://www.dictasign.eu) is a three-year research project that involves the Institute for Language and Speech Processing, the University of Hamburg, the University of East Anglia, the University of Surrey, LIMSI/CNRS, the Universit{\'e} Paul Sabatier, the National Technical University of Athens, and WebSourd. It aims to improve the state of web-based communication for Deaf people by allowing the use of sign language in various human-computer interaction scenarios. It researches and develops recognition and synthesis engines for signed languages, aiming at a level of detail necessary for recognizing and generating authentic signing. In this context, Dicta-Sign aims at developing several technologies demonstrated via a sign language-aware Web 2.0. 
\par
Dicta-Sign supports four European sign languages: Greek, British, German, and French Sign Language and differs from previous work in that it aims to integrate tightly recognition, animation, and machine translation. All these components are informed by appropriate linguistic models from the ground up, including phonology, grammar, and non-manual features.
\par
Expected outputs of the project include:\begin{itemize}\item A parallel multi-lingual corpus for four national sign languages - German, British, French and Greek (DGS, BSL, LSF and GSL respectively),\item A substantial multilingual dictionary of at least 1000 signs for each represented sign language,\item A continuous sign language recognition system that achieves significant improvement in terms of coverage and accuracy of sign recognition in comparison with current technology; furthermore this system will research the novel directions of multimodal sign fusion and signer adaptation,\item A language generation and synthesis component, covering in detail the role of manual, non-manual and placement within signing space,\item Annotation tools which incorporate these technologies providing access to the corpus and whose long term utility can be judged by the up-take by other sign language researchers,\item Three bidirectional integrated prototype systems which show the utility of the system components beyond the annotation tools application,\item A showcase demonstrator which exhibits how integration of the different components can support user communication needs.\end{itemize}}
}

@inproceedings{matthes:10019:sign-lang:lrec,
  author    = {Matthes, Silke and Hanke, Thomas and Storz, Jakob and Efthimiou, Eleni and Dimou, Athanasia-Lida and Karioris, Panagiotis and Braffort, Annelies and Choisier, Annick and Pelhate, Julia and Safar, Eva},
  title     = {Elicitation tasks and materials designed for {Dicta-Sign}'s multi-lingual corpus},
  pages     = {158--163},
  editor    = {Dreuw, Philippe and Efthimiou, Eleni and Hanke, Thomas and Johnston, Trevor and Mart{\'i}nez Ruiz, Gregorio and Schembri, Adam},
  booktitle = {Proceedings of the {LREC2010} 4th Workshop on the Representation and Processing of Sign Languages: Corpora and Sign Language Technologies},
  maintitle = {7th International Conference on Language Resources and Evaluation ({LREC} 2010)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Valletta, Malta},
  day       = {22--23},
  month     = may,
  year      = {2010},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/10019.html},
  abstract  = {Within the framework of the Dicta-Sign project, parallelised sign language corpora are being compiled for four European sign languages (BSL, DGS, GSL, and LSF). The aim for the data collection was to achieve as high a level of naturalness as can be achieved with semi-spontaneous utterances under lab conditions. Therefore, informants were filmed in pairs interacting with each other. With respect to parallelisability, elicitation tasks had to be designed that result in semantically close answers without predetermining the choice of vocabulary and grammar. 
\par
The domain selected for Dicta-Sign is `Travel across Europe'. The tasks developed within the project cover different interaction formats ranging from monologues to sequences of very short turns, also with different levels of predictability. They include communication for transport by different means and contexts as well as related personal experiences. The elicitation materials are of different media formats and at various levels of complexity. They comprise of town and transportation network maps, pictures displaying a variety of places, items and situations linked to the target domain, as well as stories presented in sign language or as a picture story. In each session ten different tasks are to be performed, each of them planned to have a duration of about five to ten minutes, thereby switching roles between the informants several times during a recording session. 
\par
Taking into account cultural differences as well as language dependent issues regarding the different countries in the project, the material was designed in a way that only minor adjustments are needed that do not change the character of a task. The elicitation tasks and materials developed within the project as well as experiences gained adjusting and using the material for Dicta-Sign's different target languages are illustrated in this paper.}
}

@inproceedings{safar:10060:sign-lang:lrec,
  author    = {Safar, Eva and Glauert, John},
  title     = {Sign Language {HPSG}},
  pages     = {204--207},
  editor    = {Dreuw, Philippe and Efthimiou, Eleni and Hanke, Thomas and Johnston, Trevor and Mart{\'i}nez Ruiz, Gregorio and Schembri, Adam},
  booktitle = {Proceedings of the {LREC2010} 4th Workshop on the Representation and Processing of Sign Languages: Corpora and Sign Language Technologies},
  maintitle = {7th International Conference on Language Resources and Evaluation ({LREC} 2010)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Valletta, Malta},
  day       = {22--23},
  month     = may,
  year      = {2010},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/10060.html},
  abstract  = {We present an overview of some relevant aspects of sign language synthesis in the ViSiCAST project, which might serve as a possible basis for the Dicta-Sign project. Dicta-Sign is a 3-year EU-funded project, that undertakes parallel corpus collection in different Sign Languages (SLs) and fundamental research and development of sign recognition and generation techniques in order to open up new potential applications for sign language users. One of the aims in Dicta-Sign is to find a model that is suitable for both recognition and generation. In this paper we revisit the main aspects of the synthesis techniques implemented in ALE Prolog using a sign language specific HPSG with the view for future changes needed. We briefly describe the HPSG feature structure and the rules and principles of the grammar, which cover important SL phenomena like mode, prodrop, plurals, classifiers and signing space.}
}

@inproceedings{efthimiou:08030:sign-lang:lrec,
  author    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita},
  title     = {Annotation and Maintenance of the {Greek} {Sign} {Language} Corpus ({GSLC})},
  pages     = {58--63},
  editor    = {Crasborn, Onno and Efthimiou, Eleni and Hanke, Thomas and Thoutenhoofd, Ernst D. and Zwitserlood, Inge},
  booktitle = {Proceedings of the {LREC2008} 3rd Workshop on the Representation and Processing of Sign Languages: Construction and Exploitation of Sign Language Corpora},
  maintitle = {6th International Conference on Language Resources and Evaluation ({LREC} 2008)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Marrakech, Morocco},
  day       = {1},
  month     = jun,
  year      = {2008},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/08030.html},
  abstract  = {This paper presents the design and development of a representative language corpus for the Greek Sign Language (GSL). Focus is put on the annotation methodology adopted to provide for linguistic information and annotated corpus exploitation for the extraction of a linguistic model intended to support HCI applications based on sign recognition.
\par
The existence of an annotated corpus is a prerequisite for the creation of linguistic resources and for the development of NLP applications for any natural language articulated either orally or through signing. In the case of a sign language corpus, annotation performed on video sequences, is intended to support exploitation of linguistic information conveyed through various combinations of spatial-temporal parameters around the signer's body.
\par
The Greek Sign Language Corpus (GSLC) is being developed in the framework of the national project DIANOEMA (GSRT, M3.3, id 35) that aims at optical analysis and recognition of both static and dynamic signs, incorporating a GSL linguistic model in controlling robot motion. Since no previous GSL corpus is available to meet the requirements of multipurpose use in an HCI environment, the design of GSLC has taken into account annotation requirements as well as linguistic adequacy controls to ensure both corpus-based linguistic analysis and corpus re-usability. Linguistic analysis is a sufficient component for the development of NLP tools that, in the case of signed languages, support deaf accessibility to IT content and services. To effectively support this kind of language intensive operations, linguistic analysis has to derive from safe language data and also provide for an amount of linguistic phenomena, which allow for an adequate description of the language structure. In this context, safe data are defined as data commonly accepted by a specific language community. The design of GSLC content has made a distinction between three parts on the basis of the articulation units to be considered in respect to both linguistic analysis and the sign recognition process.
\par
The first part comprises a list of lemmata which are representative of the use of handshapes as a primary sign formation component. This part of the corpus is developed on the basis of measurements of handshape frequency of use in sign morpheme formation but it has also taken into account the complete set of sign formation parameters. In this sense, in order to provide data for all sign articulation features of GSL, the corpus also includes characteristic lemmata with respect to all manual and non-manual features of the language. The second part of GSLC is composed of sets of controlled utterances, which form paradigms capable of exposing the mechanisms GSL uses to express specific core grammar phenomena. The grammar coverage that corresponds to this part of the corpus is representative enough to allow for a formal description of the main structural-semantic mechanisms of the language. Finally, the third part of GSLC contains free narration sequences, which are intended to provide data of spontaneous language production and be used for machine learning purposes as regards sign recognition. With respect to data collection, all parts of the corpus have been performed by native signers under controlled conditions that guarantee absence of language interference from the part of the spoken language of the signers' environment. Finally, quality control mechanisms have been applied to ensure data integrity.
\par
In the framework of the current research target, annotation on the GSLC involves, on the one hand, descriptions of the phonological structure of morphemes and, on the other hand, sentence level markers. Sign phonology involves manual and non-manual features of sign formation. For the description of the phonological composition of sign morphemes the HamNoSys coding set is being used along with GSL specific feature coding. Sentence level annotation, except for sentence boundaries, involves phrase boundary marking and grammar information marking related to multi-layer indicators, as is the case of e.g. topicalisation, nominal phrase formation, temporal indicators and sentential negation. Sentence level annotation makes use of the ELAN annotator. Annotation integrity is subject to quality controls that involve both peer and external review by expert annotators.
\par
The GSLC current implementation has foreseen extensibility on all content levels as well as on annotation features, thus, allowing for corpus re-usability in GSL research and HCI applications beyond the scope of a specific research project.
\par
Indicative bibliography
\par
Bowden, R., Windridge, D., Kadir, T., Zisserman, A. {\&} Brady, M. (2004). «A Linguistic Feature Vector for the Visual Interpretation of Sign Language», In Tomas Pajdla, Jiri Matas (Eds), Proc. 8th European Conference on Computer Vision, ECCV04. LNCS3022, Springer-Verlag, Volume 1, pp. 391--401.
\par
Bellugi, U. {\&} Fischer, S. (1972). «A comparison of Sign language and spoken language: rate and grammatical mechanisms», Cognition: International Journal of Cognitive Psychology, 1, 173-200.
\par
Efthimiou, E., Sapountzaki, G., Karpouzis, C. {\&} Fotinea, S-E. (2004). «Developing an e-Learning platform for the Greek Sign Language». Lecture Notes in Computer Science 3118: 1107-1113. Springer.
\par
Efthimiou, E., Fotinea, S-E. {\&} Sapountzaki, G. (2006). «Processing linguistic data for GSL structure representation», Proc. of the Workshop on the Representation and Processing of Sign Languages: Lexicographic matters and didactic scenarios, Satellite Workshop to LREC-2006 Conference, May 28, pp. 49--54.
\par
ELAN annotator, Max Planck Institute for Psycholinguistics, available at: http://www.mpi.nl/tools/elan.html
\par
Fotinea, S-E., Efthimiou, E., Karpouzis, K. {\&} Caridakis, G. (2005). ``Dynamic GSL synthesis to support access to e-content'', Proc. of the 3rd International Conference on Universal Access in Human-Computer Interaction (UAHCI 2005), 22-27 July 2005, Las Vegas, Nevada, USA.
\par
HamNoSys Sign Language Notation System: www.sign-lang.uni-hamburg.de/projects/HamNoSys.html
\par
Karpouzis, K. Caridakis, G., Fotinea, S-E. {\&} Efthimiou, E. (2005). ``Educational Resources and Implementation of a Greek Sign Language Synthesis Architecture'', Computers and Education International Journal, Elsevier, in print, electronically available since Sept 05.
\par
Kraiss, K.-F. (Ed.), (2006). Advanced Man-Machine Interaction - Fundamentals and Implementation. Series: Signals and Communication Technology, Springer.
\par
Stokoe, W. 1978. Sign Language Structure (revised ed.). Silver Spring, MD: Linstok.}
}

@inproceedings{efthimiou:06010:sign-lang:lrec,
  author    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Sapountzaki, Galini},
  title     = {Processing Linguistic Data for {GSL} Structure Representation},
  pages     = {49--54},
  editor    = {Vettori, Chiara},
  booktitle = {Proceedings of the {LREC2006} 2nd Workshop on the Representation and Processing of Sign Languages: Lexicographic Matters and Didactic Scenarios},
  maintitle = {5th International Conference on Language Resources and Evaluation ({LREC} 2006)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Genoa, Italy},
  day       = {28},
  month     = may,
  year      = {2006},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/06010.html},
  abstract  = {The work presented here reports on incorporation of a core grammar of Greek Sign Language (GSL) into a Greek to GSL conversion tool. The output of conversion feeds a signing avatar, enabling dynamic sign synthesis. Efficient conversion is of significant importance in order to support access to e-content by the Greek deaf community, given that the conversion tool may well be integrated into various applications, which require linguistic knowledge. The converter is built upon standard principles of Machine Translation (MT) and matches Greek parsed input to equivalent GSL output. The transfer module makes use of NLP techniques to enrich linear sign concatenation with GSL-specific complex features uttered both manually and non-manually. GSL features are either checked against properties coded in a lexicon DB for base signs or they are generated by grammar rules. The GSL computational grammar is based on natural data analysis in order to capture the generative characteristics of the language. The conversion grammar of the transfer module, however, makes use of a number of heuristic solutions. This is implicated by the type of input for conversion, which derives from a statistical shallow parser, so that various semantic features have to be retrieved by mere grouping of lemmata. However, this type of input is directly connected with the requirement for fast processing of vast amounts of linguistic information.}
}

@inproceedings{sapountzaki:04004:sign-lang:lrec,
  author    = {Sapountzaki, Galini and Efthimiou, Eleni and Karpouzis, Costas and Kourbetis, Vassilis},
  title     = {Open-ended Resources in {Greek} {Sign} {Language}: Development of an e-Learning Platform},
  pages     = {13--19},
  editor    = {Streiter, Oliver and Vettori, Chiara},
  booktitle = {Proceedings of the {LREC2004} Workshop on the Representation and Processing of Sign Languages: From {SignWriting} to Image Processing. Information techniques and their implications for teaching, documentation and communication},
  maintitle = {4th International Conference on Language Resources and Evaluation ({LREC} 2004)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Lisbon, Portugal},
  day       = {30},
  month     = may,
  year      = {2004},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/04004.html},
  abstract  = {In this paper we present the creation of dynamic linguistic resources of Greek Sign Language (GSL). The resources will feed the development of an educational multitask platform within the SYNENNOESE project for the teaching of and in GSL. The platform combines avatar and animation technologies for the production of sign sequences/streams, exploiting digital linguistic resources of both lexicon and grammar of GSL. In SYNENNOESE, the input is written Greek text, which is then transformed into GSL and appears animated on screen. A syntactic parser decodes the structural patterns of written Greek and matches them into equivalent patterns in GSL, which are then signed by a virtual human. The adopted notation system for the lexical database is HamNoSys (Hamburg Notation System). For the implementation of the digital signer tool, the signer's synthetic movement follows MPEG-4 standard and frame H-Anim with the use of VRML language.}
}

@inproceedings{efthimiou:04010:sign-lang:lrec,
  author    = {Efthimiou, Eleni and Vacalopoulou, Anna and Fotinea, Stavroula-Evita and Steinhauer, Gregory},
  title     = {Multipurpose Design and Creation of {GSL} Dictionaries},
  pages     = {51--58},
  editor    = {Streiter, Oliver and Vettori, Chiara},
  booktitle = {Proceedings of the {LREC2004} Workshop on the Representation and Processing of Sign Languages: From {SignWriting} to Image Processing. Information techniques and their implications for teaching, documentation and communication},
  maintitle = {4th International Conference on Language Resources and Evaluation ({LREC} 2004)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Lisbon, Portugal},
  day       = {30},
  month     = may,
  year      = {2004},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/04010.html},
  abstract  = {In this paper we present the methodology of data collection and implementation of databases with the purpose to create extensive lexical and terminological resources for the Greek Sign Language (GSL). The focus is on issues of linguistic content validation, multipurpose design and reusability of resources, exemplified by the multimedia dictionary products of the projects NOEMA (1999- 2001) and PROKLISI (2002-2004). As far as data collection methodology, DB design and resources development are concerned, a clear distinction is made between general language lexical items and terms, since the creation of resources for the two types of data undergoes different methodological principles, lexeme formation and usage conditions. There is also reference to content and interface evaluation mechanisms, as well as to basic linguistic research carried out for the support of lexicographical work.}
}

