@inproceedings{salonen:20004:sign-lang:lrec,
  author    = {Salonen, Juhana and Kronqvist, Antti and Jantunen, Tommi},
  title     = {The Corpus of {Finnish} {Sign} {Language}},
  pages     = {197--202},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2020} 9th Workshop on the Representation and Processing of Sign Languages: Sign Language Resources in the Service of the Language Community, Technological Challenges and Application Perspectives},
  maintitle = {12th International Conference on Language Resources and Evaluation ({LREC} 2020)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Marseille, France},
  day       = {16},
  month     = may,
  year      = {2020},
  isbn      = {979-10-95546-54-2},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/20004.html},
  abstract  = {This paper presents the Corpus of Finnish Sign Language (Corpus FinSL), a structured and annotated collection of Finnish Sign Language (FinSL) videos published in May 2019 in FIN-CLARIN's Language Bank of Finland. The corpus is divided into two subcorpora, one of which comprises elicited narratives and the other conversations. All of the FinSL material has been annotated using ELAN and the lexical database Finnish Signbank. Basic annotation includes ID-glosses and translations into Finnish. The anonymized metadata of Corpus FinSL has been organized in accordance with the IMDI standard. Altogether, Corpus FinSL contains nearly 15 hours of video material from 21 FinSL users. Corpus FinSL has already been exploited in FinSL research and teaching, and it is predicted that in the future it will have a significant positive impact on these fields as well as on the status of the sign language community in Finland.}
}

@inproceedings{jantunen-etal-2020-comes:lrec,
  author    = {Jantunen, Tommi and Puupponen, Anna and Burger, Birgitta},
  title     = {What Comes First: Combining Motion Capture and Eye Tracking Data to Study the Order of Articulators in Constructed Action in Sign Language Narratives},
  pages     = {6003--6007},
  editor    = {Calzolari, Nicoletta and B{\'e}chet, Fr{\'e}d{\'e}ric and Blache, Philippe and Choukri, Khalid and Cieri, Christopher and Declerck, Thierry and Goggi, Sara and Isahara, Hitoshi and Maegaard, Bente and Mariani, Joseph and Mazo, H{\'e}l{\`e}ne and Moreno, Asuncion and Odijk, Jan and Piperidis, Stelios},
  booktitle = {12th International Conference on Language Resources and Evaluation ({LREC} 2020)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Marseille, France},
  day       = {11--16},
  month     = may,
  year      = {2020},
  isbn      = {979-10-95546-34-4},
  language  = {english},
  url       = {https://aclanthology.org/2020.lrec-1.735},
  abstract  = {We use synchronized 120 fps motion capture and 50 fps eye tracking data from two native signers to investigate the temporal order in which the dominant hand, the head, the chest and the eyes start producing overt constructed action from regular narration in seven short Finnish Sign Language stories. From the material, we derive a sample of ten instances of regular narration to overt constructed action transfers in ELAN which we then further process and analyze in Matlab. The results indicate that the temporal order of articulators shows both contextual and individual variation but that there are also repeated patterns which are similar across all the analyzed sequences and signers. Most notably, when the discourse strategy changes from regular narration to overt constructed action, the head and the eyes tend to take the leading role, and the chest and the dominant hand tend to start acting last. Consequences of the findings are discussed.}
}

@inproceedings{takkinen:18038:sign-lang:lrec,
  author    = {Takkinen, Ritva and Ker{\"a}nen, Jarkko and Salonen, Juhana},
  title     = {Depicting Signs and Different Text Genres: Preliminary Observations in the Corpus of {Finnish} {Sign} {Language}},
  pages     = {189--194},
  editor    = {Bono, Mayumi and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Kristoffersen, Jette and Mesch, Johanna and Osugi, Yutaka},
  booktitle = {Proceedings of the {LREC2018} 8th Workshop on the Representation and Processing of Sign Languages: Involving the Language Community},
  maintitle = {11th International Conference on Language Resources and Evaluation ({LREC} 2018)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Miyazaki, Japan},
  day       = {12},
  month     = may,
  year      = {2018},
  isbn      = {979-10-95546-01-6},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/18038.html},
  abstract  = {In this article we first discuss the different kinds of signs occurring in sign languages and then concentrate on depicting signs, especially on their classification in Finnish Sign Language. Then we briefly describe the corpora of Finland's sign languages (CFINSL). The actual study concerns the occurrences of depicting signs in CFINSL in different text genres, introductions, narratives and free discussions. Depicting signs occurred most frequently in narratives, second most frequently in discussions and least frequently in introductions.  The most frequent depicting signs in all genres were those that depicted the whole entity moving or being located. The second most frequent were those signs that expressed the handling of entities. The least frequent depicting signs were those with size- and shape-tracing handshapes. The proportion of depicting signs of all the signs in each genre was 17.9{\%} in the narratives, 2.9{\%} in the discussions and 2.2{\%} in the introductions. In order to deepen the analysis, depicting signs will have to be investigated from the perspective of movement types and the use of one or two hands.}
}

@inproceedings{cassidy-etal-2018-signbank:lrec,
  author    = {Cassidy, Steve and Crasborn, Onno and Nieminen, Henri and Stoop, Wessel and Hulsbosch, Micha and Even, Susan and Komen, Erwin and Johnston, Trevor},
  title     = {{S}ignbank: Software to Support Web Based Dictionaries of Sign Language},
  pages     = {2359--2364},
  editor    = {Calzolari, Nicoletta and Choukri, Khalid and Cieri, Christopher and Declerck, Thierry and Goggi, Sara and Hasida, Koiti and Isahara, Hitoshi and Maegaard, Bente and Mariani, Joseph and Mazo, H{\'e}l{\`e}ne and Moreno, Asuncion and Odijk, Jan and Piperidis, Stelios and Tokunaga, Takenobu},
  booktitle = {11th International Conference on Language Resources and Evaluation ({LREC} 2018)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Miyazaki, Japan},
  day       = {7--12},
  month     = may,
  year      = {2018},
  isbn      = {979-10-95546-00-9},
  language  = {english},
  url       = {https://aclanthology.org/L18-1374}
}

@inproceedings{jantunen:16006:sign-lang:lrec,
  author    = {Jantunen, Tommi and Pippuri, Outi and Wainio, Tuija and Puupponen, Anna and Laaksonen, Jorma},
  title     = {Annotated video corpus on {FinSL} with {Kinect} and computer-vision data},
  pages     = {93--100},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2016} 7th Workshop on the Representation and Processing of Sign Languages: Corpus Mining},
  maintitle = {10th International Conference on Language Resources and Evaluation ({LREC} 2016)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Portoro{\v z}, Slovenia},
  day       = {28},
  month     = may,
  year      = {2016},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/16006.html},
  abstract  = {This paper presents an annotated video corpus of Finnish Sign Language (FinSL) to which has been appended Kinect and computer-vision data. The video material consists of signed retellings of the stories Snowman and Frog, where are you?, elicited from 12 native FinSL signers in a dialogue setting. The recordings were carried out with 6 cameras directed toward the signers from different angles, and 6 signers were also recorded with one Kinect motion and depth sensing input device. All the material has been annotated in ELAN for signs, translations, grammar and prosody. To further facilitate research into FinSL prosody, computer-vision data describing the head movements and the aperture changes of the eyes and mouth of all the signers has been added to the corpus. The total duration of the material is 45 minutes and that part of it that is permitted by research consents is available for research purposes via the LAT online service of the Language Bank of Finland. The paper briefly demonstrates the linguistic use of the corpus.}
}

@inproceedings{keranen:16016:sign-lang:lrec,
  author    = {Ker{\"a}nen, Jarkko and Syrj{\"a}l{\"a}, Henna and Salonen, Juhana and Takkinen, Ritva},
  title     = {The Usability of the Annotation},
  pages     = {111--116},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2016} 7th Workshop on the Representation and Processing of Sign Languages: Corpus Mining},
  maintitle = {10th International Conference on Language Resources and Evaluation ({LREC} 2016)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Portoro{\v z}, Slovenia},
  day       = {28},
  month     = may,
  year      = {2016},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/16016.html},
  abstract  = {Several corpus projects for sign languages have tried to establish conventions and standards for the annotation of signed data. When discussing corpora, it is necessary to develop a way of considering and evaluating holistically the features and problems of annotation. This paper aims to develop a conceptual framework for the evaluation of the usability of annotations. The purpose of the framework is not to give conventions for annotating but to offer tools for the evaluation of the usability of the annotation, in order to make annotations more usable and make it possible to justify and explain decisions about annotation conventions. Based on our experience of annotation in the corpus project of Finland's Sign Languages (CFINSL), we have developed six principles for the evaluation of annotation. In this article, using these six principles, we evaluate the usability of the annotations in CFINSL and other corpus projects. The principles have offered benefits in CFINSL: we are able to evaluate our annotations more systematically and holistically than ever before. Our work can be seen as an effort to bring a framework of usability to corpus work.}
}

@inproceedings{salonen:16017:sign-lang:lrec,
  author    = {Salonen, Juhana and Takkinen, Ritva and Puupponen, Anna and Nieminen, Henri and Pippuri, Outi},
  title     = {Creating Corpora of {Finland}'s Sign Languages},
  pages     = {179--184},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2016} 7th Workshop on the Representation and Processing of Sign Languages: Corpus Mining},
  maintitle = {10th International Conference on Language Resources and Evaluation ({LREC} 2016)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Portoro{\v z}, Slovenia},
  day       = {28},
  month     = may,
  year      = {2016},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/16017.html},
  abstract  = {This paper discusses the process of creating corpora of the sign languages used in Finland, Finnish Sign Language (FinSL) and Finland-Swedish Sign Language (FinSSL). It describes the process of getting informants and data, editing and storing the data, the general principles of annotation, and the creation of a web-based lexical database, the FinSL Signbank, developed on the basis of the NGT Signbank, which is a branch of the Auslan Signbank. The corpus project of Finland's Sign Languages (CFINSL) started in 2014 at the Sign Language Centre of the University of Jyv{\"a}skyl{\"a}. Its aim is to collect conversations and narrations from 80 FinSL users and 20 FinSSL users who are living in different parts of Finland. The participants are filmed in signing sessions led by a native signer in the Audio-visual Research Centre at the University of Jyv{\"a}skyl{\"a}. The edited material is stored in the IDA storage service produced by the CSC -- IT Center for Science, and the metadata will be saved into CMDI metadata. Every informant is asked to sign a consent form where they state for what kinds of purposes their signing can be used. The corpus data are annotated using the ELAN tool. At the moment, annotations are created on the levels of glosses and translation.}
}

@inproceedings{luzardo:14021:sign-lang:lrec,
  author    = {Luzardo, Marcos and Viitaniemi, Ville and Karppa, Matti and Laaksonen, Jorma and Jantunen, Tommi},
  title     = {Estimating head pose and state of facial elements for sign language video},
  pages     = {105--112},
  editor    = {Crasborn, Onno and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2014} 6th Workshop on the Representation and Processing of Sign Languages: Beyond the Manual Channel},
  maintitle = {9th International Conference on Language Resources and Evaluation ({LREC} 2014)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Reykjavik, Iceland},
  day       = {31},
  month     = may,
  year      = {2014},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/14021.html},
  abstract  = {In this work we present methods for automatic estimation of non-manual gestures in sign language videos. More specifically, we study the estimation of three head pose angles (yaw, pitch, roll) and the state of facial elements (eyebrow position, eye openness, and mouth state). This kind of estimation facilitates automatic annotation of sign language videos and promotes more prolific production of annotated sign language corpora. The proposed estimation methods are incorporated in our publicly available SLMotion software package for sign language video processing and analysis. Our method implements a model-based approach: for head pose we employ facial landmarks and skins masks as features, and estimate yaw and pitch angles by regression and roll using a geometric measure; for the state of facial elements we use the geometric information of facial elements of the face as features, and estimate quantized states using a classification algorithm. We evaluate the results of our proposed methods in quantitative and qualitative experiments.}
}

@inproceedings{puupponen:14009:sign-lang:lrec,
  author    = {Puupponen, Anna and Jantunen, Tommi and Takkinen, Ritva and Wainio, Tuija and Pippuri, Outi},
  title     = {Taking non-manuality into account in collecting and analyzing {Finnish} {Sign} {Language} video data},
  pages     = {143--148},
  editor    = {Crasborn, Onno and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2014} 6th Workshop on the Representation and Processing of Sign Languages: Beyond the Manual Channel},
  maintitle = {9th International Conference on Language Resources and Evaluation ({LREC} 2014)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Reykjavik, Iceland},
  day       = {31},
  month     = may,
  year      = {2014},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/14009.html},
  abstract  = {This paper describes our attention to research into non-manuals when collecting a large body of video data in Finnish Sign Language (FinSL). We will first of all give an overview of the data-collecting process and of the choices that we made in order for the data to be usable in research into non-manual activity (e.g. camera arrangement, video compression, and Kinect technology). Secondly, the paper will outline our plans for the analysis of the non-manual features of this data. We discuss the technological methods we plan to use in our investigation of non-manual features (i.e. computer-vision based methods) and give examples of the type of results that this kind of approach can provide us with.}
}

@inproceedings{raino:14025:sign-lang:lrec,
  author    = {Rain{\`o}, P{\"a}ivi and Huovila, Marja and Seilola, Irja},
  title     = {Visualizing the spatial working memory in mathematical discourse in {Finnish} {Sign} {Language}},
  pages     = {149--152},
  editor    = {Crasborn, Onno and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2014} 6th Workshop on the Representation and Processing of Sign Languages: Beyond the Manual Channel},
  maintitle = {9th International Conference on Language Resources and Evaluation ({LREC} 2014)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Reykjavik, Iceland},
  day       = {31},
  month     = may,
  year      = {2014},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/14025.html},
  abstract  = {In this paper, we will present problems that arise when trying to render legible signed texts containing mathematical discourse in Finnish Sign Language. Calculation processes in sign language are carried out using fingers, both hands and the three-dimensional neutral space in front of the signer. Specific hand movements and especially the space in front of the body function like a working memory where fingers, hands and space are used as buoys in a regular and syntactically well-defined manner when retrieving, for example, subtotals. As these calculation processes are performed in fragments of seconds with both hands that act individually, simultaniousity and multidimensionality create problems for traditional coding and notation systems used in sign language research. Conversion to glosses or translations to spoken or written text (e.g. in Finnish or English) has proven challenging and what is most important, none of these ways gives justice to this unique concept mapping and mathematical thinking in signed language.  Our proposal is an intermediary solution, a simple numeric animation while looking for a more developed, possibly a three-dimensional representation to visualise the calculation processes in signed languages.}
}

@inproceedings{karppa-etal-2014-slmotion:lrec,
  author    = {Karppa, Matti and Viitaniemi, Ville and Luzardo, Marcos and Laaksonen, Jorma and Jantunen, Tommi},
  title     = {{SLM}otion - An extensible sign language oriented video analysis tool},
  pages     = {1886--1891},
  editor    = {Calzolari, Nicoletta and Choukri, Khalid and Declerck, Thierry and Loftsson, Hrafn and Maegaard, Bente and Mariani, Joseph and Moreno, Asuncion and Odijk, Jan and Piperidis, Stelios},
  booktitle = {9th International Conference on Language Resources and Evaluation ({LREC} 2014)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Reykjavik, Iceland},
  day       = {26--31},
  month     = may,
  year      = {2014},
  isbn      = {978-2-9517408-8-4},
  language  = {english},
  url       = {https://aclanthology.org/L14-1208},
  abstract  = {We present a software toolkit called SLMotion which provides a framework for automatic and semiautomatic analysis, feature extraction and annotation of individual sign language videos, and which can easily be adapted to batch processing of entire sign language corpora. The program follows a modular design, and exposes a Numpy-compatible Python application programming interface that makes it easy and convenient to extend its functionality through scripting. The program includes support for exporting the annotations in ELAN format. The program is released as free software, and is available for GNU/Linux and MacOS platforms.}
}

@inproceedings{viitaniemi-etal-2014-pot:lrec,
  author    = {Viitaniemi, Ville and Jantunen, Tommi and Savolainen, Leena and Karppa, Matti and Laaksonen, Jorma},
  title     = {{S}-pot - a benchmark in spotting signs within continuous signing},
  pages     = {1892--1897},
  editor    = {Calzolari, Nicoletta and Choukri, Khalid and Declerck, Thierry and Loftsson, Hrafn and Maegaard, Bente and Mariani, Joseph and Moreno, Asuncion and Odijk, Jan and Piperidis, Stelios},
  booktitle = {9th International Conference on Language Resources and Evaluation ({LREC} 2014)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Reykjavik, Iceland},
  day       = {26--31},
  month     = may,
  year      = {2014},
  isbn      = {978-2-9517408-8-4},
  language  = {english},
  url       = {https://aclanthology.org/L14-1377},
  abstract  = {In this paper we present S-pot, a benchmark setting for evaluating the performance of automatic spotting of signs in continuous sign language videos. The benchmark includes 5539 video files of Finnish Sign Language, ground truth sign spotting results, a tool for assessing the spottings against the ground truth, and a repository for storing information on the results. In addition we will make our sign detection system and results made with it publicly available as a baseline for comparison and further developments.}
}

@inproceedings{jantunen:12003:sign-lang:lrec,
  author    = {Jantunen, Tommi and Burger, Birgitta and De Weerdt, Danny and Seilola, Irja and Wainio, Tuija},
  title     = {Experiences collecting motion capture data on continuous signing},
  pages     = {75--82},
  editor    = {Crasborn, Onno and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2012} 5th Workshop on the Representation and Processing of Sign Languages: Interactions between Corpus and Lexicon},
  maintitle = {8th International Conference on Language Resources and Evaluation ({LREC} 2012)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Istanbul, Turkey},
  day       = {27},
  month     = may,
  year      = {2012},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/12003.html},
  abstract  = {This paper describes some of the experiences the authors have had collecting continuous motion capture data on Finnish Sign Language in the motion capture laboratory of the Department of Music at the University of Jyv{\"a}skyl{\"a}, Finland. Monologue and dialogue data have been recorded with an eight-camera optical motion capture system by tracking, at a frame rate of 120 Hz, the three-dimensional locations of small ball-shaped reflective markers attached to the signer's hands, arms, head, and torso. The main question from the point of view of data recording concerns marker placement, while the main themes discussed concerning data processing include gap-filling (i.e. the process of interpolating the information of missing frames on the basis of surrounding frames) and the importing of data into ELAN for subsequent segmentation (e.g. into signs and sentences). The paper will also demonstrate how the authors have analyzed the continuous motion capture data from the kinematic perspective.}
}

@inproceedings{karppa-etal-2012-comparing:lrec,
  author    = {Karppa, Matti and Jantunen, Tommi and Viitaniemi, Ville and Laaksonen, Jorma and Burger, Birgitta and De Weerdt, Danny},
  title     = {Comparing computer vision analysis of signed language video with motion capture recordings},
  pages     = {2421--2425},
  editor    = {Calzolari, Nicoletta and Choukri, Khalid and Declerck, Thierry and Do{\u g}an, Mehmet U{\u g}ur and Maegaard, Bente and Mariani, Joseph and Moreno, Asuncion and Odijk, Jan and Piperidis, Stelios},
  booktitle = {8th International Conference on Language Resources and Evaluation ({LREC} 2012)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Istanbul, Turkey},
  day       = {21--27},
  month     = may,
  year      = {2012},
  isbn      = {978-2-9517408-7-7},
  language  = {english},
  url       = {https://aclanthology.org/L12-1152},
  abstract  = {We consider a non-intrusive computer-vision method for measuring the motion of a person performing natural signing in video recordings. The quality and usefulness of the method is compared to a traditional marker-based motion capture set-up. The accuracy of descriptors extracted from video footage is assessed qualitatively in the context of sign language analysis by examining if the shape of the curves produced by the different means resemble one another in sequences where the shape could be a source of valuable linguistic information. Then, quantitative comparison is performed first by correlating the computer-vision-based descriptors with the variables gathered with the motion capture equipment. Finally, multivariate linear and non-linear regression methods are applied for predicting the motion capture variables based on combinations of computer vision descriptors. The results show that even the simple computer vision method evaluated in this paper can produce promisingly good results for assisting researchers working on sign language analysis.}
}

@inproceedings{jantunen:10033:sign-lang:lrec,
  author    = {Jantunen, Tommi},
  title     = {A comparison of two linguistic sign identification methods},
  pages     = {129--132},
  editor    = {Dreuw, Philippe and Efthimiou, Eleni and Hanke, Thomas and Johnston, Trevor and Mart{\'i}nez Ruiz, Gregorio and Schembri, Adam},
  booktitle = {Proceedings of the {LREC2010} 4th Workshop on the Representation and Processing of Sign Languages: Corpora and Sign Language Technologies},
  maintitle = {7th International Conference on Language Resources and Evaluation ({LREC} 2010)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Valletta, Malta},
  day       = {22--23},
  month     = may,
  year      = {2010},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/10033.html},
  abstract  = {This paper employs two linguistic sign identification methods -- a manual one focusing on the dominant hand and a nonmanual one focusing on the mouth -- and compares the kinds of sequences they classify as signs from a video containing continuous signing. The study is motivated by two projects, of which one investigates the ontological nature of the sign and the other aims to develop an automatic sign recognition tool. In the study, both methods were able to associate all the free semantic-functional elements in the data with signs. However, in the nonmanual method the overall number of identified signs was lower because the stretching of the mouth movement of the semantic element over the following pointing meant that the combinations of semantic elements and pointings were counted as single signs. Moreover, signs identified by the nonmanual method were longer than those identified by the manual method. The results from the nonmanual method agree with the claim that phrase internal sequences of semantic elements and pointings are lexical head plus clitic combinations. Consequently, it is suggested that pointings in such contexts do not need to be independently detected by the automatic sign recognition tool.}
}

@inproceedings{wheatley:10040:sign-lang:lrec,
  author    = {Wheatley, Mark and Pabsch, Annika},
  title     = {Sign Language in {Europe}},
  pages     = {251--254},
  editor    = {Dreuw, Philippe and Efthimiou, Eleni and Hanke, Thomas and Johnston, Trevor and Mart{\'i}nez Ruiz, Gregorio and Schembri, Adam},
  booktitle = {Proceedings of the {LREC2010} 4th Workshop on the Representation and Processing of Sign Languages: Corpora and Sign Language Technologies},
  maintitle = {7th International Conference on Language Resources and Evaluation ({LREC} 2010)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Valletta, Malta},
  day       = {22--23},
  month     = may,
  year      = {2010},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/10040.html},
  abstract  = {Sign languages across the globe are fully-fledged languages that differ between Deaf communities throughout Europe and the world. A recent survey by the European Union of the Deaf gathered that there are about 650,000 sign language users in the EU for whom using a sign language is the only way to communicate and have equal access. It is therefore crucial to legally recognise national sign languages. Being treated equally without prejudice also with regards to language is a basic Human Right as postulated in the UN Declaration of Human Rights. Other rights, such as the right to education and a fair trial can only be guaranteed if sign languages are recognised as distinct languages in order to provide sign language interpreters and education in sign language. At EU level, a number of documents and Resolutions have been adopted but so far only three European countries have recognised sign language at constitutional level: Austria, Finland and Portugal. Other countries, such as Hungary and Spain have taken other legal measures to protect their sign languages. Although Europe's sign languages enjoy some recognition, sign language users across Europe are still lacking legal protection at the same level as other minorities.}
}

@inproceedings{koskela:08002:sign-lang:lrec,
  author    = {Koskela, Markus and Laaksonen, Jorma and Jantunen, Tommi and Takkinen, Ritva and Rain{\`o}, P{\"a}ivi and Raike, Antti},
  title     = {Content-Based Video Analysis and Access for {Finnish} {Sign} {Language} -- A Multidisciplinary Research Project},
  pages     = {101--104},
  editor    = {Crasborn, Onno and Efthimiou, Eleni and Hanke, Thomas and Thoutenhoofd, Ernst D. and Zwitserlood, Inge},
  booktitle = {Proceedings of the {LREC2008} 3rd Workshop on the Representation and Processing of Sign Languages: Construction and Exploitation of Sign Language Corpora},
  maintitle = {6th International Conference on Language Resources and Evaluation ({LREC} 2008)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Marrakech, Morocco},
  day       = {1},
  month     = jun,
  year      = {2008},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/08002.html},
  abstract  = {In this research project, computer vision techniques for recognition and analysis of gestures and facial expressions from video will be developed and the techniques will be applied for processing of sign language. This is a collaborative project between four partners: Helsinki University of Technology, University of Art and Design, University of Jyv{\"a}skyl{\"a}, and the Finnish Association of the Deaf. It has several objectives of which four are presented in more detail in this poster.
\par
The first objective is to develop novel methods for content-based processing and analysis of sign language video recorded using a single camera. The PicSOM retrieval system framework developed by the Helsinki University of Technology regarding content-based analysis of multimedia data will be adapted to continuous signing to facilitate automatic and semi-automatic analysis of sign language videos.
\par
The second objective of the project is to develop a computer system which can both (i) automatically indicate meaningful signs and other gesture-like sequences from a video signal which contains natural sign language data, and (ii) disregard parts of the signal which do not count as such sequences. In other words, the goal is to develop an automatized mechanism which can identify sign and gesture boundaries and indicate, from the video, the sequences that correspond to signs and gestures. The system is not expected to be able to tell the meanings of these sequences.
\par
An automatic segmentation of recorded continuous-signing sign language is an important first step in the automatic processing of sign language videos and online applications. It is our hypothesis that the temporal boundaries of different sign gestures can be detected and signs and non-signs (intersign transitions, other movements) can be classified using a combination of a hand motion detector, still image multimodal analysis, facial expression analysis and other non-manual signal recognition. The PicSOM system inherently supports such fusion of different features.
\par
The third objective is linked to generating an example-based corpus for FinSL. There exist increasing amounts of recorded video data of the language, but almost no means for utilizing it efficiently due to missing indexing and lack of methods for content-based access. The studied methods could facilitate a leap forward in founding the corpus.
\par
The fourth objective is a feasibility study for the implementation of mobile video access to sign language dictionaries and corpora. Currently an existing dictionary can be searched by giving a rough description of the location, motion and handform of the sign. The automatic content-based analysis methods could be applied to online mobile phone videos, thus enabling sign language access to dictionaries and corpora.}
}

