@inproceedings{jantunen:16006:sign-lang:lrec,
  author    = {Jantunen, Tommi and Pippuri, Outi and Wainio, Tuija and Puupponen, Anna and Laaksonen, Jorma},
  title     = {Annotated video corpus on {FinSL} with {Kinect} and computer-vision data},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2016} 7th Workshop on the Representation and Processing of Sign Languages: Corpus Mining},
  maintitle = {10th International Conference on Language Resources and Evaluation ({LREC} 2016)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Portoro{\v z}, Slovenia},
  day       = {28},
  month     = may,
  year      = {2016},
  pages     = {93--100},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/16006.html},
  abstract  = {This paper presents an annotated video corpus of Finnish Sign Language (FinSL) to which has been appended Kinect and computer-vision data. The video material consists of signed retellings of the stories Snowman and Frog, where are you?, elicited from 12 native FinSL signers in a dialogue setting. The recordings were carried out with 6 cameras directed toward the signers from different angles, and 6 signers were also recorded with one Kinect motion and depth sensing input device. All the material has been annotated in ELAN for signs, translations, grammar and prosody. To further facilitate research into FinSL prosody, computer-vision data describing the head movements and the aperture changes of the eyes and mouth of all the signers has been added to the corpus. The total duration of the material is 45 minutes and that part of it that is permitted by research consents is available for research purposes via the LAT online service of the Language Bank of Finland. The paper briefly demonstrates the linguistic use of the corpus.}
}

@inproceedings{luzardo:14021:sign-lang:lrec,
  author    = {Luzardo, Marcos and Viitaniemi, Ville and Karppa, Matti and Laaksonen, Jorma and Jantunen, Tommi},
  title     = {Estimating head pose and state of facial elements for sign language video},
  editor    = {Crasborn, Onno and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2014} 6th Workshop on the Representation and Processing of Sign Languages: Beyond the Manual Channel},
  maintitle = {9th International Conference on Language Resources and Evaluation ({LREC} 2014)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Reykjavik, Iceland},
  day       = {31},
  month     = may,
  year      = {2014},
  pages     = {105--112},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/14021.html},
  abstract  = {In this work we present methods for automatic estimation of non-manual gestures in sign language videos. More specifically, we study the estimation of three head pose angles (yaw, pitch, roll) and the state of facial elements (eyebrow position, eye openness, and mouth state). This kind of estimation facilitates automatic annotation of sign language videos and promotes more prolific production of annotated sign language corpora. The proposed estimation methods are incorporated in our publicly available SLMotion software package for sign language video processing and analysis. Our method implements a model-based approach: for head pose we employ facial landmarks and skins masks as features, and estimate yaw and pitch angles by regression and roll using a geometric measure; for the state of facial elements we use the geometric information of facial elements of the face as features, and estimate quantized states using a classification algorithm. We evaluate the results of our proposed methods in quantitative and qualitative experiments.}
}

@inproceedings{puupponen:14009:sign-lang:lrec,
  author    = {Puupponen, Anna and Jantunen, Tommi and Takkinen, Ritva and Wainio, Tuija and Pippuri, Outi},
  title     = {Taking non-manuality into account in collecting and analyzing {Finnish} {Sign} {Language} video data},
  editor    = {Crasborn, Onno and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2014} 6th Workshop on the Representation and Processing of Sign Languages: Beyond the Manual Channel},
  maintitle = {9th International Conference on Language Resources and Evaluation ({LREC} 2014)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Reykjavik, Iceland},
  day       = {31},
  month     = may,
  year      = {2014},
  pages     = {143--148},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/14009.html},
  abstract  = {This paper describes our attention to research into non-manuals when collecting a large body of video data in Finnish Sign Language (FinSL). We will first of all give an overview of the data-collecting process and of the choices that we made in order for the data to be usable in research into non-manual activity (e.g. camera arrangement, video compression, and Kinect technology). Secondly, the paper will outline our plans for the analysis of the non-manual features of this data. We discuss the technological methods we plan to use in our investigation of non-manual features (i.e. computer-vision based methods) and give examples of the type of results that this kind of approach can provide us with.}
}

@inproceedings{karppa-etal-2014-slmotion:lrec,
  author    = {Karppa, Matti and Viitaniemi, Ville and Luzardo, Marcos and Laaksonen, Jorma and Jantunen, Tommi},
  title     = {{SLMotion} - An extensible sign language oriented video analysis tool},
  pages     = {1886--1891},
  editor    = {Calzolari, Nicoletta and Choukri, Khalid and Declerck, Thierry and Loftsson, Hrafn and Maegaard, Bente and Mariani, Joseph and Moreno, Asuncion and Odijk, Jan and Piperidis, Stelios},
  booktitle = {9th International Conference on Language Resources and Evaluation ({LREC} 2014)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Reykjavik, Iceland},
  day       = {26--31},
  month     = may,
  year      = {2014},
  isbn      = {978-2-9517408-8-4},
  language  = {english},
  url       = {https://aclanthology.org/L14-1208},
  abstract  = {We present a software toolkit called SLMotion which provides a framework for automatic and semiautomatic analysis, feature extraction and annotation of individual sign language videos, and which can easily be adapted to batch processing of entire sign language corpora. The program follows a modular design, and exposes a Numpy-compatible Python application programming interface that makes it easy and convenient to extend its functionality through scripting. The program includes support for exporting the annotations in ELAN format. The program is released as free software, and is available for GNU/Linux and MacOS platforms.}
}

@inproceedings{viitaniemi-etal-2014-pot:lrec,
  author    = {Viitaniemi, Ville and Jantunen, Tommi and Savolainen, Leena and Karppa, Matti and Laaksonen, Jorma},
  title     = {{S-pot} - a benchmark in spotting signs within continuous signing},
  pages     = {1892--1897},
  editor    = {Calzolari, Nicoletta and Choukri, Khalid and Declerck, Thierry and Loftsson, Hrafn and Maegaard, Bente and Mariani, Joseph and Moreno, Asuncion and Odijk, Jan and Piperidis, Stelios},
  booktitle = {9th International Conference on Language Resources and Evaluation ({LREC} 2014)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Reykjavik, Iceland},
  day       = {26--31},
  month     = may,
  year      = {2014},
  isbn      = {978-2-9517408-8-4},
  language  = {english},
  url       = {https://aclanthology.org/L14-1377},
  abstract  = {In this paper we present S-pot, a benchmark setting for evaluating the performance of automatic spotting of signs in continuous sign language videos. The benchmark includes 5539 video files of Finnish Sign Language, ground truth sign spotting results, a tool for assessing the spottings against the ground truth, and a repository for storing information on the results. In addition we will make our sign detection system and results made with it publicly available as a baseline for comparison and further developments.}
}

