@inproceedings{efthimiou:12025:sign-lang:lrec,
  author    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Glauert, John and Bowden, Richard and Braffort, Annelies and Collet, Christophe and Maragos, Petros and Lefebvre-Albaret, Fran{\c c}ois},
  title     = {Sign Language technologies and resources of the {Dicta-Sign} project},
  pages     = {37--44},
  editor    = {Crasborn, Onno and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2012} 5th Workshop on the Representation and Processing of Sign Languages: Interactions between Corpus and Lexicon},
  maintitle = {8th International Conference on Language Resources and Evaluation ({LREC} 2012)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Istanbul, Turkey},
  day       = {27},
  month     = may,
  year      = {2012},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/12025.html},
  abstract  = {Here we present the outcomes of the Dicta-Sign FP7-ICT project. Dicta-Sign researched ways to enable communication between Deaf individuals through the development of human-computer interfaces (HCI) for Deaf users, by means of Sign Language. It has researched and developed recognition and synthesis engines for sign languages (SLs) that have brought sign recognition and generation technologies significantly closer to authentic signing. In this context, Dicta-Sign has developed several technologies demonstrated via a sign-language-aware Web 2.0 platform, combining work from the fields of sign language recognition, sign language animation via avatars, and the development of sign language resources and language models, with the goal of allowing Deaf users to make, edit, and review avatar-based sign language contributions online, similar to the way people nowadays make text-based contributions on the Web.}
}

@inproceedings{lefebvrealbaret:08007:sign-lang:lrec,
  author    = {Lefebvre-Albaret, Fran{\c c}ois and Gianni, Fr{\'e}d{\'e}rick and Dalle, Patrice},
  title     = {Toward a computer-aided sign segmentation},
  pages     = {123--128},
  editor    = {Crasborn, Onno and Efthimiou, Eleni and Hanke, Thomas and Thoutenhoofd, Ernst D. and Zwitserlood, Inge},
  booktitle = {Proceedings of the {LREC2008} 3rd Workshop on the Representation and Processing of Sign Languages: Construction and Exploitation of Sign Language Corpora},
  maintitle = {6th International Conference on Language Resources and Evaluation ({LREC} 2008)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Marrakech, Morocco},
  day       = {1},
  month     = jun,
  year      = {2008},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/08007.html},
  abstract  = {Processing sentences of a sign language corpus requires a first step of temporal segmentation, which is long and tedious. To carry out this segmentation more quickly, we propose an innovative method of computer-aided segmentation. This method processes the motions of the non-dominant and dominant hands during sign realisation. The video processing is applied in four steps. The first one consists in tracking the hands in a video sequence using particle filtering. Then, in a second step, an operator watches the video sequence and indicates, for each sign, a time stamp during the sign's realisation. Using this information and the trajectories of each hand, our method finds the beginning and the end of each sign in a third step. At the end, the operator can, if necessary, apply corrections and validate the segmentation.
\par
This article explains the different steps, from the computation of the 2D positions of the head and hands to the computer-aided determination of the temporal segmentation of the signs. The segmentation exploits a model of French Sign Language and focuses especially on the characteristics of manual sign movements. Our method detects several dynamic properties in the video, such as the relative movement of the hands (symmetries, static hands) and the movement primitives (single or double repetition, uniform or accelerated straight movement). We also detect the time intervals between two consecutive signs. These transitions tend to be economical with respect to the energy needed to perform them: a movement with a complex realisation will therefore contain a sign. These elements are then combined to determine the most plausible temporal segmentation of the signed sentences. The result can be represented as a succession of sign and transition segments.
\par
Other observations can be taken into account to obtain the temporal segmentation, such as the determination of the 2D positions of the elbows, the characterisation of hand configurations, and the measurement of head orientation. We describe how these elements could be used to improve the reliability of the segmentation.
\par
The proposed method is based on motion analysis and does not use any knowledge of the words used in the processed sentences. Using the characteristics shared by the majority of signs in French Sign Language, it is possible to detect not only standard signs but also other manual iconic signs.
\par
Our segmentation results are finally compared with a traditional manual segmentation produced with the annotation software AnColin. This comparison reveals several possible sources of error. We focus on the problems of granularity and precision of the segmentation. We also discuss other qualitative issues, such as the criteria for detecting the start and end of signs. The protocol for evaluating a temporal segmentation is also addressed. Finally, we raise several problems that remain to be overcome in order to achieve fully automatic segmentation.}
}

@inproceedings{lefebvre-albaret-dalle-2010-video:lrec,
  author    = {Lefebvre-Albaret, Fran{\c c}ois and Dalle, Patrice},
  title     = {Video Retrieval in Sign Language Videos: How to Model and Compare Signs?},
  pages     = {3049--3054},
  editor    = {Calzolari, Nicoletta and Choukri, Khalid and Maegaard, Bente and Mariani, Joseph and Odijk, Jan and Piperidis, Stelios and Rosner, Mike and Tapias, Daniel},
  booktitle = {7th International Conference on Language Resources and Evaluation ({LREC} 2010)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Valletta, Malta},
  day       = {17--23},
  month     = may,
  year      = {2010},
  isbn      = {978-2-9517408-6-0},
  language  = {english},
  url       = {https://aclanthology.org/L10-1116},
  abstract  = {This paper deals with the problem of finding sign occurrences in a sign language (SL) video. It begins with an analysis of sign models and the way they can take sign variability into account. Then, we review the most popular techniques dedicated to automatic sign language processing, and we focus on their adaptation to model sign variability. We present a new method to provide a parametric description of a sign as a set of continuous and discrete parameters. Signs are classified according to their categories (ballistic movements, circles, ...), the symmetry between the hand movements, and the absolute and relative locations of the hands. Membership grades for sign categories and continuous parameter comparisons can be combined to estimate the similarity between two signs. We set out our system and evaluate how much time can be saved when looking for a sign in a French Sign Language video. For now, our formalism uses only 2D hand locations; we finally discuss how to integrate other parameters, such as hand shape or facial expression, into our framework.}
}
