The automatic translation of sign language videos into transcribed texts is rarely approached in its entirety, as it requires finely modeling the grammatical mechanisms that govern these languages. The presented work is a first step towards the interpretation of French sign language (LSF) by specifically targeting iconicity and spatial referencing. This paper describes the LSF-SHELVES corpus as well as the original technology that was designed and implemented to collect it. Our goal is to use deep learning methods to circumvent the use of models in spatial referencing recognition. In order to obtain training material with sufficient variability, we designed a lightweight (and low-cost) capture protocol that enabled us to collect data from a large panel of LSF signers. This protocol involves the use of a portable device providing a 3D skeleton, and of software developed specifically for this application to facilitate the post-processing of handshapes. The LSF-SHELVES corpus includes simple and compound iconic and spatial dynamics, organized in 6 complexity levels, representing a total of 60 sequences signed by 15 LSF signers.
@inproceedings{mertz-etal-2022-motion:lrec,
  author        = {Mertz, Cl{\'e}mence and Barreaud, Vincent and Le Naour, Thibaut and Lolive, Damien and Gibet, Sylvie},
  title         = {A Low-Cost Motion Capture Corpus in {French} {Sign} {Language} for Interpreting Iconicity and Spatial Referencing Mechanisms},
  booktitle     = {13th International Conference on Language Resources and Evaluation ({LREC} 2022)},
  editor        = {Calzolari, Nicoletta and B{\'e}chet, Fr{\'e}d{\'e}ric and Blache, Philippe and Choukri, Khalid and Cieri, Christopher and Declerck, Thierry and Goggi, Sara and Isahara, Hitoshi and Maegaard, Bente and Mariani, Joseph and Mazo, H{\'e}l{\`e}ne and Odijk, Jan and Piperidis, Stelios},
  pages         = {2488--2497},
  publisher     = {European Language Resources Association (ELRA)},
  address       = {Marseille, France},
  day           = {20--25},
  month         = jun,
  year          = {2022},
  isbn          = {979-10-95546-72-6},
  language      = {english},
  url           = {http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.265},
  internal-note = {NOTE(review): `day` is nonstandard and ignored by standard styles; if migrating to biblatex, fold it into eventdate = {2022-06-20/2022-06-25} and rename language -> langid. URL appears to lack a .pdf suffix -- verify against the LREC 2022 proceedings site.},
}