Essential grammatical information is conveyed in signed languages by clusters of events involving facial expressions and movements of the head and upper body. This poses a significant challenge for computer-based sign language recognition. Here, we present new methods for the recognition of nonmanual grammatical markers in American Sign Language (ASL) based on: (1) new 3D tracking methods for the estimation of 3D head pose and facial expressions to determine the relevant low-level features; (2) methods for higher-level analysis of component events (raised/lowered eyebrows, periodic head nods and head shakes) used in grammatical markings, with differentiation of temporal phases (onset, core, offset, where appropriate), analysis of their characteristic properties, and extraction of corresponding features; and (3) a 2-level learning framework to combine low- and high-level features of differing spatio-temporal scales. This new approach achieves significantly better tracking and recognition results than our previous methods.
@inproceedings{liu-etal-2014-3d:lrec,
author = {Liu, Bo and Liu, Jingjing and Yu, Xiang and Metaxas, Dimitris and Neidle, Carol},
title = {3{D} Face Tracking and Multi-Scale, Spatio-temporal Analysis of Linguistically Significant Facial Expressions and Head Positions in {ASL}},
pages = {4512--4518},
editor = {Calzolari, Nicoletta and Choukri, Khalid and Declerck, Thierry and Loftsson, Hrafn and Maegaard, Bente and Mariani, Joseph and Moreno, Asuncion and Odijk, Jan and Piperidis, Stelios},
booktitle = {9th International Conference on Language Resources and Evaluation ({LREC} 2014)},
publisher = {European Language Resources Association (ELRA)},
address = {Reykjavik, Iceland},
day = {26--31},
month = may,
year = {2014},
isbn = {978-2-9517408-8-4},
language = {english},
url = {http://www.lrec-conf.org/proceedings/lrec2014/pdf/370_Paper.pdf}
}
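
The 2-level learning framework described in the abstract suggests a natural stacking pattern: a first-level classifier operates on per-frame low-level tracking features, and its aggregated scores are then combined with event-level features in a second, sequence-level classifier. The sketch below illustrates that general pattern only; the synthetic data, feature dimensions, aggregation statistics, and choice of logistic regression are all illustrative assumptions, not the authors' implementation.

    # Hypothetical sketch of a 2-level learning framework combining
    # frame-level (low-level) and event-level (high-level) features.
    # All feature names, shapes, and classifiers are assumptions made
    # for illustration only.
    import numpy as np
    from sklearn.linear_model import LogisticRegression

    rng = np.random.default_rng(0)

    # Synthetic stand-ins for tracked low-level features: per-frame
    # 3D head pose (pitch, yaw, roll) plus a few facial measurements
    # (e.g., eyebrow height). Shape: (n_sequences, n_frames, n_low).
    n_sequences, n_frames, n_low = 200, 40, 6
    X_low = rng.normal(size=(n_sequences, n_frames, n_low))

    # Synthetic per-sequence event-level features, e.g., durations of
    # onset/core/offset phases and head-nod periodicity.
    n_high = 4
    X_high = rng.normal(size=(n_sequences, n_high))

    # Binary label: does the sequence carry a given nonmanual marker?
    y = rng.integers(0, 2, size=n_sequences)

    # Level 1: frame-level classifier, trained on individual frames
    # labelled with their sequence's label.
    frame_clf = LogisticRegression(max_iter=1000)
    frame_clf.fit(X_low.reshape(-1, n_low), np.repeat(y, n_frames))

    # Aggregate frame-level scores over each sequence, turning
    # frame-scale evidence into fixed-size sequence-scale features.
    frame_scores = frame_clf.predict_proba(
        X_low.reshape(-1, n_low))[:, 1].reshape(n_sequences, n_frames)
    agg = np.stack([frame_scores.mean(axis=1),
                    frame_scores.max(axis=1)], axis=1)

    # Level 2: combine aggregated low-level scores with the
    # event-level features and classify the whole sequence.
    X_combined = np.hstack([agg, X_high])
    seq_clf = LogisticRegression(max_iter=1000)
    seq_clf.fit(X_combined, y)
    print("sequence-level training accuracy:",
          seq_clf.score(X_combined, y))

The point of the two levels is that they operate at different spatio-temporal scales: the first consumes per-frame pose and expression estimates, while the second consumes features defined over whole temporal phases of a marker.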