This paper reports on work related to the modelling of Human-Robot Communication on the basis of multimodal and multisensory human behaviour analysis. A primary focus in this framework of analysis is the definition of semantics of human actions in interaction, their capture and their representation in terms of behavioural patterns that, in turn, feed a multimodal human-robot communication system. Semantic analysis encompasses both oral and sign languages, as well as both verbal and non-verbal communicative signals, to achieve an effective, natural interaction between elderly users with mild mobility and cognitive impairments and an assistive robotic platform.
@inproceedings{fotinea-etal-2016-multimodal:lrec,
  author    = {Fotinea, Stavroula-Evita and Efthimiou, Eleni and Koutsombogera, Maria and Dimou, Athanasia-Lida and Goulas, Theodore and Vasilaki, Kyriaki},
  title     = {Multimodal Resources for Human-Robot Communication Modelling},
  booktitle = {Proceedings of the 10th International Conference on Language Resources and Evaluation ({LREC} 2016)},
  editor    = {Calzolari, Nicoletta and Choukri, Khalid and Declerck, Thierry and Goggi, Sara and Grobelnik, Marko and Maegaard, Bente and Mariani, Joseph and Mazo, H{\'e}l{\`e}ne and Moreno, Asuncion and Odijk, Jan and Piperidis, Stelios},
  pages     = {3455--3460},
  publisher = {European Language Resources Association (ELRA)},
  address   = {Portoro{\v z}, Slovenia},
  day       = {23--28},
  month     = may,
  year      = {2016},
  isbn      = {978-2-9517408-9-1},
  language  = {english},
  url       = {https://aclanthology.org/L16-1551},
}