@inproceedings{inan:26015:sign-lang:lrec,
  author    = {Inan, Mert and Imai, Saki and Marshall, Anna and Karel, Tessa and Alikhani, Malihe},
  title     = {Movement Coherence in High Visual Load Environments: Implications for Attention in Mixed-Hearing Classes},
  pages     = {226--238},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC2026} 12th Workshop on the Representation and Processing of Sign Languages: Language in Motion},
  maintitle = {15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-82-1},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/26015.html},
  abstract  = {Signed interpretation in movement-based instruction creates high visual load environments in which spoken language, sign language, and physical demonstration compete for the same perceptual channel. We present a participatory multimodal observational study of mixed-hearing movement and mindfulness classes in which Deaf, Hard of Hearing, and hearing participants practice together. Based on synchronized video recordings and instructor interviews, we examine how alignment across demonstration, signed instruction, and bodily execution is achieved and restored in real time. Drawing on theories of grounding, repair, and sign language interaction, we conceptualize movement coherence as alignment across these parallel streams and describe how breakdowns trigger observable attention shifts and distributed repair across participants, interpreters, and instructors. Across sessions, we identify recurrent coordination strategies, including peer checking, freeze-and-scan, interpreter repositioning, tactile cueing, and pacing adjustment. Our findings provide an empirically grounded account of grounding under attentional constraint in inclusive embodied settings, with implications for sign language interpretation, multimodal discourse, and the design of accessible movement instruction. This paper includes deidentified materials derived from recorded sessions, including selected keyframes, structured interactional annotations, and anonymized instructor and participant survey responses.}
}

@inproceedings{imai-etal-2026-shape:lrec,
  author    = {Imai, Saki and Kezar, Lee and Aichler, Laurel and Inan, Mert and Walker, Erin and Wooten, Alicia and Quandt, Lorna Cobban and Alikhani, Malihe},
  title     = {How Pragmatics Shape Articulation: A Computational Case Study in {STEM} {ASL} Discourse},
  pages     = {8476--8490},
  editor    = {Piperidis, Stelios and Bel, N{\'u}ria and van den Heuvel, Henk and Ide, Nancy and Krek, Simon and Toral, Antonio},
  booktitle = {Proceedings of the 15th International Conference on Language Resources and Evaluation ({LREC} 2026)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Palma, Mallorca, Spain},
  day       = {11--16},
  month     = may,
  year      = {2026},
  isbn      = {978-2-493814-49-4},
  language  = {english},
  url       = {https://lrec.elra.info/lrec2026-main-669},
  doi       = {10.63317/2wjnaaabgz4d},
  abstract  = {Most state-of-the-art sign language models are trained on interpreter or isolated-vocabulary data, which overlooks the variability that characterizes natural dialogue. However, human communication dynamically adapts to contexts and interlocutors through spatiotemporal changes and articulation style. This is especially apparent in educational settings, where novel vocabulary is used by teachers and students. To address this gap, we collect a motion capture dataset of American Sign Language (ASL) STEM (Science, Technology, Engineering, and Mathematics) dialogue that enables quantitative comparison between dyadic interactive signing, solo signed lecture, and interpreted articles. Using continuous kinematic features, we disentangle dialogue-specific entrainment from individual effort reduction and show spatiotemporal changes across repeated mentions of STEM terms. On average, dialogue signs are 24.6{\%}--44.6{\%} shorter in duration than isolated signs and show significant reductions absent in monologue contexts. Finally, we evaluate sign embedding models on their ability to recognize STEM signs and to approximate how entrained the participants become over time. Our study bridges linguistic analysis and computational modeling to understand how pragmatics shape sign articulation and its representation in sign language technologies.}
}

