@inproceedings{karpov:12012:sign-lang:lrec,
  author    = {Karpov, Alexey and {\v Z}elezn{\'y}, Milo{\v s}},
  title     = {Towards {Russian} {Sign} {Language} Synthesizer: Lexical Level},
  pages     = {83--86},
  editor    = {Crasborn, Onno and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2012} 5th Workshop on the Representation and Processing of Sign Languages: Interactions between Corpus and Lexicon},
  maintitle = {8th International Conference on Language Resources and Evaluation ({LREC} 2012)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Istanbul, Turkey},
  day       = {27},
  month     = may,
  year      = {2012},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/12012.html},
  abstract  = {In this paper, we present a survey of existing Russian sign language electronic and printed resources and dictionaries. The problem of differences in dialects of Russian sign language used in various local communities of Russia and some other CIS countries is discussed in the paper. Also the first version of a computer system for synthesis of elements of Russian sign language (signed Russian and fingerspelling) is presented in the given paper. It is a universal multi-modal synthesizer both for Russian spoken language and signed Russian that is based on a model of animated 3D signing avatar. The proposed system inputs data in the text form and converts them into the audio-visual modality, synchronizing visual manual gestures and articulation with audio speech signal. Generated audio-visual signed Russian speech and spoken language is a fusion of dynamic gestures shown by the avatar's both hands, lip movements articulating words and auditory speech, so the multimodal output is available both for the deaf and hearing-able people.}
}

@inproceedings{kagirov-etal-2020-theruslan:lrec,
  author    = {Kagirov, Ildar and Ivanko, Denis and Ryumin, Dmitry and Axyonov, Alexander and Karpov, Alexey},
  title     = {{T}he{R}u{SL}an: Database of {R}ussian {S}ign {L}anguage},
  pages     = {6079--6085},
  editor    = {Calzolari, Nicoletta and B{\'e}chet, Fr{\'e}d{\'e}ric and Blache, Philippe and Choukri, Khalid and Cieri, Christopher and Declerck, Thierry and Goggi, Sara and Isahara, Hitoshi and Maegaard, Bente and Mariani, Joseph and Mazo, H{\'e}l{\`e}ne and Moreno, Asuncion and Odijk, Jan and Piperidis, Stelios},
  booktitle = {12th International Conference on Language Resources and Evaluation ({LREC} 2020)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Marseille, France},
  day       = {11--16},
  month     = may,
  year      = {2020},
  isbn      = {979-10-95546-34-4},
  language  = {english},
  url       = {https://aclanthology.org/2020.lrec-1.746},
  abstract  = {In this paper, a new Russian sign language multimedia database TheRuSLan is presented. The database includes lexical units (single words and phrases) from Russian sign language within one subject area, namely, ``food products at the supermarket'', and was collected using MS Kinect 2.0 device including both FullHD video and the depth map modes, which provides new opportunities for the lexicographical description of the Russian sign language vocabulary and enhances research in the field of automatic gesture recognition. Russian sign language has an official status in Russia, and over 120,000 deaf people in Russia and its neighboring countries use it as their first language. Russian sign language has no writing system, is poorly described and belongs to the low-resource languages. The authors formulate the basic principles of annotation of sign words, based on the collected data, and reveal the content of the collected database. In the future, the database will be expanded and comprise more lexical units. The database is explicitly made for the task of creating an automatic system for Russian sign language recognition.}
}

