@inproceedings{miyazaki:24004:sign-lang:lrec,
  author    = {Miyazaki, Taro and Tan, Sihan and Uchida, Tsubasa and Kaneko, Hiroyuki},
  title     = {Sign Language Translation with Gloss Pair Encoding},
  pages     = {32--38},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC-COLING} 2024 11th Workshop on the Representation and Processing of Sign Languages: Evaluation of Sign Language Resources},
  maintitle = {2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation ({LREC-COLING} 2024)},
  publisher = {{ELRA Language Resources Association (ELRA) and the International Committee on Computational Linguistics (ICCL)}},
  address   = {Torino, Italy},
  day       = {25},
  month     = may,
  year      = {2024},
  isbn      = {978-2-493814-30-2},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/24004.html},
  abstract  = {Because sign languages are the first language for those who are born deaf or who lost their hearing in early childhood, it is better to use sign languages rather than transcribed spoken language to provide important information to these people. We have been developing a sign language computer graphics generation system to provide information to deaf people, and in this paper, we present a translation method from spoken language to sign language that can be used in the system. In general, since the number of glosses used when transcribing sign language is limited, a single meaning is often expressed by a combination of multiple sign words, e.g., the word "library" is expressed in Japanese Sign Language with two words: "book" and "building." To merge these expressions into one token, we propose gloss pair encoding (GPE), which is inspired by byte pair encoding (BPE). This technique is expected to enable more accurate handling of expressions that have a single meaning spanning multiple sign words. We also show that GPE is effective for data augmentation on the sign language side of sign language translation, which has received little attention so far.}
}

@inproceedings{uchida:24011:sign-lang:lrec,
  author    = {Uchida, Tsubasa and Miyazaki, Taro and Kaneko, Hiroyuki},
  title     = {{HamNoSys-based} Motion Editing Method for Sign Language},
  pages     = {90--99},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC-COLING} 2024 11th Workshop on the Representation and Processing of Sign Languages: Evaluation of Sign Language Resources},
  maintitle = {2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation ({LREC-COLING} 2024)},
  publisher = {{ELRA Language Resources Association (ELRA) and the International Committee on Computational Linguistics (ICCL)}},
  address   = {Torino, Italy},
  day       = {25},
  month     = may,
  year      = {2024},
  isbn      = {978-2-493814-30-2},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/24011.html},
  abstract  = {We have developed a Japanese-to-Japanese Sign Language (JSL) translation system to expand sign language services for the Deaf. Although recording the motion data of isolated JSL signs by motion capture (MoCap) and driving avatar animation with MoCap data are effective for capturing the more natural movements of sign language, they lack the flexibility to reproduce the contextual modification of signs. We therefore propose a sign language motion data editing method based on the Hamburg Notation System for Sign Languages (HamNoSys) for use in a hybrid system that combines a MoCap data-driven technique and a phonological generation technique. The proposed method enables the editing of handshape, hand orientation, and location of the motion data based on HamNoSys components to generate contextual modifications for motion-captured citation form signs in translated gloss sequences. Experimental results demonstrate that our method achieves the flexibility to generate contextual modifications and new movements while preserving natural human-like movements without the need for additional MoCap processes.}
}

@inproceedings{inoue:24022:sign-lang:lrec,
  author    = {Inoue, Jundai and Miwa, Makoto and Sasaki, Yutaka and Hara, Daisuke},
  title     = {Enhancing Syllabic Component Classification in {Japanese} {Sign} {Language} by Pre-training on Non-Japanese Sign Language Data},
  pages     = {181--188},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Mesch, Johanna and Schulder, Marc},
  booktitle = {Proceedings of the {LREC-COLING} 2024 11th Workshop on the Representation and Processing of Sign Languages: Evaluation of Sign Language Resources},
  maintitle = {2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation ({LREC-COLING} 2024)},
  publisher = {{ELRA Language Resources Association (ELRA) and the International Committee on Computational Linguistics (ICCL)}},
  address   = {Torino, Italy},
  day       = {25},
  month     = may,
  year      = {2024},
  isbn      = {978-2-493814-30-2},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/24022.html},
  abstract  = {In sign languages, syllables are composed of syllabic components consisting of locations, movements, and handshapes; however, the rules of combinations of these syllabic components are still unclear. Decomposing existing syllables into syllabic components is necessary to clarify the rules. This study aims to construct an automatic syllabic component classification system for Japanese Sign Language (JSL) using deep learning. We propose a pre-training method using non-Japanese Sign Language data to achieve high performance in classifying syllabic components in a situation where the number of training JSL videos is limited. We also investigate multitask learning for syllabic component classification to share the information among the syllabic components. Experiments on the syllabic component classification for the dominant hand show that 1) pre-training with the American Sign Language (ASL) dataset improved classification performance for the movement and handshape components and 2) multitask learning did not contribute to the performance improvement of syllabic component classification. We also investigated the influence of pre-training on syllabic component classification by visualizing critical elements in videos to predict the components.}
}

@inproceedings{bono:20012:sign-lang:lrec,
  author    = {Bono, Mayumi and Sakaida, Rui and Okada, Tomohiro and Miyao, Yusuke},
  title     = {Utterance-Unit Annotation for the {JSL} Dialogue Corpus: Toward a Multimodal Approach to Corpus Linguistics},
  pages     = {13--20},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2020} 9th Workshop on the Representation and Processing of Sign Languages: Sign Language Resources in the Service of the Language Community, Technological Challenges and Application Perspectives},
  maintitle = {12th International Conference on Language Resources and Evaluation ({LREC} 2020)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Marseille, France},
  day       = {16},
  month     = may,
  year      = {2020},
  isbn      = {979-10-95546-54-2},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/20012.html},
  abstract  = {This paper describes a method for annotating the Japanese Sign Language (JSL) dialogue corpus. We developed a way to identify interactional boundaries and define an `utterance unit' in sign language using various multimodal features accompanying signing. The utterance unit is an original concept for segmenting and annotating sign language dialogue, referring to signers' native sense, from the perspectives of Conversation Analysis (CA) and Interaction Studies. First of all, we postulated that a fundamental, interaction-specific unit should be identified for understanding interactional mechanisms, such as turn-taking (Sacks et al. 1974), in sign-language social interactions. Obviously, such a unit should not rely on a spoken-language writing system for storing signing in corpora and producing translations. We believe that there are two kinds of possible applications for utterance units: one is to develop corpus linguistics research for both signed and spoken corpora; the other is to build an informatics system that includes, but is not limited to, a machine translation system for sign languages.}
}

@inproceedings{miyazaki:20002:sign-lang:lrec,
  author    = {Miyazaki, Taro and Morita, Yusuke and Sano, Masanori},
  title     = {Machine Translation from Spoken Language to Sign Language using Pre-trained Language Model as Encoder},
  pages     = {139--144},
  editor    = {Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Kristoffersen, Jette and Mesch, Johanna},
  booktitle = {Proceedings of the {LREC2020} 9th Workshop on the Representation and Processing of Sign Languages: Sign Language Resources in the Service of the Language Community, Technological Challenges and Application Perspectives},
  maintitle = {12th International Conference on Language Resources and Evaluation ({LREC} 2020)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Marseille, France},
  day       = {16},
  month     = may,
  year      = {2020},
  isbn      = {979-10-95546-54-2},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/20002.html},
  abstract  = {Sign language is the first language for those who were born deaf or lost their hearing in early childhood, so such individuals require services provided in sign language. To achieve flexible open-domain services in sign language, machine translation into sign language is needed. Machine translation generally requires large-scale training corpora, but only small corpora exist for sign language. To overcome this data shortage, we developed a method that uses a pre-trained language model of spoken language as the initial model of the encoder of the machine translation model. We evaluated our method by comparing it to baseline methods, including phrase-based machine translation, using only 130,000 phrase pairs of training data. Our method outperformed the baseline methods, and we found that one of the sources of translation error is pointing, which is a special feature used in sign language. We also conducted trials to improve the translation quality for pointing. The results were somewhat disappointing, so we believe that there is still room for improving translation quality, especially for pointing.}
}

@inproceedings{brock:18012:sign-lang:lrec,
  author    = {Brock, Heike and Rengot, Juliette and Nakadai, Kazuhiro},
  title     = {Augmenting Sparse Corpora for Enhanced Sign Language Recognition and Generation},
  pages     = {15--22},
  editor    = {Bono, Mayumi and Efthimiou, Eleni and Fotinea, Stavroula-Evita and Hanke, Thomas and Hochgesang, Julie A. and Kristoffersen, Jette and Mesch, Johanna and Osugi, Yutaka},
  booktitle = {Proceedings of the {LREC2018} 8th Workshop on the Representation and Processing of Sign Languages: Involving the Language Community},
  maintitle = {11th International Conference on Language Resources and Evaluation ({LREC} 2018)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Miyazaki, Japan},
  day       = {12},
  month     = may,
  year      = {2018},
  isbn      = {979-10-95546-01-6},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/18012.html},
  abstract  = {The collection of signed utterances for recognition and generation of Sign Language (SL) is a costly and labor-intensive task. As a result, SL corpora are usually considerably smaller than their spoken language or image data counterparts. This is problematic, since the accuracy and applicability of a neural network depend largely on the quality and amount of its underlying training data. Common data augmentation strategies to increase the amount of available training data are usually not applicable to the spatially and temporally constrained motion sequences of an SL corpus. In this paper, we therefore discuss possible data manipulation methods on the basis of a collection of motion-captured SL sentence expressions. Evaluation of differently trained network architectures shows a significant reduction of overfitting by inclusion of the augmented data. Simultaneously, the accuracy of both sign recognition and generation was improved, indicating that the proposed data augmentation methods are beneficial for constrained and sparse data sets.}
}

@inproceedings{yu-etal-2018-sign:lrec,
  author    = {Yu, Shi and Geraci, Carlo and Abner, Natasha},
  title     = {Sign Languages and the Online World: Online Dictionaries {\&} Lexicostatistics},
  pages     = {4235--4240},
  editor    = {Calzolari, Nicoletta and Choukri, Khalid and Cieri, Christopher and Declerck, Thierry and Goggi, Sara and Hasida, Koiti and Isahara, Hitoshi and Maegaard, Bente and Mariani, Joseph and Mazo, H{\'e}l{\`e}ne and Moreno, Asuncion and Odijk, Jan and Piperidis, Stelios and Tokunaga, Takenobu},
  booktitle = {11th International Conference on Language Resources and Evaluation ({LREC} 2018)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Miyazaki, Japan},
  day       = {7--12},
  month     = may,
  year      = {2018},
  isbn      = {979-10-95546-00-9},
  language  = {english},
  url       = {https://aclanthology.org/L18-1668}
}

@inproceedings{brock-nakadai-2018-deep:lrec,
  author    = {Brock, Heike and Nakadai, Kazuhiro},
  title     = {Deep {JSLC}: A Multimodal Corpus Collection for Data-driven Generation of {J}apanese {S}ign {L}anguage Expressions},
  pages     = {4247--4252},
  editor    = {Calzolari, Nicoletta and Choukri, Khalid and Cieri, Christopher and Declerck, Thierry and Goggi, Sara and Hasida, Koiti and Isahara, Hitoshi and Maegaard, Bente and Mariani, Joseph and Mazo, H{\'e}l{\`e}ne and Moreno, Asuncion and Odijk, Jan and Piperidis, Stelios and Tokunaga, Takenobu},
  booktitle = {11th International Conference on Language Resources and Evaluation ({LREC} 2018)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Miyazaki, Japan},
  day       = {7--12},
  month     = may,
  year      = {2018},
  isbn      = {979-10-95546-00-9},
  language  = {english},
  url       = {https://aclanthology.org/L18-1670}
}

@inproceedings{bono-etal-2014-colloquial:lrec,
  author    = {Bono, Mayumi and Kikuchi, Kouhei and Cibulka, Paul and Osugi, Yutaka},
  title     = {A Colloquial Corpus of {J}apanese {S}ign {L}anguage: Linguistic Resources for Observing Sign Language Conversations},
  pages     = {1898--1904},
  editor    = {Calzolari, Nicoletta and Choukri, Khalid and Declerck, Thierry and Loftsson, Hrafn and Maegaard, Bente and Mariani, Joseph and Moreno, Asuncion and Odijk, Jan and Piperidis, Stelios},
  booktitle = {9th International Conference on Language Resources and Evaluation ({LREC} 2014)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Reykjavik, Iceland},
  day       = {26--31},
  month     = may,
  year      = {2014},
  isbn      = {978-2-9517408-8-4},
  language  = {english},
  url       = {https://aclanthology.org/L14-1253},
  abstract  = {We began building a corpus of Japanese Sign Language (JSL) in April 2011. The purpose of this project was to increase awareness of sign language as a distinctive language in Japan. This corpus is beneficial not only to linguistic research but also to hearing-impaired and deaf individuals, as it helps them to recognize and respect their linguistic differences and communication styles. This is the first large-scale JSL corpus developed for both academic and public use. We collected data in three ways: interviews (for introductory purposes only), dialogues, and lexical elicitation. In this paper, we focus particularly on data collected during a dialogue to discuss the application of conversation analysis (CA) to signed dialogues and signed conversations. Our annotation scheme was designed not only to elucidate theoretical issues related to grammar and linguistics but also to clarify pragmatic and interactional phenomena related to the use of JSL.}
}

@inproceedings{tanaka:10034:sign-lang:lrec,
  author    = {Tanaka, Saori and Matsusaka, Yosuke and Nakazono, Kaoru},
  title     = {Development of E-Learning Service of Computer Assisted Sign Language Learning: Online Version of {CASLL}},
  pages     = {231--234},
  editor    = {Dreuw, Philippe and Efthimiou, Eleni and Hanke, Thomas and Johnston, Trevor and Mart{\'i}nez Ruiz, Gregorio and Schembri, Adam},
  booktitle = {Proceedings of the {LREC2010} 4th Workshop on the Representation and Processing of Sign Languages: Corpora and Sign Language Technologies},
  maintitle = {7th International Conference on Language Resources and Evaluation ({LREC} 2010)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Valletta, Malta},
  day       = {22--23},
  month     = may,
  year      = {2010},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/10034.html},
  abstract  = {In this study, we introduce the problems involved in making an e-learning system available online and outline the ethical issues behind these problems. The difficulties we faced when putting the Computer Assisted Sign Language Learning (CASLL) system online were how to make the sign language movies publicly available in downloadable form, how to increase the number of course materials, and how to enhance collaboration between learners. The ethical discussions revealed that reliability of the system and collaborative work to expand the number of course materials were necessary to overcome these difficulties. To realize a reliable and collaborative e-learning system, we implemented CASLL within Moodle, an open-source course management system. To redesign the system for actual use by sign language learners and teachers, we added new functions to Moodle: a protection function for the right of publicity, a wiki function to enable collaborative course editing, and a link function to enhance public relations. We plan to evaluate the system design from the viewpoints of usability for teaching, effectiveness for learning, and utility for collaboration.}
}

@inproceedings{nagashima:08019:sign-lang:lrec,
  author    = {Nagashima, Yuji and Terauchi, Mina and Nakazono, Kaoru},
  title     = {Construction of {Japanese} {Sign} {Language} Dialogue Corpus: {KOSIGN}},
  pages     = {141--144},
  editor    = {Crasborn, Onno and Efthimiou, Eleni and Hanke, Thomas and Thoutenhoofd, Ernst D. and Zwitserlood, Inge},
  booktitle = {Proceedings of the {LREC2008} 3rd Workshop on the Representation and Processing of Sign Languages: Construction and Exploitation of Sign Language Corpora},
  maintitle = {6th International Conference on Language Resources and Evaluation ({LREC} 2008)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Marrakech, Morocco},
  day       = {1},
  month     = jun,
  year      = {2008},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/08019.html},
  abstract  = {This report presents a method of building corpora of dialogue in Japanese Sign Language (JSL) and the results of an analysis of co-occurrences of manual and non-manual signals using the corpus.
\par
We built the sign dialogue corpus by video recording dialogues between native JSL signers. The purpose of building the corpus is to derive electronic dictionaries, such as a morphological dictionary, a dictionary of words with different meanings, an allomorph dictionary, and an example dictionary. Example sentences are recorded for every word (key sign) registered in the sign language word database KOSIGN Ver.2. So far, we have been able to confirm correlations between manual and non-manual signals and characteristic features of sign language dialogue.
\par
The analysis showed that pointing occurred at the end of sentences with high frequency. This suggests that pointing is one of the markers of sentence endings and clarifies its role as a conjunctive pronoun. The acquired co-occurrence relations between manual and non-manual signals confirmed that non-manual signals play an important role in constraining the meaning of sign language expressions. Moreover, "role shift" and "sandwich construction," which are linguistic features of sign language, were also confirmed. This information is necessary for hearing people studying sign language.}
}

@inproceedings{tanaka:08016:sign-lang:lrec,
  author    = {Tanaka, Saori and Matsusaka, Yosuke and Nakazono, Kaoru},
  title     = {Interface Development for Computer Assisted Sign Language Learning: Compact Version of {CASLL}},
  pages     = {178--184},
  editor    = {Crasborn, Onno and Efthimiou, Eleni and Hanke, Thomas and Thoutenhoofd, Ernst D. and Zwitserlood, Inge},
  booktitle = {Proceedings of the {LREC2008} 3rd Workshop on the Representation and Processing of Sign Languages: Construction and Exploitation of Sign Language Corpora},
  maintitle = {6th International Conference on Language Resources and Evaluation ({LREC} 2008)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Marrakech, Morocco},
  day       = {1},
  month     = jun,
  year      = {2008},
  language  = {english},
  url       = {https://www.sign-lang.uni-hamburg.de/lrec/pub/08016.html},
  abstract  = {In this study, we introduce an e-learning system called CASLL and demonstrate a compact interface for implementing it on mobile video players. As part of our series of previous studies on developing human interfaces using Japanese Sign Language (JSL) content [2, 3, 4], we proposed a new learning program and compared it with the existing learning program implemented in the Computer Assisted Sign Language Learning (CASLL) system [1]. In the existing learning program, users learn sign words and then try to select the appropriate Japanese translations for a natural conversation between two native signers. In the proposed program, users try to segment each word from a stream of signing by manipulating a control knob at the bottom of the movie screen, and then perform the same tasks as in the existing learning model. The goal of the segmentation task is to learn how continuous signs are articulated in natural discourse. Ten Japanese learners participated in the experiments: five subjects used the existing word learning program, and the other five used the proposed segmentation learning program. The mean accuracy rate of the proposed program was higher than that of the existing program. This result indicates that focusing on transitional movements is effective for learning JSL as a second language.
\par
Although the segmentation learning method has been shown to be more effective than the word learning program, in which learners merely memorize the meanings of words, there were some technical problems. Some learners reported that they could not view each of the JSL movies at once when running the learning programs on their own laptops. We therefore needed to improve how JSL movies are displayed on screens of different sizes. We make the movie screen as small as possible and develop a user-friendly interface with which learners can view a whole series of JSL movies by switching between movies side by side. We will demonstrate the interface development and examine whether the interface is applicable to other sign languages.
\par
References
\par
[1] Saori Tanaka, Yosuke Matsusaka, Kuniaki Uehara: ``Segmentation Learning Method as a Proposal for Sign Language e-learning'', Human Interface, 2006 (in Japanese)
\par
[2] Saori Tanaka, Kaoru Nakazono, Masafumi Nishida, Yasuo Horiuchi, Akira Ichikawa: Analysis of Interpreter's Skill to Recognize Prosody in Japanese Sign Language, Journal of Japanese Society for Artificial Intelligence (in press).
\par
[3] Kaoru Nakazono, Saori Tanaka: Study of Spatial Configurations of Equipment for Online Sign Interpretation Service, IEICE Transaction on Information and System (in press)
\par
[4] Saori Tanaka: A Study of Non-linguistic Information in Japanese Sign Language and its Application for Assisting Learners and Interpreters, Ph.D. thesis, Chiba University, 2008}
}

@inproceedings{suzuki-etal-2006-web:lrec,
  author    = {Suzuki, Emiko and Suzuki, Tomomi and Kakihana, Kyoko},
  title     = {On the Web Trilingual Sign Language Dictionary to Learn the Foreign Sign Language without Learning a Target Spoken Language},
  pages     = {2307--2310},
  editor    = {Calzolari, Nicoletta and Choukri, Khalid and Gangemi, Aldo and Maegaard, Bente and Mariani, Joseph and Odijk, Jan and Tapias, Daniel},
  booktitle = {5th International Conference on Language Resources and Evaluation ({LREC} 2006)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Genoa, Italy},
  day       = {22--28},
  month     = may,
  year      = {2006},
  isbn      = {978-2-9517408-2-2},
  language  = {english},
  url       = {https://aclanthology.org/L06-1146},
  abstract  = {This paper describes a trilingual sign language dictionary (Japanese Sign Language, American Sign Language, and Korean Sign Language) that helps users learn each sign language directly from their mother sign language. Our discussion covers two main points. The first concerns the necessity of a trilingual dictionary. Since there is no universal sign language or real international sign language, deaf people who want to talk to people whose mother tongue is different from their own should learn at least four languages: their mother sign language, their mother spoken language as the first intermediate language, the target spoken language as the second intermediate language, and the sign language in which they want to communicate. The two spoken languages become language barriers for deaf people, and our trilingual dictionary will remove this barrier. The second concerns the use of computers. As the use of computers becomes widespread, it is increasingly convenient to study through computer software or Internet facilities. Our WWW dictionary system provides deaf people with an easy means of access using their mother sign language, which means they do not have to overcome the barrier of learning a foreign spoken language. It also provides a way for people who are going to learn three sign languages to look up new vocabulary. We are further planning to examine how our dictionary system could be used to educate and assist deaf people.}
}

@inproceedings{suzuki-kakihana-2002-japanese:lrec,
  author    = {Suzuki, Emiko and Kakihana, Kyoko},
  title     = {{J}apanese and {A}merican {S}ign {L}anguage Dictionary System for {J}apanese and {E}nglish Users},
  pages     = {677--680},
  editor    = {Rodr{\'i}guez, Manuel Gonz{\'a}lez and Araujo, Carmen Paz Suarez},
  booktitle = {3rd International Conference on Language Resources and Evaluation ({LREC} 2002)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Las Palmas, Canary Islands, Spain},
  day       = {27},
  month     = may,
  year      = {2002},
  language  = {english},
  url       = {https://aclanthology.org/L02-1332},
  abstract  = {We discuss the basic ideas behind a Japanese and American Sign Language dictionary system for Japanese and English users. Our discussion covers two main points. The first concerns the necessity of a bilingual dictionary. Since there is no ``universal sign language'' or real ``international sign language,'' Deaf people who want to talk to people whose mother tongue is different from their own should learn at least three languages: their mother sign language, their mother spoken language as an intermediate language, and the sign language in which they want to communicate. The second concerns the use of computers. As the use of computers becomes widespread, it is increasingly convenient to study through computer software or Internet facilities. Our dictionary system provides Deaf people with an easy means of access using their mother spoken language. It also provides a way for people who are going to learn two sign languages to look up new vocabulary. We are further planning to examine how our system could be used to educate and assist Deaf people.}
}

@inproceedings{koizumi-etal-2002-annotated:lrec,
  author    = {Koizumi, Atsuko and Sagawa, Hirohiko and Takeuchi, Masaru},
  title     = {An Annotated {J}apanese {S}ign {L}anguage Corpus},
  pages     = {927--930},
  editor    = {Rodr{\'i}guez, Manuel Gonz{\'a}lez and Araujo, Carmen Paz Suarez},
  booktitle = {3rd International Conference on Language Resources and Evaluation ({LREC} 2002)},
  publisher = {{European Language Resources Association (ELRA)}},
  address   = {Las Palmas, Canary Islands, Spain},
  day       = {27},
  month     = may,
  year      = {2002},
  language  = {english},
  url       = {https://aclanthology.org/L02-1318},
  abstract  = {Sign language is characterized by its interactivity and multimodality, which cause difficulties in data collection and annotation. To address these difficulties, we have developed a video-based Japanese Sign Language (JSL) corpus and a corpus tool for annotation and linguistic analysis. As the first step of linguistic annotation, we transcribed manual signs expressing lexical information as well as non-manual signs (NMSs) - including head movements, facial actions, and posture - that are used to express grammatical information. Our purpose is to extract grammatical rules from this corpus for the sign-language translation system under development. From this viewpoint, we discuss methods for collecting elicited data, the annotation required for grammatical analysis, and the corpus tool required for annotation and grammatical analysis. As a result of annotating 2800 utterances, we confirmed that there are at least 50 kinds of NMSs in JSL, involving the head (seven kinds), jaw (six kinds), mouth (18 kinds), cheeks (one kind), eyebrows (four kinds), eyes (seven kinds), eye gaze (two kinds), and body posture (five kinds). We use this corpus for designing and testing an algorithm and grammatical rules for the sign-language translation system under development.}
}

