{"created":"2023-05-15T15:25:58.677671+00:00","id":13807,"links":{},"metadata":{"_buckets":{"deposit":"33de1a47-43ac-4b4e-979f-a313d6c53f8b"},"_deposit":{"created_by":3,"id":"13807","owners":[3],"pid":{"revision_id":0,"type":"depid","value":"13807"},"status":"published"},"_oai":{"id":"oai:sucra.repo.nii.ac.jp:00013807","sets":["94:426"]},"author_link":["16423","16422","23145","23144"],"item_119_biblio_info_8":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicIssueDates":{"bibliographicIssueDate":"2015","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"6","bibliographicPageEnd":"675","bibliographicPageStart":"664","bibliographicVolumeNumber":"45","bibliographic_titles":[{"bibliographic_title":"IEEE Transactions on Human-Machine Systems"}]}]},"item_119_date_31":{"attribute_name":"作成日","attribute_value_mlt":[{"subitem_date_issued_datetime":"2016-03-01","subitem_date_issued_type":"Created"}]},"item_119_description_19":{"attribute_name":"抄録","attribute_value_mlt":[{"subitem_description":"We propose a human–robot interaction approach for social robots that attracts and controls the attention of a target person depending on her/his current visual focus of attention. The system detects the person’s current task (attention) and estimates the level by using the “task-related contextual cues” and “gaze pattern.” The attention level is used to determine the suitable time to attract the target person’s attention toward the robot. The robot detects the interest or willingness of the target person to interact with it. Then, depending on the level of interest of the target person, the robot generates awareness and establishes a communication channel with her/him. To evaluate the performance, we conducted an experiment using our static robot to attract the target human’s attention when she/he is involved in four different tasks: reading, writing, browsing, and viewing paintings. The proposed robot determines the level of attention of the current task and considers the situation of the target person. Questionnaire measures confirmed that the proposed robot outperforms a simple attention control robot in attracting participants’ attention in an acceptable way. It also causes less disturbance and establishes effective eye contact. We implemented the system into a commercial robotic platform (Robovie-R3) to initiate interaction between visitors and the robot in a museum scenario. The robot determined the visitors’ gaze points and established a successful interaction with a success rate of 91.7%.","subitem_description_type":"Abstract"}]},"item_119_description_21":{"attribute_name":"注記","attribute_value_mlt":[{"subitem_description":"© 2015 IEEE. Personal use is permitted, but republication/redistribution requires IEEE permission.\nSee http://www.ieee.org/publications_standards/publications/rights/index.html for more information.","subitem_description_type":"Other"}]},"item_119_description_29":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"subitem_description":"text","subitem_description_type":"Other"}]},"item_119_description_30":{"attribute_name":"フォーマット","attribute_value_mlt":[{"subitem_description":"application/pdf","subitem_description_type":"Other"}]},"item_119_publisher_11":{"attribute_name":"出版者名","attribute_value_mlt":[{"subitem_publisher":"IEEE-Institute of Electrical and Electronics Engineers"}]},"item_119_relation_16":{"attribute_name":"DOI","attribute_value_mlt":[{"subitem_relation_type":"isIdenticalTo","subitem_relation_type_id":{"subitem_relation_type_id_text":"info:doi/10.1109/THMS.2015.2445856","subitem_relation_type_select":"DOI"}}]},"item_119_text_27":{"attribute_name":"版","attribute_value_mlt":[{"subitem_text_value":"[出版社版]"}]},"item_119_text_3":{"attribute_name":"著者 ローマ字","attribute_value_mlt":[{"subitem_text_value":"&EMPTY&"},{"subitem_text_value":"&EMPTY&"},{"subitem_text_value":"KOBAYASHI, Yoshinori"},{"subitem_text_value":"KUNO, Yoshinori"}]},"item_119_text_32":{"attribute_name":"アイテムID","attribute_value_mlt":[{"subitem_text_value":"A3000409"}]},"item_119_text_4":{"attribute_name":"著者 所属","attribute_value_mlt":[{"subitem_text_value":"Department of Information and Communication Engineering, University of Rajshahi"},{"subitem_text_value":"Graduate School of Science and Engineering, Saitama University"},{"subitem_text_value":"Graduate School of Science and Engineering, Saitama University, Japan Science and Technology Agency"},{"subitem_text_value":"Graduate School of Science and Engineering, Saitama University"}]},"item_119_text_9":{"attribute_name":"年月次","attribute_value_mlt":[{"subitem_text_value":"2015-12"}]},"item_119_version_type_28":{"attribute_name":"著者版フラグ","attribute_value_mlt":[{"subitem_version_resource":"http://purl.org/coar/version/c_970fb48d4fbd8a85","subitem_version_type":"VoR"}]},"item_creator":{"attribute_name":"著者","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"DAS, Dipankar"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Md., Golam Rashed"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"小林, 貴訓"},{"creatorName":"コバヤシ, ヨシノリ","creatorNameLang":"ja-Kana"}],"nameIdentifiers":[{},{}]},{"creatorNames":[{"creatorName":"久野, 義徳"},{"creatorName":"クノ, ヨシノリ","creatorNameLang":"ja-Kana"}],"nameIdentifiers":[{}]}]},"item_files":{"attribute_name":"ファイル情報","attribute_type":"file","attribute_value_mlt":[{"accessrole":"open_date","date":[{"dateType":"Available","dateValue":"2018-01-24"}],"displaytype":"detail","filename":"A3000409.pdf","filesize":[{"value":"933.0 kB"}],"format":"application/pdf","licensetype":"license_note","mimetype":"application/pdf","url":{"label":"A3000409.pdf","url":"https://sucra.repo.nii.ac.jp/record/13807/files/A3000409.pdf"},"version_id":"697d4b34-9251-42b7-b835-9c77d0d9c6b6"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"Gaze pattern","subitem_subject_scheme":"Other"},{"subitem_subject":"human–robot interaction","subitem_subject_scheme":"Other"},{"subitem_subject":"task-related contextual cues","subitem_subject_scheme":"Other"},{"subitem_subject":"visual focus of attention (VFOA)","subitem_subject_scheme":"Other"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourcetype":"journal article","resourceuri":"http://purl.org/coar/resource_type/c_6501"}]},"item_title":"Supporting Human–Robot Interaction Based on the Level of Visual Focus of Attention","item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"Supporting Human–Robot Interaction Based on the Level of Visual Focus of Attention"}]},"item_type_id":"119","owner":"3","path":["426"],"pubdate":{"attribute_name":"公開日","attribute_value":"2016-03-02"},"publish_date":"2016-03-02","publish_status":"0","recid":"13807","relation_version_is_last":true,"title":["Supporting Human–Robot Interaction Based on the Level of Visual Focus of Attention"],"weko_creator_id":"3","weko_shared_id":-1},"updated":"2023-05-16T12:23:07.795825+00:00"}