@article{oai:sucra.repo.nii.ac.jp:00013807,
  author   = {Das, Dipankar and Rashed, Md. Golam and Kobayashi, Yoshinori and Kuno, Yoshinori},
  title    = {Supporting Human–Robot Interaction Based on the Level of Visual Focus of Attention},
  journal  = {IEEE Transactions on Human-Machine Systems},
  volume   = {45},
  number   = {6},
  pages    = {664--675},
  year     = {2015},
  abstract = {We propose a human–robot interaction approach for social robots that attracts and controls the attention of a target person depending on her/his current visual focus of attention. The system detects the person’s current task (attention) and estimates its level using “task-related contextual cues” and “gaze pattern.” The attention level is used to determine a suitable time to attract the target person’s attention toward the robot. The robot detects the interest or willingness of the target person to interact with it. Then, depending on the target person’s level of interest, the robot generates awareness and establishes a communication channel with her/him. To evaluate the performance, we conducted an experiment in which our static robot attracted the target person’s attention while she/he was engaged in one of four tasks: reading, writing, browsing, and viewing paintings. The proposed robot determines the attention level of the current task and takes the target person’s situation into account. Questionnaire measures confirmed that the proposed robot outperforms a simple attention-control robot in attracting participants’ attention in an acceptable way. It also causes less disturbance and establishes effective eye contact. We implemented the system on a commercial robotic platform (Robovie-R3) to initiate interaction between visitors and the robot in a museum scenario. The robot determined the visitors’ gaze points and established interaction with a success rate of 91.7%.},
  note     = {© 2015 IEEE. Personal use is permitted, but republication/redistribution requires IEEE permission. See http://www.ieee.org/publications_standards/publications/rights/index.html for more information.}
}
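
The abstract above describes deciding when to attract a person's attention based on an attention level estimated from the detected task and gaze pattern. The following Python sketch is only an illustration of that general idea under assumed task weights and a hypothetical threshold; it is not the authors' implementation, and all names and numbers are assumptions.

    # Hypothetical sketch (not from the paper): a minimal decision rule for
    # timing an attention-attraction action from an estimated attention level.
    # Task weights and the threshold are illustrative assumptions.
    from dataclasses import dataclass
    from enum import Enum, auto


    class Task(Enum):
        READING = auto()
        WRITING = auto()
        BROWSING = auto()
        VIEWING_PAINTINGS = auto()


    @dataclass
    class Observation:
        task: Task                  # current task inferred from contextual cues
        gaze_on_task_ratio: float   # fraction of recent gaze samples on the task object (0..1)


    def attention_level(obs: Observation) -> float:
        """Crude proxy for attention level: gaze concentration weighted by how
        demanding the detected task typically is (weights are assumptions)."""
        task_weight = {
            Task.READING: 1.0,
            Task.WRITING: 1.0,
            Task.BROWSING: 0.7,
            Task.VIEWING_PAINTINGS: 0.5,
        }[obs.task]
        return task_weight * obs.gaze_on_task_ratio


    def should_attract_now(obs: Observation, threshold: float = 0.4) -> bool:
        """Attract attention only when the estimated attention to the current
        task has dropped below a threshold, i.e., a less disturbing moment."""
        return attention_level(obs) < threshold


    if __name__ == "__main__":
        # A visitor glancing around while viewing paintings is a better moment
        # to initiate interaction than someone deep in reading.
        reader = Observation(Task.READING, gaze_on_task_ratio=0.9)
        viewer = Observation(Task.VIEWING_PAINTINGS, gaze_on_task_ratio=0.5)
        print(should_attract_now(reader))  # False: attention level 0.9
        print(should_attract_now(viewer))  # True: attention level 0.25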