@inproceedings{3b5ce66684da479daf528d3e1dd1a280,
  title     = {{CheekInput}: Turning Your Cheek into an Input Surface by Embedded Optical Sensors on a Head-Mounted Display},
  abstract  = {In this paper, we propose a novel technology called ``CheekInput'' with a head-mounted display (HMD) that senses touch gestures by detecting skin deformation. We attached multiple photo-reflective sensors onto the bottom front frame of the HMD. Since these sensors measure the distance between the frame and cheeks, our system is able to detect the deformation of a cheek when the skin surface is touched by fingers. Our system uses a Support Vector Machine to determine the gestures: pushing face up and down, left and right. We combined these 4 directional gestures for each cheek to extend 16 possible gestures. To evaluate the accuracy of the gesture detection, we conducted a user study. The results revealed that CheekInput achieved 80.45\% recognition accuracy when gestures were made by touching both cheeks with both hands, and 74.58\% when by touching both cheeks with one hand.},
  keywords  = {OST-HMD, Photo-reflective sensor, Skin interface},
  author    = {Yamashita, Koki and Kikuchi, Takashi and Masai, Katsutoshi and Sugimoto, Maki and Thomas, Bruce H. and Sugiura, Yuta},
  note      = {Funding Information: This work was supported by JSPS KAKENHI Grant Numbers JP26700017 and JP16H01741. Publisher Copyright: {\textcopyright} 2017 Association for Computing Machinery.; 23rd ACM Conference on Virtual Reality Software and Technology, VRST 2017 ; Conference date: 08-11-2017 Through 10-11-2017},
  year      = {2017},
  month     = nov,
  day       = {8},
  doi       = {10.1145/3139131.3139146},
  language  = {English},
  series    = {Proceedings of the ACM Symposium on Virtual Reality Software and Technology, VRST},
  publisher = {Association for Computing Machinery},
  editor    = {Spencer, Stephen N.},
  booktitle = {Proceedings - VRST 2017},
}