Journal Articles
Andreas Bulling, Hans Gellersen: "Toward Mobile Eye-Based Human-Computer Interaction". Journal article in *IEEE Pervasive Computing*, 9(4), pp. 8–12, 2010.

```bibtex
@article{bulling10_pcm,
  title   = {Toward Mobile Eye-Based Human-Computer Interaction},
  author  = {Andreas Bulling and Hans Gellersen},
  url     = {https://perceptual.mpi-inf.mpg.de/files/2013/03/bulling10_pcm.pdf},
  journal = {IEEE Pervasive Computing},
  volume  = {9},
  number  = {4},
  pages   = {8--12},
  year    = {2010}
}
```

Abstract: Research in eye-based human-computer interaction (HCI) has matured over the past 20 years, with current HCI research mostly focusing on stationary eye trackers in laboratory settings. This survey of the latest advances in eye-tracking equipment and automated eye movement analysis suggests a new generation of mobile eye-based interfaces that will become pervasive and seamlessly integrated into people's everyday lives.
Conference Papers
Daniel Roggen, Kilian Förster, Alberto Calatroni, Andreas Bulling, Gerhard Tröster: "On the issue of variability in labels and sensor configurations in activity recognition systems". In *Proc. of the Workshop "How to do good activity recognition research? Experimental methodologies, evaluation metrics, and reproducibility issues" (Pervasive 2010)*, Helsinki, Finland, 2010.

```bibtex
@inproceedings{roggen10_pervasive,
  title     = {On the issue of variability in labels and sensor configurations in activity recognition systems},
  author    = {Daniel Roggen and Kilian Förster and Alberto Calatroni and Andreas Bulling and Gerhard Tröster},
  url       = {https://perceptual.mpi-inf.mpg.de/files/2013/03/roggen10_pervasive.pdf},
  booktitle = {Proc. of the Workshop "How to do good activity recognition research? Experimental methodologies, evaluation metrics, and reproducibility issues" (Pervasive 2010)},
  address   = {Helsinki, Finland},
  year      = {2010}
}
```

Abstract: Two aspects of the design and characterization of activity recognition systems are rarely elaborated in the literature. First, the influence of variability in sensor placement and orientation on system performance is often overlooked. This is important for the deployment of robust activity recognition systems. Second, the influence of labeling variability is also overlooked, especially with respect to label boundary jitter and labeling errors. This is important during the development of an activity recognition system, as acquiring labels is costly. We argue that there is a need to explicitly address the consequences of such variability in publications, together with the mitigation strategies that are used. Elaborating on this is required to move the state of the art towards real-world applications, such as industrial wearable assistance or pervasive healthcare.
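The label-boundary-jitter concern this abstract raises can be probed directly: perturb the ground-truth segment boundaries and measure how much frame-level agreement with the original annotation is lost. The sketch below is illustrative only, not the paper's method; the jitter model, function names, and the `(start, end, label)` segment representation are all assumptions.

```python
import random

def jitter_boundaries(segments, max_jitter, min_len=1):
    """Shift each segment boundary by a random offset in [-max_jitter, +max_jitter].

    `segments` is a list of (start, end, label) tuples over a discrete time
    axis; boundaries are clipped so segments stay ordered and non-empty.
    This jitter model is an assumption for illustration, not from the paper.
    """
    jittered, prev_end = [], 0
    for start, end, label in segments:
        s = max(prev_end, start + random.randint(-max_jitter, max_jitter))
        e = max(s + min_len, end + random.randint(-max_jitter, max_jitter))
        jittered.append((s, e, label))
        prev_end = e
    return jittered

def to_frames(segments, n_frames, null_label="NULL"):
    """Render a segment list to a per-frame label sequence."""
    frames = [null_label] * n_frames
    for start, end, label in segments:
        for t in range(max(0, start), min(n_frames, end)):
            frames[t] = label
    return frames

# Example: how much frame-level agreement is lost to +/-5-frame jitter?
truth = [(0, 40, "walk"), (40, 90, "sit"), (90, 120, "walk")]
noisy = jitter_boundaries(truth, max_jitter=5)
a, b = to_frames(truth, 120), to_frames(noisy, 120)
agreement = sum(x == y for x, y in zip(a, b)) / len(a)
print(f"frame-level agreement after jitter: {agreement:.2%}")
```

Evaluating the same recognizer against several jittered versions of the annotation gives a cheap estimate of how sensitive reported accuracies are to the annotation noise the authors describe.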
Bernd Tessendorf, Andreas Bulling, Daniel Roggen, Thomas Stiefmeier, Gerhard Tröster, Manuela Feilner, Peter Derleth: "Towards Multi-Modal Context Recognition for Hearing Instruments by Analysing Eye and Head Movements". In *Proc. of the 14th International Symposium on Wearable Computers*, pp. 1–2, IEEE Press, 2010.

```bibtex
@inproceedings{tessendorf10_iswc,
  title     = {Towards Multi-Modal Context Recognition for Hearing Instruments by Analysing Eye and Head Movements},
  author    = {Bernd Tessendorf and Andreas Bulling and Daniel Roggen and Thomas Stiefmeier and Gerhard Tröster and Manuela Feilner and Peter Derleth},
  url       = {https://perceptual.mpi-inf.mpg.de/files/2013/03/tessendorf10_iswc.pdf},
  booktitle = {Proc. of the 14th International Symposium on Wearable Computers},
  pages     = {1--2},
  publisher = {IEEE Press},
  year      = {2010}
}
```

Abstract: Current hearing instruments (HIs) rely only on auditory scene analysis to adapt to the situation of the user, which limits the number and type of situations they can detect. We investigate how context information derived from eye and head movements can be used to resolve such situations. We focus on two example problems that are challenging for current HIs: distinguishing concentrated work from interaction, and detecting whether a person is walking alone or walking while having a conversation. We collect an eleven-participant (6 male, 5 female, age 24–59) dataset that covers different typical office activities. Using person-independent training and isolated recognition, we achieve an average precision of 71.7% (recall: 70.1%) for recognising concentrated work and 57.2% precision (recall: 81.3%) for detecting walking while conversing.
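For readers unfamiliar with the evaluation terms in this abstract: "person-independent training" conventionally means no test participant's data appears in the training set (for example, leave-one-subject-out splits), and the reported numbers are per-class precision and recall. Below is a minimal sketch of both ideas under assumed data structures; nothing here is the authors' code.

```python
def precision_recall(y_true, y_pred, positive):
    """Precision and recall for one target class (e.g. 'concentrated work')."""
    pairs = list(zip(y_true, y_pred))
    tp = sum(t == positive and p == positive for t, p in pairs)
    fp = sum(t != positive and p == positive for t, p in pairs)
    fn = sum(t == positive and p != positive for t, p in pairs)
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    return precision, recall

def leave_one_subject_out(data):
    """Yield (train, test) splits with strict subject separation.

    `data` maps subject id -> list of (features, label) instances; this
    structure and the split scheme are assumptions used for illustration.
    """
    for held_out in data:
        train = [x for s, xs in data.items() if s != held_out for x in xs]
        yield train, data[held_out]

# Toy usage: every test instance is excluded from its own training set.
data = {"p1": [((0.2,), "work"), ((0.9,), "talk")],
        "p2": [((0.3,), "work"), ((0.8,), "talk")]}
for train, test in leave_one_subject_out(data):
    assert all(x not in train for x in test)
print(precision_recall(["work", "talk", "work"], ["work", "work", "work"], "work"))
```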