Sauter, Marian; Wagner, Tobias; Hirzle, Teresa; Rukzio, Enrico; Huckauf, Anke
Where are my students looking at? Using Gaze Synchronicity to Facilitate Online Learning Journal Article
In: Journal of Vision, vol. 23, no. 9, pp. 5538–5538, 2023.
BibTeX | Tags:
@article{sauter2023my,
title = {Where are my students looking at? Using Gaze Synchronicity to Facilitate Online Learning},
author = {Marian Sauter and Tobias Wagner and Teresa Hirzle and Enrico Rukzio and Anke Huckauf},
year = {2023},
date = {2023-01-01},
journal = {Journal of Vision},
volume = {23},
number = {9},
pages = {5538--5538},
publisher = {The Association for Research in Vision and Ophthalmology},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lau, Wee Kiat; Sauter, Marian; Eberhardt, Lisa Valentina; Huckauf, Anke
How we can use the eyes to understand human interaction Journal Article
In: Journal of Vision, vol. 23, no. 9, pp. 5410–5410, 2023.
BibTeX | Tags:
@article{lau2023we,
title = {How we can use the eyes to understand human interaction},
author = {Wee Kiat Lau and Marian Sauter and Lisa Valentina Eberhardt and Anke Huckauf},
year = {2023},
date = {2023-01-01},
journal = {Journal of Vision},
volume = {23},
number = {9},
pages = {5410--5410},
publisher = {The Association for Research in Vision and Ophthalmology},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hein, Ilka; Diefenbach, Sarah; Ulrich, Daniel
Designing for Technology Transparency–Transparency Cues and User Experience Journal Article
In: 2023.
BibTeX | Tags:
@article{hein2023designing,
title = {Designing for Technology Transparency--Transparency Cues and User Experience},
author = {Ilka Hein and Sarah Diefenbach and Daniel Ulrich},
year = {2023},
date = {2023-01-01},
publisher = {Gesellschaft für Informatik eV},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Grundgeiger, Tobias; Münz, Alea; Schlosser, Paul; Happel, Oliver
Supervising Multiple Operating Rooms Using a Head-Worn Display: A Longitudinal Evaluation of the Experience of Supervising Anesthesiologists and Their Co-Workers Proceedings Article
In: Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems, Association for Computing Machinery, Hamburg, Germany, 2023, ISBN: 9781450394215.
Abstract | Links | BibTeX | Tags:
@inproceedings{10.1145/3544548.3581180,
  author    = {Tobias Grundgeiger and Alea Münz and Paul Schlosser and Oliver Happel},
  title     = {Supervising Multiple Operating Rooms Using a Head-Worn Display: A Longitudinal Evaluation of the Experience of Supervising Anesthesiologists and Their Co-Workers},
  booktitle = {Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems},
  series    = {CHI '23},
  publisher = {Association for Computing Machinery},
  address   = {Hamburg, Germany},
  year      = {2023},
  date      = {2023-01-01},
  url       = {https://doi.org/10.1145/3544548.3581180},
  doi       = {10.1145/3544548.3581180},
  isbn      = {9781450394215},
  abstract  = {Research has explored head-worn displays (HWD) in various professional contexts. However, evaluations have been limited by short-term use, a focus on the person using the HWD, and on performance variables. In a field study, we evaluated a monocular, opaque HWD for multi-patient monitoring, which supervising anesthesiologists wore for 8-10 days each. We investigated the effect of prolonged HWD use on the experience of the supervising anesthesiologists and their co-workers using interviews and repeated observations. A reflexive thematic analysis showed (1) interaction and mindset changes over time, (2) information on the HWD is more than numbers, (3) the HWD affects co-workers' collaboration with supervisors, and (4) distraction depends on the point of view. Using activity theory, we discuss the fact that HWD use develops and changes over time and that even a single-user HWD influences the collaboration with co-workers. We conclude with implications for HWD design, implementation, and evaluation.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Woźniak, Mikołaj P.; Vöge, Sarah; Krüger, Ronja; Müller, Heiko; Koelle, Marion; Boll, Susanne
Inhabiting Interconnected Spaces: How Users Shape and Appropriate Their Smart Home Ecosystems Proceedings Article
In: Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems, Association for Computing Machinery, Hamburg, Germany, 2023, ISBN: 9781450394215.
Abstract | Links | BibTeX | Tags: interactive spaces, interconnectedness, smart home, smart home ecosystem
@inproceedings{10.1145/3544548.3581497,
  author    = {Mikołaj P. Woźniak and Sarah Vöge and Ronja Krüger and Heiko Müller and Marion Koelle and Susanne Boll},
  title     = {Inhabiting Interconnected Spaces: How Users Shape and Appropriate Their Smart Home Ecosystems},
  booktitle = {Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems},
  series    = {CHI '23},
  publisher = {Association for Computing Machinery},
  address   = {Hamburg, Germany},
  year      = {2023},
  date      = {2023-01-01},
  url       = {https://doi.org/10.1145/3544548.3581497},
  doi       = {10.1145/3544548.3581497},
  isbn      = {9781450394215},
  abstract  = {Over the last decade, smart home technology (SHT) has become an integral part of modern households. As a result, smart home ecosystems blend with daily social life, appropriated and integrated into personalised domestic environments. The lived experience of inhabiting smart home ecosystems, however, is not yet understood, resulting in a mismatch between ecosystem design and inhabitants’ needs. Drawing on contextual inquiry methods, we conducted an explorative interview study (N=20) with SHT users in their homes. Our thematic analysis reveals how users shape their smart home ecosystems (SHEs), considering social relationships at home, perceived ownership of SHTs, and expected key benefits. Notably, our analysis shows that household members consciously choose ‘their’ level of SHT interconnectedness, reflecting social, spatial and functional affinities between systems. Following our findings, we formulate five implications for designing future SHTs. Our work contributes insights on the dynamics and appropriation of smart home ecosystems by their inhabitants.},
  keywords  = {interactive spaces, interconnectedness, smart home, smart home ecosystem},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Plabst, Lucas; Raikwar, Aditya; Oberdörfer, Sebastian; Ortega, Francisco Raul; Niebling, Florian
Exploring Unimodal Notification Interaction and Display Methods in Augmented Reality Proceedings Article
In: Proceedings of the 29th ACM Symposium on Virtual Reality Software and Technology, pp. 1–11, 2023.
BibTeX | Tags:
@inproceedings{plabst2023exploring,
title = {Exploring Unimodal Notification Interaction and Display Methods in Augmented Reality},
author = {Lucas Plabst and Aditya Raikwar and Sebastian Oberdörfer and Francisco Raul Ortega and Florian Niebling},
year = {2023},
date = {2023-01-01},
booktitle = {Proceedings of the 29th ACM Symposium on Virtual Reality Software and Technology},
pages = {1--11},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Terzi, Pia; Diefenbach, Sarah
The Attendant Card Set: A Research and Design Tool to Consider Perspectives of Attendants versus Users When Co-Experiencing Technology Journal Article
In: Multimodal Technologies and Interaction, vol. 7, no. 11, 2023, ISSN: 2414-4088.
Abstract | Links | BibTeX | Tags:
@article{mti7110107,
  author    = {Pia Terzi and Sarah Diefenbach},
  title     = {The Attendant Card Set: A Research and Design Tool to Consider Perspectives of Attendants versus Users When Co-Experiencing Technology},
  journal   = {Multimodal Technologies and Interaction},
  volume    = {7},
  number    = {11},
  year      = {2023},
  date      = {2023-01-01},
  url       = {https://www.mdpi.com/2414-4088/7/11/107},
  doi       = {10.3390/mti7110107},
  issn      = {2414-4088},
  abstract  = {Although many of our interactions with technology nowadays take place in public places (e.g., using a mobile phone in public transportation), research and design on Human-Computer Interaction (HCI) has paid little attention to how this kind of technology usage affects others present—and vice versa. To illustrate the perspective of the attendant, i.e., a person who is not interacting with technology themselves but co-experiencing it as listener or viewer, we developed the so-called Attendant Card Set (ACS). In two studies, an expert survey and a student workshop, we tested its practical applicability and usefulness. It showed not only that experts assess the cards positively, i.e., helpful, informative, and relevant, but also that the cards can be used with laypersons for perspective-taking, creative ideation, and discussions. Thus, analyzing and/or comparing the experience of different types with the help of the ACS provides a unique approach to the consideration of the attendant perspective in the research and development process. Limitations of the present research and opportunities for future tool applications are discussed. In addition to establishing this concept in HCI, we also see potential in the transferability to other areas and contexts such as the design of public space or non-technological products.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Ganjeh, Pantea Sanei; Chamunorwa, Michael Bosomefi; Gopal, Abhinand Parambil; Mutyala, Santosh
Vasi-Light: Using Everyday Decorative Items as Smarthome Interfaces Proceedings Article
In: Proceedings of the 22nd International Conference on Mobile and Ubiquitous Multimedia, pp. 565–567, Association for Computing Machinery, Vienna, Austria, 2023, ISBN: 9798400709210.
Abstract | Links | BibTeX | Tags: everyday objects, rich interaction, smart home, speculative design, tangible interaction
@inproceedings{10.1145/3626705.3631879,
title = {Vasi-Light: Using Everyday Decorative Items as Smarthome Interfaces},
author = {Pantea Sanei Ganjeh and Michael Bosomefi Chamunorwa and Abhinand Parambil Gopal and Santosh Mutyala},
url = {https://doi.org/10.1145/3626705.3631879},
doi = {10.1145/3626705.3631879},
isbn = {9798400709210},
year = {2023},
date = {2023-01-01},
booktitle = {Proceedings of the 22nd International Conference on Mobile and Ubiquitous Multimedia},
pages = {565--567},
publisher = {Association for Computing Machinery},
address = {Vienna, Austria},
series = {MUM '23},
abstract = {In today’s rapidly evolving technological landscape, the convergence of innovation and tradition has given rise to a fundamental question: Can we embrace the convenience of smart devices without sacrificing the aesthetics and emotional resonance of our cherished everyday objects? We help answer this question through our demo, Vasi-Light, a decorative vase enhanced with concealed sensors and conductive ink to allow end-users to control a smart bulb. Vasi-Light seeks to blend the beauty of familiar household items with the intelligence of modern technology. While a smart bulb and vase are used in the demo, the design process and technologies apply to various everyday object/smart appliance pairings and offer exciting opportunities for research into rich interactions with everyday objects and surfaces for future smart homes.},
keywords = {everyday objects, rich interaction, smart home, speculative design, tangible interaction},
pubstate = {published},
tppubtype = {inproceedings}
}
Kurzweg, Marco; Letter, Maximilian; Wolf, Katrin
Vibrollusion: Creating a Vibrotactile Illusion Induced by Audiovisual Touch Feedback Proceedings Article
In: Proceedings of the 22nd International Conference on Mobile and Ubiquitous Multimedia, pp. 185–197, Association for Computing Machinery, Vienna, Austria, 2023, ISBN: 9798400709210.
Abstract | Links | BibTeX | Tags: haptic feedback, illusion, vibration, vibrotactile, visual
@inproceedings{10.1145/3626705.3627790,
title = {Vibrollusion: Creating a Vibrotactile Illusion Induced by Audiovisual Touch Feedback},
author = {Marco Kurzweg and Maximilian Letter and Katrin Wolf},
url = {https://doi.org/10.1145/3626705.3627790},
doi = {10.1145/3626705.3627790},
isbn = {9798400709210},
year = {2023},
date = {2023-01-01},
booktitle = {Proceedings of the 22nd International Conference on Mobile and Ubiquitous Multimedia},
pages = {185--197},
publisher = {Association for Computing Machinery},
address = {Vienna, Austria},
series = {MUM '23},
abstract = {Vibrations are the dominant way to create haptic feedback for interactive systems and are most often induced by vibrotactile actuators. However, virtual content created for augmented reality usually does not support that modality, instead relying mainly on visual and auditive output. Aiming to provide haptic feedback for augmented reality in cases where real vibrations cannot be used, we explore how vibrations can be felt using vision and audio only. In a user study, a virtual 10 x 10 cm white square-shaped cuboid was influenced by animation and/or sound to induce a haptic illusion when being touched. We were able to identify a specific range where the perception of vibration was significantly stronger and more realistic compared to all other values. This was the case if the virtual object’s edges were blurred up to a range of 0.4 cm or 0.6 cm, correspondingly accompanied by sounds, where the spectrum was cut off at a frequency of 256 Hz (for 0.4 cm) or 966 Hz (for 0.6 cm). With that, we aim to enrich augmented reality systems.},
keywords = {haptic feedback, illusion, vibration, vibrotactile, visual},
pubstate = {published},
tppubtype = {inproceedings}
}
Saad, Alia; Pascher, Max; Kassem, Khaled; Heger, Roman; Liebers, Jonathan; Schneegass, Stefan; Gruenefeld, Uwe
Hand-in-Hand: Investigating Mechanical Tracking for User Identification in Cobot Interaction Proceedings Article
In: Proceedings of the 22nd International Conference on Mobile and Ubiquitous Multimedia, pp. 1–9, Association for Computing Machinery, Vienna, Austria, 2023, ISBN: 9798400709210.
Abstract | Links | BibTeX | Tags: behavioral biometrics, cobots, human-robot collaboration, human-robot interaction
@inproceedings{10.1145/3626705.3627771,
title = {Hand-in-Hand: Investigating Mechanical Tracking for User Identification in Cobot Interaction},
author = {Alia Saad and Max Pascher and Khaled Kassem and Roman Heger and Jonathan Liebers and Stefan Schneegass and Uwe Gruenefeld},
url = {https://doi.org/10.1145/3626705.3627771},
doi = {10.1145/3626705.3627771},
isbn = {9798400709210},
year = {2023},
date = {2023-01-01},
booktitle = {Proceedings of the 22nd International Conference on Mobile and Ubiquitous Multimedia},
pages = {1--9},
publisher = {Association for Computing Machinery},
address = {Vienna, Austria},
series = {MUM '23},
abstract = {Robots play a vital role in modern automation, with applications in manufacturing and healthcare. Collaborative robots integrate human and robot movements. Therefore, it is essential to ensure that interactions involve qualified, and thus identified, individuals. This study delves into a new approach: identifying individuals through robot arm movements. Different from previous methods, users guide the robot, and the robot senses the movements via joint sensors. We asked 18 participants to perform six gestures, revealing the potential use as unique behavioral traits or biometrics, achieving F1-score up to 0.87, which suggests direct robot interactions as a promising avenue for implicit and explicit user identification.},
keywords = {behavioral biometrics, cobots, human-robot collaboration, human-robot interaction},
pubstate = {published},
tppubtype = {inproceedings}
}
Kurzweg, Marco
Haptic Illusions through Augmenting Humans and Environments Proceedings Article
In: Proceedings of the 22nd International Conference on Mobile and Ubiquitous Multimedia, pp. 577–579, Association for Computing Machinery, Vienna, Austria, 2023, ISBN: 9798400709210.
Abstract | Links | BibTeX | Tags: augmented reality, haptic feedback, sensory illusions
@inproceedings{10.1145/3626705.3632613,
title = {Haptic Illusions through Augmenting Humans and Environments},
author = {Marco Kurzweg},
url = {https://doi.org/10.1145/3626705.3632613},
doi = {10.1145/3626705.3632613},
isbn = {9798400709210},
year = {2023},
date = {2023-01-01},
booktitle = {Proceedings of the 22nd International Conference on Mobile and Ubiquitous Multimedia},
pages = {577--579},
publisher = {Association for Computing Machinery},
address = {Vienna, Austria},
series = {MUM '23},
abstract = {With the evolution of hardware and technologies within the last decades, workspaces, ways of living, and consequently, the demands on interactions have changed enormously. However, haptic feedback remains a constant and critical factor in enriching the user experience. Realistic haptic feedback usually demands complex hardware integration, which is not always feasible or desired. These requirements and limitations can be reduced by producing or altering haptic experiences through sensory illusions. These illusions allow using all everyday life objects as input technology and simultaneously reduce the amount of needed hardware. There will be no need for several remote controllers or devices anymore as the objects we usually have at home or carry with us can be used as remote controllers or interacted with. Therefore, we must understand multisensory integrations and processes and explore which known illusions can be taken or modified to achieve that goal. We also aim to determine which kinds of haptic feedback can be created via illusions.},
keywords = {augmented reality, haptic feedback, sensory illusions},
pubstate = {published},
tppubtype = {inproceedings}
}
Hosseini, Masoumehsadat; Ihmels, Tjado; Chen, Ziqian; Koelle, Marion; Müller, Heiko; Boll, Susanne
Towards a Consensus Gesture Set: A Survey of Mid-Air Gestures in HCI for Maximized Agreement Across Domains Proceedings Article
In: Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems, Association for Computing Machinery, Hamburg, Germany, 2023, ISBN: 9781450394215.
Abstract | Links | BibTeX | Tags: agreement rate, application domain, Mid-air gestures, systematic literature review
@inproceedings{10.1145/3544548.3581420,
title = {Towards a Consensus Gesture Set: A Survey of Mid-Air Gestures in HCI for Maximized Agreement Across Domains},
author = {Masoumehsadat Hosseini and Tjado Ihmels and Ziqian Chen and Marion Koelle and Heiko Müller and Susanne Boll},
url = {https://doi.org/10.1145/3544548.3581420},
doi = {10.1145/3544548.3581420},
isbn = {9781450394215},
year = {2023},
date = {2023-01-01},
booktitle = {Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
address = {Hamburg, Germany},
series = {CHI '23},
abstract = {Mid-air gesture-based systems are becoming ubiquitous. Many mid-air gestures control different kinds of interactive devices, applications, and systems. They are, however, still targeted at specific devices in specific domains and are not necessarily consistent across domain boundaries. A comprehensive evaluation of the transferability of gesture vocabulary between domains is also lacking. Consequently, interaction designers cannot decide which gestures to use for which domain. In this systematic literature review, we contribute to the future research agenda in this area, based on an analysis of 172 papers. As part of our analysis, we clustered gestures according to the dimensions of an existing taxonomy to identify their common characteristics in different domains, and we investigated the extent to which existing mid-air gesture sets are consistent across different domains. We derived a consensus gesture set containing 22 gestures based on agreement rates calculation and considered their transferability across different domains.},
keywords = {agreement rate, application domain, Mid-air gestures, systematic literature review},
pubstate = {published},
tppubtype = {inproceedings}
}
Mentler, Tilo; Flegel, Nadine; Pöhler, Jonas; Van Laerhoven, Kristof
Use Cases and Design of a Virtual Cross-Domain Control Room Simulator Working paper
2022.
@workingpaper{Mentler2022c,
title = {Use Cases and Design of a Virtual Cross-Domain Control Room Simulator},
author = {Mentler, Tilo and Flegel, Nadine and Pöhler, Jonas and Van Laerhoven, Kristof},
doi = {10.18420/muc2022-mci-ws10-291},
year = {2022},
date = {2022-09-04},
urldate = {2022-09-04},
keywords = {},
pubstate = {published},
tppubtype = {workingpaper}
}
Delgado Rodriguez, Sarah; Mecke, Lukas; Alt, Florian
SenseHandle: Investigating Human-Door Interaction Behaviour for Authentication in the Physical World Conference
2022.
@conference{Rodriguez2022,
title = {SenseHandle: Investigating Human-Door Interaction Behaviour for Authentication in the Physical World},
author = {Delgado Rodriguez, Sarah and Mecke, Lukas and Alt, Florian},
url = {https://www.usenix.org/system/files/soups2022-poster25_delgado_rodriguez_abstract_final.pdf},
year = {2022},
date = {2022-08-07},
urldate = {2022-08-07},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Lau, Wee Kiat; Sauter, Marian; Huckauf, Anke
Small Pupils Lead to Lower Judgements of a Person’s Characteristics for Exaggerated, but Not for Realistic Pupils Journal Article
In: Behavioral Sciences, vol. 12, no. 8, pp. 283, 2022, ISSN: 2076-328X.
Abstract | Links | BibTeX | Tags:
@article{lau_SmallPupilsLead_2022,
  author    = {Wee Kiat Lau and Marian Sauter and Anke Huckauf},
  title     = {Small Pupils Lead to Lower Judgements of a Person's Characteristics for Exaggerated, but Not for Realistic Pupils},
  journal   = {Behavioral Sciences},
  volume    = {12},
  number    = {8},
  pages     = {283},
  publisher = {Multidisciplinary Digital Publishing Institute},
  year      = {2022},
  date      = {2022-08-01},
  doi       = {10.3390/bs12080283},
  issn      = {2076-328X},
  abstract  = {Our eyes convey information about a person. The pupils may provide information regarding our emotional states when presented along with different emotional expressions. We examined the effects of pupil size and vergence on inferring other people's characteristics in neutral expression eyes. Pupil sizes were manipulated by overlaying black disks onto the pupils of the original eye images. The disk area was then changed to create small, medium, and large pupils. Vergence was simulated by shifting the medium-sized disks nasally in one eye. Pupil sizes were exaggerated for Experiment 1 and followed values from the literature for Experiment 2. The first Purkinje image from the eye photos in Experiment 2 was kept to preserve image realism. The characteristics measured were sex, age, attractiveness, trustworthiness, intelligence, valence, and arousal. Participants completed one of two online experiments and rated eight eye pictures with differently sized pupils and with vergence eyes. Both experiments were identical except for the stimuli designs. Results from Experiment 1 revealed rating differences between pupil sizes for all characteristics except sex, age, and arousal. Specifically, eyes with extremely small pupil sizes and artificial vergence received the lowest ratings compared to medium and large pupil sizes. Results from Experiment 2 only indicated weak effects of pupil size and vergence, particularly for intelligence ratings. We conclude that the pupils can influence how characteristics of another person are perceived and may be regarded as important social signals in subconscious social interaction processes. However, the effects may be rather small for neutral expressions.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Abdelrahman, Yomna; Mathis, Florian; Knierim, Pascal; Kettler, Axel; Alt, Florian; Khamis, Mohamed
CueVR: Studying the Usability of Cue-based Authentication for Virtual Reality Proceedings Article
In: International Conference on Advanced Visual Interfaces (AVI 2022), 2022.
Abstract | Links | BibTeX | Tags:
@inproceedings{abdelrahman2022cuevr,
  author    = {Yomna Abdelrahman and Florian Mathis and Pascal Knierim and Axel Kettler and Florian Alt and Mohamed Khamis},
  title     = {CueVR: Studying the Usability of Cue-based Authentication for Virtual Reality},
  booktitle = {International Conference on Advanced Visual Interfaces (AVI 2022)},
  year      = {2022},
  date      = {2022-06-06},
  urldate   = {2022-06-06},
  doi       = {10.1145/3531073.3531092},
  abstract  = {Existing virtual reality (VR) authentication schemes are either slow or prone to observation attacks. We propose CueVR, a cue-based authentication scheme that is resilient against observation attacks by design since vital cues are randomly generated and only visible to the user experiencing the VR environment. We investigate three different input modalities through an in-depth usability study (N= 20) and show that while authentication using CueVR is slower than the less secure baseline, it is faster than existing observation resilient cue-based schemes and VR schemes (4.151 s–7.025 s to enter a 4-digit PIN). Our results also indicate that using the controllers’ trackpad significantly outperforms input using mid-air gestures. We conclude by discussing how visual cues can enhance the security of VR authentication while maintaining high usability. Furthermore, we show how existing real-world authentication schemes combined with VR’s unique characteristics can advance future VR authentication procedures.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hirzle, Teresa; Sauter, Marian; Wagner, Tobias; Hummel, Susanne; Rukzio, Enrico; Huckauf, Anke
Attention of Many Observers Visualized by Eye Movements Proceedings Article
In: 2022 Symposium on Eye Tracking Research and Applications, pp. 1–7, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9252-5.
Abstract | Links | BibTeX | Tags:
@inproceedings{hirzle_AttentionManyObservers_2022,
  author    = {Teresa Hirzle and Marian Sauter and Tobias Wagner and Susanne Hummel and Enrico Rukzio and Anke Huckauf},
  title     = {Attention of Many Observers Visualized by Eye Movements},
  booktitle = {2022 Symposium on Eye Tracking Research and Applications},
  series    = {ETRA '22},
  pages     = {1--7},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  year      = {2022},
  date      = {2022-06-01},
  doi       = {10.1145/3517031.3529235},
  isbn      = {978-1-4503-9252-5},
  abstract  = {Interacting with a group of people requires to direct the attention of the whole group, thus requires feedback about the crowd's attention. In face-to-face interactions, head and eye movements serve as indicator for crowd attention. However, when interacting online, such indicators are not available. To substitute this information, gaze visualizations were adapted for a crowd scenario. We developed, implemented, and evaluated four types of visualizations of crowd attention in an online study with 72 participants using lecture videos enriched with audience's gazes. All participants reported increased connectedness to the audience, especially for visualizations depicting the whole distribution of gaze including spatial information. Visualizations avoiding spatial overlay by depicting only the variability were regarded as less helpful, for real-time as well as for retrospective analyses of lectures. Improving our visualizations of crowd attention has the potential for a broad variety of applications, in all kinds of social interaction and communication in groups.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lau, Wee Kiat; Chalupny, Jana; Grote, Klaudia; Huckauf, Anke
How Sign Language Expertise Can Influence the Effects of Face Masks on Non-Linguistic Characteristics Journal Article
In: Cognitive Research: Principles and Implications, vol. 7, no. 1, pp. 53, 2022, ISSN: 2365-7464.
Abstract | Links | BibTeX | Tags:
@article{lau_HowSignLanguage_2022,
  author    = {Wee Kiat Lau and Jana Chalupny and Klaudia Grote and Anke Huckauf},
  title     = {How Sign Language Expertise Can Influence the Effects of Face Masks on Non-Linguistic Characteristics},
  journal   = {Cognitive Research: Principles and Implications},
  volume    = {7},
  number    = {1},
  pages     = {53},
  year      = {2022},
  date      = {2022-06-01},
  doi       = {10.1186/s41235-022-00405-6},
  issn      = {2365-7464},
  abstract  = {Face masks occlude parts of the face which hinders social communication and emotion recognition. Since sign language users are known to process facial information not only perceptually but also linguistically, examining face processing in deaf signers may reveal how linguistic aspects add to perceptual information. In general, signers could be born deaf or acquire hearing loss later in life. For this study, we focused on signers who were born deaf. Specifically, we analyzed data from a sample of 59 signers who were born deaf and investigated the impacts of face masks on non-linguistic characteristics of the face. Signers rated still-image faces with and without face masks for the following characteristics: arousal and valence of three facial expressions (happy, neutral, sad), invariant characteristics (DV:sex, age), and trait-like characteristics (attractiveness, trustworthiness, approachability). Results indicated that, when compared to masked faces, signers rated no-masked faces with stronger valence intensity across all expressions. Masked faces also appeared older, albeit a tendency to look more approachable. This experiment was a repeat of a previous study conducted on hearing participants, and a post hoc comparison was performed to assess rating differences between signers and hearing people. From this comparison, signers exhibited a larger tendency to rate facial expressions more intensely than hearing people. This suggests that deaf people perceive more intense information from facial expressions and face masks are more inhibiting for deaf people than hearing people. We speculate that deaf people found face masks more approachable due to societal norms when interacting with people wearing masks. Other factors like age and face database's legitimacy are discussed.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Sauter, Marian; Hirzle, Teresa; Wagner, Tobias; Hummel, Susanne; Rukzio, Enrico; Huckauf, Anke
Can Eye Movement Synchronicity Predict Test Performance With Unreliably-Sampled Data in an Online Learning Context? Proceedings Article
In: 2022 Symposium on Eye Tracking Research and Applications, pp. 1–5, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9252-5.
Abstract | Links | BibTeX | Tags:
@inproceedings{sauter_CanEyeMovement_2022,
  author    = {Marian Sauter and Teresa Hirzle and Tobias Wagner and Susanne Hummel and Enrico Rukzio and Anke Huckauf},
  title     = {Can Eye Movement Synchronicity Predict Test Performance With Unreliably-Sampled Data in an Online Learning Context?},
  booktitle = {2022 Symposium on Eye Tracking Research and Applications},
  series    = {ETRA '22},
  pages     = {1--5},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  year      = {2022},
  date      = {2022-06-01},
  doi       = {10.1145/3517031.3529239},
  isbn      = {978-1-4503-9252-5},
  abstract  = {Webcam-based eye-tracking promises easy and quick data collection without the need for specific or additional eye-tracking hardware. This makes it especially attractive for educational research, in particular for modern formats, such as MOOCs. However, in order to fulfill its promises, webcam-based eye tracking has to overcome several challenges, most importantly, varying spatial and temporal resolutions. Another challenge that the educational domain faces especially, is that typically individual students are of interest in contrast to average values. In this paper, we explore whether an attention measure that is based on eye movement synchronicity of a group of students can be applied with unreliably-sampled data. Doing so we aim to reproduce earlier work that showed that, on average, eye movement synchronicity can predict performance in a comprehension quiz. We were not able to reproduce the findings with unreliably-sampled data, which highlights the challenges that lie ahead of webcam-based eye tracking in practice.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Sauter, Marian; Wagner, Tobias; Huckauf, Anke
Distance between Gaze and Laser Pointer Predicts Performance in Video-Based e-Learning Independent of the Presence of an on-Screen Instructor Proceedings Article
In: 2022 Symposium on Eye Tracking Research and Applications, pp. 1–10, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9252-5.
Abstract | Links | BibTeX | Tags:
@inproceedings{sauter_DistanceGazeLaser_2022,
title = {Distance between Gaze and Laser Pointer Predicts Performance in Video-Based e-Learning Independent of the Presence of an on-Screen Instructor},
author = {Marian Sauter and Tobias Wagner and Anke Huckauf},
doi = {10.1145/3517031.3529620},
isbn = {978-1-4503-9252-5},
year = {2022},
date = {2022-06-01},
booktitle = {2022 Symposium on Eye Tracking Research and Applications},
pages = {1--10},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {ETRA '22},
abstract = {In online lectures, showing an on-screen instructor gained popularity amidst the Covid-19 pandemic. However, evidence in favor of this is mixed: they draw attention and may distract from the content. In contrast, using signaling (e.g., with a digital pointer) provides known benefits for learners. But effects of signaling were only researched in absence of an on-screen instructor. In the present explorative study, we investigated effects of an on-screen instructor on the division of learners' attention; specifically, on following a digital pointer signal with their gaze. The presence of an instructor led to an increased number of fixations in the presenter area. This did neither affect learning outcomes nor gaze patterns following the pointer. The average distance between the learner's gaze and the pointer position predicts the student's quiz performance, independent of the presence of an on-screen instructor. This can also help in creating automated immediate-feedback systems for educational videos.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}