Ganjeh, Pantea Sanei; Chamunorwa, Michael Bosomefi; Gopal, Abhinand Parambil; Mutyala, Santosh
Vasi-Light: Using Everyday Decorative Items as Smarthome Interfaces Proceedings Article
In: Proceedings of the 22nd International Conference on Mobile and Ubiquitous Multimedia, pp. 565–567, Association for Computing Machinery, Vienna, Austria, 2023, ISBN: 9798400709210.
Abstract | Links | BibTeX | Tags: everyday objects, rich interaction, smart home, speculative design, tangible interaction
% review: cleaned stray commas in address, normalized page range to "--", dropped url field that merely duplicated the DOI resolver link.
@inproceedings{10.1145/3626705.3631879,
title = {Vasi-Light: Using Everyday Decorative Items as Smarthome Interfaces},
author = {Pantea Sanei Ganjeh and Michael Bosomefi Chamunorwa and Abhinand Parambil Gopal and Santosh Mutyala},
doi = {10.1145/3626705.3631879},
isbn = {9798400709210},
year = {2023},
date = {2023-01-01},
booktitle = {Proceedings of the 22nd International Conference on Mobile and Ubiquitous Multimedia},
pages = {565--567},
publisher = {Association for Computing Machinery},
address = {Vienna, Austria},
series = {MUM '23},
abstract = {In today’s rapidly evolving technological landscape, the convergence of innovation and tradition has given rise to a fundamental question: Can we embrace the convenience of smart devices without sacrificing the aesthetics and emotional resonance of our cherished everyday objects? We help answer this question through our demo, Vasi-Light, a decorative vase enhanced with concealed sensors and conductive ink to allow end-users to control a smart bulb. Vasi-Light seeks to blend the beauty of familiar household items with the intelligence of modern technology. While a smart bulb and vase are used in the demo, the design process and technologies apply to various everyday object/smart appliance pairings and offer exciting opportunities for research into rich interactions with everyday objects and surfaces for future smart homes.},
keywords = {everyday objects, rich interaction, smart home, speculative design, tangible interaction},
pubstate = {published},
tppubtype = {inproceedings}
}
Kurzweg, Marco; Letter, Maximilian; Wolf, Katrin
Vibrollusion: Creating a Vibrotactile Illusion Induced by Audiovisual Touch Feedback Proceedings Article
In: Proceedings of the 22nd International Conference on Mobile and Ubiquitous Multimedia, pp. 185–197, Association for Computing Machinery, Vienna, Austria, 2023, ISBN: 9798400709210.
Abstract | Links | BibTeX | Tags: haptic feedback, illusion, vibration, vibrotactile, visual
% review: removed unescaped conf-loc XML residue from address, normalized page range to "--", dropped url field duplicating the DOI.
@inproceedings{10.1145/3626705.3627790,
title = {Vibrollusion: Creating a Vibrotactile Illusion Induced by Audiovisual Touch Feedback},
author = {Marco Kurzweg and Maximilian Letter and Katrin Wolf},
doi = {10.1145/3626705.3627790},
isbn = {9798400709210},
year = {2023},
date = {2023-01-01},
booktitle = {Proceedings of the 22nd International Conference on Mobile and Ubiquitous Multimedia},
pages = {185--197},
publisher = {Association for Computing Machinery},
address = {Vienna, Austria},
series = {MUM '23},
abstract = {Vibrations are the dominant way to create haptic feedback for interactive systems and are most often induced by vibrotactile actuators. However, virtual content created for augmented reality usually does not support that modality, instead relying mainly on visual and auditive output. Aiming to provide haptic feedback for augmented reality in cases where real vibrations cannot be used, we explore how vibrations can be felt using vision and audio only. In a user study, a virtual 10 x 10 cm white square-shaped cuboid was influenced by animation and/or sound to induce a haptic illusion when being touched. We were able to identify a specific range where the perception of vibration was significantly stronger and more realistic compared to all other values. This was the case if the virtual object’s edges were blurred up to a range of 0.4 cm or 0.6 cm, correspondingly accompanied by sounds, where the spectrum was cut off at a frequency of 256 Hz (for 0.4 cm) or 966 Hz (for 0.6 cm). With that, we aim to enrich augmented reality systems.},
keywords = {haptic feedback, illusion, vibration, vibrotactile, visual},
pubstate = {published},
tppubtype = {inproceedings}
}
Saad, Alia; Pascher, Max; Kassem, Khaled; Heger, Roman; Liebers, Jonathan; Schneegass, Stefan; Gruenefeld, Uwe
Hand-in-Hand: Investigating Mechanical Tracking for User Identification in Cobot Interaction Proceedings Article
In: Proceedings of the 22nd International Conference on Mobile and Ubiquitous Multimedia, pp. 1–9, Association for Computing Machinery, Vienna, Austria, 2023, ISBN: 9798400709210.
Abstract | Links | BibTeX | Tags: behavioral biometrics, cobots, human-robot collaboration, human-robot interaction
% review: removed conf-loc XML residue from address, normalized page range to "--", dropped url field duplicating the DOI.
@inproceedings{10.1145/3626705.3627771,
title = {Hand-in-Hand: Investigating Mechanical Tracking for User Identification in Cobot Interaction},
author = {Alia Saad and Max Pascher and Khaled Kassem and Roman Heger and Jonathan Liebers and Stefan Schneegass and Uwe Gruenefeld},
doi = {10.1145/3626705.3627771},
isbn = {9798400709210},
year = {2023},
date = {2023-01-01},
booktitle = {Proceedings of the 22nd International Conference on Mobile and Ubiquitous Multimedia},
pages = {1--9},
publisher = {Association for Computing Machinery},
address = {Vienna, Austria},
series = {MUM '23},
abstract = {Robots play a vital role in modern automation, with applications in manufacturing and healthcare. Collaborative robots integrate human and robot movements. Therefore, it is essential to ensure that interactions involve qualified, and thus identified, individuals. This study delves into a new approach: identifying individuals through robot arm movements. Different from previous methods, users guide the robot, and the robot senses the movements via joint sensors. We asked 18 participants to perform six gestures, revealing the potential use as unique behavioral traits or biometrics, achieving F1-score up to 0.87, which suggests direct robot interactions as a promising avenue for implicit and explicit user identification.},
keywords = {behavioral biometrics, cobots, human-robot collaboration, human-robot interaction},
pubstate = {published},
tppubtype = {inproceedings}
}
Kurzweg, Marco
Haptic Illusions through Augmenting Humans and Environments Proceedings Article
In: Proceedings of the 22nd International Conference on Mobile and Ubiquitous Multimedia, pp. 577–579, Association for Computing Machinery, Vienna, Austria, 2023, ISBN: 9798400709210.
Abstract | Links | BibTeX | Tags: augmented reality, haptic feedback, sensory illusions
% review: removed conf-loc XML residue from address, normalized page range to "--", dropped url field duplicating the DOI.
@inproceedings{10.1145/3626705.3632613,
title = {Haptic Illusions through Augmenting Humans and Environments},
author = {Marco Kurzweg},
doi = {10.1145/3626705.3632613},
isbn = {9798400709210},
year = {2023},
date = {2023-01-01},
booktitle = {Proceedings of the 22nd International Conference on Mobile and Ubiquitous Multimedia},
pages = {577--579},
publisher = {Association for Computing Machinery},
address = {Vienna, Austria},
series = {MUM '23},
abstract = {With the evolution of hardware and technologies within the last decades, workspaces, ways of living, and consequently, the demands on interactions have changed enormously. However, haptic feedback remains a constant and critical factor in enriching the user experience. Realistic haptic feedback usually demands complex hardware integration, which is not always feasible or desired. These requirements and limitations can be reduced by producing or altering haptic experiences through sensory illusions. These illusions allow using all everyday life objects as input technology and simultaneously reduce the amount of needed hardware. There will be no need for several remote controllers or devices anymore as the objects we usually have at home or carry with us can be used as remote controllers or interacted with. Therefore, we must understand multisensory integrations and processes and explore which known illusions can be taken or modified to achieve that goal. We also aim to determine which kinds of haptic feedback can be created via illusions.},
keywords = {augmented reality, haptic feedback, sensory illusions},
pubstate = {published},
tppubtype = {inproceedings}
}
Hosseini, Masoumehsadat; Ihmels, Tjado; Chen, Ziqian; Koelle, Marion; Müller, Heiko; Boll, Susanne
Towards a Consensus Gesture Set: A Survey of Mid-Air Gestures in HCI for Maximized Agreement Across Domains Proceedings Article
In: Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems, Association for Computing Machinery, Hamburg, Germany, 2023, ISBN: 9781450394215.
Abstract | Links | BibTeX | Tags: agreement rate, application domain, Mid-air gestures, systematic literature review
% review: removed conf-loc XML residue from address, dropped url field duplicating the DOI.
@inproceedings{10.1145/3544548.3581420,
title = {Towards a Consensus Gesture Set: A Survey of Mid-Air Gestures in HCI for Maximized Agreement Across Domains},
author = {Masoumehsadat Hosseini and Tjado Ihmels and Ziqian Chen and Marion Koelle and Heiko Müller and Susanne Boll},
doi = {10.1145/3544548.3581420},
isbn = {9781450394215},
year = {2023},
date = {2023-01-01},
booktitle = {Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
address = {Hamburg, Germany},
series = {CHI '23},
abstract = {Mid-air gesture-based systems are becoming ubiquitous. Many mid-air gestures control different kinds of interactive devices, applications, and systems. They are, however, still targeted at specific devices in specific domains and are not necessarily consistent across domain boundaries. A comprehensive evaluation of the transferability of gesture vocabulary between domains is also lacking. Consequently, interaction designers cannot decide which gestures to use for which domain. In this systematic literature review, we contribute to the future research agenda in this area, based on an analysis of 172 papers. As part of our analysis, we clustered gestures according to the dimensions of an existing taxonomy to identify their common characteristics in different domains, and we investigated the extent to which existing mid-air gesture sets are consistent across different domains. We derived a consensus gesture set containing 22 gestures based on agreement rates calculation and considered their transferability across different domains.},
keywords = {agreement rate, application domain, Mid-air gestures, systematic literature review},
pubstate = {published},
tppubtype = {inproceedings}
}
Mentler, Tilo; Flegel, Nadine; Pöhler, Jonas; Van Laerhoven, Kristof
Use Cases and Design of a Virtual Cross-Domain Control Room Simulator Working paper
2022.
% review: author names were separated only by line breaks, which BibTeX mis-parses; joined them with " and " in "Last, First" form.
@workingpaper{Mentler2022c,
title = {Use Cases and Design of a Virtual Cross-Domain Control Room Simulator},
author = {Mentler, Tilo and Flegel, Nadine and Pöhler, Jonas and Van Laerhoven, Kristof},
doi = {10.18420/muc2022-mci-ws10-291},
year = {2022},
date = {2022-09-04},
urldate = {2022-09-04},
keywords = {},
pubstate = {published},
tppubtype = {workingpaper}
}
Delgado Rodriguez, Sarah; Mecke, Lukas; Alt, Florian
SenseHandle: Investigating Human-Door Interaction Behaviour for Authentication in the Physical World Conference
2022.
% review: authors were comma-separated, which BibTeX mis-parses; joined with " and " using "Last, First" form so the compound surname "Delgado Rodriguez" parses correctly.
@conference{Rodriguez2022,
title = {SenseHandle: Investigating Human-Door Interaction Behaviour for Authentication in the Physical World},
author = {Delgado Rodriguez, Sarah and Mecke, Lukas and Alt, Florian},
url = {https://www.usenix.org/system/files/soups2022-poster25_delgado_rodriguez_abstract_final.pdf},
year = {2022},
date = {2022-08-07},
urldate = {2022-08-07},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Lau, Wee Kiat; Sauter, Marian; Huckauf, Anke
Small Pupils Lead to Lower Judgements of a Person’s Characteristics for Exaggerated, but Not for Realistic Pupils Journal Article
In: Behavioral Sciences, vol. 12, no. 8, pp. 283, 2022, ISSN: 2076-328X.
Abstract | Links | BibTeX | Tags:
% NOTE(review): entry verified as well-formed — bare DOI, ISSN, ISO date, proper "Last, First and ..." authors; no changes needed.
@article{lau_SmallPupilsLead_2022,
title = {Small Pupils Lead to Lower Judgements of a Person's Characteristics for Exaggerated, but Not for Realistic Pupils},
author = {Wee Kiat Lau and Marian Sauter and Anke Huckauf},
doi = {10.3390/bs12080283},
issn = {2076-328X},
year = {2022},
date = {2022-08-01},
journal = {Behavioral Sciences},
volume = {12},
number = {8},
pages = {283},
publisher = {Multidisciplinary Digital Publishing Institute},
abstract = {Our eyes convey information about a person. The pupils may provide information regarding our emotional states when presented along with different emotional expressions. We examined the effects of pupil size and vergence on inferring other people's characteristics in neutral expression eyes. Pupil sizes were manipulated by overlaying black disks onto the pupils of the original eye images. The disk area was then changed to create small, medium, and large pupils. Vergence was simulated by shifting the medium-sized disks nasally in one eye. Pupil sizes were exaggerated for Experiment 1 and followed values from the literature for Experiment 2. The first Purkinje image from the eye photos in Experiment 2 was kept to preserve image realism. The characteristics measured were sex, age, attractiveness, trustworthiness, intelligence, valence, and arousal. Participants completed one of two online experiments and rated eight eye pictures with differently sized pupils and with vergence eyes. Both experiments were identical except for the stimuli designs. Results from Experiment 1 revealed rating differences between pupil sizes for all characteristics except sex, age, and arousal. Specifically, eyes with extremely small pupil sizes and artificial vergence received the lowest ratings compared to medium and large pupil sizes. Results from Experiment 2 only indicated weak effects of pupil size and vergence, particularly for intelligence ratings. We conclude that the pupils can influence how characteristics of another person are perceived and may be regarded as important social signals in subconscious social interaction processes. However, the effects may be rather small for neutral expressions.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Abdelrahman, Yomna; Mathis, Florian; Knierim, Pascal; Kettler, Axel; Alt, Florian; Khamis, Mohamed
CueVR: Studying the Usability of Cue-based Authentication for Virtual Reality Proceedings Article
In: International Conference on Advanced Visual Interfaces (AVI 2022), 2022.
Abstract | Links | BibTeX | Tags:
% NOTE(review): entry verified as well-formed — bare DOI, proper author separators; no changes needed.
@inproceedings{abdelrahman2022cuevr,
title = {CueVR: Studying the Usability of Cue-based Authentication for Virtual Reality},
author = {Yomna Abdelrahman and Florian Mathis and Pascal Knierim and Axel Kettler and Florian Alt and Mohamed Khamis},
doi = {10.1145/3531073.3531092},
year = {2022},
date = {2022-06-06},
urldate = {2022-06-06},
booktitle = {International Conference on Advanced Visual Interfaces (AVI 2022)},
abstract = {Existing virtual reality (VR) authentication schemes are either slow or prone to observation attacks. We propose CueVR, a cue-based authentication scheme that is resilient against observation attacks by design since vital cues are randomly generated and only visible to the user experiencing the VR environment. We investigate three different input modalities through an in-depth usability study (N= 20) and show that while authentication using CueVR is slower than the less secure baseline, it is faster than existing observation resilient cue-based schemes and VR schemes (4.151 s–7.025 s to enter a 4-digit PIN). Our results also indicate that using the controllers’ trackpad significantly outperforms input using mid-air gestures. We conclude by discussing how visual cues can enhance the security of VR authentication while maintaining high usability. Furthermore, we show how existing real-world authentication schemes combined with VR’s unique characteristics can advance future VR authentication procedures.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hirzle, Teresa; Sauter, Marian; Wagner, Tobias; Hummel, Susanne; Rukzio, Enrico; Huckauf, Anke
Attention of Many Observers Visualized by Eye Movements Proceedings Article
In: 2022 Symposium on Eye Tracking Research and Applications, pp. 1–7, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9252-5.
Abstract | Links | BibTeX | Tags:
% NOTE(review): entry verified as well-formed — bare DOI, double-hyphen page range, publisher city in address; no changes needed.
@inproceedings{hirzle_AttentionManyObservers_2022,
title = {Attention of Many Observers Visualized by Eye Movements},
author = {Teresa Hirzle and Marian Sauter and Tobias Wagner and Susanne Hummel and Enrico Rukzio and Anke Huckauf},
doi = {10.1145/3517031.3529235},
isbn = {978-1-4503-9252-5},
year = {2022},
date = {2022-06-01},
booktitle = {2022 Symposium on Eye Tracking Research and Applications},
pages = {1--7},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {ETRA '22},
abstract = {Interacting with a group of people requires to direct the attention of the whole group, thus requires feedback about the crowd's attention. In face-to-face interactions, head and eye movements serve as indicator for crowd attention. However, when interacting online, such indicators are not available. To substitute this information, gaze visualizations were adapted for a crowd scenario. We developed, implemented, and evaluated four types of visualizations of crowd attention in an online study with 72 participants using lecture videos enriched with audience's gazes. All participants reported increased connectedness to the audience, especially for visualizations depicting the whole distribution of gaze including spatial information. Visualizations avoiding spatial overlay by depicting only the variability were regarded as less helpful, for real-time as well as for retrospective analyses of lectures. Improving our visualizations of crowd attention has the potential for a broad variety of applications, in all kinds of social interaction and communication in groups.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lau, Wee Kiat; Chalupny, Jana; Grote, Klaudia; Huckauf, Anke
How Sign Language Expertise Can Influence the Effects of Face Masks on Non-Linguistic Characteristics Journal Article
In: Cognitive Research: Principles and Implications, vol. 7, no. 1, pp. 53, 2022, ISSN: 2365-7464.
Abstract | Links | BibTeX | Tags:
% NOTE(review): entry verified as well-formed — bare DOI, ISSN, volume/number/pages present; no changes needed.
@article{lau_HowSignLanguage_2022,
title = {How Sign Language Expertise Can Influence the Effects of Face Masks on Non-Linguistic Characteristics},
author = {Wee Kiat Lau and Jana Chalupny and Klaudia Grote and Anke Huckauf},
doi = {10.1186/s41235-022-00405-6},
issn = {2365-7464},
year = {2022},
date = {2022-06-01},
journal = {Cognitive Research: Principles and Implications},
volume = {7},
number = {1},
pages = {53},
abstract = {Face masks occlude parts of the face which hinders social communication and emotion recognition. Since sign language users are known to process facial information not only perceptually but also linguistically, examining face processing in deaf signers may reveal how linguistic aspects add to perceptual information. In general, signers could be born deaf or acquire hearing loss later in life. For this study, we focused on signers who were born deaf. Specifically, we analyzed data from a sample of 59 signers who were born deaf and investigated the impacts of face masks on non-linguistic characteristics of the face. Signers rated still-image faces with and without face masks for the following characteristics: arousal and valence of three facial expressions (happy, neutral, sad), invariant characteristics (DV:sex, age), and trait-like characteristics (attractiveness, trustworthiness, approachability). Results indicated that, when compared to masked faces, signers rated no-masked faces with stronger valence intensity across all expressions. Masked faces also appeared older, albeit a tendency to look more approachable. This experiment was a repeat of a previous study conducted on hearing participants, and a post hoc comparison was performed to assess rating differences between signers and hearing people. From this comparison, signers exhibited a larger tendency to rate facial expressions more intensely than hearing people. This suggests that deaf people perceive more intense information from facial expressions and face masks are more inhibiting for deaf people than hearing people. We speculate that deaf people found face masks more approachable due to societal norms when interacting with people wearing masks. Other factors like age and face database's legitimacy are discussed.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Sauter, Marian; Hirzle, Teresa; Wagner, Tobias; Hummel, Susanne; Rukzio, Enrico; Huckauf, Anke
Can Eye Movement Synchronicity Predict Test Performance With Unreliably-Sampled Data in an Online Learning Context? Proceedings Article
In: 2022 Symposium on Eye Tracking Research and Applications, pp. 1–5, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9252-5.
Abstract | Links | BibTeX | Tags:
% NOTE(review): entry verified as well-formed — bare DOI, double-hyphen page range, publisher city in address; no changes needed.
@inproceedings{sauter_CanEyeMovement_2022,
title = {Can Eye Movement Synchronicity Predict Test Performance With Unreliably-Sampled Data in an Online Learning Context?},
author = {Marian Sauter and Teresa Hirzle and Tobias Wagner and Susanne Hummel and Enrico Rukzio and Anke Huckauf},
doi = {10.1145/3517031.3529239},
isbn = {978-1-4503-9252-5},
year = {2022},
date = {2022-06-01},
booktitle = {2022 Symposium on Eye Tracking Research and Applications},
pages = {1--5},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {ETRA '22},
abstract = {Webcam-based eye-tracking promises easy and quick data collection without the need for specific or additional eye-tracking hardware. This makes it especially attractive for educational research, in particular for modern formats, such as MOOCs. However, in order to fulfill its promises, webcam-based eye tracking has to overcome several challenges, most importantly, varying spatial and temporal resolutions. Another challenge that the educational domain faces especially, is that typically individual students are of interest in contrast to average values. In this paper, we explore whether an attention measure that is based on eye movement synchronicity of a group of students can be applied with unreliably-sampled data. Doing so we aim to reproduce earlier work that showed that, on average, eye movement synchronicity can predict performance in a comprehension quiz. We were not able to reproduce the findings with unreliably-sampled data, which highlights the challenges that lie ahead of webcam-based eye tracking in practice.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Sauter, Marian; Wagner, Tobias; Huckauf, Anke
Distance between Gaze and Laser Pointer Predicts Performance in Video-Based e-Learning Independent of the Presence of an on-Screen Instructor Proceedings Article
In: 2022 Symposium on Eye Tracking Research and Applications, pp. 1–10, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9252-5.
Abstract | Links | BibTeX | Tags:
% review: repaired garbled LaTeX in abstract — "learnerstextasciiacute attention" (a mangled accent macro) restored to the intended "learners' attention".
@inproceedings{sauter_DistanceGazeLaser_2022,
title = {Distance between Gaze and Laser Pointer Predicts Performance in Video-Based e-Learning Independent of the Presence of an on-Screen Instructor},
author = {Marian Sauter and Tobias Wagner and Anke Huckauf},
doi = {10.1145/3517031.3529620},
isbn = {978-1-4503-9252-5},
year = {2022},
date = {2022-06-01},
booktitle = {2022 Symposium on Eye Tracking Research and Applications},
pages = {1--10},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {ETRA '22},
abstract = {In online lectures, showing an on-screen instructor gained popularity amidst the Covid-19 pandemic. However, evidence in favor of this is mixed: they draw attention and may distract from the content. In contrast, using signaling (e.g., with a digital pointer) provides known benefits for learners. But effects of signaling were only researched in absence of an on-screen instructor. In the present explorative study, we investigated effects of an on-screen instructor on the division of learners' attention; specifically, on following a digital pointer signal with their gaze. The presence of an instructor led to an increased number of fixations in the presenter area. This did neither affect learning outcomes nor gaze patterns following the pointer. The average distance between the learner's gaze and the pointer position predicts the student's quiz performance, independent of the presence of an on-screen instructor. This can also help in creating automated immediate-feedback systems for educational videos.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Makhsadov, Akhmajon; Degraen, Donald; Zenner, André; Kosmalla, Felix; Mushkina, Kamila; Krüger, Antonio
VRySmart: a Framework for Embedding Smart Devices in Virtual Reality Proceedings Article
In: CHI Conference on Human Factors in Computing Systems Extended Abstracts, pp. 1–8, 2022.
Abstract | Links | BibTeX | Tags:
% NOTE(review): entry verified as well-formed — bare DOI, double-hyphen page range, proper author separators; no changes needed.
@inproceedings{makhsadov2022vrysmart,
title = {VRySmart: a Framework for Embedding Smart Devices in Virtual Reality},
author = {Akhmajon Makhsadov and Donald Degraen and André Zenner and Felix Kosmalla and Kamila Mushkina and Antonio Krüger},
doi = {10.1145/3491101.3519717},
year = {2022},
date = {2022-05-03},
urldate = {2022-05-03},
booktitle = {CHI Conference on Human Factors in Computing Systems Extended Abstracts},
pages = {1--8},
abstract = {As immersive virtual experiences find their way into our living room entertainment, they are becoming part of our daily technological consumption. However, state-of-the-art virtual reality (VR) remains disconnected from other digital devices in our environment, such as smartphones or tablets. As context switches between acting in the virtual environment and resolving external notifications negatively influence immersion, we look towards integrating smart devices into virtual experiences. To this aim, we present the VRySmart framework. Through either optical marker tracking or simultaneous localization and mapping (SLAM), embedded smart devices can be used as VR controllers with different levels of integration while their content is incorporated into the virtual context to support the plausibility of the illusion. To investigate user impressions, we conducted a study (N = 10) where participants used a smartphone in four different virtual scenarios. Participants positively assessed smart device usage in VR. We conclude by framing implications for future work.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Oechsner, Carl; Mayer, Sven; Butz, Andreas
Challenges and Opportunities of Cooperative Robots as Cooking Appliances Conference
2022.
% review: authors were comma-separated with a stray line break inside the braces; joined with " and " in "Last, First" form.
@conference{Oechsner2022,
title = {Challenges and Opportunities of Cooperative Robots as Cooking Appliances},
author = {Oechsner, Carl and Mayer, Sven and Butz, Andreas},
url = {https://ceur-ws.org/Vol-3154/paper14.pdf},
year = {2022},
date = {2022-04-30},
urldate = {2022-04-30},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Pourjafarian, Narjes; Koelle, Marion; Mjaku, Fjolla; Strohmeier, Paul; Steimle, Jürgen
Print-A-Sketch: A Handheld Printer for Physical Sketching of Circuits and Sensors on Everyday Surfaces Conference
2022.
% review: authors were comma-separated (BibTeX mis-parses); joined with " and " in "Last, First" form. Stored bare DOI instead of the resolver URL.
@conference{Pourjafarian2022,
title = {Print-A-Sketch: A Handheld Printer for Physical Sketching of Circuits and Sensors on Everyday Surfaces},
author = {Pourjafarian, Narjes and Koelle, Marion and Mjaku, Fjolla and Strohmeier, Paul and Steimle, Jürgen},
doi = {10.1145/3491102.3502074},
year = {2022},
date = {2022-04-29},
urldate = {2022-04-29},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Feick, Martin; Regitz, Kora Persephone; Tang, Anthony; Krüger, Antonio
Designing Visuo-Haptic Illusions with Proxies in Virtual Reality: Exploration of Grasp, Movement Trajectory and Object Mass Conference
2022.
% review: authors were comma-separated (BibTeX mis-parses); joined with " and " in "Last, First" form. Stored bare DOI instead of the resolver URL.
@conference{Feick2022,
title = {Designing Visuo-Haptic Illusions with Proxies in Virtual Reality: Exploration of Grasp, Movement Trajectory and Object Mass},
author = {Feick, Martin and Regitz, Kora Persephone and Tang, Anthony and Krüger, Antonio},
doi = {10.1145/3491102.3517671},
year = {2022},
date = {2022-04-29},
urldate = {2022-04-29},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Abdrabou, Yasmeen; Schütte, Johannes; Shams, Ahmed; Pfeuffer, Ken; Buschek, Daniel; Khamis, Mohamed; Alt, Florian
”Your Eyes Tell You Have Used This Password Before”: Identifying Password Reuse from Gaze and Keystroke Dynamics Conference
2022.
% review: authors were comma-separated (BibTeX mis-parses); joined with " and " in "Last, First" form. Stored bare DOI instead of the resolver URL; replaced typographic right-double-quotes in the title with LaTeX quote pairs.
@conference{Abdrabou2022,
title = {``Your Eyes Tell You Have Used This Password Before'': Identifying Password Reuse from Gaze and Keystroke Dynamics},
author = {Abdrabou, Yasmeen and Schütte, Johannes and Shams, Ahmed and Pfeuffer, Ken and Buschek, Daniel and Khamis, Mohamed and Alt, Florian},
doi = {10.1145/3491102.3517531},
year = {2022},
date = {2022-04-29},
urldate = {2022-04-29},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Mäkelä, Ville; Winter, Jonas; Schwab, Jasmin; Koch, Michael; Alt, Florian
Pandemic Displays: Considering Hygiene on Public Touchscreens in the Post-Pandemic Era Conference
2022.
% review: authors were comma-separated with a stray line break inside the braces; joined with " and " in "Last, First" form. Stored bare DOI instead of the resolver URL.
@conference{Mäkelä2022,
title = {Pandemic Displays: Considering Hygiene on Public Touchscreens in the Post-Pandemic Era},
author = {Mäkelä, Ville and Winter, Jonas and Schwab, Jasmin and Koch, Michael and Alt, Florian},
doi = {10.1145/3491102.3501937},
year = {2022},
date = {2022-04-29},
urldate = {2022-04-29},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Saad, Alia; Gruenefeld, Uwe; Mecke, Lukas; Koelle, Marion; Alt, Florian; Schneegass, Stefan
Mask removal isn’t always convenient in public! – The Impact of the Covid-19 Pandemic on Device Usage and User Authentication Conference
2022.
% review: authors were comma-separated with a stray line break inside the braces; joined with " and " in "Last, First" form. Stored bare DOI instead of the resolver URL.
@conference{Saad2022,
title = {Mask removal isn’t always convenient in public! – The Impact of the Covid-19 Pandemic on Device Usage and User Authentication},
author = {Saad, Alia and Gruenefeld, Uwe and Mecke, Lukas and Koelle, Marion and Alt, Florian and Schneegass, Stefan},
doi = {10.1145/3491101.3519804},
year = {2022},
date = {2022-04-28},
urldate = {2022-04-28},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}