{"id":1990,"date":"2024-09-30T21:50:35","date_gmt":"2024-09-30T16:20:35","guid":{"rendered":"https:\/\/icmi.acm.org\/2024\/?page_id=1990"},"modified":"2025-10-13T10:36:13","modified_gmt":"2025-10-13T00:36:13","slug":"sessions","status":"publish","type":"page","link":"https:\/\/icmi.acm.org\/2025\/sessions\/","title":{"rendered":"Sessions"},"content":{"rendered":"<p>[et_pb_section fb_built=&#8221;1&#8243; admin_label=&#8221;section&#8221; _builder_version=&#8221;4.14.4&#8243; background_enable_image=&#8221;off&#8221; custom_padding=&#8221;3px||0px|||&#8221; global_colors_info=&#8221;{}&#8221; theme_builder_area=&#8221;post_content&#8221;][et_pb_row admin_label=&#8221;row&#8221; _builder_version=&#8221;4.14.4&#8243; background_size=&#8221;initial&#8221; background_position=&#8221;top_left&#8221; background_repeat=&#8221;repeat&#8221; width=&#8221;90%&#8221; min_height=&#8221;1612.7px&#8221; custom_margin=&#8221;|auto|221px|auto||&#8221; custom_padding=&#8221;4px|||||&#8221; global_colors_info=&#8221;{}&#8221; theme_builder_area=&#8221;post_content&#8221;][et_pb_column type=&#8221;4_4&#8243; _builder_version=&#8221;3.25&#8243; custom_padding=&#8221;|||&#8221; global_colors_info=&#8221;{}&#8221; custom_padding__hover=&#8221;|||&#8221; theme_builder_area=&#8221;post_content&#8221;][et_pb_text _builder_version=&#8221;4.14.4&#8243; _module_preset=&#8221;default&#8221; text_font=&#8221;||||||||&#8221; text_text_color=&#8221;#4f4f4f&#8221; text_font_size=&#8221;13px&#8221; header_4_text_color=&#8221;#282562&#8243; header_4_line_height=&#8221;2em&#8221; header_5_text_color=&#8221;#6292C2&#8243; header_5_line_height=&#8221;1.6em&#8221; custom_margin=&#8221;||0px|||&#8221; custom_padding=&#8221;||0px|||&#8221; hover_enabled=&#8221;0&#8243; global_colors_info=&#8221;{}&#8221; theme_builder_area=&#8221;post_content&#8221; sticky_enabled=&#8221;0&#8243;]<\/p>\n<h4><b>Sessions<\/b><\/h4>\n<h5>Conference Program Overview (click to enlarge)<\/h5>\n<p><a 
href=\"https:\/\/icmi.acm.org\/2025\/wp-content\/uploads\/2025\/10\/Screenshot-from-2025-10-10-10-05-47.png\" target=\"_blank\" rel=\"noopener\"><img loading=\"lazy\" decoding=\"async\" src=\"https:\/\/icmi.acm.org\/2025\/wp-content\/uploads\/2025\/10\/Screenshot-from-2025-10-10-10-05-47.png\" width=\"1555\" height=\"790\" alt=\"\" class=\"wp-image-2571 alignnone size-full\" srcset=\"https:\/\/icmi.acm.org\/2025\/wp-content\/uploads\/2025\/10\/Screenshot-from-2025-10-10-10-05-47.png 1555w, https:\/\/icmi.acm.org\/2025\/wp-content\/uploads\/2025\/10\/Screenshot-from-2025-10-10-10-05-47-1280x650.png 1280w, https:\/\/icmi.acm.org\/2025\/wp-content\/uploads\/2025\/10\/Screenshot-from-2025-10-10-10-05-47-980x498.png 980w, https:\/\/icmi.acm.org\/2025\/wp-content\/uploads\/2025\/10\/Screenshot-from-2025-10-10-10-05-47-480x244.png 480w\" sizes=\"auto, (min-width: 0px) and (max-width: 480px) 480px, (min-width: 481px) and (max-width: 980px) 980px, (min-width: 981px) and (max-width: 1280px) 1280px, (min-width: 1281px) 1555px, 100vw\" \/><\/a><\/p>\n<p>Download the detailed program here: <a href=\"https:\/\/icmi.acm.org\/2025\/wp-content\/uploads\/2025\/10\/Detailed-Program-2025-3.pdf\" rel=\"attachment noopener wp-att-2574\" target=\"_blank\">Detailed Program 2025<\/a>\u200b<a href=\"https:\/\/icmi.acm.org\/2025\/wp-content\/uploads\/2025\/09\/ICMI-2025-Program-Overview.pdf\" rel=\"attachment wp-att-2466\"><\/a><\/p>\n<p>Proceedings: <a href=\"https:\/\/icmi.acm.org\/2025\/wp-content\/uploads\/2025\/10\/toc.html\" target=\"_blank\" rel=\"noopener\" title=\"ICMI 2025 Proceedings\">toc\u00a0<\/a><\/p>\n<p>[\/et_pb_text][et_pb_toggle title=&#8221;Adjunct Events Day 1 &#8211; Monday, 13 October 2025&#8243; _builder_version=&#8221;4.14.4&#8243; _module_preset=&#8221;default&#8221; global_colors_info=&#8221;{}&#8221; theme_builder_area=&#8221;post_content&#8221;]<\/p>\n<h4><span style=\"font-weight: 400;\">Adjunct Events Day 1 &#8211; Monday, 13 October 2025<\/span><span 
style=\"font-weight: 400;\"><\/span><\/h4>\n<p><span style=\"font-weight: 400;\"><\/span><\/p>\n<h4><span style=\"font-weight: 400;\">08:00-17:00 Registration<\/span><span style=\"font-weight: 400;\"><\/span><\/h4>\n<p><span style=\"font-weight: 400;\"><\/span><\/p>\n<h4><span style=\"font-weight: 400;\">09:00 &#8211; 17:30 Doctoral Consortium<\/span><\/h4>\n<table border=\"1\" style=\"width: 513px; height: 1162px;\">\n<tbody>\n<tr style=\"height: 24px;\">\n<td class=\"gmail_quote\" style=\"width: 145.422px; height: 24px;\">\n<div class=\"gmail_quote\">09:00 &#8211; 09:10<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 253.969px; height: 24px;\">\n<div class=\"gmail_quote\">Opening and Welcome<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 91.6094px; height: 24px;\"><\/td>\n<\/tr>\n<tr style=\"height: 24px;\">\n<td colspan=\"3\" class=\"gmail_quote\" style=\"width: 503px; height: 24px;\">\n<div class=\"gmail_quote\">Session 1: LLMs for multimodal interactions<\/div>\n<\/td>\n<\/tr>\n<tr style=\"height: 48px;\">\n<td class=\"gmail_quote\" style=\"width: 145.422px; height: 48px;\">\n<div class=\"gmail_quote\">09:10 &#8211; 09:35<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 253.969px; height: 48px;\">\n<div class=\"gmail_quote\">Cognitive Effort Analysis in Digital Learning Environments<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 91.6094px; height: 48px;\">\n<div class=\"gmail_quote\">Shayla Sharmin<\/div>\n<\/td>\n<\/tr>\n<tr style=\"height: 72px;\">\n<td class=\"gmail_quote\" style=\"width: 145.422px; height: 72px;\">\n<div class=\"gmail_quote\">09:35 &#8211; 10:00<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 253.969px; height: 72px;\">\n<div class=\"gmail_quote\">Enhancing Accessibility in Animation: A Context-Aware Audio Description System for Visually Impaired Children<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 91.6094px; height: 72px;\">\n<div class=\"gmail_quote\">Md Fahad Bin 
Zamal<\/div>\n<\/td>\n<\/tr>\n<tr style=\"height: 96px;\">\n<td class=\"gmail_quote\" style=\"width: 145.422px; height: 96px;\">\n<div class=\"gmail_quote\">10:00 &#8211; 10:25<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 253.969px; height: 96px;\">\n<div class=\"gmail_quote\">Designing Multimodal Nonverbal Communication Cues for Multirobot Supervision Through Event Detection and Policy Mapping<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 91.6094px; height: 96px;\">\n<div class=\"gmail_quote\">Richard Attfield<\/div>\n<\/td>\n<\/tr>\n<tr style=\"height: 24px;\">\n<td class=\"gmail_quote\" style=\"width: 145.422px; height: 24px;\">\n<div class=\"gmail_quote\">10:25 &#8211; 10:45<\/div>\n<\/td>\n<td colspan=\"2\" class=\"gmail_quote\" style=\"width: 351.578px; height: 24px;\">\n<div class=\"gmail_quote\">Short Break 1<\/div>\n<\/td>\n<\/tr>\n<tr style=\"height: 24px;\">\n<td colspan=\"3\" class=\"gmail_quote\" style=\"width: 503px; height: 24px;\">\n<div class=\"gmail_quote\">Session 2: Cognitive and emotion state modelling<\/div>\n<\/td>\n<\/tr>\n<tr style=\"height: 72px;\">\n<td class=\"gmail_quote\" style=\"width: 145.422px; height: 72px;\">\n<div class=\"gmail_quote\">10:45 &#8211; 11:05<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 253.969px; height: 72px;\">\n<div class=\"gmail_quote\">Towards Intelligent Adaption in Cognitive Assistance Systems through Physiological Computing<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 91.6094px; height: 72px;\">\n<div class=\"gmail_quote\">Jordan Schneider<\/div>\n<\/td>\n<\/tr>\n<tr style=\"height: 48px;\">\n<td class=\"gmail_quote\" style=\"width: 145.422px; height: 48px;\">\n<div class=\"gmail_quote\">11:05 &#8211; 11:30<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 253.969px; height: 48px;\">\n<div class=\"gmail_quote\">Towards Context-sensitive Emotion Recognition<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 91.6094px; height: 48px;\">\n<div 
class=\"gmail_quote\">Sayak Mukherjee<\/div>\n<\/td>\n<\/tr>\n<tr style=\"height: 72px;\">\n<td class=\"gmail_quote\" style=\"width: 145.422px; height: 72px;\">\n<div class=\"gmail_quote\">11:30 &#8211; 11:55<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 253.969px; height: 72px;\">\n<div class=\"gmail_quote\">Differentiating Frustration from Cognitive Workload in a Dual-task System<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 91.6094px; height: 72px;\">\n<div class=\"gmail_quote\">Heting Wang<\/div>\n<\/td>\n<\/tr>\n<tr style=\"height: 72px;\">\n<td class=\"gmail_quote\" style=\"width: 145.422px; height: 72px;\">\n<div class=\"gmail_quote\">11:55 &#8211; 12:20<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 253.969px; height: 72px;\">\n<div class=\"gmail_quote\">Multimodal Analysis of Caregiving Interactions in Simulation-Based Training<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 91.6094px; height: 72px;\">\n<div class=\"gmail_quote\">Behdokht Kiafar<\/div>\n<\/td>\n<\/tr>\n<tr style=\"height: 24px;\">\n<td class=\"gmail_quote\" style=\"width: 145.422px; height: 24px;\">\n<div class=\"gmail_quote\">12:30 &#8211; 13:30<\/div>\n<\/td>\n<td colspan=\"2\" class=\"gmail_quote\" style=\"width: 351.578px; height: 24px;\">\n<div class=\"gmail_quote\">Lunch Break<\/div>\n<\/td>\n<\/tr>\n<tr style=\"height: 24px;\">\n<td colspan=\"3\" class=\"gmail_quote\" style=\"width: 503px; height: 24px;\">\n<div class=\"gmail_quote\">Session 3: Social interaction &amp; behaviours<\/div>\n<\/td>\n<\/tr>\n<tr style=\"height: 72px;\">\n<td class=\"gmail_quote\" style=\"width: 145.422px; height: 72px;\">\n<div class=\"gmail_quote\">13:30 &#8211; 13:55<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 253.969px; height: 72px;\">\n<div class=\"gmail_quote\">Decoding Social Interaction to Understand Traumatic Behaviours in Social Dynamics<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 91.6094px; height: 72px;\">\n<div 
class=\"gmail_quote\">Mr Pritesh Nalinbhai Contractor<\/div>\n<\/td>\n<\/tr>\n<tr style=\"height: 48px;\">\n<td class=\"gmail_quote\" style=\"width: 145.422px; height: 48px;\">\n<div class=\"gmail_quote\">13:55 &#8211; 14:20<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 253.969px; height: 48px;\">\n<div class=\"gmail_quote\">Multimodal Conversational Events Estimation in Complex Social Scenes<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 91.6094px; height: 48px;\">\n<div class=\"gmail_quote\">Litian Li<\/div>\n<\/td>\n<\/tr>\n<tr style=\"height: 72px;\">\n<td class=\"gmail_quote\" style=\"width: 145.422px; height: 72px;\">\n<div class=\"gmail_quote\">14:20 &#8211; 14:45<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 253.969px; height: 72px;\">\n<div class=\"gmail_quote\">Modeling Social Dynamics from Multimodal Cues in Natural Conversations<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 91.6094px; height: 72px;\">\n<div class=\"gmail_quote\">Kevin Hyekang Joo<\/div>\n<\/td>\n<\/tr>\n<tr style=\"height: 24px;\">\n<td class=\"gmail_quote\" style=\"width: 145.422px; height: 24px;\">\n<div class=\"gmail_quote\">14:45 &#8211; 15:00<\/div>\n<\/td>\n<td colspan=\"2\" class=\"gmail_quote\" style=\"width: 351.578px; height: 24px;\">\n<div class=\"gmail_quote\">Short Break 2<\/div>\n<\/td>\n<\/tr>\n<tr style=\"height: 24px;\">\n<td colspan=\"3\" class=\"gmail_quote\" style=\"width: 503px; height: 24px;\">\n<div class=\"gmail_quote\">Session 4: Virtual Reality and interaction<\/div>\n<\/td>\n<\/tr>\n<tr style=\"height: 48px;\">\n<td class=\"gmail_quote\" style=\"width: 145.422px; height: 48px;\">\n<div class=\"gmail_quote\">15:00 &#8211; 15:25<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 253.969px; height: 48px;\">\n<div class=\"gmail_quote\">Designing and Evaluating Gen-AI for Cultural Resilience<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 91.6094px; height: 48px;\">\n<div class=\"gmail_quote\">Ka Hei Carrie 
Lau<\/div>\n<\/td>\n<\/tr>\n<tr style=\"height: 72px;\">\n<td class=\"gmail_quote\" style=\"width: 145.422px; height: 72px;\">\n<div class=\"gmail_quote\">15:25 &#8211; 15:50<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 253.969px; height: 72px;\">\n<div class=\"gmail_quote\">Towards Seamless Interaction: Neuroadaptive Virtual Reality Interfaces for Target Selection<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 91.6094px; height: 72px;\">\n<div class=\"gmail_quote\">Jalynn Blu Nicoly<\/div>\n<\/td>\n<\/tr>\n<tr style=\"height: 96px;\">\n<td class=\"gmail_quote\" style=\"width: 145.422px; height: 96px;\">\n<div class=\"gmail_quote\">15:50 &#8211; 16:15<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 253.969px; height: 96px;\">\n<div class=\"gmail_quote\">Developing Virtual Reality (VR) Simulations with Embedded User Analytics for Cognitive Rehabilitation in PTSD Veterans<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 91.6094px; height: 96px;\">\n<div class=\"gmail_quote\">Ravi Varman Selvakumaran<\/div>\n<\/td>\n<\/tr>\n<tr style=\"height: 24px;\">\n<td class=\"gmail_quote\" style=\"width: 145.422px; height: 24px;\">\n<div class=\"gmail_quote\">16:15 &#8211; 16:25<\/div>\n<\/td>\n<td colspan=\"2\" class=\"gmail_quote\" style=\"width: 351.578px; height: 24px;\">\n<div class=\"gmail_quote\">Short Break 3<\/div>\n<\/td>\n<\/tr>\n<tr style=\"height: 24px;\">\n<td class=\"gmail_quote\" style=\"width: 145.422px; height: 24px;\">\n<div class=\"gmail_quote\">16:25 &#8211; 17:25<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 253.969px; height: 24px;\">\n<div class=\"gmail_quote\">Panel session<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 91.6094px; height: 24px;\"><\/td>\n<\/tr>\n<tr style=\"height: 24px;\">\n<td class=\"gmail_quote\" style=\"width: 145.422px; height: 24px;\">\n<div class=\"gmail_quote\">17:25 &#8211; 17:30<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 253.969px; height: 24px;\">\n<div 
class=\"gmail_quote\">Closing<\/div>\n<\/td>\n<td class=\"gmail_quote\" style=\"width: 91.6094px; height: 24px;\"><\/td>\n<\/tr>\n<\/tbody>\n<\/table>\n<h4><span style=\"font-weight: 400;\"><\/span><\/h4>\n<h4><span style=\"font-weight: 400;\">08:00 &#8211; 16:00 <\/span><a href=\"http:\/\/aap-workshop.net\/\"><span style=\"font-weight: 400;\">AAP Workshop<\/span><\/a><\/h4>\n<table style=\"height: 2043px; width: 500px;\" border=\"1\">\n<tbody>\n<tr style=\"height: 79px;\">\n<td style=\"width: 125.384px; height: 79px;\">09:00<\/td>\n<td colspan=\"2\" style=\"width: 359.73px; height: 79px;\"><span>Opening: Zakia Hammal, Steffen Walter, and Nadia Berthouze<\/span><\/td>\n<\/tr>\n<tr style=\"height: 55px;\">\n<td style=\"width: 125.384px; height: 55px;\">09:00-10:00<\/td>\n<td style=\"width: 154.474px; height: 55px;\"><span>Invited talk<\/span><\/td>\n<td style=\"width: 199.631px; height: 55px;\">TBD<\/td>\n<\/tr>\n<tr style=\"height: 55px;\">\n<td style=\"width: 125.384px; height: 55px;\"><strong>10:00-10:30<\/strong><\/td>\n<td style=\"width: 154.474px; height: 55px;\"><strong><span>Coffee Break<\/span><\/strong><\/td>\n<td style=\"width: 199.631px; height: 55px;\"><\/td>\n<\/tr>\n<tr style=\"height: 79px;\">\n<td style=\"width: 125.384px; height: 79px;\">10:30-12:00<\/td>\n<td style=\"width: 154.474px; height: 79px;\"><span>Paper presentation<\/span><\/td>\n<td style=\"width: 199.631px; height: 79px;\"><\/td>\n<\/tr>\n<tr style=\"height: 103px;\">\n<td style=\"width: 125.384px; height: 103px;\">10:30-10:45<\/td>\n<td colspan=\"2\" style=\"width: 359.73px; height: 103px;\"><span>Canonical Time Series Features for Pain Classification by Sai Revanth Reddy Boda et al.<\/span><\/td>\n<\/tr>\n<tr style=\"height: 127px;\">\n<td style=\"width: 125.384px; height: 127px;\"><strong><span>\u00a0<\/span><\/strong>10:45-11:00<\/td>\n<td colspan=\"2\" style=\"width: 359.73px; height: 127px;\"><span>When Features Matter More than Sequence: A Case for Tabular In Context Learning in 
Pain Classification by Richard A. A. Jonker et al.<\/span><\/td>\n<\/tr>\n<tr style=\"height: 127px;\">\n<td style=\"width: 125.384px; height: 127px;\"><strong><span>\u00a0<\/span><\/strong>11:00-11:15<\/td>\n<td colspan=\"2\" style=\"width: 359.73px; height: 127px;\"><span>Feel the Pain: An Interpretable Multimodal Approach for Physiological Signal-Based Pain Detection by Tahia Tazin et al.<\/span><\/td>\n<\/tr>\n<tr style=\"height: 103px;\">\n<td style=\"width: 125.384px; height: 103px;\"><strong><span>\u00a0<\/span><\/strong>11:15-11:30<\/td>\n<td colspan=\"2\" style=\"width: 359.73px; height: 103px;\"><span>Tiny-BioMoE: a Lightweight Embedding Model for Biosignal Analysis by Stefanos Gkikas et al.<\/span><\/td>\n<\/tr>\n<tr style=\"height: 127px;\">\n<td style=\"width: 125.384px; height: 127px;\"><strong><span>\u00a0<\/span><\/strong>11:30-11:45<\/td>\n<td colspan=\"2\" style=\"width: 359.73px; height: 127px;\"><span>The AI4Pain Grand Challenge 2025: Advancing Pain Assessment with Multimodal Physiological Signals by Raul Fernandez Rojas et al.<\/span><\/td>\n<\/tr>\n<tr style=\"height: 127px;\">\n<td style=\"width: 125.384px; height: 127px;\"><strong><span>\u00a0<\/span><\/strong>11:45-12:00<\/td>\n<td colspan=\"2\" style=\"width: 359.73px; height: 127px;\"><span>PainXtract: A Multimodal System for Multiclass Pain Classification Using Physiological Signals by Anup Kumar Gupta et al.<\/span><\/td>\n<\/tr>\n<tr style=\"height: 55px;\">\n<td style=\"width: 125.384px; height: 55px;\"><strong>12:00-13:30<\/strong><\/td>\n<td colspan=\"2\" style=\"width: 359.73px; height: 55px;\"><strong><span>Lunch break<\/span><\/strong><\/td>\n<\/tr>\n<tr style=\"height: 55px;\">\n<td style=\"width: 125.384px; height: 55px;\">13:30-16:00<\/td>\n<td colspan=\"2\" style=\"width: 359.73px; height: 55px;\"><span>Paper presentation<\/span><\/td>\n<\/tr>\n<tr style=\"height: 127px;\">\n<td style=\"width: 125.384px; height: 
127px;\"><strong><span>\u00a0<\/span><\/strong>13:30-13:45<\/td>\n<td colspan=\"2\" style=\"width: 359.73px; height: 127px;\"><span>A Multimodal Deep Learning Exploration for Pain Intensity Classification by Javier Orlando Pinzon-Arenas et al.<\/span><\/td>\n<\/tr>\n<tr style=\"height: 127px;\">\n<td style=\"width: 125.384px; height: 127px;\"><strong><span>\u00a0<\/span><\/strong>13:45-14:00<\/td>\n<td colspan=\"2\" style=\"width: 359.73px; height: 127px;\"><span>Explaining Pain by Combining Deep Learning Models and Physiology-Driven Ensembles using PPG, EDA, and Respiration by Miguel Javierre et al.<\/span><\/td>\n<\/tr>\n<tr style=\"height: 103px;\">\n<td style=\"width: 125.384px; height: 103px;\">\n<p><strong><span>\u00a0<\/span><\/strong><\/p>\n<p>14:00-14:15<\/p>\n<\/td>\n<td colspan=\"2\" style=\"width: 359.73px; height: 103px;\"><span>EnsembleIQ-Pain: Intelligent Cluster Calibration for Personalized Pain Detection by Rupal Agarwal et al.<\/span><\/td>\n<\/tr>\n<tr style=\"height: 103px;\">\n<td style=\"width: 125.384px; height: 103px;\"><strong><span>\u00a0<\/span><\/strong>14:15-14:30<\/td>\n<td colspan=\"2\" style=\"width: 359.73px; height: 103px;\"><span>Painthenticate: Feature Engineering on Multimodal Physiological Signals by Sajeeb Datta et al.<\/span><\/td>\n<\/tr>\n<tr style=\"height: 55px;\">\n<td style=\"width: 125.384px; height: 55px;\"><strong>14:30-15:00<\/strong><\/td>\n<td style=\"width: 154.474px; height: 55px;\"><strong><span>Coffee Break<\/span><\/strong><\/td>\n<td style=\"width: 199.631px; height: 55px;\"><\/td>\n<\/tr>\n<tr style=\"height: 127px;\">\n<td style=\"width: 125.384px; height: 127px;\"><strong><span>\u00a0<\/span><\/strong>15:00-15:15<\/td>\n<td colspan=\"2\" style=\"width: 359.73px; height: 127px;\"><span>Investigation into Unimodal Versus Multimodal Pain Recognition from Physiological Signals by Anis Elebiary et al.<\/span><\/td>\n<\/tr>\n<tr style=\"height: 127px;\">\n<td style=\"width: 125.384px; height: 
127px;\"><strong><span>\u00a0<\/span><\/strong>15:15-15:30<\/td>\n<td colspan=\"2\" style=\"width: 359.73px; height: 127px;\"><span>Efficient Pain Recognition via Respiration Signals: A Single Cross-Attention Transformer Multi-Window Fusion Pipeline by Stefanos Gkikas et al.<\/span><\/td>\n<\/tr>\n<tr style=\"height: 127px;\">\n<td style=\"width: 125.384px; height: 127px;\"><strong><span>\u00a0<\/span><\/strong>15:30-15:45<\/td>\n<td colspan=\"2\" style=\"width: 359.73px; height: 127px;\"><span>Multi-Representation Diagrams for Pain Recognition: Integrating Various Electrodermal Activity Signals into a Single Image by Stefanos Gkikas et al.<\/span><\/td>\n<\/tr>\n<tr style=\"height: 55px;\">\n<td style=\"width: 125.384px; height: 55px;\"><strong>15:45-16:00<\/strong><\/td>\n<td colspan=\"2\" style=\"width: 359.73px; height: 55px;\"><strong>Closing<\/strong><\/td>\n<\/tr>\n<\/tbody>\n<\/table>\n<p>&nbsp;<\/p>\n<h4><span style=\"font-weight: 400;\">9:00 &#8211; 12:30 Deepfake Tutorial<\/span><\/h4>\n<p><i><span style=\"font-weight: 400;\">Abhinav Dhall, Zhixi Cai, Shreya Ghosh<\/span><\/i><\/p>\n<p><i><span style=\"font-weight: 400;\"><\/span><\/i><\/p>\n<p><i><span style=\"font-weight: 400;\"><\/span><\/i><\/p>\n<h4><span style=\"font-weight: 400;\">13:30 &#8211; 18:00 <\/span><a href=\"https:\/\/sites.google.com\/view\/ccmi2025\/home\"><span style=\"font-weight: 400;\">CCMI Workshop<\/span><\/a><\/h4>\n<table border=\"1\">\n<tbody>\n<tr>\n<td><span style=\"font-weight: 400;\">13:15<\/span><\/td>\n<td colspan=\"2\"><span style=\"font-weight: 400;\">Opening and Welcome<\/span><\/td>\n<\/tr>\n<tr>\n<td colspan=\"3\">\n<p><span style=\"font-weight: 400;\">Session 1<\/span><\/p>\n<p><span style=\"font-weight: 400;\">Session Chair: Koji Inoue<\/span><\/p>\n<\/td>\n<\/tr>\n<tr>\n<td><span style=\"font-weight: 400;\">13:20<\/span><\/td>\n<td><span style=\"font-weight: 400;\">Benchmarking Visual Generative Models through Cultural Lens: A Case Study with Singapore-Centric 
Multi-Cultural Context <\/span><i><span style=\"font-weight: 400;\">(Long, 20 min)<\/span><\/i><\/td>\n<td><i><span style=\"font-weight: 400;\">Ali Koksal, Loke Mei Hwan, Hui Li Tan, Nancy F. Chen\u00a0<\/span><\/i><\/td>\n<\/tr>\n<tr>\n<td><span style=\"font-weight: 400;\">13:40<\/span><\/td>\n<td>\n<p><span style=\"font-weight: 400;\">Culture-Aware Multimodal Personality Prediction using Audio, Pose, and Cultural Embeddings<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">(Short, 15 min)<\/span><\/i><\/p>\n<\/td>\n<td><i><span style=\"font-weight: 400;\">Islam J A M Samiul, Khalid ZAMAN, Marius Funk, Masashi Unoki, Yukiko Nakano, Shogo Okada\u00a0<\/span><\/i><\/td>\n<\/tr>\n<tr>\n<td><span style=\"font-weight: 400;\">13:55<\/span><\/td>\n<td><span style=\"font-weight: 400;\">Invited Talk 1: Multimodal Deepfake Detection Across Cultures and Languages<\/span><\/td>\n<td><i><span style=\"font-weight: 400;\">Abhinav Dhall<\/span><\/i><\/td>\n<\/tr>\n<tr>\n<td><span style=\"font-weight: 400;\">14:30<\/span><\/td>\n<td colspan=\"2\"><span style=\"font-weight: 400;\">Break<\/span><\/td>\n<\/tr>\n<tr>\n<td colspan=\"3\"><span style=\"font-weight: 400;\">Session 2 Session Chair: Shogo Okada<\/span><\/td>\n<\/tr>\n<tr>\n<td><span style=\"font-weight: 400;\">15:00<\/span><\/td>\n<td><span style=\"font-weight: 400;\">Multimodal grounding in HRI using two types of nods in Japanese and Finnish <\/span><i><span style=\"font-weight: 400;\">(Long, 20 min)<\/span><\/i><\/td>\n<td><i><span style=\"font-weight: 400;\">Taiga Mori, Kristiina Jokinen, Leo Huovinen, Biju Thankachan<\/span><\/i><\/td>\n<\/tr>\n<tr>\n<td><span style=\"font-weight: 400;\">15:20<\/span><\/td>\n<td><span style=\"font-weight: 400;\">Analyzing Multimodal Multifunctional Interactions in Multiparty Conversations via Functional Spectrum Factorization <\/span><i><span style=\"font-weight: 400;\">(Long, 20 min)<\/span><\/i><\/td>\n<td><i><span style=\"font-weight: 400;\">Momoka Tajima, Issa Tamura, Kazuhiro 
Otsuka<\/span><\/i><\/td>\n<\/tr>\n<tr>\n<td><span style=\"font-weight: 400;\">15:40<\/span><\/td>\n<td><span style=\"font-weight: 400;\">MultiGen: Child-Friendly Multilingual Speech Generator with LLMs <\/span><i><span style=\"font-weight: 400;\">(Short 15 min)<\/span><\/i><\/td>\n<td><i><span style=\"font-weight: 400;\">Xiaoxue Gao, Huayun Zhang, Nancy F. Chen<\/span><\/i><\/td>\n<\/tr>\n<tr>\n<td><span style=\"font-weight: 400;\">15:55<\/span><\/td>\n<td><span style=\"font-weight: 400;\">Contextualized Visual Storytelling for Conversational Chatbot in Education <\/span><i><span style=\"font-weight: 400;\">(Short, 15 min)<\/span><\/i><\/td>\n<td><i><span style=\"font-weight: 400;\">Hui Li Tan, Gu Ying, Liyuan Li, Mei Chee Leong, Nancy F. Chen<\/span><\/i><\/td>\n<\/tr>\n<tr>\n<td><span style=\"font-weight: 400;\">16:10<\/span><\/td>\n<td colspan=\"2\"><span style=\"font-weight: 400;\">Break<\/span><\/td>\n<\/tr>\n<tr>\n<td><span style=\"font-weight: 400;\">16:20<\/span><\/td>\n<td><span style=\"font-weight: 400;\">Invited talk: Cross-cultural studies on human-human and human-agent interaction<\/span><\/td>\n<td><i><span style=\"font-weight: 400;\">Yukiko Nakano<\/span><\/i><\/td>\n<\/tr>\n<tr>\n<td><span style=\"font-weight: 400;\">16:55<\/span><\/td>\n<td><span style=\"font-weight: 400;\">Panel discussion<\/span><\/td>\n<td><i><span style=\"font-weight: 400;\">Yukiko Nakano, Abhinav Dhall, Liu Zhengyuan, Shogo Okada<\/span><\/i><\/td>\n<\/tr>\n<tr>\n<td><span style=\"font-weight: 400;\">17:25<\/span><\/td>\n<td colspan=\"2\"><span style=\"font-weight: 400;\">Closing<\/span><\/td>\n<\/tr>\n<\/tbody>\n<\/table>\n<h4><\/h4>\n<p>[\/et_pb_toggle][et_pb_toggle title=&#8221;Main conference Day 1 &#8211; Tuesday, 14 October 2025&#8243; _builder_version=&#8221;4.14.4&#8243; _module_preset=&#8221;default&#8221; global_colors_info=&#8221;{}&#8221; theme_builder_area=&#8221;post_content&#8221;]<\/p>\n<h4><b>Main conference<\/b><\/h4>\n<h4><span style=\"font-weight: 
400;\">Day 1 &#8211; Tuesday, 14 October 2025<\/span><span style=\"font-weight: 400;\"><\/span><\/h4>\n<p><span style=\"font-weight: 400;\"><\/span><\/p>\n<h4><span style=\"font-weight: 400;\">08:30 &#8211; 09:00 Welcome<\/span><\/h4>\n<p><span style=\"font-weight: 400;\">ICMI 2025 General Chairs and Program Chairs<\/span><\/p>\n<h4><span style=\"font-weight: 400;\">09:00 &#8211; 10:00 Keynote<\/span><\/h4>\n<p><span style=\"font-weight: 400;\">Session Chair: Yukiko Nakano<\/span><\/p>\n<p><span style=\"font-weight: 400;\">Multimodal Task Analysis in Wearable Contexts<\/span><\/p>\n<p>Julien Epps<\/p>\n<h4><span style=\"font-weight: 400;\">10:00 &#8211; 10:30 Break<\/span><\/h4>\n<p><span style=\"font-weight: 400;\"><\/span><\/p>\n<h4><span style=\"font-weight: 400;\">10:30 &#8211; 12:00 Oral Session 1: Affect &amp; Behaviour Understanding<\/span><\/h4>\n<p><span style=\"font-weight: 400;\">Session Chair: Shogo Okada<\/span><\/p>\n<p><span style=\"font-weight: 400;\">10:30 &#8211; 10:48<\/span><\/p>\n<p><span style=\"font-weight: 400;\">Multimodal Behavioral Characterization of Dyadic Alliance in Support Groups<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Kevin Hyekang Joo, Zongjian Li, Yunwen Wang, Yuanfeixue Nan, Mina Kian, Shriya Upadhyay, Maja Mataric, Lynn Carol Miller, Mohammad Soleymani<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">10:48 &#8211; 11:06<\/span><\/p>\n<p><span style=\"font-weight: 400;\">What makes you say yes? 
An investigation of mental state and personality in persuasion during a dyadic conversation<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Siyuan Chen<\/span><\/i><span style=\"font-weight: 400;\">\u00a0<\/span><\/p>\n<p><span style=\"font-weight: 400;\">11:06 &#8211; 11:24 (Best paper award nominee)\u00a0<\/span><\/p>\n<p><span style=\"font-weight: 400;\">Decoding Affective States without Labels: Bimodal Image-brain Supervision<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Vadym Gryshchuk, Maria Maistro, Christina Lioma, Tuukka Ruotsalo<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">11:24 &#8211; 11:42<\/span><\/p>\n<p><span style=\"font-weight: 400;\">Can Adaptive Interviewer Robots Based on Social Signals Make a Better Impression on Interviewees and Encourage Self-Disclosure?<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Fuminori Nagasawa, Shogo Okada<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">11:42 &#8211; 12:00<\/span><\/p>\n<p><span style=\"font-weight: 400;\">Foundation Feature-Guided Hierarchical Fusion of EEG-Physiological for Emotion Estimation<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Haifeng Zhang, Von Ralph Dane Marquez Herbuela, Yukie Nagai<\/span><\/i><\/p>\n<h4><span style=\"font-weight: 400;\">12:00-13:30 Lunch<\/span><\/h4>\n<h4><span style=\"font-weight: 400;\"><\/span><\/h4>\n<h4><span style=\"font-weight: 400;\">13:30-14:30 Keynote (<span>Sustained Accomplishment Award<\/span> Talk)<\/span><\/h4>\n<p><span style=\"font-weight: 400;\">Session Chair: Ramanathan Subramanian<\/span><\/p>\n<p><span style=\"font-weight: 400;\">From audio, through haptics to augmented reality: travels in multimodal interaction <\/span><b><i>Stephen Brewster<\/i><\/b><span style=\"font-weight: 400;\">\u00a0<\/span><\/p>\n<h4><span style=\"font-weight: 400;\">14:30-15:30 Blue Sky Papers<\/span><\/h4>\n<p><span style=\"font-weight: 400;\">Session Chair: Alessandro Vinciarelli<\/span><\/p>\n<p><span 
style=\"font-weight: 400;\">14:30 &#8211; 15:00<\/span><\/p>\n<p><span style=\"font-weight: 400;\">Human Authenticity and Flourishing in an AI-Driven World: Edmund&#8217;s Journey and the Call for Mindfulness<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Sebastian Zepf, Mark Colley<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">15:00 &#8211; 15:30<\/span><\/p>\n<p><span style=\"font-weight: 400;\">MUSE: A Multimodal, Generative, and Symbolic Framework for Human Experience Modeling<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Mohammad Rashedul Hasan<\/span><\/i><\/p>\n<h4><span style=\"font-weight: 400;\">15:30-16:00 Break<\/span><\/h4>\n<h4><span style=\"font-weight: 400;\">16:00-18:00\u00a0 Poster Session 1 (including DC posters)<\/span><\/h4>\n<p><span style=\"font-weight: 400;\">Session Chair: Madhawa Perera<\/span><\/p>\n<p><span style=\"font-weight: 400;\">Emotion and Affect<\/span><\/p>\n<p><span style=\"font-weight: 400;\">Privileged Contrastive Pretraining for Multimodal Affect Modelling<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Kosmas Pinitas, Konstantinos Makantasis, Georgios Yannakakis<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">A Multifaceted Multi-Agent Framework for Zero-Shot Emotion Analysis and Recognition of Symbolic Music<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Jiahao Zhao, Yunjia Li, Kazuyoshi Yoshii<\/span><\/i><\/p>\n<p><span>Disentangling Cross-Modal Interactions for Enhanced Multimodal Emotion Recognition in Conversation<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Jian Ding, Bo Zhang, Dailin Li, Jian Wang, Hongfei Lin<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Write! Draw! 
Move!: Investigating the Effects of Positive and Negative Self-Reflection on Emotion through Self-Expression Modalities<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Golnaz Moharrer, Kavya Rajendran, Rowena Pinto, Andrea Kleinsmith<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Gesture and Behavior Generation<\/span><\/p>\n<p><span style=\"font-weight: 400;\">Motion Diffusion Autoencoders: Enabling Attribute Manipulation in Human Motion Demonstrated on Karate Techniques<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Anthony Richardson, Felix Putze<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">DifussionCleft: Facial Anomaly Synthesis Guided by Text<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Karen Rosero, Lucas M Harrison, Alex A Kane, Rami R. Hallac, Carlos Busso<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Gesture and Behavior Recognition<\/span><\/p>\n<p><span style=\"font-weight: 400;\">WatchHAR: Real-time On-device Human Activity Recognition System for Smartwatches<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Taeyoung Yeon, Vasco Xu, Henry Hoffman, Karan Ahuja<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Disentangling Perceptual Ambiguity in Multifunctional Nonverbal Behaviors in Conversations via Tensor Spectrum Decomposition<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Issa Tamura, Momoka Tajima, Shiro Kumano, Kazuhiro Otsuka<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Predicting End-of-turn and Backchannel Based on Multimodal Voice Activity Prediction Model<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Ryo Ishii, Shin-ichiro Eitoku, Ryota Yokoyama, Junichi Sawase<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">PI-STGCNN: A Spatio-Temporal Graph Convolutional Neural Network with Partial Interaction Optimization for Human Trajectory Prediction<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Zhuangzhuang Chen<\/span><\/i><\/p>\n<p><span 
style=\"font-weight: 400;\">Time-channel Adaptive Fusion and Hierarchical Attention Mechanism for Dynamic Hand Gesture Recognition<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Longjie Huang, Jianhai Liu, Yong Gu, Kai Jiang, Haibo Li<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Leveraging Pre-Trained Transformers and Facial Embeddings for Multimodal Hirability Prediction in Job Interviews<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Eric Fithian, Theodora Chaspari<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Health &amp; Wellbeing<\/span><\/p>\n<p><span style=\"font-weight: 400;\">Punctual or Continuous? Analyzing Depression Traces in Language and Paralanguage with Multiple Instance Learning<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Rawan Alsarrani, Anna Esposito, Alessandro Vinciarelli<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">VitaStress: A Multimodal Dataset for Stress Detection<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Paul Schreiber, Simon Burbach, Beyza Cinar, Lennart Mackert, Maria Maleshkova<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">BiFuseNet: A Multimodal Network for Estimating Blood Alcohol Concentration via Bidirectional Hierarchical Fusion<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Abdullah Tariq, Martin Masek, Zulqarnain Gilani, Arooba Maqsood<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Investigating differences in Paramedic trainees\u2019 multimodal interaction during low and high physiological synchrony<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Vasundhara Joshi, Surely Akiri, Sanaz Taherzadeh, Gary B Williams, Andrea Kleinsmith<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">A multimodal Framework for exploring behavioural cues for automatic Stress Detection<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Rebecca Valerio, Marwa Mahmoud<\/span><\/i><\/p>\n<h4><span style=\"font-weight: 400;\">16:30-18:00\u00a0 
Doctoral Consortium Poster Session<\/span><\/h4>\n<p><span style=\"font-weight: 400;\">Modeling Social Dynamics from Multimodal Cues in Natural Conversations<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Kevin Hyekang Joo<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Developing Virtual Reality (VR) Simulations with Embedded User Analytics for Cognitive Rehabilitation in PTSD Veterans<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Ravi Varman Selvakumaran<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Enhancing Accessibility in Animation: A Context-Aware Audio Description System for Visually Impaired Children<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Md Fahad Bin Zamal<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Designing Multimodal Nonverbal Communication Cues for Multirobot Supervision Through Event Detection and Policy Mapping<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Richard John Attfield<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Towards Context-sensitive Emotion Recognition<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Sayak Mukherjee<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Towards Intelligent Adaption in Cognitive Assistance Systems through Physiological Computing<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Jordan Schneider<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Multimodal Conversational Events Estimation in Complex Social Scenes<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Litian Li<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Decoding Social Interaction to Understand Traumatic Behaviors in Social Dynamics<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Pritesh Nalinbhai Contractor<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Towards Seamless Interaction: Neuroadaptive Virtual Reality Interfaces for Target Selection<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Jalynn Blu Nicoly, 
Shayla Sharmin\u00a0<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Multimodal Analysis of Caregiving Interactions in Simulation-Based Training<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Behdokht Kiafar, Ka Hei Carrie Lau<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Differentiating Frustration from Cognitive Workload in a Dual-task System<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Heting Wang<\/span><\/i><\/p>\n<h4><span style=\"font-weight: 400;\">18:00 -19:00\u00a0 Welcome Reception<\/span><\/h4>\n<p>[\/et_pb_toggle][et_pb_toggle title=&#8221;Main conference Day 2 &#8211; Wednesday, 15 October 2025&#8243; _builder_version=&#8221;4.14.4&#8243; _module_preset=&#8221;default&#8221; global_colors_info=&#8221;{}&#8221; theme_builder_area=&#8221;post_content&#8221;]<\/p>\n<h4><span style=\"font-weight: 400;\">09:00-10:00 Keynote\u00a0<\/span><\/h4>\n<p><span style=\"font-weight: 400;\">Session Chair: Gelareh Mohammadi<\/span><\/p>\n<h4><span style=\"font-weight: 400;\">Multimodal AI for Transforming Industries and Empowering Social Interaction<\/span><\/h4>\n<p><i><span style=\"font-weight: 400;\">Fang Chen\u00a0<\/span><\/i><\/p>\n<h4><span style=\"font-weight: 400;\">10:00-10:30 Break<\/span><\/h4>\n<h4><span style=\"font-weight: 400;\">10:30-12:00 Oral Session 2: Health &amp; Wellbeing<\/span><\/h4>\n<p><span style=\"font-weight: 400;\">Session Chair: Maria Maleshkova<\/span><\/p>\n<p><span style=\"font-weight: 400;\">10:30 &#8211; 10:48<\/span><\/p>\n<p><span style=\"font-weight: 400;\">Evaluating the Efficacy of Pulse Transit Time between Palm and Forehead in Blood Pressure Estimation<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Chu Chu Qiu, Jing Wei Chin, Tsz Tai Chan, Kwan Long Wong, Richard Hau Yue So<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">10:48 &#8211; 11:06<\/span><\/p>\n<p><span style=\"font-weight: 400;\">From Lab to Wrist: Bridging Metabolic Monitoring and Consumer Wearables for Heart Rate 
and Oxygen Consumption Modeling<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Barak Gahtan, Sanketh Vedula, Gil Samuelly Leichtag, Einat Kodesh, Alex Bronstein<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">11:06 &#8211; 11:24 (Best paper award nominee)<\/span><\/p>\n<p><span style=\"font-weight: 400;\">SpikEy: Preventing Drink Spiking using Wearables<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Zhigang Yin, Ngoc Thi Nguyen, Agustin Zuniga, Mohan Liyanage, Petteri Nurmi, Huber Flores<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">11:24 &#8211; 11:42<\/span><\/p>\n<p><span style=\"font-weight: 400;\">From Speech and PPG to EDA: Stress Detection Based on Cross-Modal Fine-Tuning of Foundation Models<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Alia Ahmed Al Dossary, Mathieu Chollet, Alessandro Vinciarelli<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">11:42 &#8211; 12:00\u00a0<\/span><\/p>\n<p><span style=\"font-weight: 400;\">Psychological and Neurophysiological Indicators of Stress and Relaxation in Immersive Virtual Reality Environments: A Multimodal Approach<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Ankit Arvind Prasad, Shashank Laxmikant Bidwai, Ashutosh Jitendra Zawar, Diven Ashwani Ahuja, Apostolos Kalatzis, Vishnunarayan Girishan Prabhu<\/span><\/i><\/p>\n<h4><span style=\"font-weight: 400;\">12:00-13:30 Lunch<\/span><\/h4>\n<h4><span style=\"font-weight: 400;\">13:30-15:00 Oral Session 3: Interaction Design<\/span><\/h4>\n<p><span style=\"font-weight: 400;\">Session Chair: Silvia Rossi<\/span><\/p>\n<p><span style=\"font-weight: 400;\">13:30 &#8211; 13:48 (Best paper award nominee)\u00a0<\/span><\/p>\n<p><span style=\"font-weight: 400;\">Exploring the effects of force feedback on VR Keyboards with varying visual designs<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Zhenxing Li, Jari Kangas, Ahmed Farooq, Roope Raisamo<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">13:48 &#8211; 
14:06 (Best paper award nominee)\u00a0<\/span><\/p>\n<p><span style=\"font-weight: 400;\">Functional Near-Infrared Spectroscopy (fNIRS) Analysis of Interaction Techniques in Touchscreen-Based Educational Gaming<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Shayla Sharmin, Elham Bakhshipour, Mohammad Fahim Abrar, Behdokht Kiafar, Pinar Kullu, Nancy Getchell, Roghayeh Leila Barmaki<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">14:06 &#8211; 14:24<\/span><\/p>\n<p><span style=\"font-weight: 400;\">AirSpartOne: One-Handed Distal Pointing for Large Displays on Mobile Devices and in Midair<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Martin Birlouez, Yosra Rekik, Laurent Grisoni<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">14:24 &#8211; 14:42<\/span><\/p>\n<p><span style=\"font-weight: 400;\">StoryDiffusion: How to Support UX Storyboarding With Generative-AI<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Zhaohui Liang, Xiaoyu Zhang, Kevin Ma, Zhao Liu, Xipei Ren, Kosa Goucher-Lambert, Can Liu<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">14:42 &#8211; 15:00<\/span><\/p>\n<p><span style=\"font-weight: 400;\">A Scenario-Based Design Pack for Exploring Multimodal Human\u2013GenAI Relations<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Josh Andres, Chris Danta, Andrea Bianchi, Sahar Farzanfar, Gloria Milena Fernandez-Nieto, Alexa Becker, Tara Capel, Frances Liddell, Shelby Hagemann, Ned Cooper, Sungyeon Hong, Li Lin, Eduardo Benitez Sandoval, Anna Brynskov, Hubert Dariusz Zaj\u0105c, Zhuying Li, Tianyi Zhang, Arngeir Berge<\/span><\/i><\/p>\n<h4><span style=\"font-weight: 400;\">15:00-15:30 Break<\/span><\/h4>\n<h4><span style=\"font-weight: 400;\">15:30-16:30 <a href=\"https:\/\/icmi.acm.org\/2025\/grand-challenges\/\">Grand Challenge Session<\/a><\/span><\/h4>\n<ul>\n<li>3:30pm: 5 min \u2013 Welcome and intro<\/li>\n<li>3:35pm: 25 min \u2013 Keynote speaker: Olympia Yarger<\/li>\n<li>4:00pm: 15 min \u2013 
Matthew Vestal: Introduction to Grand Challenge and summary paper presentation<\/li>\n<li>4:15pm: 10 min \u2013 Elane Peng: mIoG: An Evaluation Metric for Multispectral Instance Segmentation in Robotics<\/li>\n<li>4:25pm: 5 min \u2013 Concluding remarks<\/li>\n<\/ul>\n<h4><span style=\"font-weight: 400;\">16:00-18:00 Poster Session 2 and Demos<\/span><\/h4>\n<p><span style=\"font-weight: 400;\">Session Chair: Roland Goecke<\/span><\/p>\n<p><span style=\"text-decoration: underline;\"><span style=\"font-weight: 400;\">Topic: Interaction Design<\/span><\/span><\/p>\n<p><span style=\"font-weight: 400;\">1. A Systematic Review of Fusion Methods for the User-Centered Design of Multimodal Interfaces<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Ronja Heinrich, Chris Zimmerer, Martin Fischbach, Marc Erich Latoschik<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">2. <\/span><span style=\"font-weight: 400;\">Exploring Sound-to-Sound Personalization for Accessible Digital Media<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Dhruv Jain, Jason Miller<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">3. When Words Fall Short: The Case for Conversational Interfaces that Don\u2019t Listen<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">James Simpson, Hamish Stening, Gaurav Patil, Patrick Nalepka, Mark Dras, Rachel W. Kallen, Simon Hosking, Michael J Richardson, Deborah Richards<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">4. Pinching Visuo-haptic Display: Investigating Cross-Modal Effects of Visual Textures on Electrostatic Cloth Tactile Sensations<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Takekazu Kitagishi, Chun Wei Ooi, Yuichi Hiroi, Jun Rekimoto<\/span><\/i><\/p>\n<p><span style=\"text-decoration: underline;\"><span style=\"font-weight: 400;\">Topic: LLMs for interactions<\/span><\/span><\/p>\n<p><span style=\"font-weight: 400;\">1. 
Using a Secondary Channel to Display the Internal Empathic Resonance of LLM-Driven Agents for Mental Health Support<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Matthias Schmidmaier, Jonathan Rupp, Sven Mayer<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">2. Few-shot Fine-grained Image Classification with Interpretable Prompt Learning through Distribution Alignment<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Dongliang Guo, Handong Zhao, Ryan Rossi, Sungchul Kim, Nedim Lipka, Tong Yu, Sheng Li<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">3. Multimodal Synthetic Data Finetuning and Model Collapse: Insights from VLMs and Diffusion Models<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Zizhao Hu, Mohammad Rostami, Jesse Thomason<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">4. Multimodal Behavioral Patterns Analysis with Eye-Tracking and LLM-Based Reasoning<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Dongyang Guo, Yasmeen Abdrabou, Enkeleda Thaqi, Enkelejda Kasneci<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">5. Talking-to-Build: How LLM-Assisted Interface Shapes Player Performance and Experience in Minecraft<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Lei Wang, Xin Sun, Y Li, Jie Li, Massimo Poesio, Julian Frommel, Koen Hindriks, Jiahuan Pei<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">6. Large Language Models For Multimodal User Interaction in Virtual Environments<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Ahmed Sayed, Kevin Pfeil<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">7. 
Understanding and Supporting Multimodal AI Chat Interactions of DHH College Students: an Empirical Study<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Nan Zhuang, Yanni Ma, Xin Zhao, Wang Ying, Shaolong Chai, Shitong Weng, Mengru Xue, Yuxi Mao, Cheng Yao<\/span><\/i><\/p>\n<p><span style=\"text-decoration: underline;\"><span style=\"font-weight: 400;\">Interacting with Social robots<\/span><\/span><\/p>\n<p><span style=\"font-weight: 400;\">1. When Robots Listen: Predicting Empathy Valence from Multimodal Storytelling Data<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Jiayu Wang, Himadri Shekhar Mondal, Tom Gedeon, Md Zakir Hossain<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">2. USER-VLM 360: Personalized Vision Language Models with User-aware Tuning for Social Human-Robot Interactions<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Hamed Rahimi, Adil Bahaj, Mouad Abrini, Mahdi Khoramshahi, Mounir Ghogho, Mohamed Chetouani<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">3. 
Demographic User Modeling for Social Robotics with Multimodal Pre-trained Models<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Hamed Rahimi, Mouad Abrini, Jeanne Malecot, Ying Lai, Adrien Jacquet <\/span><\/i><i><span style=\"font-weight: 400;\">Cr\u00e9tides<\/span><\/i><i><span style=\"font-weight: 400;\">, Mahdi Khoramshahi, Mohamed Chetouani<\/span><\/i><\/p>\n<h4><span style=\"font-weight: 400;\">16:00-18:00 Demo Session (Poster session)<\/span><\/h4>\n<p><span style=\"font-weight: 400;\">Session Chair: Roland Goecke<\/span><\/p>\n<p><span style=\"font-weight: 400;\">Simulated Insight, Real-World Impact: Enhancing Driving Safety with CARLA-Simulated Personalized Lessons and Eye-Tracking Risk Coaching<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Wenbin Gan, Minh-son Dao, Koji Zettsu<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Affective and Physiological Responses to Immersive Intangible Cultural Heritage Experiences in Extended Reality<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Fasih Haider, Sofia de la Fuente Garcia, Alicia N\u00fa\u00f1ez Garc\u00eda, Saturnino Luz<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">SocialWise: LLM-Agentic Conversation Therapy for Individuals with Autism Spectrum Disorder to Enhance Communication Skills<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Albert Tang<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">The Human Record Needle: A Novel Interface for Embodied Music Interaction<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Brandon Waylan Ables<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">PoseDoc: An Interactive Tool for Efficient Keypoint Annotation in Human Pose Estimation<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Chengyu Fan, Tahiya Chowdhury<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Realtime Multimodal Emotion Estimation using Behavioral and Neurophysiological Data<\/span><\/p>\n<p><i><span style=\"font-weight: 
400;\">Von Ralph Dane Marquez Herbuela, Yukie Nagai<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">A Multilingual Telegram Chatbot for Mental Health Data Collection<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Danila Mamontov, Alexey Karpov, Wolfgang Minker<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Improving Deepfake Understanding through Simplified Explanations<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Abhijeet Narang, Parul Gupta, Liuyijia Su, Abhinav Dhall<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">The Crock of Shh: A Whispering Water Interface for Reshaping Reality<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Brandon Waylan Ables<\/span><\/i><\/p>\n<h4><span style=\"font-weight: 400;\">19:00 &#8211; 22:00 Banquet Dinner<\/span><\/h4>\n<p>[\/et_pb_toggle][et_pb_toggle title=&#8221;Main conference Day 3 &#8211; Thursday, 16 October 2025&#8243; _builder_version=&#8221;4.14.4&#8243; _module_preset=&#8221;default&#8221; hover_enabled=&#8221;0&#8243; global_colors_info=&#8221;{}&#8221; theme_builder_area=&#8221;post_content&#8221; sticky_enabled=&#8221;0&#8243;]<\/p>\n<h4><span style=\"font-weight: 400;\">09:00-10:00 Keynote<\/span><\/h4>\n<p><span style=\"font-weight: 400;\">Session Chair: Yukiko Nakano<\/span><\/p>\n<h4><span style=\"font-weight: 400;\">Designing for Meaningful Oversight: Human and Organisational Agency in Multimodal AI Systems<\/span><span style=\"font-weight: 400;\">\u00a0<\/span><\/h4>\n<p><i><span style=\"font-weight: 400;\">Liming Zhu<\/span><\/i><\/p>\n<h4><span style=\"font-weight: 400;\">10:00-10:30 Break<\/span><\/h4>\n<h4><span style=\"font-weight: 400;\">10:30-12:00 Oral Session 4: Safe &amp; Inclusive Interactions\u00a0<\/span><\/h4>\n<p><span style=\"font-weight: 400;\">Session Chair: Carlos Busso<\/span><\/p>\n<p><span style=\"font-weight: 400;\">10:30 &#8211; 10:48<\/span><\/p>\n<p><span style=\"font-weight: 400;\">Lightweight Transformers for Isolated Sign Language 
Recognition<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Cristina Luna-Jim\u00e9nez, Lennart Eing, Annalena Bea Aicher, Fabrizio Nunnari, Elisabeth Andr\u00e9<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">10:48 &#8211; 11:06<\/span><\/p>\n<p><span style=\"font-weight: 400;\">All of That in 15 Minutes? Exploring Privacy Perceptions Across Cognitive Abilities via Ad-hoc LLM-Generated Profiles Inferred from Social Media Use<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Kirill Kronhardt, Sebastian Hoffmann, Fabian Adelt, Max Pascher, Jens Gerken<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">11:06 &#8211; 11:24<\/span><\/p>\n<p><span style=\"font-weight: 400;\">SignFlow: End-to-End Sign Language Generation for One-to-Many Modeling using Conditional Flow Matching<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Nabeela Khan, Bowen Wu, Sihan Tan, Carlos Toshinori Ishi<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">11:24 &#8211; 11:42<\/span><\/p>\n<p><span style=\"font-weight: 400;\">MENA: A Multimodal Framework for Analyzing Caregiver Emotions and Competencies in AR Geriatric Simulations<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Behdokht Kiafar, Pavan Uttej Ravva, Salam Daher, Asif Ahmmed, Roghayeh Leila Barmaki<\/span><\/i><\/p>\n<p><i><span style=\"font-weight: 400;\">(virtual talk: <a href=\"https:\/\/drive.google.com\/file\/d\/15YqqjxKQn-IeCEiG6viGwDm2JVW-vsPs\/view?usp=drive_link\" target=\"_blank\" rel=\"noopener\">Link<\/a>)<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">11:42 &#8211; 12:00<\/span><\/p>\n<p><span style=\"font-weight: 400;\">Multimodal LLM using Federated Visual Instruction Tuning for Visually Impaired<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Ankith Bala, Alina Vereshchaka<\/span><\/i><\/p>\n<p><i><span style=\"font-weight: 400;\">(virtual talk: <a 
href=\"https:\/\/eur03.safelinks.protection.outlook.com\/?url=https%3A%2F%2Fdrive.google.com%2Ffile%2Fd%2F1l83va7ZHVO5y98XsZg5MXsjxWRj2oO5W%2Fview%3Fusp%3Dsharing&amp;data=05%7C02%7CTanaya.Guha%40glasgow.ac.uk%7C0a0d6548836543f35e4b08de00a72803%7C6e725c29763a4f5081f22e254f0133c8%7C1%7C0%7C638948915100858283%7CUnknown%7CTWFpbGZsb3d8eyJFbXB0eU1hcGkiOnRydWUsIlYiOiIwLjAuMDAwMCIsIlAiOiJXaW4zMiIsIkFOIjoiTWFpbCIsIldUIjoyfQ%3D%3D%7C0%7C%7C%7C&amp;sdata=N7yJT5DhW%2F%2BIU2M%2BJLKmEkHcAorbZZz4RubX2k%2Bx2rY%3D&amp;reserved=0\" target=\"_blank\" rel=\"noopener\">Link<\/a>)<\/span><\/i><\/p>\n<h4><span style=\"font-weight: 400;\">12:00-13:30 Lunch<\/span><\/h4>\n<h4><span style=\"font-weight: 400;\">13:30-15:00 Oral Session 5: Conversational Dynamics<\/span><\/h4>\n<p><span style=\"font-weight: 400;\">Session Chair: Md Zakir Hossain<\/span><\/p>\n<p><span style=\"font-weight: 400;\">13:30 &#8211; 13:48<\/span><\/p>\n<p><span style=\"font-weight: 400;\">Enhancing Gaze Prediction in Multi-Party Conversations via Speaker-Aware Multimodal Adaptation<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Meng-Chen Lee, Zhigang Deng<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">13:48 &#8211; 14:06 (Best paper award nominee)\u00a0<\/span><\/p>\n<p><span style=\"font-weight: 400;\">Real-time Generation of Various Types of Nodding for Avatar Attentive Listening System<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Kazushi Kato, Koji Inoue, Divesh Lala, Keiko Ochi, Tatsuya Kawahara<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">14:06 &#8211; 14:24<\/span><\/p>\n<div>Converting Spatial to Social: Using Persistent Homology to <span style=\"font-weight: 400;\"><span>Understand Social Groups<\/span><\/span><\/div>\n<p><i><span style=\"font-weight: 400;\">Valerie K. 
Chen, Claire Liang, Julie Shah, Sean Andrist<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">14:24 &#8211; 14:42<\/span><\/p>\n<p><span style=\"font-weight: 400;\">Multimodal Analysis of Disagreement in Dyadic Conversations: An Approach Based on Emotion Recognition<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Areej Buker, Emily Smith, Olga Perepelkina, Alessandro Vinciarelli<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">14:42 &#8211; 15:00 (Best paper award nominee)\u00a0<\/span><\/p>\n<p><span style=\"font-weight: 400;\">Speech-to-Joy: Self-Supervised Features for Enjoyment Prediction in Human\u2013Robot Conversation<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Ricardo Santana, Bahar Irfan, Erik Lagerstedt, Gabriel Skantze, Andre Pereira<\/span><\/i><\/p>\n<h4><span style=\"font-weight: 400;\">15:00-15:30 Break<\/span><\/h4>\n<h4><span style=\"font-weight: 400;\">15:30-17:30 Poster Session 3 (including LBR papers)<\/span><\/h4>\n<p><span style=\"font-weight: 400;\">Session Chair: Laxminarayen NV<\/span><\/p>\n<p><span style=\"text-decoration: underline;\"><span style=\"font-weight: 400;\">Topic: Multiparty Interactions<\/span><\/span><\/p>\n<p><span style=\"font-weight: 400;\">1. Multimodal Quantitative Measures for Multiparty Behavior Evaluation<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Ojas Shirekar, Wim Pouw, Chenxu Hao, Vrushank Phadnis, Thabo Beeler, Chirag Raman<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">2. Beyond Utterance: Understanding Group Problem Solving through Discussion Sequences<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Zhuoxu Duan, Zhengye Yang, Brooke Foucault Welles, Richard J. Radke<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">3. Learning Multimodal Motion Cues for Online End-of-Turn Prediction in Multi-Party Dialogue<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Meng-Chen Lee, Zhigang Deng<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">4. 
Team Dynamics in Human-AI Collaboration: Effects on Confidence, Satisfaction, and Accountability<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Mamehgol Yousefi, Ahmad Shahi, Mos Sharifi, Alvaro J Jorge Romera, Simon Hoermann, Thammathip Piumsomboon<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">5. A Multimodal Classroom Video Question-Answering Framework for Automated Understanding of Collaborative Learning<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Nithin Sivakumaran, Chia-Yu Yang, Abhay Zala, Shoubin Yu, Daeun Hong, Xiaotian Zou, Elias Stengel-Eskin, Dan Carpenter, Wookhee Min, Cindy Hmelo-Silver, Jonathan Rowe, James Lester, Mohit Bansal<\/span><\/i><\/p>\n<p><span style=\"text-decoration: underline;\"><span style=\"font-weight: 400;\">Topic: Safe &amp; Inclusive Interactions<\/span><\/span><\/p>\n<p><span style=\"font-weight: 400;\">1. Causal Explanation of the Quality of Parent-Child Interactions with Multimodal Behavioral Features<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Katherine Guerrerio, Lujie Karen Chen, Lisa Berlin, Brenda Jones Harden<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">2. Seeing, Hearing, Feeling: Designing Multimodal Alerts for Critical Drone Scenarios<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Nina Knieriemen, Anke Hirsch, Muhammad Moiz Sakha, Florian Daiber, Hannah Kolb, Simone M. H<\/span><\/i><i><span style=\"font-weight: 400;\">\u00fc<\/span><\/i><i><span style=\"font-weight: 400;\">ning, Frederik Wiehr, Antonio Kr<\/span><\/i><i><span style=\"font-weight: 400;\">\u00fc<\/span><\/i><i><span style=\"font-weight: 400;\">ger<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">3. Unobtrusive Universal Acoustic Adversarial Attacks on Speech Foundation Models in the Wild<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Jayden Fassett, Anjila Budathoki, Jack Morris, Qin Hu, Yi Ding<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">4. 
A Multilingual, Multimodal Dataset for Disinformation and Out-of-Context Analysis with Rich Supportive Information<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Shuhan Cui, Hanrui Wang, Ching-Chun Chang, Huy H. Nguyen, Isao Echizen<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">5. MERD: A Multimodal Emotional Response Dataset from 360\u00b0 VR Videos Across Different Age Groups<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Qiang Chen, Shikun Zhou, Yuming Fang, Dan Luo, Tingsong Lu<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">6. Knowledge Graphs and Fine-Grained Visual Features: A Potent Duo Against Cheapfakes<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Tuan-Vinh La, Minh-Hieu Nguyen, Minh-son Dao<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">7. Analyzing Character Representation in Media Content using Multimodal Foundation Model: Effectiveness and Trust<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Evdoxia Taka, Debadyuti Bhattacharya, Joanne Garde-Hansen, Sanjay Sharma, Tanaya Guha<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">8. A Block-Level Fine-Graining Framework for Multimodal Fusion in Federated Learning<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Guozhi Zhang, Mengying Jia, Shuyan Feng, Zixuan Liu<\/span><\/i><\/p>\n<p><span style=\"text-decoration: underline;\"><span style=\"font-weight: 400;\">Topic: XR<\/span><\/span><\/p>\n<p><span style=\"font-weight: 400;\">1. Adaptive Gen-AI Guidance in Virtual Reality: A Multimodal Exploration of Engagement in Neapolitan Pizza-Making<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Ka Hei Carrie Lau, Sema Sen, Philipp Stark, Efe Bozkir, Enkelejda Kasneci<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">2. 
Please Let Me Think: The Influence of Conversational Fillers on Transparency and Perception of Waiting Time when Interacting with a Conversational AI in Virtual Reality<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">David Obremski, Paula Friedrich, Carolin Wienrich<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">3. Exploring the Impact of Distance on XR Selection Techniques<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Becky Spittle, Maite Frutos-Pascual, Chris Creed, Ian Williams<\/span><\/i><\/p>\n<h4><span style=\"font-weight: 400;\">15:30-17:30 Late Breaking Results (Poster Session)<\/span><\/h4>\n<p><span style=\"font-weight: 400;\">Multimodal Analysis of Listener\u2019s Active Listening Behaviors in Speed Dating Dialogues<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Asahi Ogushi, Naoki Azuma, Daichi Shikama, Ryo Ishii, Toshiki Onishi, Akihiro Miyata<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">TGN-PL: Learning to Socialize Using Privileged Information and Temporal Graph Networks<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Jouh Yeong Chew, Joanne Taery Kim, Sehoon Ha<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">You Like This Robot? I Don&#8217;t! 
How Individual Differences Influence Perceptions of Robot Teammates in Virtual Reality<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Karla Bransky, Penny Sweetser<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">SBM: Social Behavior Model for Human-Like Action Generation<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Jouh Yeong Chew, Zhi-Yi Lin, Xucong Zhang<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">A New LLM-Powered Communication Metric: Information Sharing as a Predictor of Team Performance<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Xinyun Hu, Penny Sweetser<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">A Platform for Experimenting with Non-Verbal Communication: Inserting facial displays of misunderstanding into live conversations.<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Ella Cullen<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Bridging Video and Symbols: A Hybrid AI for Edge Traffic-Risk Reasoning<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Minh-Son Dao, Thi-Mai-Phuong Nguyen, Swe Nwe Nwe Htun, Koji Zettsu<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Identifying Participant Roles in Online Group Discussions<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Kazuki Kodaira, Kazuki Nakaya, Jie Zeng, Hiyori Toda, Fumio Nihei, Ryo Ishii, Yukiko Nakano<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Most DAIC-WoZ Depression Classifiers Are Invalid, They Don\u2019t Learn Task-Specific Features: Preliminary Findings From a Large-Scale Reproducibility Study<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Santosh Varma Patapati, Ishan Pendyala, Murari Ambati, Pranav Kunadharaju, Pranav Kokati, Amit Adiraju Narasimha, Trisanth Srinivasan<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">LociVR: Design of a Virtual Reality Prototype for Memory Training<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Cancan Jin, Yanze Gao, Zirui Yu, Ningning 
Xu<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">When Pose Estimation Fails: Measuring Occlusion for Reliable Multimodal Interaction<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Chengyu Fan, Tahiya Chowdhury<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">From Behavior to Interaction: Understanding User Intents in Metaverse Experience<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Ningning Xu, Lingyun Yu, Qinglin Mao, Kaiwen Li, Yifei Chen, Haibo Zhou, Xu Sun<\/span><\/i><\/p>\n<p><span style=\"font-weight: 400;\">Large Language Models as Perceivers of Dynamic Full-Body Expressions of Emotion<\/span><\/p>\n<p><i><span style=\"font-weight: 400;\">Huakun Liu, Miao Cheng, Xin Wei, Felix Dollack, Victor Schneider, Hideaki Uchiyama, Yoshifumi Kitamura, Kiyoshi Kiyokawa, Monica Perusquia-Hernandez<\/span><\/i><\/p>\n<h4><span style=\"font-weight: 400;\">17:30-18:00 Closing Ceremony\u00a0<\/span><\/h4>\n<p>[\/et_pb_toggle][et_pb_toggle title=&#8221;Adjunct Events Day 2 &#8211; Friday, 17 October 2025&#8243; _builder_version=&#8221;4.14.4&#8243; _module_preset=&#8221;default&#8221; hover_enabled=&#8221;0&#8243; global_colors_info=&#8221;{}&#8221; theme_builder_area=&#8221;post_content&#8221; sticky_enabled=&#8221;0&#8243;]<\/p>\n<h4><span style=\"font-weight: 400;\">Adjunct Events Day 2 &#8211; Friday, 17 October 2025<\/span><\/h4>\n<h4><span style=\"font-weight: 400;\">9:00 &#8211; 12:00 Human-AI Interaction Tutorial<\/span><\/h4>\n<p><i><span style=\"font-weight: 400;\">Madhawa Perera, Md Zakir Hossain, Alexander Krumpholz, Tom Gedeon<\/span><\/i><\/p>\n<h4><span style=\"font-weight: 400;\">12:30 &#8211; 15:00\u00a0 <\/span><a href=\"https:\/\/sites.google.com\/view\/hariworkshop\"><span style=\"font-weight: 400;\">HRAI Workshop<\/span><\/a><\/h4>\n<p>[\/et_pb_toggle][\/et_pb_column][\/et_pb_row][\/et_pb_section]<\/p>\n","protected":false},"excerpt":{"rendered":"<p>Sessions Conference Program Overview (click to enlarge) Download the 
detailed program here: Detailed Program 2025\u200b Proceedings: toc\u00a0Adjunct Events Day 1 &#8211; Monday, 13 October 2025 08:00-17:00 Registration 09:00 &#8211; 17:30 Doctoral Consortium 09:00 &#8211; 09:10 Opening and Welcome Session 1: LLMs for multimodal interactions 09:10 &#8211; 09:35 Cognitive Effort Analysis in Digital Learning Environments Shayla [&hellip;]<\/p>\n","protected":false},"author":1,"featured_media":0,"parent":0,"menu_order":0,"comment_status":"closed","ping_status":"closed","template":"","meta":{"_et_pb_use_builder":"on","_et_pb_old_content":"<!-- wp:paragraph -->\n<p>This is an example page. It's different from a blog post because it will stay in one place and will show up in your site navigation (in most themes). Most people start with an About page that introduces them to potential site visitors. It might say something like this:<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:quote -->\n<blockquote class=\"wp-block-quote\"><p>Hi there! I'm a bike messenger by day, aspiring actor by night, and this is my website. I live in Los Angeles, have a great dog named Jack, and I like pi\u00f1a coladas. (And gettin' caught in the rain.)<\/p><\/blockquote>\n<!-- \/wp:quote -->\n\n<!-- wp:paragraph -->\n<p>...or something like this:<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:quote -->\n<blockquote class=\"wp-block-quote\"><p>The XYZ Doohickey Company was founded in 1971, and has been providing quality doohickeys to the public ever since. Located in Gotham City, XYZ employs over 2,000 people and does all kinds of awesome things for the Gotham community.<\/p><\/blockquote>\n<!-- \/wp:quote -->\n\n<!-- wp:paragraph -->\n<p>As a new WordPress user, you should go to <a href=\"https:\/\/icmi.acm.org\/2025-\/wp-admin\/\">your dashboard<\/a> to delete this page and create new pages for your content. 
Have fun!<\/p>\n<!-- \/wp:paragraph -->","_et_gb_content_width":"","inline_featured_image":false,"footnotes":""},"class_list":["post-1990","page","type-page","status-publish","hentry"],"yoast_head":"<!-- This site is optimized with the Yoast SEO plugin v24.2 - https:\/\/yoast.com\/wordpress\/plugins\/seo\/ -->\n<title>Sessions - ICMI 2025 :: 27th ACM International Conference on Multimodal Interaction<\/title>\n<meta name=\"robots\" content=\"index, follow, max-snippet:-1, max-image-preview:large, max-video-preview:-1\" \/>\n<link rel=\"canonical\" href=\"https:\/\/icmi.acm.org\/2025\/sessions\/\" \/>\n<meta property=\"og:locale\" content=\"en_US\" \/>\n<meta property=\"og:type\" content=\"article\" \/>\n<meta property=\"og:title\" content=\"Sessions - ICMI 2025 :: 27th ACM International Conference on Multimodal Interaction\" \/>\n<meta property=\"og:description\" content=\"Sessions Conference Program Overview (click to enlarge) Download the detailed program here: Detailed Program 2025\u200b Proceedings: toc\u00a0Adjunct Events Day 1 - Monday, 13 October 2025 08:00-17:00 Registration 09:00 - 17:30 Doctoral Consortium 09:00 - 09:10 Opening and Welcome Session 1: LLMs for multimodal interactions 09:10 - 09:35 Cognitive Effort Analysis in Digital Learning Environments Shayla [&hellip;]\" \/>\n<meta property=\"og:url\" content=\"https:\/\/icmi.acm.org\/2025\/sessions\/\" \/>\n<meta property=\"og:site_name\" content=\"ICMI 2025 :: 27th ACM International Conference on Multimodal Interaction\" \/>\n<meta property=\"article:modified_time\" content=\"2025-10-13T00:36:13+00:00\" \/>\n<meta property=\"og:image\" content=\"https:\/\/icmi.acm.org\/2025\/wp-content\/uploads\/2025\/10\/Screenshot-from-2025-10-10-10-05-47.png\" \/>\n\t<meta property=\"og:image:width\" content=\"1555\" \/>\n\t<meta property=\"og:image:height\" content=\"790\" \/>\n\t<meta property=\"og:image:type\" content=\"image\/png\" \/>\n<meta name=\"twitter:card\" content=\"summary_large_image\" \/>\n<meta 
name=\"twitter:label1\" content=\"Est. reading time\" \/>\n\t<meta name=\"twitter:data1\" content=\"18 minutes\" \/>\n<script type=\"application\/ld+json\" class=\"yoast-schema-graph\">{\"@context\":\"https:\/\/schema.org\",\"@graph\":[{\"@type\":\"WebPage\",\"@id\":\"https:\/\/icmi.acm.org\/2025\/sessions\/\",\"url\":\"https:\/\/icmi.acm.org\/2025\/sessions\/\",\"name\":\"Sessions - ICMI 2025 :: 27th ACM International Conference on Multimodal Interaction\",\"isPartOf\":{\"@id\":\"https:\/\/icmi.acm.org\/2025\/#website\"},\"primaryImageOfPage\":{\"@id\":\"https:\/\/icmi.acm.org\/2025\/sessions\/#primaryimage\"},\"image\":{\"@id\":\"https:\/\/icmi.acm.org\/2025\/sessions\/#primaryimage\"},\"thumbnailUrl\":\"https:\/\/icmi.acm.org\/2025\/wp-content\/uploads\/2025\/10\/Screenshot-from-2025-10-10-10-05-47.png\",\"datePublished\":\"2024-09-30T16:20:35+00:00\",\"dateModified\":\"2025-10-13T00:36:13+00:00\",\"breadcrumb\":{\"@id\":\"https:\/\/icmi.acm.org\/2025\/sessions\/#breadcrumb\"},\"inLanguage\":\"en-US\",\"potentialAction\":[{\"@type\":\"ReadAction\",\"target\":[\"https:\/\/icmi.acm.org\/2025\/sessions\/\"]}]},{\"@type\":\"ImageObject\",\"inLanguage\":\"en-US\",\"@id\":\"https:\/\/icmi.acm.org\/2025\/sessions\/#primaryimage\",\"url\":\"https:\/\/icmi.acm.org\/2025\/wp-content\/uploads\/2025\/10\/Screenshot-from-2025-10-10-10-05-47.png\",\"contentUrl\":\"https:\/\/icmi.acm.org\/2025\/wp-content\/uploads\/2025\/10\/Screenshot-from-2025-10-10-10-05-47.png\"},{\"@type\":\"BreadcrumbList\",\"@id\":\"https:\/\/icmi.acm.org\/2025\/sessions\/#breadcrumb\",\"itemListElement\":[{\"@type\":\"ListItem\",\"position\":1,\"name\":\"Home\",\"item\":\"https:\/\/icmi.acm.org\/2025\/\"},{\"@type\":\"ListItem\",\"position\":2,\"name\":\"Sessions\"}]},{\"@type\":\"WebSite\",\"@id\":\"https:\/\/icmi.acm.org\/2025\/#website\",\"url\":\"https:\/\/icmi.acm.org\/2025\/\",\"name\":\"ICMI 2025 :: 27th ACM International Conference on Multimodal 
Interaction\",\"description\":\"Australia\",\"potentialAction\":[{\"@type\":\"SearchAction\",\"target\":{\"@type\":\"EntryPoint\",\"urlTemplate\":\"https:\/\/icmi.acm.org\/2025\/?s={search_term_string}\"},\"query-input\":{\"@type\":\"PropertyValueSpecification\",\"valueRequired\":true,\"valueName\":\"search_term_string\"}}],\"inLanguage\":\"en-US\"}]}<\/script>\n<!-- \/ Yoast SEO plugin. -->","yoast_head_json":{"title":"Sessions - ICMI 2025 :: 27th ACM International Conference on Multimodal Interaction","robots":{"index":"index","follow":"follow","max-snippet":"max-snippet:-1","max-image-preview":"max-image-preview:large","max-video-preview":"max-video-preview:-1"},"canonical":"https:\/\/icmi.acm.org\/2025\/sessions\/","og_locale":"en_US","og_type":"article","og_title":"Sessions - ICMI 2025 :: 27th ACM International Conference on Multimodal Interaction","og_description":"Sessions Conference Program Overview (click to enlarge) Download the detailed program here: Detailed Program 2025\u200b Proceedings: toc\u00a0Adjunct Events Day 1 - Monday, 13 October 2025 08:00-17:00 Registration 09:00 - 17:30 Doctoral Consortium 09:00 - 09:10 Opening and Welcome Session 1: LLMs for multimodal interactions 09:10 - 09:35 Cognitive Effort Analysis in Digital Learning Environments Shayla [&hellip;]","og_url":"https:\/\/icmi.acm.org\/2025\/sessions\/","og_site_name":"ICMI 2025 :: 27th ACM International Conference on Multimodal Interaction","article_modified_time":"2025-10-13T00:36:13+00:00","og_image":[{"width":1555,"height":790,"url":"https:\/\/icmi.acm.org\/2025\/wp-content\/uploads\/2025\/10\/Screenshot-from-2025-10-10-10-05-47.png","type":"image\/png"}],"twitter_card":"summary_large_image","twitter_misc":{"Est. 
reading time":"18 minutes"},"schema":{"@context":"https:\/\/schema.org","@graph":[{"@type":"WebPage","@id":"https:\/\/icmi.acm.org\/2025\/sessions\/","url":"https:\/\/icmi.acm.org\/2025\/sessions\/","name":"Sessions - ICMI 2025 :: 27th ACM International Conference on Multimodal Interaction","isPartOf":{"@id":"https:\/\/icmi.acm.org\/2025\/#website"},"primaryImageOfPage":{"@id":"https:\/\/icmi.acm.org\/2025\/sessions\/#primaryimage"},"image":{"@id":"https:\/\/icmi.acm.org\/2025\/sessions\/#primaryimage"},"thumbnailUrl":"https:\/\/icmi.acm.org\/2025\/wp-content\/uploads\/2025\/10\/Screenshot-from-2025-10-10-10-05-47.png","datePublished":"2024-09-30T16:20:35+00:00","dateModified":"2025-10-13T00:36:13+00:00","breadcrumb":{"@id":"https:\/\/icmi.acm.org\/2025\/sessions\/#breadcrumb"},"inLanguage":"en-US","potentialAction":[{"@type":"ReadAction","target":["https:\/\/icmi.acm.org\/2025\/sessions\/"]}]},{"@type":"ImageObject","inLanguage":"en-US","@id":"https:\/\/icmi.acm.org\/2025\/sessions\/#primaryimage","url":"https:\/\/icmi.acm.org\/2025\/wp-content\/uploads\/2025\/10\/Screenshot-from-2025-10-10-10-05-47.png","contentUrl":"https:\/\/icmi.acm.org\/2025\/wp-content\/uploads\/2025\/10\/Screenshot-from-2025-10-10-10-05-47.png"},{"@type":"BreadcrumbList","@id":"https:\/\/icmi.acm.org\/2025\/sessions\/#breadcrumb","itemListElement":[{"@type":"ListItem","position":1,"name":"Home","item":"https:\/\/icmi.acm.org\/2025\/"},{"@type":"ListItem","position":2,"name":"Sessions"}]},{"@type":"WebSite","@id":"https:\/\/icmi.acm.org\/2025\/#website","url":"https:\/\/icmi.acm.org\/2025\/","name":"ICMI 2025 :: 27th ACM International Conference on Multimodal 
Interaction","description":"Australia","potentialAction":[{"@type":"SearchAction","target":{"@type":"EntryPoint","urlTemplate":"https:\/\/icmi.acm.org\/2025\/?s={search_term_string}"},"query-input":{"@type":"PropertyValueSpecification","valueRequired":true,"valueName":"search_term_string"}}],"inLanguage":"en-US"}]}},"_links":{"self":[{"href":"https:\/\/icmi.acm.org\/2025\/wp-json\/wp\/v2\/pages\/1990","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/icmi.acm.org\/2025\/wp-json\/wp\/v2\/pages"}],"about":[{"href":"https:\/\/icmi.acm.org\/2025\/wp-json\/wp\/v2\/types\/page"}],"author":[{"embeddable":true,"href":"https:\/\/icmi.acm.org\/2025\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/icmi.acm.org\/2025\/wp-json\/wp\/v2\/comments?post=1990"}],"version-history":[{"count":91,"href":"https:\/\/icmi.acm.org\/2025\/wp-json\/wp\/v2\/pages\/1990\/revisions"}],"predecessor-version":[{"id":2577,"href":"https:\/\/icmi.acm.org\/2025\/wp-json\/wp\/v2\/pages\/1990\/revisions\/2577"}],"wp:attachment":[{"href":"https:\/\/icmi.acm.org\/2025\/wp-json\/wp\/v2\/media?parent=1990"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}