@InProceedings{SiyangSong2023,
  author    = "Siyang Song and Micol Spitale and Cheng Luo and German Barquero and Cristina Palmero and Sergio Escalera and Michel Valstar and Tobias Baur and Fabien Ringeval and Elisabeth Andre and Hatice Gunes",
  title     = "REACT2023: The First Multiple Appropriate Facial Reaction Generation Challenge",
  booktitle = "Proceedings of the 31st ACM International Conference on Multimedia",
  year      = "2023",
  pages     = "9620--9624",
  abstract  = "The Multiple Appropriate Facial Reaction Generation Challenge (REACT2023) is the first competition event focused on evaluating multimedia processing and machine learning techniques for generating human-appropriate facial reactions in various dyadic interaction scenarios, with all participants competing strictly under the same conditions. The goal of the challenge is to provide the first benchmark test set for multi-modal information processing and to foster collaboration among the audio, visual, and audio-visual behaviour analysis and behaviour generation (a.k.a. generative AI) communities, to compare the relative merits of the approaches to automatic appropriate facial reaction generation under different spontaneous dyadic interaction conditions. This paper presents: (i) the novelties, contributions and guidelines of the REACT2023 challenge; (ii) the dataset utilized in the challenge; and (iii) the performance of the baseline systems on the two proposed sub-challenges: Offline Multiple Appropriate Facial Reaction Generation and Online Multiple Appropriate Facial Reaction Generation, respectively. The challenge baseline code is publicly available at https://github.com/reactmultimodalchallenge/baseline\_react2023.",
  optnote   = "HUPBA",
  opturl    = "https://dl.acm.org/doi/10.1145/3581783.3612832"
}