@InProceedings{AitorAlvarez-Gila2022,
  author      = "Aitor Alvarez-Gila and Joost van de Weijer and Yaxing Wang and Estibaliz Garrote",
  title       = "{MVMO}: A Multi-Object Dataset for Wide Baseline Multi-View Semantic Segmentation",
  booktitle   = "29th IEEE International Conference on Image Processing (ICIP)",
  year        = "2022",
  optkeywords = "multi-view; cross-view; semantic segmentation; synthetic dataset",
  abstract    = "We present MVMO (Multi-View, Multi-Object dataset): a synthetic dataset of 116,000 scenes containing randomly placed objects of 10 distinct classes, captured from 25 camera locations in the upper hemisphere. MVMO comprises photorealistic, path-traced image renders, together with semantic segmentation ground truth for every view. Unlike existing multi-view datasets, MVMO features wide baselines between cameras and a high density of objects, which lead to large disparities, heavy occlusions and view-dependent object appearance. Single-view semantic segmentation is hindered by self- and inter-object occlusions, and could benefit from additional viewpoints. We therefore expect MVMO to propel research in multi-view semantic segmentation and cross-view semantic transfer. We also provide baselines showing that new research is needed in these fields to exploit the complementary information of multi-view setups.",
  optnote     = "LAMP",
  doi         = "10.1109/ICIP46576.2022.9897955",
  opturl      = "https://ieeexplore.ieee.org/document/9897955",
  file        = ":http://158.109.8.37/files/AWW2022.pdf:PDF"
}