@InProceedings{JavierMarin2010,
  author      = {Marin, Javier and Vazquez, David and Geronimo, David and Lopez, Antonio},
  title       = {Learning Appearance in Virtual Scenarios for Pedestrian Detection},
  booktitle   = {23rd {IEEE} Conference on Computer Vision and Pattern Recognition},
  year        = {2010},
  pages       = {137--144},
  optkeywords = {Pedestrian Detection; Domain Adaptation},
  abstract    = {Detecting pedestrians in images is a key functionality to avoid vehicle-to-pedestrian collisions. The most promising detectors rely on appearance-based pedestrian classifiers trained with labelled samples. This paper addresses the following question: can a pedestrian appearance model learnt in virtual scenarios work successfully for pedestrian detection in real images? (Fig. 1). Our experiments suggest a positive answer, which is a new and relevant conclusion for research in pedestrian detection. More specifically, we record training sequences in virtual scenarios and then appearance-based pedestrian classifiers are learnt using HOG and linear SVM. We test such classifiers in a publicly available dataset provided by Daimler AG for pedestrian detection benchmarking. This dataset contains real world images acquired from a moving car. The obtained result is compared with the one given by a classifier learnt using samples coming from real images. The comparison reveals that, although virtual samples were not specially selected, both virtual and real based training give rise to classifiers of similar performance.},
  optnote     = {ADAS; exported from refbase (http://158.109.8.37/show.php?record=1304), last updated on Mon, 15 May 2017 10:25:32 +0200},
  isbn        = {978-1-4244-6984-0},
  issn        = {1063-6919},
  doi         = {10.1109/CVPR.2010.5540218},
  file        = {:http://158.109.8.37/files/MVG2010.pdf:PDF},
  language    = {English},
}