@Article{DavidVazquez2014,
  author   = "David Vazquez and Javier Marin and Antonio Lopez and Daniel Ponsa and David Geronimo",
  title    = "Virtual and Real World Adaptation for Pedestrian Detection",
  journal  = "IEEE Transactions on Pattern Analysis and Machine Intelligence",
  year     = "2014",
  volume   = "36",
  number   = "4",
  pages    = "797--809",
  keywords = "Domain Adaptation; Pedestrian Detection",
  abstract = "Pedestrian detection is of paramount interest for many applications. The most promising detectors rely on discriminatively learnt classifiers, i.e., trained with annotated samples. However, the annotation step is a labor-intensive and subjective task worth minimizing. By using virtual worlds we can automatically obtain precise and rich annotations. Thus, we face the question: can a pedestrian appearance model learnt in realistic virtual worlds work successfully for pedestrian detection in real-world images? Our experiments show that virtual-world-based training can provide excellent testing accuracy in the real world, but it can also suffer from the dataset shift problem, just as real-world-based training does. Accordingly, we have designed a domain adaptation framework, V-AYLA, in which we have tested different techniques to collect a few pedestrian samples from the target domain (real world) and combine them with the many examples of the source domain (virtual world) in order to train a domain-adapted pedestrian classifier that will operate in the target domain. V-AYLA reports the same detection accuracy as training with many human-provided pedestrian annotations and testing on real-world images of the same domain. To the best of our knowledge, this is the first work demonstrating adaptation of virtual and real worlds for developing an object detector.",
  optnote  = "ADAS; 600.057; 600.054; 600.076",
  issn     = "0162-8828",
  doi      = "10.1109/TPAMI.2013.163"
}