@InProceedings{SergiGarciaBordils2023,
  author="Garcia Bordils, Sergi and Karatzas, Dimosthenis and Rusi{\~n}ol, Mar{\c{c}}al",
  title="Accelerating Transformer-Based Scene Text Detection and Recognition via Token Pruning",
  booktitle="17th International Conference on Document Analysis and Recognition (ICDAR 2023)",
  year="2023",
  series="Lecture Notes in Computer Science",
  volume="14192",
  pages="106--121",
  publisher="Springer",
  optkeywords="Scene Text Detection; Scene Text Recognition; Transformer Acceleration",
  abstract="Scene text detection and recognition is a crucial task in computer vision with numerous real-world applications. Transformer-based approaches are behind all current state-of-the-art models and have achieved excellent performance. However, the computational requirements of the transformer architecture make training these methods slow and resource-heavy. In this paper, we introduce a new token pruning strategy that significantly decreases training and inference times without sacrificing performance, striking a balance between accuracy and speed. We have applied this pruning technique to our own end-to-end transformer-based scene text understanding architecture. Our method uses a separate detection branch to guide the pruning of uninformative image features, which significantly reduces the number of tokens at the input of the transformer. Experimental results show that our network obtains competitive results on multiple public benchmarks while running at significantly higher speeds.",
  optnote="DAG",
  opturl="https://link.springer.com/chapter/10.1007/978-3-031-41731-3_7"
}