@inproceedings{caglayan-etal-2021-cross-lingual,
  % Title was double-braced ({{...}}), which defeats style-controlled casing;
  % single braces suffice since no word needs case protection here.
  title     = {Cross-lingual Visual Pre-training for Multimodal Machine Translation},
  author    = {Caglayan, Ozan and
               Kuyu, Menekse and
               Amac, Mustafa Sercan and
               Madhyastha, Pranava and
               Erdem, Erkut and
               Erdem, Aykut and
               Specia, Lucia},
  booktitle = {Proceedings of the 16th Conference of the {E}uropean Chapter of the Association for Computational Linguistics: Short Papers},
  month     = apr,
  year      = {2021},
  % Virtual venue; capitalized per ACL Anthology convention.
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  % Fixed: escaped the literal "&" (vision \& language) and repaired the
  % extraction artifact "state- of-the-art".
  abstract  = {Pre-trained language models have been shown to substantially improve performance in many natural language tasks. Although the early focus of such models was single language pre-training, recent advances have resulted in cross-lingual and visual pre-training methods. In this paper, we combine these two approaches to learn visually-grounded cross-lingual representations. Specifically, we extend the translation language modelling (Lample and Conneau, 2019) with masked region classification, and perform pre-training with three-way parallel vision \& language corpora. We show that when fine-tuned for multimodal machine translation, these models obtain state-of-the-art performance. We also provide qualitative insights into the usefulness of the learned grounded representations.},
}