---
language:
- en
config_names:
- query
- corpus
- qrels
tags:
- information-retrieval
- multimodal-retrieval
dataset_info:
- config_name: qrels
  features:
  - name: query-id
    dtype: string
  - name: Q0
    dtype: string
  - name: corpus-id
    dtype: string
  - name: score
    dtype: int8
  splits:
  - name: test
    num_examples: 3609
- config_name: corpus
  features:
  - name: id
    dtype: string
  - name: modality
    dtype: string
  - name: text
    dtype: string
  - name: image
    dtype: image
  splits:
  - name: corpus
    num_examples: 138794
- config_name: query
  features:
  - name: id
    dtype: string
  - name: modality
    dtype: string
  - name: text
    dtype: string
  - name: image
    dtype: image
  splits:
  - name: test
    num_examples: 3609
configs:
- config_name: qrels
  data_files:
  - split: test
    path: qrels.parquet
- config_name: corpus
  data_files:
  - split: corpus
    path: corpus.parquet
- config_name: query
  data_files:
  - split: test
    path: query.parquet
license: cc0-1.0
---

## ReMuQ Image-Text to Text Retrieval

Converted from https://github.com/luomancs/ReMuQ

```
@inproceedings{luo-etal-2023-end,
    title = "End-to-end Knowledge Retrieval with Multi-modal Queries",
    author = "Luo, Man and Fang, Zhiyuan and Gokhale, Tejas and Yang, Yezhou and Baral, Chitta",
    editor = "Rogers, Anna and Boyd-Graber, Jordan and Okazaki, Naoaki",
    booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = jul,
    year = "2023",
    address = "Toronto, Canada",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.acl-long.478",
    doi = "10.18653/v1/2023.acl-long.478",
    pages = "8573--8589",
}
```
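
### Usage

The `query` and `corpus` configs share one schema (`id`, `modality`, `text`, `image`), and the `qrels` config uses the TREC-style qrels layout (`query-id`, `Q0`, `corpus-id`, `score`). Below is a minimal loading sketch using the Hugging Face `datasets` library; the dataset ID is a hypothetical placeholder for this repository's actual path.

```python
from datasets import load_dataset

# Hypothetical repository ID -- replace with the actual path of this dataset card.
DATASET_ID = "user/ReMuQ"

corpus = load_dataset(DATASET_ID, "corpus", split="corpus")  # 138,794 documents
queries = load_dataset(DATASET_ID, "query", split="test")    # 3,609 image-text queries
qrels = load_dataset(DATASET_ID, "qrels", split="test")      # 3,609 relevance judgments

# Build a relevance lookup for evaluation: query-id -> {corpus-id: score}.
relevant = {}
for row in qrels:
    relevant.setdefault(row["query-id"], {})[row["corpus-id"]] = row["score"]

example = queries[0]
print(example["id"], example["modality"], example["text"])
```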