---
configs:
- config_name: ConditionalQA-corpus
  data_files:
  - split: test
    path: ConditionalQA/corpus/*
- config_name: ConditionalQA-corpus_coref
  data_files:
  - split: test
    path: ConditionalQA/corpus_coref/*
- config_name: ConditionalQA-docs
  data_files:
  - split: test
    path: ConditionalQA/docs/*
- config_name: ConditionalQA-keyphrases
  data_files:
  - split: test
    path: ConditionalQA/keyphrases/*
- config_name: ConditionalQA-qrels
  data_files:
  - split: train
    path: ConditionalQA/qrels/train.parquet
  - split: dev
    path: ConditionalQA/qrels/dev.parquet
  - split: test
    path: ConditionalQA/qrels/test.parquet
- config_name: ConditionalQA-queries
  data_files:
  - split: train
    path: ConditionalQA/queries/train.parquet
  - split: dev
    path: ConditionalQA/queries/dev.parquet
  - split: test
    path: ConditionalQA/queries/test.parquet
- config_name: Genomics-corpus
  data_files:
  - split: test
    path: Genomics/corpus/*
- config_name: Genomics-corpus_coref
  data_files:
  - split: test
    path: Genomics/corpus_coref/*
- config_name: Genomics-docs
  data_files:
  - split: test
    path: Genomics/docs/*
- config_name: Genomics-keyphrases
  data_files:
  - split: test
    path: Genomics/keyphrases/*
- config_name: Genomics-qrels
  data_files:
  - split: test
    path: Genomics/qrels/test.parquet
- config_name: Genomics-queries
  data_files:
  - split: test
    path: Genomics/queries/test.parquet
- config_name: MIRACL-corpus
  data_files:
  - split: test
    path: MIRACL/corpus/*
- config_name: MIRACL-corpus_coref
  data_files:
  - split: test
    path: MIRACL/corpus_coref/*
- config_name: MIRACL-docs
  data_files:
  - split: test
    path: MIRACL/docs/*
- config_name: MIRACL-keyphrases
  data_files:
  - split: test
    path: MIRACL/keyphrases/*
- config_name: MIRACL-qrels
  data_files:
  - split: train
    path: MIRACL/qrels/train.parquet
  - split: dev
    path: MIRACL/qrels/dev.parquet
  - split: test
    path: MIRACL/qrels/test.parquet
- config_name: MIRACL-queries
  data_files:
  - split: train
    path: MIRACL/queries/train.parquet
  - split: dev
    path: MIRACL/queries/dev.parquet
  - split: test
    path: MIRACL/queries/test.parquet
- config_name: MSMARCO-corpus
  data_files:
  - split: test
    path: MSMARCO/corpus/*
- config_name: MSMARCO-corpus_coref
  data_files:
  - split: test
    path: MSMARCO/corpus_coref/*
- config_name: MSMARCO-docs
  data_files:
  - split: test
    path: MSMARCO/docs/*
- config_name: MSMARCO-keyphrases
  data_files:
  - split: test
    path: MSMARCO/keyphrases/*
- config_name: MSMARCO-qrels
  data_files:
  - split: train
    path: MSMARCO/qrels/train.parquet
  - split: dev
    path: MSMARCO/qrels/dev.parquet
  - split: test
    path: MSMARCO/qrels/test.parquet
- config_name: MSMARCO-queries
  data_files:
  - split: train
    path: MSMARCO/queries/train.parquet
  - split: dev
    path: MSMARCO/queries/dev.parquet
  - split: test
    path: MSMARCO/queries/test.parquet
- config_name: NaturalQuestions-corpus
  data_files:
  - split: test
    path: NaturalQuestions/corpus/*
- config_name: NaturalQuestions-corpus_coref
  data_files:
  - split: test
    path: NaturalQuestions/corpus_coref/*
- config_name: NaturalQuestions-docs
  data_files:
  - split: test
    path: NaturalQuestions/docs/*
- config_name: NaturalQuestions-keyphrases
  data_files:
  - split: test
    path: NaturalQuestions/keyphrases/*
- config_name: NaturalQuestions-qrels
  data_files:
  - split: dev
    path: NaturalQuestions/qrels/dev.parquet
  - split: test
    path: NaturalQuestions/qrels/test.parquet
- config_name: NaturalQuestions-queries
  data_files:
  - split: dev
    path: NaturalQuestions/queries/dev.parquet
  - split: test
    path: NaturalQuestions/queries/test.parquet
- config_name: nq-hard
  data_files:
  - split: test
    path: NaturalQuestions/nq-hard/*
dataset_info:
  features:
  - name: doc_id
    dtype: string
  - name: title
    dtype: string
  - name: passage_ids
    sequence: string
  - name: passages
    sequence: string
  - name: is_candidate
    sequence: bool
  splits:
  - name: test
    num_bytes: 13421074669
    num_examples: 5758285
  download_size: 7956252663
  dataset_size: 13421074669
---

# DAPR: Document-Aware Passage Retrieval

This dataset repository contains the queries, passages/documents, and relevance judgements for the data used in the [DAPR](https://arxiv.org/abs/2305.13915) paper.

DAPR is a benchmark for document-aware passage retrieval: given a (large) collection of documents, the task is to return the passages within these documents that are relevant to a given query.

A key focus of DAPR is encouraging retrieval systems to utilize the document-level context that surrounds the relevant passages. An example is shown below:

<img src='https://raw.githubusercontent.com/UKPLab/acl2024-dapr/main/imgs/motivative-example.png' width='300'>

> In this example, the query asks for a musician or group who has ever played at a certain venue. However, the gold relevant passage mentions only the referring noun phrase, "the venue", but not its actual name, "the Half Moon, Putney". The model thus needs to explore the context from the passage's parent document, which in this case amounts to coreference resolution.


## Overview

The DAPR benchmark contains five datasets:
| Dataset | #Queries (test) | #Documents | #Passages |
| --- | --- | --- | --- |
| [MS MARCO](https://microsoft.github.io/msmarco/) | 2,722 | 1,359,163 | 2,383,023* |
| [Natural Questions](https://ai.google.com/research/NaturalQuestions) | 3,610 | 108,626 | 2,682,017 |
| [MIRACL](https://project-miracl.github.io/) | 799 | 5,758,285 | 32,893,221 |
| [Genomics](https://dmice.ohsu.edu/trec-gen/) | 62 | 162,259 | 12,641,127 |
| [ConditionalQA](https://haitian-sun.github.io/conditionalqa/) | 271 | 652 | 69,199 |

Additionally, NQ-hard, the hard subset of queries from Natural Questions, is included (516 queries in total). These queries are hard because understanding the document context (e.g. coreference, the main topic, multi-hop reasoning, or acronyms) is necessary for retrieving the relevant passages.

> Note: for MS MARCO, the documents do not provide a gold paragraph segmentation, so we segment each document only by keeping the judged passages (from the MS MARCO Passage Ranking task) as standalone passages while leaving the rest of the text as the surrounding passages. The judged passages are marked with `is_candidate==true` (hence the * next to the MS MARCO passage count in the table above).

> For Natural Questions, the training split is not provided because its duplicate timestamps are not compatible with the queries/qrels/corpus format. Please refer to https://public.ukp.informatik.tu-darmstadt.de/kwang/dapr/data/NaturalQuestions/ for the training split.
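
Each of these datasets is exposed through several configs named `<Dataset>-corpus`, `<Dataset>-queries`, `<Dataset>-qrels`, etc. (see the YAML header of this card). As a minimal sketch, the available configs can be listed programmatically:
```python
from datasets import get_dataset_config_names

# Prints config names such as "ConditionalQA-corpus", "MSMARCO-qrels", "nq-hard", ...
print(get_dataset_config_names("UKPLab/dapr"))
```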

## Load the dataset
### Loading the passages
One can load the passages like this:
```python
from datasets import load_dataset

dataset_name = "ConditionalQA"
passages = load_dataset("UKPLab/dapr", f"{dataset_name}-corpus", split="test")
for passage in passages:
    passage["_id"]  # passage id
    passage["text"]  # passage text
    passage["title"]  # doc title
    passage["doc_id"]
    passage["paragraph_no"]  # the paragraph number within the document
    passage["total_paragraphs"]  # how many paragraphs/passages in total in the document
    passage["is_candidate"]  # is this passage a candidate for retrieval
```
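
For example, to keep only the candidate passages for retrieval (cf. the MS MARCO note above), one could filter on the `is_candidate` flag. A minimal sketch:
```python
from datasets import load_dataset

passages = load_dataset("UKPLab/dapr", "MSMARCO-corpus", split="test")
# Keep only the passages judged in the MS MARCO Passage Ranking task:
candidate_passages = passages.filter(lambda passage: passage["is_candidate"])
print(len(candidate_passages))
```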

Alternatively, one can stream the corpus without downloading it beforehand:
```python
from datasets import load_dataset

dataset_name = "ConditionalQA"
passages = load_dataset(
    "UKPLab/dapr", f"{dataset_name}-corpus", split="test", streaming=True
)
for passage in passages:
    passage["_id"]  # passage id
    passage["text"]  # passage text
    passage["title"]  # doc title
    passage["doc_id"]
    passage["paragraph_no"]  # the paragraph number within the document
    passage["total_paragraphs"]  # how many paragraphs/passages in total in the document
    passage["is_candidate"]  # is this passage a candidate for retrieval
```
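
### Loading the queries
The query IDs and texts can be loaded analogously (the same `_id` and `text` fields are used in the retrieval example below):
```python
from datasets import load_dataset

dataset_name = "ConditionalQA"
queries = load_dataset("UKPLab/dapr", f"{dataset_name}-queries", split="test")
for query in queries:
    query["_id"]  # query id
    query["text"]  # query text
```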

### Loading the qrels
The qrels configs contain the query relevance annotations, i.e., the gold relevance score for each judged (query, passage) pair.
```python
from datasets import load_dataset

dataset_name = "ConditionalQA"
qrels = load_dataset("UKPLab/dapr", f"{dataset_name}-qrels", split="test")
for qrel in qrels:
    qrel["query_id"]  # query id (the text is available in ConditionalQA-queries)
    qrel["corpus_id"]  # passage id
    qrel["score"]  # gold judgement

```
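
Since each qrel row carries only IDs, the query texts can be looked up via the corresponding `-queries` config. A minimal sketch:
```python
from datasets import load_dataset

dataset_name = "ConditionalQA"
queries = load_dataset("UKPLab/dapr", f"{dataset_name}-queries", split="test")
qrels = load_dataset("UKPLab/dapr", f"{dataset_name}-qrels", split="test")

# Map query ids to texts, then resolve the text of each judged pair:
qid_to_text = {query["_id"]: query["text"] for query in queries}
for qrel in qrels:
    query_text = qid_to_text[qrel["query_id"]]
```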
The NQ-hard dataset is presented in an extended version of the normal qrels format, with additional columns:
```python
from datasets import load_dataset

qrels = load_dataset("UKPLab/dapr", "nq-hard", split="test")
for qrel in qrels:
    qrel["query_id"]  # query id (the text is available in ConditionalQA-queries)
    qrel["corpus_id"]  # passage id
    qrel["score"]  # gold judgement

    # Additional columns:
    qrel["query"]  # query text
    qrel["text"]  # passage text
    qrel["title"]  # doc title
    qrel["doc_id"]
    qrel["categories"]  # list of categories about this query-passage pair
    qrel["url"]  # url to the document in Wikipedia
```
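
For instance, the `categories` column shows which kind of document-context understanding each hard query requires. A minimal sketch tallying the categories:
```python
from collections import Counter

from datasets import load_dataset

qrels = load_dataset("UKPLab/dapr", "nq-hard", split="test")

# Count how often each hardness category (coreference, multi-hop reasoning, etc.) occurs:
category_counts = Counter(
    category for qrel in qrels for category in qrel["categories"]
)
print(category_counts.most_common())
```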

## Retrieval and Evaluation
The following example shows how the dataset can be used to build a semantic search application.
> This example is based on [clddp](https://github.com/kwang2049/clddp/tree/main) (`pip install -U clddp`). One can further explore this [example](https://github.com/kwang2049/clddp/blob/main/examples/search_fiqa.sh) for convenient multi-GPU exact search.

```python
# Please install clddp with `pip install -U clddp`
from typing import Dict

import numpy as np
import pytrec_eval
import torch
from clddp.dm import Passage, Query, Separator
from clddp.retriever import Pooling, Retriever, RetrieverConfig, SimilarityFunction
from datasets import load_dataset


# Define the retriever (DRAGON+ from https://arxiv.org/abs/2302.07452)
class DRAGONPlus(Retriever):
    def __init__(self) -> None:
        config = RetrieverConfig(
            query_model_name_or_path="facebook/dragon-plus-query-encoder",
            passage_model_name_or_path="facebook/dragon-plus-context-encoder",
            shared_encoder=False,
            sep=Separator.blank,
            pooling=Pooling.cls,
            similarity_function=SimilarityFunction.dot_product,
            query_max_length=512,
            passage_max_length=512,
        )
        super().__init__(config)


# Load data:
passages = load_dataset("UKPLab/dapr", "ConditionalQA-corpus", split="test")
queries = load_dataset("UKPLab/dapr", "ConditionalQA-queries", split="test")
qrels_rows = load_dataset("UKPLab/dapr", "ConditionalQA-qrels", split="test")
qrels: Dict[str, Dict[str, float]] = {}
for qrel_row in qrels_rows:
    qid = qrel_row["query_id"]
    pid = qrel_row["corpus_id"]
    rel = qrel_row["score"]
    qrels.setdefault(qid, {})
    qrels[qid][pid] = rel

# Encode queries and passages: (refer to https://github.com/kwang2049/clddp/blob/main/examples/search_fiqa.sh for multi-GPU exact search)
retriever = DRAGONPlus()
retriever.eval()
queries = [Query(query_id=query["_id"], text=query["text"]) for query in queries]
passages = [
    Passage(passage_id=passage["_id"], text=passage["text"]) for passage in passages
]
query_embeddings = retriever.encode_queries(queries)
with torch.no_grad():  # Takes around a minute on a V100 GPU
    passage_embeddings, passage_mask = retriever.encode_passages(passages)

# Calculate the similarities and keep top-K:
similarity_scores = torch.matmul(
    query_embeddings, passage_embeddings.t()
)  # (query_num, passage_num)
topk = torch.topk(similarity_scores, k=10)
topk_values: torch.Tensor = topk[0]
topk_indices: torch.LongTensor = topk[1]
topk_value_lists = topk_values.tolist()
topk_index_lists = topk_indices.tolist()

# Run evaluation with pytrec_eval:
retrieval_scores: Dict[str, Dict[str, float]] = {}
for query_i, (values, indices) in enumerate(zip(topk_value_lists, topk_index_lists)):
    query_id = queries[query_i].query_id
    retrieval_scores.setdefault(query_id, {})
    for value, passage_i in zip(values, indices):
        passage_id = passages[passage_i].passage_id
        retrieval_scores[query_id][passage_id] = value
evaluator = pytrec_eval.RelevanceEvaluator(
    query_relevance=qrels, measures=["ndcg_cut_10"]
)
query_performances: Dict[str, Dict[str, float]] = evaluator.evaluate(retrieval_scores)
ndcg = np.mean([score["ndcg_cut_10"] for score in query_performances.values()])
print(ndcg)  # 0.21796083196880855
```

## Note
This dataset was created with `datasets==2.15.0`. Make sure to use this or a newer version of the datasets library.

## Citation
If you use the code/data, feel free to cite our publication [DAPR: A Benchmark on Document-Aware Passage Retrieval](https://arxiv.org/abs/2305.13915):
```bibtex 
@article{wang2023dapr,
    title = "DAPR: A Benchmark on Document-Aware Passage Retrieval",
    author = "Kexin Wang and Nils Reimers and Iryna Gurevych", 
    journal= "arXiv preprint arXiv:2305.13915",
    year = "2023",
    url = "https://arxiv.org/abs/2305.13915",
}
```