Giguru Scheuer committed
Commit a50bb4c · 1 Parent(s): b572f80

Added 'test_collection'

Files changed (2)
  1. test.py +4 -0
  2. trec-cast-2019-multi-turn.py +35 -6
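Note: the new 'test_collection' configuration also becomes the default (DEFAULT_CONFIG_NAME), which is why the bare load_dataset() call added to test.py resolves to it. A minimal sketch of selecting it by name instead, assuming the script is run from this repository checkout (example generation for this configuration is not implemented yet, see the NotImplementedError branch below):

from datasets import load_dataset

# Explicitly request the configuration added in this commit; equivalent to the
# implicit default used in test.py now that DEFAULT_CONFIG_NAME is set.
collection = load_dataset('trec-cast-2019-multi-turn.py', 'test_collection')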
test.py CHANGED
@@ -1,4 +1,8 @@
 from datasets import load_dataset
+
+collection = load_dataset('trec-cast-2019-multi-turn.py')
+collection.items()
+
 qrels = load_dataset('trec-cast-2019-multi-turn.py', 'qrels')
 qrels.items()
 
trec-cast-2019-multi-turn.py CHANGED
@@ -20,11 +20,22 @@ import csv
 
 # Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
+@misc{dalton2020trec,
+      title={TREC CAsT 2019: The Conversational Assistance Track Overview},
+      author={Jeffrey Dalton and Chenyan Xiong and Jamie Callan},
+      year={2020},
+      eprint={2003.13624},
+      archivePrefix={arXiv},
+      primaryClass={cs.IR}
+}
 """
 
 # You can copy an official description
 _DESCRIPTION = """\
-
+The Conversational Assistance Track (CAsT) is a new track for TREC 2019 to facilitate Conversational Information
+Seeking (CIS) research and to create a large-scale reusable test collection for conversational search systems.
+The document corpus is 38,426,252 passages from the TREC Complex Answer Retrieval (CAR) and Microsoft MAchine
+Reading COmprehension (MARCO) datasets.
 """
 
 _HOMEPAGE = "http://www.treccast.ai"
@@ -37,12 +48,16 @@ _URL = "https://huggingface.co/datasets/uva-irlab/trec-cast-2019-multi-turn/reso
 _URLs = {
     'topics': _URL+"cast2019_test_annotated.tsv",
     'qrels': _URL+"2019qrels.txt",
+    'test_collection': {
+        'msmarco': 'https://msmarco.blob.core.windows.net/msmarcoranking/collection.tar.gz',
+        'car': "http://trec-car.cs.unh.edu/datareleases/v2.0/paragraphCorpus.v2.0.tar.xz",
+    }
 }
 
 
 class TrecCast2019MultiTurn(datasets.GeneratorBasedBuilder):
     """
-    Voskarides et al. have preprocessed CANARD in different ways depending on their experiment.
+
     """
 
     VERSION = datasets.Version("1.0.0")
@@ -59,12 +74,19 @@ class TrecCast2019MultiTurn(datasets.GeneratorBasedBuilder):
     # data = datasets.load_dataset('my_dataset', 'first_domain')
     # data = datasets.load_dataset('my_dataset', 'second_domain')
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="qrels", version=VERSION, description=""),
-        datasets.BuilderConfig(name="topics", version=VERSION, description=""),
+        datasets.BuilderConfig(name="qrels",
+                               version=VERSION,
+                               description=""),
+        datasets.BuilderConfig(name="topics",
+                               version=VERSION,
+                               description="The topics contain the queries, query IDs and their history."),
+        datasets.BuilderConfig(name="test_collection",
+                               version=VERSION,
+                               description="The test collection will provide the passages of TREC CAR and MSMARCO"),
     ]
 
     # It's not mandatory to have a default configuration. Just use one if it make sense.
-    DEFAULT_CONFIG_NAME = None
+    DEFAULT_CONFIG_NAME = "test_collection"
 
     def _info(self):
         # This is the name of the configuration selected in BUILDER_CONFIGS above
@@ -82,6 +104,10 @@ class TrecCast2019MultiTurn(datasets.GeneratorBasedBuilder):
                     'rank': datasets.Value("string"),
                 })),
             })
+        elif self.config.name == 'test_collection':
+            features = datasets.Features({
+                "docid": datasets.Value("string"),
+            })
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
@@ -138,7 +164,7 @@ class TrecCast2019MultiTurn(datasets.GeneratorBasedBuilder):
             for qid in qrels.keys():
                 yield qid, {'qid': qid, 'qrels': qrels[qid]}
 
-        if split == 'topics':
+        elif split == 'topics':
             topics_file = csv.reader(open(file), delimiter="\t")
             topics = defaultdict(list)
             for row in topics_file:
@@ -152,3 +178,6 @@ class TrecCast2019MultiTurn(datasets.GeneratorBasedBuilder):
                 query = queries[idx]
                 qid = f"{conversation_id}_{str(idx+1)}"
                 yield qid, ({'query': query, 'history': queries[:idx], 'qid': qid})
+
+        else:
+            raise NotImplementedError(f"'{split}' is not yet implemented")
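
The commit registers download URLs and a 'docid' feature for the test collection, but its branch of _generate_examples is left to the new else clause, which raises NotImplementedError. A minimal sketch, not part of this commit, of what that branch could yield for the MS MARCO half of the corpus: it assumes the downloaded collection.tar.gz extracts to a tab-separated collection.tsv of (passage id, passage text) rows and follows the TREC CAsT convention of prefixing MS MARCO passage ids with 'MARCO_'. The TREC CAR paragraphs (normally read with the trec-car-tools package) are omitted here.

import os

def _generate_test_collection_examples(extracted_dir):
    """Hypothetical helper sketching the missing 'test_collection' branch.

    `extracted_dir` is assumed to be the directory produced by
    dl_manager.download_and_extract() for the MS MARCO archive and to
    contain a collection.tsv with one "pid<TAB>passage" row per line.
    """
    path = os.path.join(extracted_dir, "collection.tsv")
    with open(path, encoding="utf-8") as f:
        for line in f:
            pid, _passage = line.rstrip("\n").split("\t", 1)
            # TREC CAsT document ids prefix MS MARCO passage ids with "MARCO_".
            docid = f"MARCO_{pid}"
            # Only 'docid' is declared in _info() for this configuration.
            yield docid, {"docid": docid}

Inside the builder this logic would sit under split == 'test_collection', with the archives fetched in _split_generators via dl_manager.download_and_extract(_URLs['test_collection']).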