Giguru Scheuer committed
Commit de8df7e · 1 Parent(s): ad4fa41

Initial commit to test in other repository

Files changed (2)
  1. test.py +6 -0
  2. trec_cast_2019_multi_turn.py +154 -0
test.py ADDED
@@ -0,0 +1,6 @@
+ from datasets import load_dataset
+ qrels = load_dataset('trec_cast_2019_multi_turn.py', 'qrels')
+ qrels.items()
+
+ topics = load_dataset('trec_cast_2019_multi_turn.py', 'topics')
+ topics.items()
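Note: `load_dataset` on a local script returns a DatasetDict keyed by split, and both configurations below define only a `test` split, so a slightly more telling smoke test indexes into it. A minimal sketch, assuming the script file sits in the working directory (the `datasets` library materialises a Sequence of Features as a dict of lists):

    from datasets import load_dataset

    # Both configs expose a single 'test' split (see _split_generators below).
    qrels = load_dataset('trec_cast_2019_multi_turn.py', 'qrels')
    print(qrels['test'][0])   # {'qid': ..., 'qrels': {'docno': [...], 'rank': [...]}}

    topics = load_dataset('trec_cast_2019_multi_turn.py', 'topics')
    print(topics['test'][0])  # {'qid': ..., 'history': [...], 'query': ...}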
trec_cast_2019_multi_turn.py ADDED
@@ -0,0 +1,154 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import csv
+ from collections import defaultdict
+
+ import datasets
+
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ """
+
+ # You can copy an official description
+ _DESCRIPTION = """\
+
+ """
+
+ _HOMEPAGE = "http://www.treccast.ai"
+
+ _LICENSE = ""
+
+ # The HuggingFace datasets library doesn't host the datasets but only points to the original files
+ # This can be an arbitrary nested dict/list of URLs (see below in the `_split_generators` method)
+ _URL = "https://huggingface.co/datasets/uva-irlab/trec-cast-2019-multi-turn/resolve/main/"
+ _URLs = {
+     'topics': _URL + "cast2019_test_annotated.tsv",
+     'qrels': _URL + "2019qrels.txt",
+ }
+
+
+ class TrecCast2019MultiTurn(datasets.GeneratorBasedBuilder):
+     """
+     TREC CAsT 2019 multi-turn conversational search data: annotated test topics and their relevance judgments (qrels).
+     """
+
+     VERSION = datasets.Version("1.0.0")
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     # If you need to make complex sub-parts in the dataset with configurable options,
+     # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig:
+     # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+     # You will be able to load one or the other configuration in the following list with
+     # data = datasets.load_dataset('my_dataset', 'first_domain')
+     # data = datasets.load_dataset('my_dataset', 'second_domain')
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="qrels", version=VERSION, description=""),
+         datasets.BuilderConfig(name="topics", version=VERSION, description=""),
+     ]
+
+     # It's not mandatory to have a default configuration. Just use one if it makes sense.
+     DEFAULT_CONFIG_NAME = None
+
+     def _info(self):
+         # self.config.name is the name of the configuration selected in BUILDER_CONFIGS above
+         if self.config.name == "topics":
+             features = datasets.Features({
+                 "qid": datasets.Value("string"),
+                 "history": datasets.features.Sequence(feature=datasets.Value("string")),
+                 "query": datasets.Value("string"),
+             })
+         elif self.config.name == "qrels":
+             features = datasets.Features({
+                 "qid": datasets.Value("string"),
+                 "qrels": datasets.features.Sequence(feature=datasets.Features({
+                     "docno": datasets.Value("string"),
+                     "rank": datasets.Value("string"),
+                 })),
+             })
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types.
+             # The features are defined above because they differ between the two configurations.
+             features=features,
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # This method downloads/extracts the data and defines the splits depending on the configuration.
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name.
+
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
+         # It can accept any type or nested list/dict and will give back the same structure with the URLs replaced with paths to local files.
+         # By default archives are extracted and a path to a cached folder where they were extracted is returned instead of the archive.
+         my_urls = _URLs[self.config.name]
+         downloaded_files = dl_manager.download_and_extract(my_urls)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={  # These kwargs will be passed to _generate_examples
+                     "file": downloaded_files,
+                     "split": self.config.name,
+                 },
+             ),
+         ]
+
+     def _generate_examples(
+         self, file, split  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     ):
+         """Yields examples as (key, example) tuples."""
+         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is here for legacy reasons (tfds) and is not important in itself.
+
+         if split == 'qrels':
+             # TREC-style qrels are space-separated: qid, iteration, docno, relevance label.
+             qrels = defaultdict(list)
+             with open(file) as f:
+                 for row in csv.reader(f, delimiter=" "):
+                     qid = row[0]
+                     docno = row[2]
+                     rank = row[3]
+                     qrels[qid].append({'rank': rank, 'docno': docno})
+
+             for qid in qrels:
+                 yield qid, {'qid': qid, 'qrels': qrels[qid]}
+
+         if split == 'topics':
+             # Topic qids have the form "<conversation_id>_<turn_number>"; group the queries per conversation.
+             topics = defaultdict(list)
+             with open(file) as f:
+                 for row in csv.reader(f, delimiter="\t"):
+                     qid, query = row
+                     conversation_id, question_number = qid.split('_')
+                     topics[conversation_id].append(query)
+
+             for conversation_id, queries in topics.items():
+                 for idx, query in enumerate(queries):
+                     qid = f"{conversation_id}_{idx + 1}"
+                     # All earlier turns in the conversation form the history of the current query.
+                     yield qid, {'query': query, 'history': queries[:idx], 'qid': qid}
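The two configurations join on `qid`: a topics row carries one conversational turn (its query plus the queries of all earlier turns as history), and the matching qrels row carries the documents judged for that turn. A minimal sketch of the join, with illustrative names and assuming the script file is local:

    from datasets import load_dataset

    topics = load_dataset('trec_cast_2019_multi_turn.py', 'topics')['test']
    qrels = load_dataset('trec_cast_2019_multi_turn.py', 'qrels')['test']

    # Index the relevance judgments by qid for one lookup per turn.
    judgments = {row['qid']: row['qrels'] for row in qrels}

    for turn in topics:
        judged = judgments.get(turn['qid'], {'docno': [], 'rank': []})
        print(f"{turn['qid']}: {len(turn['history'])} prior turns, "
              f"{len(judged['docno'])} judged documents")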