Pavankalyan committed on
Commit ddacfed · verified · 1 Parent(s): ff38014

Delete WikiDO.py

Files changed (1)
  1. WikiDO.py +0 -173
WikiDO.py DELETED
@@ -1,173 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
-
- import csv
- import json
- import os
-
- import datasets
-
-
- # TODO: Add BibTeX citation
- # Find for instance the citation on arxiv or on the dataset repo/website
- _CITATION = """\
- @InProceedings{huggingface:dataset,
- title = {A great new dataset},
- author={huggingface, Inc.
- },
- year={2020}
- }
- """
-
- # You can copy an official description
- _DESCRIPTION = """\
- To address this gap, we introduce WikiDO (drawn from the Wikipedia Diversity Observatory), a new cross-modal retrieval benchmark to assess the OOD generalization capabilities of pretrained VLMs.
- It consists of 380K image-text pairs from Wikipedia with domain labels, along with carefully curated, human-verified in-distribution (ID) and OOD test sets of 3K pairs each.
- The image-text pairs cover a wide diversity of topics.
- """
-
- # Add a link to an official homepage for the dataset here
- _HOMEPAGE = "https://neurips-wikido.github.io/WIKIDO/"
-
- # Add the licence for the dataset here if you can find it
- _LICENSE = "CC-by-SA-4.0"
-
- # TODO: Add link to the official dataset URLs here
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
- _URLS = {
-     "images": "./images.zip",
- }
-
-
- # Name of the dataset usually matches the script name with CamelCase instead of snake_case
- class WikiDO(datasets.GeneratorBasedBuilder):
- """ Short description of my dataset."""
-
-     VERSION = datasets.Version("1.1.0")
-
-     # This is an example of a dataset with multiple configurations.
-     # If you don't want/need to define several sub-sets in your dataset,
-     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-     # If you need to make complex sub-parts in the datasets with configurable options
-     # You can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
-     # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-     # You will be able to load one or the other configurations in the following list with
-     # data = datasets.load_dataset('my_dataset', 'first_domain')
-     # data = datasets.load_dataset('my_dataset', 'second_domain')
-
-     # DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
-
-     def _info(self):
-         # This method specifies the datasets.DatasetInfo object, which contains information and typings for the dataset
-         features = datasets.Features(
-             {
-                 "image": datasets.Value("string"),
-                 "orig_cap": datasets.Value("string"),
-                 "caption": datasets.Value("string"),
-                 "topic": datasets.Value("string"),
-                 "page_id": datasets.Value("string"),
-                 "page_title": datasets.Value("string"),
-             }
-         )
-
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=features,  # Here we define them above because they are different between the two configurations
-             # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below and
-             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-             # supervised_keys=("sentence", "label"),
-             # Homepage of the dataset for documentation
-             homepage=_HOMEPAGE,
-             # License for the dataset if available
-             license=_LICENSE,
-             # Citation for the dataset
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         # Define the local paths to the dataset files
-         data_dir = os.path.dirname(__file__)
-
-         return [
-             datasets.SplitGenerator(
-                 # Distinct split names keep the two training subsets from colliding.
-                 name="train_100k",
-                 gen_kwargs={
-                     "filepath": os.path.join(data_dir, "train_100k.jsonl"),
-                     "split": "train_100k",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name="train_200k",
-                 gen_kwargs={
-                     "filepath": os.path.join(data_dir, "train_200k.jsonl"),
-                     "split": "train_200k",
-                 },
-             ),
-             # datasets.SplitGenerator(
-             #     name=datasets.Split.TRAIN,
-             #     gen_kwargs={
-             #         "filepath": os.path.join(data_dir, "train_full.jsonl"),
-             #         "split": "train_full",
-             #     },
-             # ),
-             # datasets.SplitGenerator(
-             #     name=datasets.Split.VALIDATION,
-             #     gen_kwargs={
-             #         "filepath": os.path.join(data_dir, "val_full.jsonl"),
-             #         "split": "val_full",
-             #     },
-             # ),
-             # datasets.SplitGenerator(
-             #     name=datasets.Split.VALIDATION,
-             #     gen_kwargs={
-             #         "filepath": os.path.join(data_dir, "val_verified.jsonl"),
-             #         "split": "val_verified(1k)",
-             #     },
-             # ),
-             # datasets.SplitGenerator(
-             #     name=datasets.Split.TEST,
-             #     gen_kwargs={
-             #         "filepath": os.path.join(data_dir, "id_test_verified.jsonl"),
-             #         "split": "id_test_verified(3k)",
-             #     },
-             # ),
-             # datasets.SplitGenerator(
-             #     name=datasets.Split.TEST,
-             #     gen_kwargs={
-             #         "filepath": os.path.join(data_dir, "ood_test_full.jsonl"),
-             #         "split": "ood_test_full",
-             #     },
-             # ),
-             # datasets.SplitGenerator(
-             #     name=datasets.Split.TEST,
-             #     gen_kwargs={
-             #         "filepath": os.path.join(data_dir, "ood_test_verified.jsonl"),
-             #         "split": "ood_test_verified(3k)",
-             #     },
-             # ),
-         ]
-
-     def _generate_examples(self, filepath, split):
-         # Open the file and read the data
-         with open(filepath, encoding="utf-8") as f:
-             for id_, line in enumerate(f):
-                 data = json.loads(line)
-                 yield id_, data
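
Note: the removed script declares _URLS = {"images": "./images.zip"} but never passes it to the download manager, so only the JSONL files are actually read. Below is a minimal sketch of how the archive would typically be wired in; the images_dir kwarg is an illustrative assumption, not part of the original script, and _generate_examples would need a matching parameter to consume it.

    def _split_generators(self, dl_manager):
        # Resolve and extract the image archive declared in _URLS.
        archives = dl_manager.download_and_extract(_URLS)  # {"images": "<path to extracted dir>"}
        data_dir = os.path.dirname(__file__)
        return [
            datasets.SplitGenerator(
                name="train_100k",
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "train_100k.jsonl"),
                    "images_dir": archives["images"],  # hypothetical extra kwarg
                    "split": "train_100k",
                },
            ),
        ]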
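
For context, a script-based builder like the one removed here is consumed through datasets.load_dataset; recent versions of the library also require trust_remote_code=True for datasets that ship a loading script. A minimal sketch, assuming a hypothetical repository id Pavankalyan/WikiDO inferred from the commit author (the actual id is not shown in this commit):

    from datasets import load_dataset

    # Hypothetical repo id; substitute the real one.
    ds = load_dataset("Pavankalyan/WikiDO", split="train_100k", trust_remote_code=True)
    print(ds[0]["caption"], ds[0]["topic"])

After this commit the builder script no longer exists in the repo, so loading would instead rely on whatever data files and dataset card configuration remain.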