eduvedras committed
Commit c4e15f8 · verified · 1 parent: 9387da1

New balanced dataset

Files changed (5)
  1. VQG.py +114 -0
  2. images.tar.gz +3 -0
  3. metadata_test.csv +0 -0
  4. metadata_train.csv +0 -0
  5. metadata_validation.csv +0 -0
VQG.py ADDED
@@ -0,0 +1,114 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """VQG: visual questions over data-science charts."""
+
+ import datasets
+ import pandas as pd
+
+ logger = datasets.logging.get_logger(__name__)
+
+ # NOTE: citation kept from the SQuAD template this script was derived from.
+ _CITATION = """\
+ @article{2016arXiv160605250R,
+        author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},
+                  Konstantin and {Liang}, Percy},
+         title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}",
+       journal = {arXiv e-prints},
+          year = 2016,
+           eid = {arXiv:1606.05250},
+         pages = {arXiv:1606.05250},
+ archivePrefix = {arXiv},
+        eprint = {1606.05250},
+ }
+ """
+
+ _DESCRIPTION = """\
+ Visual questions for data science charts.
+ """
+
+ _URL = "https://huggingface.co/datasets/eduvedras/VQG/resolve/main/images.tar.gz"
+
+ _METADATA_URLS = {
+     "train": "https://huggingface.co/datasets/eduvedras/VQG/resolve/main/metadata_train.csv",
+     "validation": "https://huggingface.co/datasets/eduvedras/VQG/resolve/main/metadata_validation.csv",
+     "test": "https://huggingface.co/datasets/eduvedras/VQG/resolve/main/metadata_test.csv",
+ }
+
+ class VQGTargz(datasets.GeneratorBasedBuilder):
+     """Chart images paired with questions, shipped as a tar.gz archive."""
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "Id": datasets.Value("string"),
+                     "Question": datasets.Value("string"),
+                     "Chart": datasets.Image(),
+                     "Chart_name": datasets.Value("string"),
+                 }
+             ),
+             # No default supervised_keys: the input is the chart image
+             # and the target is the question.
+             supervised_keys=None,
+             homepage="https://huggingface.co/datasets/eduvedras/VQG",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         path = dl_manager.download(_URL)
+         metadata_paths = dl_manager.download(_METADATA_URLS)
+
+         # Each split gets its own archive iterator; a single shared
+         # iterator would be exhausted by whichever split ran first.
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"images": dl_manager.iter_archive(path),
+                             "metadata_path": metadata_paths["train"]},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"images": dl_manager.iter_archive(path),
+                             "metadata_path": metadata_paths["validation"]},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"images": dl_manager.iter_archive(path),
+                             "metadata_path": metadata_paths["test"]},
+             ),
+         ]
+
+     def _generate_examples(self, images, metadata_path):
+         """Yields one example per metadata row with its matching chart image."""
+         metadata = pd.read_csv(metadata_path)
+         # Read the archive once up front: tar iterators can only be
+         # consumed a single time, so re-scanning per row would fail.
+         image_bytes = {filepath.split("/")[-1]: image.read()
+                        for filepath, image in images}
+         idx = 0
+         for _, row in metadata.iterrows():
+             for filename, data in image_bytes.items():
+                 if row["Chart"] in filename:
+                     yield idx, {
+                         "Chart": {"path": filename, "bytes": data},
+                         "Question": row["Question"],
+                         "Id": row["Id"],
+                         "Chart_name": row["Chart"],
+                     }
+                     break
+             idx += 1
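
For context, a minimal usage sketch of the loader this commit adds; the repo id comes from the homepage above, while the column accesses and the call to .show() are illustrative only:

    # Hypothetical consumer of the VQG loading script.
    from datasets import load_dataset

    ds = load_dataset("eduvedras/VQG")  # downloads images.tar.gz and the CSVs
    print(ds)                           # train / validation / test splits
    sample = ds["train"][0]
    print(sample["Id"], sample["Question"], sample["Chart_name"])
    sample["Chart"].show()              # the Image feature decodes to a PIL image

Recent datasets releases may additionally require trust_remote_code=True when loading script-based datasets like this one.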
images.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0b8a9b93accf013b9174e2cbfc817ce2c0d4dc1ded6e929061fd53908fd4b5b9
+ size 133
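
The three lines above are the Git LFS pointer stored in git history, not the archive itself. A sketch, assuming huggingface_hub is available, of fetching the resolved file:

    # Download the real archive behind the LFS pointer.
    from huggingface_hub import hf_hub_download

    archive_path = hf_hub_download(repo_id="eduvedras/VQG",
                                   filename="images.tar.gz",
                                   repo_type="dataset")
    print(archive_path)  # local path to the resolved tar.gz, not the pointer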
metadata_test.csv ADDED
The diff for this file is too large to render. See raw diff
 
metadata_train.csv ADDED
The diff for this file is too large to render. See raw diff
 
metadata_validation.csv ADDED
The diff for this file is too large to render. See raw diff