Commit 5f25043
1 Parent(s): d27a542
Update demo2.py
demo2.py CHANGED

@@ -1,4 +1,5 @@
-#
+# coding=utf-8
+# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,128 +12,101 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# TODO: Address all TODOs and remove all explanatory comments
-"""TODO: Add a description here."""
-…

+# Lint as: python3
+"""SQUAD: The Stanford Question Answering Dataset."""
 import csv
 import json
-import os

 import datasets
+from datasets import TextClassification
+

+logger = datasets.logging.get_logger(__name__)

+
-…
-},
-year=…
+_CITATION = """\
+@article{2016arXiv160605250R,
+       author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},
+                 Konstantin and {Liang}, Percy},
+        title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}",
+      journal = {arXiv e-prints},
+         year = 2016,
+          eid = {arXiv:1606.05250},
+        pages = {arXiv:1606.05250},
+archivePrefix = {arXiv},
+       eprint = {1606.05250},
 }
 """

-…
+_DESCRIPTION = """\
+Stanford Question Answering Dataset (SQuAD) is a reading comprehension \
+dataset, consisting of questions posed by crowdworkers on a set of Wikipedia \
+articles, where the answer to every question is a segment of text, or span, \
+from the corresponding reading passage, or the question might be unanswerable.
 """

-…
-# TODO: Add the licence for the dataset here if you can find it
-_LICENSE = ""
-…
-# TODO: Add link to the official dataset URLs here
-# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-train_url = "https://dev-s3.outer.fuxi.netease.com/models/075d0c8a362646ad98a0c7a70286e93a/train.csv?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20220620T023414Z&X-Amz-SignedHeaders=host&X-Amz-Expires=14515&X-Amz-Credential=HHRC8E0HQWZYBIVXJSZE%2F20220620%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Signature=10b7e6d8b179ef7849241fbadac6d29be4723fc25d15a5d5bd541082e2b81931"
-test_url = "https://dev-s3.outer.fuxi.netease.com/models/075d0c8a362646ad98a0c7a70286e93a/test.csv?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20220620T023414Z&X-Amz-SignedHeaders=host&X-Amz-Expires=14515&X-Amz-Credential=HHRC8E0HQWZYBIVXJSZE%2F20220620%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Signature=11c3ae701c7aa1e2444e479b76eb1a1bf1258c8afa2f67ab759beb3956350e2e"
+train_url = "https://dev-s3.outer.fuxi.netease.com/models/075d0c8a362646ad98a0c7a70286e93a/train.csv?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20220620T072759Z&X-Amz-SignedHeaders=host&X-Amz-Expires=604&X-Amz-Credential=HHRC8E0HQWZYBIVXJSZE%2F20220620%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Signature=8c6493e5c5b9375d4b4af496e90b7767cc8a22abb247d562993b2593fce71873"
+test_url = "https://dev-s3.outer.fuxi.netease.com/models/075d0c8a362646ad98a0c7a70286e93a/test.csv?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20220620T072759Z&X-Amz-SignedHeaders=host&X-Amz-Expires=604&X-Amz-Credential=HHRC8E0HQWZYBIVXJSZE%2F20220620%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Signature=2711f1125f654b392ca7a57809dc39b0819f2211dd77510a6fe9ea84dd164f1a"

 _URLS = {
-…
     "train": train_url,
     "test": test_url,
 }


-…
-class DemoDataset(datasets.GeneratorBasedBuilder):
-    """TODO: Short description of my dataset."""
-…
-    VERSION = datasets.Version("1.0.0")
+class Demo2Config(datasets.BuilderConfig):

-…
-    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+    def __init__(self, **kwargs):
+        super(Demo2Config, self).__init__(**kwargs)

-    # If you need to make complex sub-parts in the datasets with configurable options
-    # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-    # BUILDER_CONFIG_CLASS = MyBuilderConfig

-…
-    # data = datasets.load_dataset('my_dataset', 'first_domain')
-    # data = datasets.load_dataset('my_dataset', 'second_domain')
+class Demo2(datasets.GeneratorBasedBuilder):
     BUILDER_CONFIGS = [
-…
+        Demo2Config(
+            name="plain_text",
+            version=datasets.Version("1.0.0", ""),
+            description="Plain text",
+        ),
     ]

     def _info(self):
-        features = datasets.Features(
-            {
-                "index": datasets.Value("string"),
-                "text": datasets.Value("string"),
-                "label": datasets.Value("string")
-            }
-        )
-…
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
-…
+            features=datasets.Features(
+                {
+                    "index": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                    "label": datasets.Value("string")
+                }
+            ),
+            # No default supervised_keys (as we have to pass both question
+            # and context as input).
+            supervised_keys=None,
+            homepage="https://rajpurkar.github.io/SQuAD-explorer/",
             citation=_CITATION,
+            task_templates=[
+                TextClassification(
+                    text_column="text", label_column="label"
+                )
+            ],
         )

     def _split_generators(self, dl_manager):
-…
+        downloaded_files = dl_manager.download_and_extract(_URLS)
+
         return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepath": download_files['train']
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": download_files['test']
-                },
-            ),
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["test"]}),
         ]

-    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(self, filepath):
+        logger.info("generating examples from = %s", filepath)
         with open(filepath, encoding="utf-8") as f:
-…
-            for key, row in enumerate(…
-…
-                    "index": row["index"],
-                    "text": row["text"],
-                    "label": row["label"]
-                }
+            demo2 = csv.DictReader(f)
+            for key, row in enumerate(demo2):
+                yield key, {
+                    "id": key,
+                    "text": row['text'],
+                    "label": row['label'],
+                }
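
A few review notes on the new version follow; each code sketch is a suggestion under stated assumptions, not part of the commit.

First, the new import. In the datasets releases that ship task templates (roughly 1.5 through 2.x), TextClassification lives in the datasets.tasks module and, as far as I know, is not re-exported at the package top level, so the committed "from datasets import TextClassification" should raise an ImportError. A minimal sketch of the import the script likely needs, assuming such a version is installed:

    # Assumption: a datasets 1.x/2.x release with task-template support.
    # The committed `from datasets import TextClassification` fails with
    # ImportError there; the template is importable from datasets.tasks:
    from datasets.tasks import TextClassification

    # Same constructor arguments as used in _info() above.
    template = TextClassification(text_column="text", label_column="label")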
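Second, the features declared in _info() ("index", "text", "label") do not match the keys yielded by _generate_examples() ("id", "text", "label"); when the builder encodes examples against the declared schema, the stray "id" key and the missing "index" key should make generation fail. A sketch of a generator consistent with the declared features, assuming the CSVs really carry index, text, and label columns (it reuses the script's csv import and module-level logger):

    def _generate_examples(self, filepath):
        """Yields examples keyed the same way as the features declared in _info()."""
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for key, row in enumerate(reader):
                yield key, {
                    "index": row["index"],  # the commit yields "id": key here
                    "text": row["text"],
                    "label": row["label"],
                }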
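Third, the TextClassification template generally expects its label column to be a ClassLabel feature; with label declared as Value("string"), aligning the template with the features (for example via prepare_for_task) is likely to raise. If the label set is known, the features could be declared along these lines (the class names here are placeholders, not taken from the dataset):

    features = datasets.Features(
        {
            "index": datasets.Value("string"),
            "text": datasets.Value("string"),
            # Hypothetical class names -- replace with the labels actually
            # present in train.csv / test.csv.
            "label": datasets.ClassLabel(names=["negative", "positive"]),
        }
    )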
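Finally, two smaller points. The test CSV is registered under datasets.Split.VALIDATION, so it comes back as the "validation" split rather than "test". And both presigned S3 URLs carry X-Amz-Expires=604 (about ten minutes from the X-Amz-Date timestamp), so the downloads will start failing shortly after signing; hosting the CSVs in the repo or behind stable URLs would be more durable. Assuming the fixes above, loading the script would look roughly like this (the local path is hypothetical):

    import datasets

    # "./demo2.py" is a placeholder path to this script; the presigned URLs
    # must still be valid for download_and_extract() to succeed.
    data = datasets.load_dataset("./demo2.py", "plain_text")
    print(data["train"][0])       # {"index": ..., "text": ..., "label": ...}
    print(data["validation"][0])  # the test.csv rows land here, not in "test"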