parquet-converter committed on
Commit
583abf0
·
1 Parent(s): 38bfb57

Update parquet files

Browse files
.gitattributes DELETED
@@ -1,30 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bin.* filter=lfs diff=lfs merge=lfs -text
5
- *.bz2 filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.model filter=lfs diff=lfs merge=lfs -text
12
- *.msgpack filter=lfs diff=lfs merge=lfs -text
13
- *.onnx filter=lfs diff=lfs merge=lfs -text
14
- *.ot filter=lfs diff=lfs merge=lfs -text
15
- *.parquet filter=lfs diff=lfs merge=lfs -text
16
- *.pb filter=lfs diff=lfs merge=lfs -text
17
- *.pt filter=lfs diff=lfs merge=lfs -text
18
- *.pth filter=lfs diff=lfs merge=lfs -text
19
- *.rar filter=lfs diff=lfs merge=lfs -text
20
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
- *.tar.* filter=lfs diff=lfs merge=lfs -text
22
- *.tflite filter=lfs diff=lfs merge=lfs -text
23
- *.tgz filter=lfs diff=lfs merge=lfs -text
24
- *.xz filter=lfs diff=lfs merge=lfs -text
25
- *.zip filter=lfs diff=lfs merge=lfs -text
26
- *.zstandard filter=lfs diff=lfs merge=lfs -text
27
- *tfevents* filter=lfs diff=lfs merge=lfs -text
28
- test.csv filter=lfs diff=lfs merge=lfs -text
29
- train.csv filter=lfs diff=lfs merge=lfs -text
30
- val.csv filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
README.md DELETED
@@ -1,5 +0,0 @@
1
- For details, please refer to the following links.
2
-
3
- Github repo: https://github.com/amazon-research/SC2QA-DRIL
4
-
5
- Paper: [Generating Self-Contained and Summary-Centric Question Answer Pairs via Differentiable Reward Imitation Learning](https://arxiv.org/pdf/2109.04689.pdf)
 
 
 
 
 
 
val.csv → plain_text/sc2q_commoncrawl-test.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:a961e7d8be528aad2068ec5aac75bb9930d94066afffa3192922523f029c9096
3
- size 30450007
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7263b1f3739d25d9117054bd4df6da5c2b7a605d14561192e522d0828c7bda4c
3
+ size 23551278
train.csv → plain_text/sc2q_commoncrawl-train.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7beb2806b0eaf8c9a4306b968a580bf1e66d74a31cd796e42a8a5208fa260289
3
- size 226095686
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:40252d522513d1f42cf97ccc3876365ba4fb6ac9006edf346f7f59d6ddc1837e
3
+ size 135146451
test.csv → plain_text/sc2q_commoncrawl-validation.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:5da4095515c164738ea9a6f49ee3facc4e11e2ebaacf5ceff7579e611d58d4a6
3
- size 39031361
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ee18f4c9cd57ed9369d83f100b966ea756c845c5ca5ab3508718cd34aa97a11a
3
+ size 18277766
sc2q_commoncrawl.py DELETED
@@ -1,99 +0,0 @@
1
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""(SC)^2QA: Self-Contained Summary-Centric QA Dataset.
This dataset (https://huggingface.co/datasets/sc2qa/sc2q_commoncrawl) only contains question and article pairs.
If you want {Question, Article, Summary, Length Constraint} 4-tuples, please load sc2qa_commoncrawl (https://huggingface.co/datasets/sc2qa/sc2qa_commoncrawl)
"""

import csv

import datasets


# Module-level logger, following the `datasets` library convention.
logger = datasets.logging.get_logger(__name__)


# BibTeX citation attached to the DatasetInfo (EMNLP 2021 paper).
_CITATION = """\
@article{zhou2021generating,
author = {Li Zhou, Kevin Small, Yong Zhang, Sandeep Atluri},
title = "{Generating Self-Contained and Summary-Centric Question Answer Pairs via Differentiable Reward Imitation Learning}",
conference = {The 2021 Conference on Empirical Methods in Natural Language Processing (EMNLP 2021)},
year = 2021,
}
"""

# Intentionally empty; the dataset card on the Hub carries the description.
_DESCRIPTION = """\
"""

# Direct download URLs for the three CSV splits hosted on the Hub.
# NOTE(review): this commit renames train.csv/val.csv/test.csv to parquet
# files, so these CSV URLs likely no longer resolve — verify against the repo.
_URLS = {
"train":"https://huggingface.co/datasets/sc2qa/sc2q_commoncrawl/resolve/main/train.csv",
"val":"https://huggingface.co/datasets/sc2qa/sc2q_commoncrawl/resolve/main/val.csv",
"test":"https://huggingface.co/datasets/sc2qa/sc2q_commoncrawl/resolve/main/test.csv",
}
47
-
48
class SC2QAConfig(datasets.BuilderConfig):
    """Configuration for the (SC)^2QA dataset builder.

    A thin passthrough over ``datasets.BuilderConfig``: it adds no fields
    of its own and simply forwards every keyword argument upward.
    """

    def __init__(self, **kwargs):
        """Create a config, forwarding all keyword arguments to the base class.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
58
-
59
-
60
class SC2QA(datasets.GeneratorBasedBuilder):
    """Builder for the (SC)^2QA question/article-pair dataset.

    Downloads three CSV splits (train/val/test) from the Hugging Face Hub
    and yields one example per CSV row with ``question``, ``article`` and
    ``url`` string fields.
    """

    BUILDER_CONFIGS = [
        SC2QAConfig(
            name="plain_text",
            version=datasets.Version("1.0.0", ""),
            description="Plain text",
        ),
    ]

    def _info(self):
        """Return the DatasetInfo: feature schema plus citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "question": datasets.Value("string"),
                    "article": datasets.Value("string"),
                    "url": datasets.Value("string"),
                }
            ),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download all three split files and map each to a SplitGenerator.

        Args:
            dl_manager: the datasets download manager supplied by the library.

        Returns:
            A list of SplitGenerators for TRAIN, VALIDATION and TEST, each
            carrying the local path of its downloaded CSV as ``filepath``.
        """
        downloaded_files = dl_manager.download_and_extract(_URLS)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["val"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(index, row)`` pairs from one CSV split file.

        Each row is a dict keyed by the CSV header (expected columns:
        question, article, url); the enumeration index is the example key.

        Args:
            filepath: local path of the downloaded CSV file for this split.
        """
        logger.info("generating examples from = %s", filepath)
        # newline="" is required by the csv module so that newlines embedded
        # inside quoted fields are parsed correctly instead of splitting rows.
        # NOTE(review): encoding="ascii" with errors="ignore" silently drops
        # every non-ASCII byte from the text; utf-8 is probably intended, but
        # it is kept as-is to avoid changing the emitted dataset content.
        with open(filepath, newline="", encoding="ascii", errors="ignore") as f:
            csv_reader = csv.DictReader(f)
            for i, row in enumerate(csv_reader):
                yield i, row