Convert dataset to Parquet (#6)
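With the splits stored as Parquet, `load_dataset` reads the data files directly instead of executing a loading script. A minimal sketch of the post-conversion usage, assuming only an installed `datasets` library and the repo id `cosmos_qa` as it appears on the Hub:

```python
# Sketch: loading the converted dataset. No loading script runs; the
# data/*.parquet files declared under `configs` in the README are fetched.
from datasets import load_dataset

ds = load_dataset("cosmos_qa")
print(ds)  # DatasetDict with train (25262), validation (2985), test (6963) rows
print(ds["train"][0]["question"])  # fields: id, context, question, answer0-3, label
```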
README.md CHANGED
@@ -1,15 +1,14 @@
 ---
 annotations_creators:
 - crowdsourced
-language:
-- en
 language_creators:
 - found
+language:
+- en
 license:
 - cc-by-4.0
 multilinguality:
 - monolingual
-pretty_name: CosmosQA
 size_categories:
 - 10K<n<100K
 source_datasets:
@@ -19,6 +18,7 @@ task_categories:
 task_ids:
 - multiple-choice-qa
 paperswithcode_id: cosmosqa
+pretty_name: CosmosQA
 dataset_info:
   features:
   - name: id
@@ -39,16 +39,25 @@ dataset_info:
     dtype: int32
   splits:
   - name: train
-    num_bytes: 17159918
+    num_bytes: 17156676
     num_examples: 25262
   - name: test
-    num_bytes: 5121479
+    num_bytes: 5120580
     num_examples: 6963
   - name: validation
-    num_bytes: 2186987
+    num_bytes: 2186585
     num_examples: 2985
-  download_size: 24399475
-  dataset_size: 24468384
+  download_size: 12029581
+  dataset_size: 24463841
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: test
+    path: data/test-*
+  - split: validation
+    path: data/validation-*
 ---
 
 # Dataset Card for "cosmos_qa"
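Because the new `configs` block maps each split to a plain Parquet glob, the files are also readable without the `datasets` library at all. A sketch, assuming `pandas` plus `huggingface_hub` installed (the latter provides the `hf://` fsspec filesystem):

```python
# Sketch: reading one split's Parquet file directly; the path mirrors the
# data_files globs declared in the README above.
import pandas as pd

df = pd.read_parquet("hf://datasets/cosmos_qa/data/validation-00000-of-00001.parquet")
print(df.shape)  # expected (2985, 8) per the split metadata

# The updated metadata is internally consistent: split byte counts sum to dataset_size.
assert 17156676 + 5120580 + 2186585 == 24463841
```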
cosmos_qa.py DELETED
@@ -1,116 +0,0 @@
-"""Cosmos QA dataset."""
-
-
-import csv
-import json
-
-import datasets
-
-
-_HOMEPAGE = "https://wilburone.github.io/cosmos/"
-
-_DESCRIPTION = """\
-Cosmos QA is a large-scale dataset of 35.6K problems that require commonsense-based reading comprehension, formulated as multiple-choice questions. It focuses on reading between the lines over a diverse collection of people's everyday narratives, asking questions concerning on the likely causes or effects of events that require reasoning beyond the exact text spans in the context
-"""
-
-_CITATION = """\
-@inproceedings{huang-etal-2019-cosmos,
-    title = "Cosmos {QA}: Machine Reading Comprehension with Contextual Commonsense Reasoning",
-    author = "Huang, Lifu  and
-      Le Bras, Ronan  and
-      Bhagavatula, Chandra  and
-      Choi, Yejin",
-    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)",
-    month = nov,
-    year = "2019",
-    address = "Hong Kong, China",
-    publisher = "Association for Computational Linguistics",
-    url = "https://www.aclweb.org/anthology/D19-1243",
-    doi = "10.18653/v1/D19-1243",
-    pages = "2391--2401",
-}
-"""
-
-_LICENSE = "CC BY 4.0"
-
-_URL = "https://github.com/wilburOne/cosmosqa/raw/master/data/"
-_URLS = {
-    "train": _URL + "train.csv",
-    "test": _URL + "test.jsonl",
-    "dev": _URL + "valid.csv",
-}
-
-
-class CosmosQa(datasets.GeneratorBasedBuilder):
-    """Cosmos QA dataset."""
-
-    VERSION = datasets.Version("0.1.0")
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "context": datasets.Value("string"),
-                    "question": datasets.Value("string"),
-                    "answer0": datasets.Value("string"),
-                    "answer1": datasets.Value("string"),
-                    "answer2": datasets.Value("string"),
-                    "answer3": datasets.Value("string"),
-                    "label": datasets.Value("int32"),
-                }
-            ),
-            homepage=_HOMEPAGE,
-            citation=_CITATION,
-            license=_LICENSE,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        urls_to_download = _URLS
-        dl_dir = dl_manager.download_and_extract(urls_to_download)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={"filepath": dl_dir["train"], "split": "train"},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={"filepath": dl_dir["test"], "split": "test"},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={"filepath": dl_dir["dev"], "split": "dev"},
-            ),
-        ]
-
-    def _generate_examples(self, filepath, split):
-        """Yields examples."""
-        with open(filepath, encoding="utf-8") as f:
-            if split == "test":
-                for id_, row in enumerate(f):
-                    data = json.loads(row)
-                    yield id_, {
-                        "id": data["id"],
-                        "context": data["context"],
-                        "question": data["question"],
-                        "answer0": data["answer0"],
-                        "answer1": data["answer1"],
-                        "answer2": data["answer2"],
-                        "answer3": data["answer3"],
-                        "label": int(data.get("label", -1)),
-                    }
-            else:
-                data = csv.DictReader(f)
-                for id_, row in enumerate(data):
-                    yield id_, {
-                        "id": row["id"],
-                        "context": row["context"],
-                        "question": row["question"],
-                        "answer0": row["answer0"],
-                        "answer1": row["answer1"],
-                        "answer2": row["answer2"],
-                        "answer3": row["answer3"],
-                        "label": int(row.get("label", -1)),
-                    }
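For reference, what the deleted loader did is easy to reproduce standalone: train/validation are CSV files with a `label` column, while test is JSONL without labels, which the script mapped to -1. A sketch using only the standard library and the `_URLS` endpoints from the removed script:

```python
# Sketch of the removed _generate_examples logic, standard library only.
import csv
import io
import json
import urllib.request

_URL = "https://github.com/wilburOne/cosmosqa/raw/master/data/"

# train/dev splits: CSV with a label column
with urllib.request.urlopen(_URL + "valid.csv") as resp:
    row = next(csv.DictReader(io.TextIOWrapper(resp, encoding="utf-8")))
print(row["question"], int(row.get("label", -1)))

# test split: JSONL with no label field, hence the -1 placeholder
with urllib.request.urlopen(_URL + "test.jsonl") as resp:
    data = json.loads(io.TextIOWrapper(resp, encoding="utf-8").readline())
print(data["id"], int(data.get("label", -1)))  # -> ... -1
```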
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4477336b3eb4fed17dd14d0b4932d758b22ee4f0c5cda1c853ebb30612c92c8f
+size 2873194

data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7b9516f6650bb92b12c02cf78c7ffcc31659546e38ba82bb3f10ff84c4e6d98
+size 7923050

data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc6e093c9b05d7b97a74f587c32c5501718317d9ae4c6046cae92878197c3929
+size 1233337
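Each file added above is a Git LFS pointer: the Parquet payload lives in LFS storage, and the pointer records the payload's SHA-256 (`oid`) and byte size. A sketch of verifying a downloaded copy against the test split's pointer (the local path is a placeholder):

```python
# Sketch: check a downloaded Parquet file against the oid/size recorded in
# its LFS pointer above. The path is a placeholder for wherever the file landed.
import hashlib
import os

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            h.update(block)
    return h.hexdigest()

path = "data/test-00000-of-00001.parquet"  # placeholder local path
assert os.path.getsize(path) == 2873194
assert sha256_of(path) == "4477336b3eb4fed17dd14d0b4932d758b22ee4f0c5cda1c853ebb30612c92c8f"
```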