Modalities: Text
Formats: parquet
Languages: Chinese
Libraries: Datasets, pandas
Commit 48d5f78 (parent: d10d2fe) by albertvillanova

Add mixed data files
README.md CHANGED

@@ -62,16 +62,16 @@ dataset_info:
       sequence: string
   splits:
   - name: train
-    num_bytes: 2710513
+    num_bytes: 2710473
     num_examples: 3138
   - name: test
-    num_bytes: 891619
+    num_bytes: 891579
     num_examples: 1045
   - name: validation
-    num_bytes: 910799
+    num_bytes: 910759
     num_examples: 1046
-  download_size: 5481785
-  dataset_size: 4512931
+  download_size: 3183780
+  dataset_size: 4512811
 configs:
 - config_name: dialog
   data_files:
@@ -81,6 +81,14 @@ configs:
     path: dialog/test-*
   - split: validation
     path: dialog/validation-*
+- config_name: mixed
+  data_files:
+  - split: train
+    path: mixed/train-*
+  - split: test
+    path: mixed/test-*
+  - split: validation
+    path: mixed/validation-*
 ---
 # Dataset Card for C3
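With the `mixed` config now listed under `configs` alongside `dialog`, the new Parquet splits can be loaded straight from the Hub with the `datasets` library. A minimal sketch, not part of this commit, using `owner/c3` as a placeholder repository id (substitute the actual id of this dataset repo):

# Minimal sketch (assumption: "owner/c3" stands in for the real repo id).
# Loads the newly added "mixed" config via the Hugging Face `datasets` library.
from datasets import load_dataset

mixed = load_dataset("owner/c3", "mixed")

print(mixed)                           # DatasetDict with train/test/validation splits
print(mixed["train"].num_rows)         # expected 3138 per the split metadata above
print(mixed["train"][0]["questions"])  # each document carries its question/answer/choice list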
dataset_infos.json CHANGED

@@ -8,53 +8,40 @@
       "documents": {
         "feature": {
           "dtype": "string",
-          "id": null,
           "_type": "Value"
         },
-        "length": -1,
-        "id": null,
         "_type": "Sequence"
       },
       "document_id": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "questions": {
         "feature": {
           "question": {
             "dtype": "string",
-            "id": null,
             "_type": "Value"
           },
           "answer": {
             "dtype": "string",
-            "id": null,
             "_type": "Value"
           },
           "choice": {
             "feature": {
               "dtype": "string",
-              "id": null,
               "_type": "Value"
             },
-            "length": -1,
-            "id": null,
             "_type": "Sequence"
           }
         },
-        "length": -1,
-        "id": null,
         "_type": "Sequence"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
-    "builder_name": "c3",
+    "builder_name": "parquet",
+    "dataset_name": "c3",
     "config_name": "mixed",
     "version": {
       "version_str": "1.0.0",
-      "description": null,
       "major": 1,
       "minor": 0,
       "patch": 0
@@ -62,41 +49,26 @@
     "splits": {
       "train": {
         "name": "train",
-        "num_bytes": 2710513,
+        "num_bytes": 2710473,
         "num_examples": 3138,
-        "dataset_name": "c3"
+        "dataset_name": null
       },
       "test": {
         "name": "test",
-        "num_bytes": 891619,
+        "num_bytes": 891579,
         "num_examples": 1045,
-        "dataset_name": "c3"
+        "dataset_name": null
       },
       "validation": {
         "name": "validation",
-        "num_bytes": 910799,
+        "num_bytes": 910759,
         "num_examples": 1046,
-        "dataset_name": "c3"
-      }
-    },
-    "download_checksums": {
-      "https://raw.githubusercontent.com/nlpdata/c3/master/data/c3-m-train.json": {
-        "num_bytes": 3292571,
-        "checksum": "4c84a534f1eec2c72e5f60f0c044cc39e2e42a88df01134e677e03217472d6af"
-      },
-      "https://raw.githubusercontent.com/nlpdata/c3/master/data/c3-m-test.json": {
-        "num_bytes": 1085489,
-        "checksum": "7d8074be56cf574536a3284bc2d6b04d137694d5e5f5b1368143c0cf3e336822"
-      },
-      "https://raw.githubusercontent.com/nlpdata/c3/master/data/c3-m-dev.json": {
-        "num_bytes": 1103725,
-        "checksum": "357d0d8d2a29bc845cbe50e048c263629f5e527b70f24c3e0838c387c8d3cb54"
+        "dataset_name": null
       }
     },
-    "download_size": 5481785,
-    "post_processing_size": null,
-    "dataset_size": 4512931,
-    "size_in_bytes": 9994716
+    "download_size": 3183780,
+    "dataset_size": 4512811,
+    "size_in_bytes": 7696591
   },
   "dialog": {
     "description": "Machine reading comprehension tasks require a machine reader to answer questions relevant to the given document. In this paper, we present the first free-form multiple-Choice Chinese machine reading Comprehension dataset (C^3), containing 13,369 documents (dialogues or more formally written mixed-genre texts) and their associated 19,577 multiple-choice free-form questions collected from Chinese-as-a-second-language examinations.\nWe present a comprehensive analysis of the prior knowledge (i.e., linguistic, domain-specific, and general world knowledge) needed for these real-world problems. We implement rule-based and popular neural methods and find that there is still a significant performance gap between the best performing model (68.5%) and human readers (96.0%), especially on problems that require prior knowledge. We further study the effects of distractor plausibility and data augmentation based on translated relevant datasets for English on model performance. We expect C^3 to present great challenges to existing systems as answering 86.8% of questions requires both knowledge within and beyond the accompanying document, and we hope that C^3 can serve as a platform to study how to leverage various kinds of prior knowledge to better understand a given written or orally oriented text.\n",
mixed/test-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ba4b6fc08d5f3505a6c6606e3a8792807b8808bce8b6262ec472d6dcb720f5ef
+size 636791

mixed/train-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:72892b1447e1fa1068447ec5a253cea56e196550db6a7b52a916c350319bc6b5
+size 1901402

mixed/validation-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2061eb2e311e0fbad3de5a3c38fc4d948f391d4671907ee14fe1c65a3764828d
+size 645587
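The three added files are Git LFS pointers: `oid sha256:...` is the SHA-256 of the actual Parquet shard and `size` is its byte count. A rough sketch of verifying a downloaded shard against its pointer and peeking at it with pandas, again with `owner/c3` as a placeholder repository id:

# Sketch only: verify the mixed/test shard against the LFS pointer values
# shown above, then read it with pandas. "owner/c3" is a placeholder repo id.
import hashlib

import pandas as pd
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="owner/c3",                         # placeholder, not from the diff
    filename="mixed/test-00000-of-00001.parquet",
    repo_type="dataset",
)

blob = open(path, "rb").read()
assert len(blob) == 636791                      # size from the LFS pointer
assert hashlib.sha256(blob).hexdigest() == (
    "ba4b6fc08d5f3505a6c6606e3a8792807b8808bce8b6262ec472d6dcb720f5ef"
)                                               # oid from the LFS pointer

df = pd.read_parquet(path)
print(len(df))                                  # 1045 test examples per the split metadata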