Add data card, update challenge set loader
Browse files- bisect.json +178 -0
- bisect.md +191 -0
- bisect.py +33 -19
bisect.json
ADDED
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"overview": {
|
3 |
+
"where": {
|
4 |
+
"has-leaderboard": "no",
|
5 |
+
"leaderboard-url": "N/A",
|
6 |
+
"leaderboard-description": "N/A",
|
7 |
+
"website": "https://github.com/mounicam/BiSECT",
|
8 |
+
"data-url": "https://github.com/mounicam/BiSECT/tree/main/bisect",
|
9 |
+
"paper-url": "https://aclanthology.org/2021.emnlp-main.500/",
|
10 |
+
"paper-bibtext": "@inproceedings{kim-etal-2021-bisect,\n title = \"{B}i{SECT}: Learning to Split and Rephrase Sentences with Bitexts\",\n author = \"Kim, Joongwon and\n Maddela, Mounica and\n Kriz, Reno and\n Xu, Wei and\n Callison-Burch, Chris\",\n booktitle = \"Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing\",\n month = nov,\n year = \"2021\",\n address = \"Online and Punta Cana, Dominican Republic\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2021.emnlp-main.500\",\n pages = \"6193--6209\"\n}\n",
|
11 |
+
"contact-name": "Joongwon Kim, Mounica Maddela, Reno Kriz",
|
12 |
+
"contact-email": "[email protected], [email protected], [email protected]"
|
13 |
+
},
|
14 |
+
"languages": {
|
15 |
+
"is-multilingual": "no",
|
16 |
+
"license": "other: Other license",
|
17 |
+
"task-other": "N/A",
|
18 |
+
"language-names": [
|
19 |
+
"English"
|
20 |
+
],
|
21 |
+
"license-other": "The dataset is not licensed by itself, and the source OPUS data consists solely of publicly available parallel corpora.",
|
22 |
+
"task": "Simplification",
|
23 |
+
"communicative": "To rewrite a long, complex sentence into shorter, readable, meaning-equivalent sentences.",
|
24 |
+
"intended-use": "Split and Rephrase."
|
25 |
+
},
|
26 |
+
"credit": {
|
27 |
+
"organization-type": [
|
28 |
+
"academic"
|
29 |
+
],
|
30 |
+
"organization-names": "University of Pennsylvania, Georgia Institute of Technology",
|
31 |
+
"creators": "Joongwon Kim (University of Pennsylvania), Mounica Maddela (Georgia Institute of Technology), Reno Kriz (University of Pennsylvania, Johns Hopkins University), Wei Xu (Georgia Institute of Technology), Chris Callison-Burch (University of Pennsylvania)",
|
32 |
+
"funding": "This work is supported in part by the NSF awards IIS-2055699, ODNI and IARPA via the BETTER program (contract 19051600004), and the DARPA KAIROS Program (contract FA8750-19-2-1004).",
|
33 |
+
"gem-added-by": "Reno Kriz (Johns Hopkins University), Jenny Chim (Queen Mary University of London)"
|
34 |
+
},
|
35 |
+
"structure": {
|
36 |
+
"data-fields": "gem_id - (string) a unique identifier for the instance\nsource_sentence - (string) sentence to be simplified\ntarget_sentence - (string) simplified text that was split and rephrased",
|
37 |
+
"structure-example": "{\n \"gem_id\": \"bisect-train-0\",\n \"source_sentence\": \"The report on the visit to Bhutan states that the small community has made the task of coordination less complex and success is manifested in the synchronized programming cycles which now apply to all but one of the agencies ( the World Health Organization ) .\",\n \"target_sentence\": \"The report on the visit to Bhutan says that the small community has made the coordination work less complex . Success manifests itself in synchronized programming cycles that now apply to all but one organism ( the World Health Organization ) .\"\n}",
|
38 |
+
"structure-splits": "For the main English BiSECT dataset, the splits are as follows:\n(1) train (n=928440)\n(2) validation (n=9079)\n(3) test (n=583)\nAdditional challenge sets were derived from the data presented in the paper. Please refer to the challenge set sections.",
|
39 |
+
"structure-description": "N/A",
|
40 |
+
"structure-labels": "N/A",
|
41 |
+
"structure-splits-criteria": "While all training data were derived from subsets of the OPUS corpora, different source subsets were used for training v.s., validation and testing. The training set comprised more web crawl data, whereas development and test sets comprised EMEA and EU texts. Details can be found in the BiSECT paper.",
|
42 |
+
"structure-outlier": ""
|
43 |
+
}
|
44 |
+
},
|
45 |
+
"context": {
|
46 |
+
"previous": {
|
47 |
+
"is-deployed": "no",
|
48 |
+
"described-risks": "N/A",
|
49 |
+
"changes-from-observation": "N/A"
|
50 |
+
},
|
51 |
+
"underserved": {
|
52 |
+
"helps-underserved": "yes",
|
53 |
+
"underserved-description": "The data as provided in GEMv2 is in English, which is a language with abundant existing resources. However, the original paper also provides Split and Rephrase pairs for French, Spanish, and German, while providing a framework for leveraging bilingual corpora from any language pair found within OPUS."
|
54 |
+
},
|
55 |
+
"biases": {
|
56 |
+
"has-biases": "no",
|
57 |
+
"bias-analyses": "N/A",
|
58 |
+
"speaker-distibution": "The language produced in the dataset is limited to what is captured in the used subset of the OPUS corpora, which might not represent the full distribution of speakers from all locations. For example, the corpora used are from a limited set of relatively formal domains, so it is possible that high performance on the BiSECT test set may not transfer to more informal text."
|
59 |
+
}
|
60 |
+
},
|
61 |
+
"considerations": {
|
62 |
+
"pii": {
|
63 |
+
"risks-description": "Since this data is collected from OPUS, all pairs are already in the public domain."
|
64 |
+
},
|
65 |
+
"licenses": {
|
66 |
+
"dataset-restrictions-other": "N/A",
|
67 |
+
"data-copyright-other": "N/A",
|
68 |
+
"dataset-restrictions": [
|
69 |
+
"public domain"
|
70 |
+
],
|
71 |
+
"data-copyright": [
|
72 |
+
"public domain"
|
73 |
+
]
|
74 |
+
},
|
75 |
+
"limitations": {
|
76 |
+
"data-technical-limitations": "The creation of English BiSECT relies on translating non-English text back to English. While machine translation systems tend to perform well on high-resource languages, there is still a non-negligible chance that there these systems make errors; through a manual evaluation of a subset of BiSECT, it was found that 15% of pairs contained significant errors, while an additional 22% contained minor adequacy/fluency errors. This problem is exacerbated slightly when creating German BiSECT (22% significant errors, 24% minor errors), and these numbers would likely get larger if lower-resource languages were used."
|
77 |
+
}
|
78 |
+
},
|
79 |
+
"results": {
|
80 |
+
"results": {
|
81 |
+
"other-metrics-definitions": "SARI is a metric used for evaluating automatic text simplification systems. The metric compares the predicted simplified sentences against the reference and the source sentences. It explicitly measures the goodness of words that are added, deleted and kept by the system. ",
|
82 |
+
"has-previous-results": "yes",
|
83 |
+
"current-evaluation": "N/A",
|
84 |
+
"previous-results": "N/A",
|
85 |
+
"metrics": [
|
86 |
+
"Other: Other Metrics",
|
87 |
+
"BERT-Score"
|
88 |
+
],
|
89 |
+
"model-abilities": "Text comprehension (needed to generate meaning-equivalent output) and notions of complexity (what is more 'readable' in terms of syntactic structure, lexical choice, punctuation).",
|
90 |
+
"original-evaluation": "Existing automatic metrics, such as BLEU (Papineni et al., 2002) and SAMSA (Sulem et al., 2018),\nare not optimal for the Split and Rephrase task as\nthey rely on lexical overlap between the output and\nthe target (or source) and underestimate the splitting capability of the models that rephrase often. \n\nAs such, the dataset creators focused on BERTScore (Zhang et al., 2020) and SARI (Xu et al., 2016). BERTScore captures meaning preservation and fluency\nwell (Scialom et al., 2021). SARI can provide three\nseparate F1/precision scores that explicitly measure the correctness of inserted, kept and deleted\nn-grams when compared to both the source and\nthe target. The authors used an extended version of SARI\nthat considers lexical paraphrases of the reference. "
|
91 |
+
}
|
92 |
+
},
|
93 |
+
"gem": {
|
94 |
+
"rationale": {
|
95 |
+
"sole-task-dataset": "yes",
|
96 |
+
"distinction-description": "BiSECT is the largest available corpora for the Split and Rephrase task. In addition, it has been shown that BiSECT is of higher quality than previous Split and Rephrase corpora and contains a wider variety of splitting operations.\n\nMost previous Split and Rephrase corpora (HSplit-Wiki, Cont-Benchmark, and Wiki-Benchmark) were manually written at a small scale and focused on evaluation, while the one corpus of comparable size, WikiSplit, contains around 25% of pairs contain significant errors. This is because Wikipedia editors are not only trying to split a sentence, but also often simultaneously modifying the sentence for other purposes, which results in changes of the initial meaning.",
|
97 |
+
"contribution": "Understanding long and complex sentences is challenging for both humans and NLP models. The BiSECT dataset helps facilitate more research on Split and Rephrase as a task within itself, as well as how it can benefit downstream NLP applications.",
|
98 |
+
"sole-language-task-dataset": "yes",
|
99 |
+
"model-ability": ""
|
100 |
+
},
|
101 |
+
"curation": {
|
102 |
+
"has-additional-curation": "yes",
|
103 |
+
"modification-types": [
|
104 |
+
"data points added"
|
105 |
+
],
|
106 |
+
"modification-description": "The original BiSECT training, validation, and test splits are maintained to ensure a fair comparison. Note that the original BiSECT test set was created by manually selecting 583 high-quality Split and Rephrase instances from 1000 random source-target pairs sampled from the EMEA and JRC-Acquis corpora from OPUS.\n\nAs the first challenge set, we include the HSPLIT-Wiki test set, containing 359 pairs. For each complex sentence, there are four reference splits; To ensure replicability, as reference splits, we again follow the BiSECT paper and present only the references from HSplit2-full.\n\nIn addition to the two evaluation sets used in the original BiSECT paper, we also introduce a second challenge set. For this, we initially consider all 7,293 pairs from the EMEA and JRC-Acquis corpora. From there, we classify each pair using the classification algorithm from Section 4.2 of the original BiSECT paper. The three classes are as follows:\n\n1. Direct Insertion: when a long sentence l contains two independent clauses and requires only minor changes in order to make a fluent and meaning-preserving split s.\n2. Changes near Split, when l contains one independent and one dependent clause, but modifications are restricted to the region where l is split.\n3. Changes across Sentences, where major changes are required throughout l in order to create a fluent split s.\nWe keep only pairs labeled as Type 3, and after filtering out pairs with significant length differences (signaling potential content addition/deletion), we present a second challenge set of 1,798 pairs.",
|
107 |
+
"has-additional-splits": "no",
|
108 |
+
"additional-splits-description": "N/A",
|
109 |
+
"additional-splits-capacicites": "N/A"
|
110 |
+
},
|
111 |
+
"starting": {
|
112 |
+
"research-pointers": "The dataset can be downloaded from the original repository by the authors.\n\nThe original BiSECT paper proposes several transformer-based models that can be used as baselines, which also compares against Copy512, an LSTM-based model and the previous state-of-the-art.\n\nThe common metric used for automatic evaluation of Split and Rephrase, and sentence simplification more generally is SARI. The BiSECT paper also evaluates using BERTScore. Note that automatic evaluations tend to not correlate well with human judgments, so a human evaluation for quality is generally expected for publication. The original BiSECT paper provides templates for collecting quality annotations from Amazon Mechanical Turk.",
|
113 |
+
"technical-terms": ""
|
114 |
+
}
|
115 |
+
},
|
116 |
+
"curation": {
|
117 |
+
"original": {
|
118 |
+
"is-aggregated": "no",
|
119 |
+
"aggregated-sources": "N/A",
|
120 |
+
"rationale": "BiSECT was constructed to satisfy the need of a Split and Rephrase corpus that is both large-scale and high-quality. Most previous Split and Rephrase corpora (HSplit-Wiki, Cont-Benchmark, and Wiki-Benchmark) were manually written at a small scale and focused on evaluation, while the one corpus of comparable size, WikiSplit, contains around 25% of pairs contain significant errors. This is because Wikipedia editors are not only trying to split a sentence, but also often simultaneously modifying the sentence for other purposes, which results in changes of the initial meaning.",
|
121 |
+
"communicative": "The goal of Split and Rephrase is to break down longer sentences into multiple shorter sentences, which has downstream applications for many NLP tasks, including machine translation and dependency parsing."
|
122 |
+
},
|
123 |
+
"language": {
|
124 |
+
"found": [
|
125 |
+
"Other"
|
126 |
+
],
|
127 |
+
"crowdsourced": [],
|
128 |
+
"created": "N/A",
|
129 |
+
"machine-generated": "N/A",
|
130 |
+
"validated": "validated by data curator",
|
131 |
+
"is-filtered": "hybrid",
|
132 |
+
"filtered-criteria": "To remove noise, the authors remove pairs where the single long sentence (l) contains a token with a punctuation after the first two and before the last two alphabetic characters. The authors also removed instances where l contains more than one unconnected component in its dependency tree, generated via SpaCy.",
|
133 |
+
"obtained": [
|
134 |
+
"Found"
|
135 |
+
],
|
136 |
+
"pre-processed": "The construction of the BiSECT corpus relies on leveraging the sentence-level alignments from OPUS), a collection of bilingual parallel corpora over many language pairs. Given a target language A, this work extracts all 1-2 and 2-1 sentence alignments from parallel corpora between A and a set of foreign languages B.\n\nNext, the foreign sentences are translated into English using Google Translate\u2019s Web API service to obtain sentence alignments between a single long sentence and two corresponding split sentences, both in the desired language.\n\nThe authors further filtered the data in a hybrid fashion.",
|
137 |
+
"topics": "There is a range of topics spanning domains such as web crawl and government documents (European Parliament, United Nations, EMEA).",
|
138 |
+
"producers-description": "N/A."
|
139 |
+
},
|
140 |
+
"annotations": {
|
141 |
+
"origin": "none",
|
142 |
+
"rater-number": "N/A",
|
143 |
+
"rater-qualifications": "N/A",
|
144 |
+
"rater-training-num": "N/A",
|
145 |
+
"rater-test-num": "N/A",
|
146 |
+
"rater-annotation-service-bool": "no",
|
147 |
+
"rater-annotation-service": [],
|
148 |
+
"values": "N/A",
|
149 |
+
"quality-control": [],
|
150 |
+
"quality-control-details": "N/A"
|
151 |
+
},
|
152 |
+
"consent": {
|
153 |
+
"has-consent": "no",
|
154 |
+
"consent-policy": "N/A",
|
155 |
+
"consent-other": "N/A",
|
156 |
+
"no-consent-justification": "Since this data is collected from OPUS, all instances are already in the public domain. "
|
157 |
+
},
|
158 |
+
"pii": {
|
159 |
+
"has-pii": "unlikely",
|
160 |
+
"no-pii-justification": "N/A",
|
161 |
+
"is-pii-identified": "no identification",
|
162 |
+
"pii-identified-method": "N/A",
|
163 |
+
"is-pii-replaced": "N/A",
|
164 |
+
"pii-replaced-method": "N/A",
|
165 |
+
"pii-categories": [
|
166 |
+
"generic PII"
|
167 |
+
]
|
168 |
+
},
|
169 |
+
"maintenance": {
|
170 |
+
"has-maintenance": "no",
|
171 |
+
"description": "N/A",
|
172 |
+
"contact": "N/A",
|
173 |
+
"contestation-mechanism": "N/A",
|
174 |
+
"contestation-link": "N/A",
|
175 |
+
"contestation-description": "N/A"
|
176 |
+
}
|
177 |
+
}
|
178 |
+
}
|
bisect.md
ADDED
@@ -0,0 +1,191 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: 'BiSECT'
|
3 |
+
type: 'Split and Rephrase'
|
4 |
+
motivation: 'Why is the dataset part of GEM?'
|
5 |
+
---
|
6 |
+
|
7 |
+
## Table of Contents
|
8 |
+
|
9 |
+
[Leave this blank, we autogenerate this section and overwrite content]
|
10 |
+
|
11 |
+
## Dataset Description
|
12 |
+
|
13 |
+
- **Homepage:** [https://github.com/mounicam/BiSECT/](https://github.com/mounicam/BiSECT/)
|
14 |
+
- **Repository:** [https://github.com/mounicam/BiSECT/](https://github.com/mounicam/BiSECT/)
|
15 |
+
- **Paper:** [https://aclanthology.org/2021.emnlp-main.500/](https://aclanthology.org/2021.emnlp-main.500/)
|
16 |
+
- **Points of Contact:** [Joongwon Kim](mailto:[email protected]), [Mounica Maddela](mailto:[email protected]), [Reno Kriz](mailto:[email protected])
|
17 |
+
|
18 |
+
### Dataset and Task Summary
|
19 |
+
This dataset captures the ‘Split and Rephrase’ task, which involves taking long, complex sentences and splitting them into shorter, simpler, and potentially rephrased meaning-equivalent sentences.
|
20 |
+
|
21 |
+
**BiSECT** was created via bilingual pivoting using subsets of the OPUS dataset ([Tiedemann and Nygaard, 2004](https://aclanthology.org/L04-1174/)). It spans multiple domains, from web crawl to government documents. The data released here is in English, but data for other European languages are also available upon request.
|
22 |
+
|
23 |
+
Compared to previous resources for this task, the resulting dataset was found to contain examples with higher quality, as well as splits that require more significant modifications.
|
24 |
+
|
25 |
+
### Why is this dataset part of GEM?
|
26 |
+
|
27 |
+
**BiSECT** is the largest available corpus for the Split and Rephrase task. In addition, it has been shown that **BiSECT** is of higher quality than previous Split and Rephrase corpora and contains a wider variety of splitting operations.
|
28 |
+
|
29 |
+
### Languages
|
30 |
+
English (en-US). Split and Rephrase pairs for French, Spanish, and German are also available upon request.
|
31 |
+
|
32 |
+
## Meta Information
|
33 |
+
|
34 |
+
### Dataset Curators
|
35 |
+
|
36 |
+
BiSECT was developed by researchers at the University of Pennsylvania and Georgia Institute of Technology. This work is supported in part by the NSF awards IIS-2055699, ODNI and IARPA via the BETTER program (contract 19051600004), and the DARPA KAIROS Program (contract FA8750-19-2-1004).
|
37 |
+
|
38 |
+
### Licensing Information
|
39 |
+
|
40 |
+
The dataset is not licensed by itself, and the source Opus data consists solely of publicly available parallel corpora.
|
41 |
+
|
42 |
+
### Citation Information
|
43 |
+
```
|
44 |
+
@inproceedings{kim-etal-2021-bisect,
|
45 |
+
title = "{B}i{SECT}: Learning to Split and Rephrase Sentences with Bitexts",
|
46 |
+
author = "Kim, Joongwon and
|
47 |
+
Maddela, Mounica and
|
48 |
+
Kriz, Reno and
|
49 |
+
Xu, Wei and
|
50 |
+
Callison-Burch, Chris",
|
51 |
+
booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
|
52 |
+
month = nov,
|
53 |
+
year = "2021",
|
54 |
+
address = "Online and Punta Cana, Dominican Republic",
|
55 |
+
publisher = "Association for Computational Linguistics",
|
56 |
+
url = "https://aclanthology.org/2021.emnlp-main.500",
|
57 |
+
pages = "6193--6209"
|
58 |
+
}
|
59 |
+
```
|
60 |
+
This work also evaluates on the HSplit-Wiki evaluation set, first introduced in the papers below.
|
61 |
+
```
|
62 |
+
@article{Xu-EtAl:2016:TACL,
|
63 |
+
author = {Wei Xu and Courtney Napoles and Ellie Pavlick and Quanze Chen and Chris Callison-Burch},
|
64 |
+
title = {Optimizing Statistical Machine Translation for Text Simplification},
|
65 |
+
journal = {Transactions of the Association for Computational Linguistics},
|
66 |
+
volume = {4},
|
67 |
+
year = {2016},
|
68 |
+
pages = {401--415}
|
69 |
+
}
|
70 |
+
@inproceedings{sulem-etal-2018-bleu,
|
71 |
+
title = "{BLEU} is Not Suitable for the Evaluation of Text Simplification",
|
72 |
+
author = "Sulem, Elior and
|
73 |
+
Abend, Omri and
|
74 |
+
Rappoport, Ari",
|
75 |
+
booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
|
76 |
+
month = oct # "-" # nov,
|
77 |
+
year = "2018",
|
78 |
+
address = "Brussels, Belgium",
|
79 |
+
publisher = "Association for Computational Linguistics",
|
80 |
+
url = "https://aclanthology.org/D18-1081",
|
81 |
+
doi = "10.18653/v1/D18-1081",
|
82 |
+
pages = "738--744"
|
83 |
+
}
|
84 |
+
```
|
85 |
+
|
86 |
+
### Leaderboard
|
87 |
+
There is currently no leaderboard for this task.
|
88 |
+
|
89 |
+
## Dataset Structure
|
90 |
+
|
91 |
+
### Data Instances
|
92 |
+
Example of an instance:
|
93 |
+
```
|
94 |
+
{
|
95 |
+
"gem_id": "bisect-train-0",
|
96 |
+
"source_sentence": "The report on the visit to Bhutan states that the small community has made the task of coordination less complex and success is manifested in the synchronized programming cycles which now apply to all but one of the agencies ( the World Health Organization ) .",
|
97 |
+
"target_sentence": "The report on the visit to Bhutan says that the small community has made the coordination work less complex . Success manifests itself in synchronized programming cycles that now apply to all but one organism ( the World Health Organization ) ."
|
98 |
+
}
|
99 |
+
```
|
100 |
+
|
101 |
+
### Data Fields
|
102 |
+
The fields are the same across all splits.
|
103 |
+
- `gem_id` - (string) a unique identifier for the instance
|
104 |
+
- `source_sentence` - (string) sentence to be simplified
|
105 |
+
- `target_sentence` - (string) simplified text that was split and rephrased
|
106 |
+
|
107 |
+
|
108 |
+
### Data Statistics
|
109 |
+
|dataset |train |validation|test|
|
110 |
+
|-------:|:-----:|:--------:|:--:|
|
111 |
+
|BiSECT |928440 | 9079|583 |
|
112 |
+
|HSplit |-- |-- |359 |
|
113 |
+
|Challenge Set|--|-- |1798|
|
114 |
+
|
115 |
+
## Dataset Creation
|
116 |
+
|
117 |
+
### Curation Rationale
|
118 |
+
|
119 |
+
**BiSECT** was constructed to satisfy the need of a Split and Rephrase corpus that is both large-scale and high-quality. Most previous Split and Rephrase corpora ([HSplit-Wiki](https://www.aclweb.org/anthology/D18-1081), [Cont-Benchmark](https://www.aclweb.org/anthology/2020.emnlp-main.91), and [Wiki-Benchmark](https://www.aclweb.org/anthology/2020.emnlp-main.91)) were manually written at a small scale and focused on evaluation, while the one corpus of comparable size, [WikiSplit](https://www.aclweb.org/anthology/D18-1080), contains around 25\% of pairs with significant errors. This is because Wikipedia editors are not only trying to split a sentence, but also often simultaneously modifying the sentence for other purposes, which results in changes of the initial meaning.
|
120 |
+
|
121 |
+
### Communicative Goal
|
122 |
+
|
123 |
+
The goal of Split and Rephrase is to break down longer sentences into multiple shorter sentences, which has downstream applications for many NLP tasks, including machine translation and dependency parsing.
|
124 |
+
|
125 |
+
### Source Data
|
126 |
+
|
127 |
+
#### Initial Data Collection and Normalization
|
128 |
+
|
129 |
+
The construction of the **BiSECT** corpus relies on leveraging the sentence-level alignments from [OPUS](http://www.lrec-conf.org/proceedings/lrec2004/pdf/320.pdf), a collection of bilingual parallel corpora over many language pairs. Given a target language *A*, this work extracts all 1-2 and 2-1 sentence alignments from parallel corpora between *A* and a set of foreign languages ***B***.
|
130 |
+
|
131 |
+
Next, the foreign sentences are translated into English using Google Translate's [Web API service](https://pypi.org/project/googletrans/) to obtain sentence alignments between a single long sentence $l$ and two corresponding split sentences $s= (s_1, s_2)$, both in the desired language.
|
132 |
+
|
133 |
+
To remove noise, the authors remove pairs where $l$ contains a token with a punctuation after the first two and before the last two alphabetic characters, as well as where $l$ contains more than one unconnected component in its dependency tree, generated via [SpaCy](https://spacy.io).
|
134 |
+
|
135 |
+
#### Who are the source language producers?
|
136 |
+
|
137 |
+
Opus corpora are from a variety of sources. The **BiSECT** training set contains pairs extracted from five datasets: *CCAligned*, parallel English-French documents from common crawl; *Europarl*, an English-French dataset from European Parliament; *10^9 FR-EN*, an English-French newswire corpus; *ParaCrawl*, a multilingual web crawl dataset; and *UN*, multilingual translated UN documents. The **BiSECT** test set contains pairs extracted from two additional datasets: *EMEA*, an English-French parallel corpus made out of PDF documents from the European Medicines Agency; and *JRC-Acquis*, a multilingual collection of European Union legislative text.
|
138 |
+
|
139 |
+
### Annotations
|
140 |
+
|
141 |
+
#### Annotation process
|
142 |
+
|
143 |
+
The training data was automatically extracted, so no annotators were needed. For the test set, the authors manually selected 583 high-quality sentence splits from 1000 random source-target pairs from the *EMEA* and *JRC-Acquis* corpora.
|
144 |
+
|
145 |
+
#### Who are the annotators?
|
146 |
+
|
147 |
+
None.
|
148 |
+
|
149 |
+
### Personal and Sensitive Information
|
150 |
+
|
151 |
+
Since this data is collected from [OPUS](http://www.lrec-conf.org/proceedings/lrec2004/pdf/320.pdf), all pairs are already in the public domain.
|
152 |
+
|
153 |
+
## Changes to the Original Dataset for GEM
|
154 |
+
|
155 |
+
The original **BiSECT** training, validation, and test splits are maintained to ensure a fair comparison. Note that the original **BiSECT** test set was created by manually selecting 583 high-quality Split and Rephrase instances from 1000 random source-target pairs sampled from the *EMEA* and *JRC-Acquis* corpora from [OPUS](http://www.lrec-conf.org/proceedings/lrec2004/pdf/320.pdf).
|
156 |
+
|
157 |
+
As the first challenge set, we include the *HSPLIT-Wiki* test set, containing 359 pairs. For each complex sentence, there are four reference splits; To ensure replicability, as reference splits, we again follow the BiSECT paper and present only the references from [HSplit2-full](https://github.com/eliorsulem/HSplit-corpus/blob/master/HSplit/HSplit2_full).
|
158 |
+
|
159 |
+
### Special Test Sets
|
160 |
+
|
161 |
+
In addition to the two evaluation sets used in the original **BiSECT** paper, we also introduce a second challenge set. For this, we initially consider all 7,293 pairs from the *EMEA* and *JRC-Acquis* corpora. From there, we classify each pair using the classification algorithm from Section 4.2 of the original **BiSECT** paper. The three classes are as follows:
|
162 |
+
|
163 |
+
1) **Direct Insertion**: when a long sentence *l* contains two independent clauses and requires only minor changes in order to make a fluent and meaning-preserving split *s*.
|
164 |
+
2) **Changes near Split**, when *l* contains one independent and one dependent clause, but modifications are restricted to the region where *l* is split.
|
165 |
+
3) **Changes across Sentences**, where major changes are required throughout *l* in order to create a fluent split *s*.
|
166 |
+
|
167 |
+
We keep only pairs labeled as Type 3, and after filtering out pairs with significant length differences (signaling potential content addition/deletion), we present a second challenge set of 1,798 pairs.
|
168 |
+
|
169 |
+
## Considerations for Using the Data
|
170 |
+
|
171 |
+
### Social Impact of the Dataset
|
172 |
+
Understanding long and complex sentences is challenging for both humans and NLP models. The **BiSECT** dataset helps facilitate more research on Split and Rephrase as a task within itself, as well as how it can benefit downstream NLP applications.
|
173 |
+
|
174 |
+
### Impact on Underserved Communities
|
175 |
+
The data as provided in GEMv2 is in English, which is a language with abundant existing resources. However, the original paper also provides Split and Rephrase pairs for French, Spanish, and German, while providing a framework for leveraging bilingual corpora from any language pair found within [OPUS](http://www.lrec-conf.org/proceedings/lrec2004/pdf/320.pdf).
|
176 |
+
|
177 |
+
### Discussion of Biases
|
178 |
+
|
179 |
+
The *Opus* corpora used are from a limited set of relatively formal domains, so it is possible that high performance on the BiSECT test set may not transfer to more informal text.
|
180 |
+
|
181 |
+
### Other Known Limitations
|
182 |
+
|
183 |
+
The creation of English **BiSECT** relies on translating non-English text back to English. While machine translation systems tend to perform well on high-resource languages, there is still a non-negligible chance that these systems make errors; through a manual evaluation of a subset of **BiSECT**, it was found that 15% of pairs contained significant errors, while an additional 22% contained minor adequacy/fluency errors. This problem is exacerbated slightly when creating German **BiSECT** (22% significant errors, 24% minor errors), and these numbers would likely get larger if lower-resource languages were used.
|
184 |
+
|
185 |
+
## Getting started with in-depth research on the task
|
186 |
+
|
187 |
+
The dataset can be downloaded from the [original repository](https://github.com/mounicam/BiSECT) by the authors.
|
188 |
+
|
189 |
+
The [original **BiSECT** paper](https://aclanthology.org/2021.emnlp-main.500/) proposes several transformer-based models that can be used as baselines, which also compares against [Copy512](https://www.aclweb.org/anthology/P18-2114), an LSTM-based model and the previous state-of-the-art.
|
190 |
+
|
191 |
+
The common metric used for automatic evaluation of Split and Rephrase, and sentence simplification more generally is [SARI](https://www.aclweb.org/anthology/Q15-1021). The **BiSECT** paper also evaluates using [BERTScore](https://openreview.net/forum?id=SkeHuCVFDr). Note that automatic evaluations tend to not correlate well with human judgments, so a human evaluation for quality is generally expected for publication. The original **BiSECT** paper provides templates for collecting quality annotations from Amazon Mechanical Turk.
|
bisect.py
CHANGED
@@ -42,19 +42,39 @@ BiSECT is a Split and Rephrase corpus created via bilingual pivoting.
|
|
42 |
|
43 |
_HOMEPAGE = "https://github.com/mounicam/BiSECT"
|
44 |
|
45 |
-
_URL = "https://
|
|
|
|
|
|
|
46 |
|
47 |
_URLs = {
|
48 |
-
|
49 |
-
|
50 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
51 |
}
|
52 |
|
53 |
|
54 |
class BiSECT(datasets.GeneratorBasedBuilder):
|
55 |
"""The BiSECT Split and Rephrase corpus."""
|
56 |
|
57 |
-
VERSION = datasets.Version("1.
|
58 |
|
59 |
BUILDER_CONFIGS = [
|
60 |
datasets.BuilderConfig(
|
@@ -84,20 +104,12 @@ class BiSECT(datasets.GeneratorBasedBuilder):
|
|
84 |
def _split_generators(self, dl_manager):
|
85 |
"""Returns SplitGenerators."""
|
86 |
data_dir = dl_manager.download_and_extract(_URLs)
|
87 |
-
|
88 |
return [
|
89 |
datasets.SplitGenerator(
|
90 |
-
name=
|
91 |
-
gen_kwargs={"filepath": data_dir[
|
92 |
-
)
|
93 |
-
|
94 |
-
name=datasets.Split.TEST,
|
95 |
-
gen_kwargs={"filepath": data_dir["test"], "split": "test"},
|
96 |
-
),
|
97 |
-
datasets.SplitGenerator(
|
98 |
-
name=datasets.Split.VALIDATION,
|
99 |
-
gen_kwargs={"filepath": data_dir["validation"], "split": "validation"},
|
100 |
-
),
|
101 |
]
|
102 |
|
103 |
def _generate_examples(self, filepath, split):
|
@@ -107,9 +119,11 @@ class BiSECT(datasets.GeneratorBasedBuilder):
|
|
107 |
target_filepath = filepath["dst"]
|
108 |
|
109 |
with open(source_filepath, encoding="utf-8") as f:
|
110 |
-
source_lines = [line.strip() for line in f]
|
111 |
with open(target_filepath, encoding="utf-8") as f:
|
112 |
-
target_lines = [
|
|
|
|
|
113 |
|
114 |
for id_ in range(len(source_lines)):
|
115 |
yield id_, {
|
|
|
42 |
|
43 |
_HOMEPAGE = "https://github.com/mounicam/BiSECT"
|
44 |
|
45 |
+
_URL = "https://raw.githubusercontent.com/mounicam/BiSECT/main/"
|
46 |
+
_URL_MAIN = _URL + "bisect/"
|
47 |
+
_URL_CHALLENGE = _URL + "bisect_challenge/"
|
48 |
+
|
49 |
|
50 |
_URLs = {
|
51 |
+
datasets.Split.TRAIN: {
|
52 |
+
"src": _URL_MAIN + "train.src.gz",
|
53 |
+
"dst": _URL_MAIN + "train.dst.gz",
|
54 |
+
},
|
55 |
+
datasets.Split.VALIDATION: {
|
56 |
+
"src": _URL_MAIN + "valid.src.gz",
|
57 |
+
"dst": _URL_MAIN + "valid.dst.gz",
|
58 |
+
},
|
59 |
+
datasets.Split.TEST: {
|
60 |
+
"src": _URL_MAIN + "test.src.gz",
|
61 |
+
"dst": _URL_MAIN + "test.dst.gz",
|
62 |
+
},
|
63 |
+
"challenge_hsplit": {
|
64 |
+
"src": "https://raw.githubusercontent.com/cocoxu/simplification/master/data/turkcorpus/test.8turkers.tok.norm",
|
65 |
+
"dst": "https://raw.githubusercontent.com/eliorsulem/HSplit-corpus/master/HSplit/HSplit2_full",
|
66 |
+
},
|
67 |
+
"challenge_bisect": {
|
68 |
+
"src": _URL_CHALLENGE + "challenge_test.src",
|
69 |
+
"dst": _URL_CHALLENGE + "challenge_test.dst",
|
70 |
+
},
|
71 |
}
|
72 |
|
73 |
|
74 |
class BiSECT(datasets.GeneratorBasedBuilder):
|
75 |
"""The BiSECT Split and Rephrase corpus."""
|
76 |
|
77 |
+
VERSION = datasets.Version("1.1.0")
|
78 |
|
79 |
BUILDER_CONFIGS = [
|
80 |
datasets.BuilderConfig(
|
|
|
104 |
def _split_generators(self, dl_manager):
    """Returns SplitGenerators.

    Builds one generator per entry in ``_URLs`` (the three canonical
    splits plus the two challenge sets), so adding a new entry to the
    URL table automatically yields a new split.
    """
    downloaded = dl_manager.download_and_extract(_URLs)
    generators = []
    for split_name, files in downloaded.items():
        generators.append(
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"filepath": files, "split": split_name},
            )
        )
    return generators
|
114 |
|
115 |
def _generate_examples(self, filepath, split):
|
|
|
119 |
target_filepath = filepath["dst"]
|
120 |
|
121 |
with open(source_filepath, encoding="utf-8") as f:
|
122 |
+
source_lines = [line.strip() for line in f if line.strip()]
|
123 |
with open(target_filepath, encoding="utf-8") as f:
|
124 |
+
target_lines = [
|
125 |
+
line.strip().replace(" <SEP>", "") for line in f if line.strip()
|
126 |
+
]
|
127 |
|
128 |
for id_ in range(len(source_lines)):
|
129 |
yield id_, {
|