"""The Cornell eRulemaking Corpus (CDCP) dataset for English Argumentation Mining."""

import glob
import json
from os.path import abspath, isdir
from pathlib import Path

import datasets

_CITATION = """\ |
|
@inproceedings{niculae-etal-2017-argument, |
|
title = "Argument Mining with Structured {SVM}s and {RNN}s", |
|
author = "Niculae, Vlad and |
|
Park, Joonsuk and |
|
Cardie, Claire", |
|
booktitle = "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)", |
|
month = jul, |
|
year = "2017", |
|
address = "Vancouver, Canada", |
|
publisher = "Association for Computational Linguistics", |
|
url = "https://aclanthology.org/P17-1091", |
|
doi = "10.18653/v1/P17-1091", |
|
pages = "985--995", |
|
abstract = "We propose a novel factor graph model for argument mining, designed for settings in which the argumentative relations in a document do not necessarily form a tree structure. (This is the case in over 20{\\%} of the web comments dataset we release.) Our model jointly learns elementary unit type classification and argumentative relation prediction. Moreover, our model supports SVM and RNN parametrizations, can enforce structure constraints (e.g., transitivity), and can express dependencies between adjacent relations and propositions. Our approaches outperform unstructured baselines in both web comments and argumentative essay datasets.", |
|
} |
|
""" |
|
|
|
_DESCRIPTION = "The CDCP dataset for English Argumentation Mining" |
|
|
|
_HOMEPAGE = "" |
|
|
|
_LICENSE = "" |
|
|
|
|
|
|
|
|
|
_URL = "https://facultystaff.richmond.edu/~jpark/data/cdcp_acl17.zip" |
|
|
|
_VERSION = datasets.Version("1.0.0") |
|
|
|
_SPAN_CLASS_LABELS = ["fact", "policy", "reference", "testimony", "value"] |
|
_RELATION_CLASS_LABELS = ["evidence", "reason"] |
|
|
|
|
|
class CDCP(datasets.GeneratorBasedBuilder):
    """CDCP is an argumentation mining dataset."""

    BUILDER_CONFIGS = [datasets.BuilderConfig(name="default", version=_VERSION)]

    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "text": datasets.Value("string"),
                "propositions": datasets.Sequence(
                    {
                        "start": datasets.Value("int32"),
                        "end": datasets.Value("int32"),
                        "label": datasets.ClassLabel(names=_SPAN_CLASS_LABELS),
                        "url": datasets.Value("string"),
                    }
                ),
                "relations": datasets.Sequence(
                    {
                        "head": datasets.Value("int32"),
                        "tail": datasets.Value("int32"),
                        "label": datasets.ClassLabel(names=_RELATION_CLASS_LABELS),
                    }
                ),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

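    # Note: `datasets.Sequence` over a dict of features encodes each loaded
    # example as a dict of lists rather than a list of dicts. A loaded example
    # therefore looks roughly like this (values invented for illustration):
    #     {
    #         "id": "00195",
    #         "text": "...",
    #         "propositions": {"start": [0, 52], "end": [52, 104],
    #                          "label": ["value", "policy"], "url": ["", ""]},
    #         "relations": {"head": [1], "tail": [0], "label": ["reason"]},
    #     }
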
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Use a manually supplied data directory if one was given (extracting
        # it first if it points at an archive); otherwise download and extract
        # the corpus from _URL. In both cases the data lives in a `cdcp`
        # subdirectory with `train` and `test` splits.
        if dl_manager.manual_dir is not None:
            base_path = abspath(dl_manager.manual_dir)
            if not isdir(base_path):
                base_path = dl_manager.extract(base_path)
        else:
            base_path = dl_manager.download_and_extract(_URL)
        base_path = Path(base_path) / "cdcp"

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"path": base_path / "train"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"path": base_path / "test"}
            ),
        ]

    def _generate_examples(self, path):
        """Yields examples."""
        _id = 0
        text_file_names = sorted(glob.glob(f"{path}/*.txt"))
        for text_file_name in text_file_names:
            txt_fn = Path(text_file_name)
            ex_id = txt_fn.stem
            # Example 00411 is deliberately skipped.
            if ex_id == "00411":
                continue
            # Each text file is accompanied by an annotation file with the
            # same stem, e.g. 12345.txt and 12345.ann.json.
            ann_fn = txt_fn.with_suffix(".ann.json")
            with open(txt_fn, encoding="utf-8") as f:
                text = f.read()
            with open(ann_fn, encoding="utf-8") as f:
                annotations = json.load(f)
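            # Structure of the annotation dict consumed below (inferred from
            # the accesses in this function):
            #   "prop_offsets": list of [start, end] character offsets into `text`
            #   "prop_labels":  one label per proposition (see _SPAN_CLASS_LABELS)
            #   "url":          dict mapping a proposition index (as a string) to a URL
            #   "evidences", "reasons": lists of [[tail_first, tail_last], head],
            #                   each linking an inclusive range of tail
            #                   propositions to a single head proposition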
            propositions = [
                {
                    "start": start,
                    "end": end,
                    "label": label,
                    "url": annotations["url"].get(str(idx), ""),
                }
                for idx, ((start, end), label) in enumerate(
                    zip(annotations["prop_offsets"], annotations["prop_labels"])
                )
            ]
            relations = []
            # Expand each annotated tail range into one relation per tail
            # proposition; the range bounds are inclusive.
            for (tail_first_idx, tail_last_idx), head_idx in annotations["evidences"]:
                for tail_idx in range(tail_first_idx, tail_last_idx + 1):
                    relations.append({"head": head_idx, "tail": tail_idx, "label": "evidence"})
            for (tail_first_idx, tail_last_idx), head_idx in annotations["reasons"]:
                for tail_idx in range(tail_first_idx, tail_last_idx + 1):
                    relations.append({"head": head_idx, "tail": tail_idx, "label": "reason"})
            yield _id, {
                "id": ex_id,
                "text": text,
                "propositions": propositions,
                "relations": relations,
            }
            _id += 1
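

# Minimal usage sketch (an assumption, not part of the original script: it
# presumes this file is saved locally as e.g. `cdcp.py` and passed to
# `datasets.load_dataset` by path; recent versions of the `datasets` library
# may additionally require `trust_remote_code=True` for script-based loading):
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__)
    print(ds)
    print(ds["train"][0]["propositions"])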