import os
from typing import Dict, List

import pandas as pd
from pie_modules.document.processing import RegexPartitioner
from pytorch_ie.annotations import BinaryRelation
from pytorch_ie.documents import (
    TextDocumentWithLabeledSpansAndBinaryRelations,
    TextDocumentWithLabeledSpansBinaryRelationsAndLabeledPartitions,
)

from pie_datasets.builders import BratBuilder
from pie_datasets.builders.brat import BratConfig, BratDocumentWithMergedSpans
from pie_datasets.core.dataset import DocumentConvertersType
from pie_datasets.document.processing import Caster, Converter, Pipeline


def get_split_paths(url_split_ids: str, subdirectory: str) -> Dict[str, List[str]]:
    df_splits = pd.read_csv(url_split_ids, sep=";")
    splits2ids = df_splits.groupby(df_splits["SET"]).agg(list).to_dict()["ID"]
    return {
        split.lower(): [os.path.join(subdirectory, split_id) for split_id in split_ids]
        for split, split_ids in splits2ids.items()
    }


URL = "https://github.com/ArneBinder/pie-datasets/raw/83fb46f904b13f335b6da3cce2fc7004d802ce4e/data/datasets/ArgumentAnnotatedEssays-2.0/brat-project-final.zip"
URL_SPLIT_IDS = "https://raw.githubusercontent.com/ArneBinder/pie-datasets/83fb46f904b13f335b6da3cce2fc7004d802ce4e/data/datasets/ArgumentAnnotatedEssays-2.0/train-test-split.csv"
SPLIT_PATHS = get_split_paths(URL_SPLIT_IDS, subdirectory="brat-project-final")
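# For illustration: SPLIT_PATHS maps lower-cased split names to lists of BRAT document
# paths. The split CSV has columns "ID" and "SET" (with values such as "TRAIN" and
# "TEST"), so the result looks roughly like (example IDs are illustrative):
#   {"train": ["brat-project-final/essay001", ...],
#    "test": ["brat-project-final/essay002", ...]}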

DEFAULT_ATTRIBUTIONS_TO_RELATIONS_DICT = {"For": "supports", "Against": "attacks"}


def convert_aae2_claim_attributions_to_relations(
    document: BratDocumentWithMergedSpans,
    method: str,
    attributions_to_relations_mapping: Dict[str, str] = DEFAULT_ATTRIBUTIONS_TO_RELATIONS_DICT,
    major_claim_label: str = "MajorClaim",
    claim_label: str = "Claim",
    semantically_same_label: str = "semantically_same",
) -> TextDocumentWithLabeledSpansAndBinaryRelations:
    """Collect the attributions of Claims from a BratDocumentWithMergedSpans and build new
    relations between MajorClaims and Claims based on these attributions, in one of the
    following ways:

    1) "connect_first":
        Each Claim points to the first MajorClaim, and every other MajorClaim is labeled
        as semantically the same as the first MajorClaim.
        The number of new relations created is: NoOfMajorClaims - 1 + NoOfClaims.
    2) "connect_all":
        Each Claim points to every MajorClaim, creating many-to-many relations.
        The number of new relations created is: NoOfMajorClaims x NoOfClaims.

    The attribution values are mapped to relation labels via the
    attributions_to_relations_mapping dictionary (DEFAULT_ATTRIBUTIONS_TO_RELATIONS_DICT
    by default).
    """
    document = document.copy()
    new_document = TextDocumentWithLabeledSpansAndBinaryRelations(
        text=document.text, id=document.id, metadata=document.metadata
    )

    # move spans and relations from the (copied) source document into the new document;
    # clear() detaches the annotations so that they can be re-attached
    spans = document.spans.clear()
    new_document.labeled_spans.extend(spans)
    relations = document.relations.clear()
    new_document.binary_relations.extend(relations)

    claim_attributes = [
        attribute
        for attribute in document.span_attributes
        if attribute.annotation.label == claim_label
    ]

    # get all MajorClaims, sorted by start position to ensure that the first MajorClaim
    # is really the first one that occurs in the text
    major_claims = sorted(
        [mc for mc in new_document.labeled_spans if mc.label == major_claim_label],
        key=lambda span: span.start,
    )

    if method == "connect_first":
        if len(major_claims) > 0:
            first_major_claim = major_claims.pop(0)

            # add a relation between each Claim and the first MajorClaim
            for claim_attribute in claim_attributes:
                new_relation = BinaryRelation(
                    head=claim_attribute.annotation,
                    tail=first_major_claim,
                    label=attributions_to_relations_mapping[claim_attribute.value],
                )
                new_document.binary_relations.append(new_relation)

            # add relations between the remaining MajorClaims and the first MajorClaim
            for major_claim in major_claims:
                new_relation = BinaryRelation(
                    head=major_claim,
                    tail=first_major_claim,
                    label=semantically_same_label,
                )
                new_document.binary_relations.append(new_relation)
    elif method == "connect_all":
        for major_claim in major_claims:
            for claim_attribute in claim_attributes:
                new_relation = BinaryRelation(
                    head=claim_attribute.annotation,
                    tail=major_claim,
                    label=attributions_to_relations_mapping[claim_attribute.value],
                )
                new_document.binary_relations.append(new_relation)
    else:
        raise ValueError(f"unknown method: {method}")

    return new_document
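
# Minimal usage sketch (`brat_document` is a hypothetical BratDocumentWithMergedSpans
# instance; the builder below applies this conversion automatically via a Converter step):
#
#   converted = convert_aae2_claim_attributions_to_relations(
#       document=brat_document, method="connect_first"
#   )
#   assert isinstance(converted, TextDocumentWithLabeledSpansAndBinaryRelations)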


def get_common_pipeline_steps(conversion_method: str) -> dict:
    return dict(
        convert=Converter(
            function=convert_aae2_claim_attributions_to_relations,
            method=conversion_method,
        ),
    )


class ArgumentAnnotatedEssaysV2Config(BratConfig):
    def __init__(self, conversion_method: str, **kwargs):
        """BuilderConfig for ArgumentAnnotatedEssaysV2.

        Args:
            conversion_method: either "connect_first" or "connect_all", see
                convert_aae2_claim_attributions_to_relations
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(merge_fragmented_spans=True, **kwargs)
        self.conversion_method = conversion_method
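
# Config sketch: a hypothetical "connect_all" variant could be registered analogously to
# the default config below, e.g.
#   ArgumentAnnotatedEssaysV2Config(name="connect_all", conversion_method="connect_all")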


class ArgumentAnnotatedEssaysV2(BratBuilder):
    BUILDER_CONFIG_CLASS = ArgumentAnnotatedEssaysV2Config
    BASE_DATASET_PATH = "DFKI-SLT/brat"
    BASE_DATASET_REVISION = "bb8c37d84ddf2da1e691d226c55fef48fd8149b5"

    # we need to add None to the list of dataset variants to support the default dataset variant
    BASE_BUILDER_KWARGS_DICT = {
        dataset_variant: {"url": URL, "split_paths": SPLIT_PATHS}
        for dataset_variant in [BratBuilder.DEFAULT_CONFIG_NAME, None]
    }

    BUILDER_CONFIGS = [
        ArgumentAnnotatedEssaysV2Config(
            name=BratBuilder.DEFAULT_CONFIG_NAME,
            conversion_method="connect_first",
        ),
    ]

    DOCUMENT_TYPES = {
        BratBuilder.DEFAULT_CONFIG_NAME: BratDocumentWithMergedSpans,
    }

    @property
    def document_converters(self) -> DocumentConvertersType:
        if self.config.name == "default" or self.config.name is None:
            return {
                TextDocumentWithLabeledSpansAndBinaryRelations: Pipeline(
                    **get_common_pipeline_steps(conversion_method=self.config.conversion_method)
                ),
                TextDocumentWithLabeledSpansBinaryRelationsAndLabeledPartitions: Pipeline(
                    **get_common_pipeline_steps(conversion_method=self.config.conversion_method),
                    cast=Caster(
                        document_type=TextDocumentWithLabeledSpansBinaryRelationsAndLabeledPartitions
                    ),
                    add_partitions=RegexPartitioner(
                        partition_layer_name="labeled_partitions",
                        default_partition_label="paragraph",
                        pattern="\n",
                        strip_whitespace=True,
                        verbose=False,
                    ),
                ),
            }
        else:
            raise ValueError(f"Unknown dataset variant: {self.config.name}")