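"""Hugging Face `datasets` loading script for MultiLegalNeg, a multilingual
(de/fr/it/swiss/en) legal negation dataset stored as xz-compressed JSONL
files with `text`, `spans`, and `tokens` fields."""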
import json

import datasets

# Prefer the standard-library LZMA module; fall back to pylzma if unavailable.
try:
    import lzma as xz
except ImportError:
    import pylzma as xz

datasets.logging.set_verbosity_info()
logger = datasets.logging.get_logger(__name__)
# Dataset card metadata; left empty in this script.
_DESCRIPTION = """\
"""

_HOMEPAGE = ""
_LICENSE = ""
_CITATION = ""

_URL = {
    'data/'
}

_LANGUAGES = [
    "de", "fr", "it", "swiss", "en"
]
class MultiLegalNegConfig(datasets.BuilderConfig):
    def __init__(self, name: str, **kwargs):
        super().__init__(name=name, **kwargs)
        # The language is the first underscore-separated part of the config
        # name (e.g. "de", or "all" for every language).
        self.language = name.split("_")[0]


class MultiLegalNeg(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIG_CLASS = MultiLegalNegConfig

    # One config per language, plus "all" for the combined dataset.
    BUILDER_CONFIGS = [
        MultiLegalNegConfig(f"{language}")
        for language in _LANGUAGES + ["all"]
    ]

    # Must match one of the config names defined above.
    DEFAULT_CONFIG_NAME = "all"
    def _info(self):
        features = datasets.Features(
            {
                "text": datasets.Value("string"),
                "spans": [
                    {
                        "start": datasets.Value("int64"),
                        "end": datasets.Value("int64"),
                        "token_start": datasets.Value("int64"),
                        "token_end": datasets.Value("int64"),
                        "label": datasets.Value("string"),
                    }
                ],
                "tokens": [
                    {
                        "text": datasets.Value("string"),
                        "start": datasets.Value("int64"),
                        "end": datasets.Value("int64"),
                        "id": datasets.Value("int64"),
                        "ws": datasets.Value("bool"),
                    }
                ],
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
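
    # Illustrative record shape as stored in the JSONL data files. Field names
    # mirror the Features declared above; the values and the "NEG" label are
    # hypothetical, not taken from this script:
    #   {"text": "The contract is not void.",
    #    "spans": [{"start": 16, "end": 19, "token_start": 3, "token_end": 3,
    #               "label": "NEG"}],
    #    "tokens": [{"text": "The", "start": 0, "end": 3, "id": 0, "ws": true},
    #               ...]}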
    def _split_generators(self, dl_manager):
        languages = _LANGUAGES if self.config.language == "all" else [self.config.language]

        data_files = {
            "train": [
                "data/train/it_train.jsonl.xz",
                "data/train/fr_train.jsonl.xz",
                "data/train/de_train.jsonl.xz",
                "data/train/swiss_train.jsonl.xz",
                "data/train/en_sherlock_train.jsonl.xz",
                "data/train/en_sfu_train.jsonl.xz",
                "data/train/en_bioscope_train.jsonl.xz",
            ],
            "test": [
                "data/test/it_test.jsonl.xz",
                "data/test/fr_test.jsonl.xz",
                "data/test/de_test.jsonl.xz",
                "data/test/swiss_test.jsonl.xz",
                "data/test/en_sherlock_test.jsonl.xz",
                "data/test/en_sfu_test.jsonl.xz",
                "data/test/en_bioscope_test.jsonl.xz",
            ],
            "validation": [
                "data/validation/it_validation.jsonl.xz",
                "data/validation/fr_validation.jsonl.xz",
                "data/validation/de_validation.jsonl.xz",
                "data/validation/swiss_validation.jsonl.xz",
                "data/validation/en_sherlock_validation.jsonl.xz",
                "data/validation/en_sfu_validation.jsonl.xz",
                "data/validation/en_bioscope_validation.jsonl.xz",
            ],
        }
        split_generators = []
        for split, file_names in data_files.items():
            filepaths = []
            for file_name in file_names:
                # Only download files belonging to the requested language(s);
                # file names are prefixed with their language code.
                if not any(file_name.split("/")[-1].startswith(f"{language}_") for language in languages):
                    continue
                try:
                    filepaths.append(dl_manager.download(file_name))
                except Exception:
                    # Skip files that fail to download so one missing file
                    # does not drop the rest of the split.
                    logger.warning("Could not download %s", file_name)
            split_generators.append(
                datasets.SplitGenerator(name=split, gen_kwargs={"filepaths": filepaths})
            )
        return split_generators
    def _generate_examples(self, filepaths):
        id_ = 0
        for filepath in filepaths:
            if filepath:
                logger.info("Generating examples from = %s", filepath)
                try:
                    # lzma.open accepts a path directly and handles decompression.
                    with xz.open(filepath, "rt", encoding="utf-8") as f:
                        for json_str in f:
                            example = json.loads(json_str)
                            if example is not None and isinstance(example, dict):
                                yield id_, example
                                id_ += 1
                except Exception:
                    logger.exception("Error while processing file %s", filepath)
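
# Usage sketch (assumes this script is hosted in the "rcds/MultiLegalNeg"
# dataset repository together with the data/ files; the repository name is an
# assumption taken from context, not verified here):
#
#   from datasets import load_dataset
#
#   # Pick a single language ("de", "fr", "it", "swiss", "en") or "all".
#   ds = load_dataset("rcds/MultiLegalNeg", "de", trust_remote_code=True)
#   print(ds["train"][0]["text"])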