# Multilingual-SBD.py: Hugging Face dataset loading script for MultiLegalSBD.
import json

import datasets
from huggingface_hub.file_download import hf_hub_url

# Prefer the standard-library lzma module; fall back to pylzma if unavailable.
try:
    import lzma as xz
except ImportError:
    import pylzma as xz

datasets.logging.set_verbosity_info()
logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """\
"""
_HOMEPAGE = ""
_LICENSE = ""
_CITATION = ""
# Base path of the data files inside the dataset repository.
_URL = "data/"
_LANGUAGES = [
    "fr", "it", "es", "en", "de", "pt"
]
_TYPES = [
    "laws", "judgements"
]
_SOURCES = [
    "MultiLegalPile", "Wipolex", "Jug", "BVA", "CC", "IP", "SCOTUS", "SwissJudgementPrediction",
    "Gesetz", "Constitution", "CivilCode", "CriminalCode",
]
"""
see https://huggingface.co/datasets/joelito/MultiLegalPile_Wikipedia_Filtered/blob/main/MultiLegalPile_Wikipedia_Filtered.py
"""
_HIGHEST_NUMBER_OF_SHARDS = 4


class MultilingualSBDConfig(datasets.BuilderConfig):
    """BuilderConfig named `<language>_<type>`, e.g. `fr_laws` or `all_all`."""

    def __init__(self, name: str, **kwargs):
        super().__init__(name=name, **kwargs)
        self.language = name.split("_")[0]
        self.type = name.split("_")[1]
        # self.source = name.split("_")[2]


class MultilingualSBD(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIG_CLASS = MultilingualSBDConfig
    BUILDER_CONFIGS = [
        MultilingualSBDConfig(f"{language}_{type}")
        for language in _LANGUAGES + ["all"]
        for type in _TYPES + ["all"]
    ]
    DEFAULT_CONFIG_NAME = "all_all"

    def _info(self):
        # Each record carries the raw text plus sentence spans and tokens with
        # character offsets (a Prodigy-style annotation layout).
        features = datasets.Features(
            {
                "text": datasets.Value("string"),
                "spans": [
                    {
                        "start": datasets.Value("int64"),
                        "end": datasets.Value("int64"),
                        "label": datasets.Value("string"),
                        "token_start": datasets.Value("int64"),
                        "token_end": datasets.Value("int64"),
                    }
                ],
                "tokens": [
                    {
                        "text": datasets.Value("string"),
                        "start": datasets.Value("int64"),
                        "end": datasets.Value("int64"),
                        "id": datasets.Value("int64"),
                        "ws": datasets.Value("bool"),
                    }
                ],
                "source": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
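
    # A single example, schematically (field values illustrative, not taken
    # from the data):
    #   {
    #       "text": "Art. 1 ...",
    #       "spans": [{"start": 0, "end": 42, "label": "...", "token_start": 0, "token_end": 8}],
    #       "tokens": [{"text": "Art.", "start": 0, "end": 4, "id": 0, "ws": True}],
    #       "source": "CC",
    #   }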

    def _split_generators(self, dl_manager):
        def download_url(filename):
            url = hf_hub_url(
                repo_id="tbrugger/Multilingual-SBD",
                filename=f"data/{filename}.jsonl.xz",
                repo_type="dataset",
            )
            return dl_manager.download(url)

        languages = _LANGUAGES if self.config.language == "all" else [self.config.language]
        types = _TYPES if self.config.type == "all" else [self.config.type]

        # Only a single train split is published; every shard feeds into it.
        split_generators = []
        for split in [datasets.Split.TRAIN]:
            filepaths = []
            for language in languages:
                for type in types:
                    for shard in range(_HIGHEST_NUMBER_OF_SHARDS):
                        try:
                            filepaths.append(download_url(f"{language}_{type}_{shard}"))
                        except Exception:
                            # No more shards for this (language, type) pair.
                            break
            split_generators.append(
                datasets.SplitGenerator(name=split, gen_kwargs={"filepaths": filepaths})
            )
        return split_generators

    def _generate_examples(self, filepaths):
        id_ = 0
        for filepath in filepaths:
            if not filepath:
                continue
            logger.info("Generating examples from = %s", filepath)
            try:
                # lzma.open accepts a path directly; opening in text mode
                # yields one JSON document per line.
                with xz.open(filepath, "rt", encoding="utf-8") as f:
                    for json_str in f:
                        example = json.loads(json_str)
                        if id_ == 0:
                            logger.debug("First example: %s", example)
                        if isinstance(example, dict):
                            yield id_, example
                            id_ += 1
            except Exception:
                logger.exception("Error while processing file %s", filepath)
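

# Usage sketch (illustrative, not part of the loading script): assuming the
# script is consumed through `datasets.load_dataset` with a config name of
# the form `<language>_<type>`; the repo id mirrors the one used in
# `_split_generators` above.
#
#   from datasets import load_dataset
#
#   ds = load_dataset("tbrugger/Multilingual-SBD", "fr_laws", split="train")
#   first = ds[0]
#   sentences = [first["text"][s["start"]:s["end"]] for s in first["spans"]]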