import json

import datasets

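# Prefer the stdlib lzma; fall back to pylzma (assumed here to expose a
# compatible open() interface) where lzma is unavailable.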
try:
    import lzma as xz
except ImportError:
    import pylzma as xz

datasets.logging.set_verbosity_info()
logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """\
"""

_HOMEPAGE = ""

_LICENSE = ""

_CITATION = ""

_URL = "data/"
_LANGUAGES = [
    "de", "fr", "it", "swiss", "en"
]
_SUBSETS = [
    "_sherlock", "_sfu", "_bioscope", ""
]

_BUILDS = ['de', 'fr', 'it', 'swiss', 'en_bioscope', 'en_sherlock', 'en_sfu', 'en_all', 'all_all']
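
# Each build name is "<language>" or "<language>_<subset>", e.g. "de" or
# "en_sherlock"; MultiLegalNegConfig splits on "_" to recover the two parts.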



class MultiLegalNegConfig(datasets.BuilderConfig):

    def __init__(self, name: str, **kwargs):
        super().__init__(name=name, **kwargs)
        parts = name.split("_")
        self.language = parts[0]
        self.subset = f'_{parts[1]}' if len(parts) == 2 else ""

class MultiLegalNeg(datasets.GeneratorBasedBuilder):

    BUILDER_CONFIG_CLASS = MultiLegalNegConfig
    
    BUILDER_CONFIGS = [
        MultiLegalNegConfig(build) for build in _BUILDS
    ]

    def _info(self):
        features = datasets.Features(
            {   
                "text": datasets.Value("string"),        
                "spans": [
                    {
                        "start": datasets.Value("int64"),
                        "end": datasets.Value("int64"),
                        "token_start": datasets.Value("int64"),
                        "token_end": datasets.Value("int64"),
                        "label": datasets.Value("string")                     
                    }
                ],
                "tokens": [
                    {
                        "text": datasets.Value("string"),
                        "start": datasets.Value("int64"),
                        "end": datasets.Value("int64"),
                        "id": datasets.Value("int64"),
                        "ws": datasets.Value("bool")
                    }
                ]
            }
        )
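        # Hypothetical example of one input JSONL record matching this schema
        # (illustrative values only, not taken from the actual data):
        #   {"text": "No error was found.",
        #    "spans": [{"start": 0, "end": 2, "token_start": 0,
        #               "token_end": 0, "label": "cue"}],
        #    "tokens": [{"text": "No", "start": 0, "end": 2, "id": 0, "ws": true}]}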
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
    
    def _split_generators(self, dl_manager):
        languages = _LANGUAGES if self.config.language == "all" else [self.config.language]
        subsets = _SUBSETS if self.config.subset == "_all" else [self.config.subset]
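        # "all" expands to the full matrix: "en_all" covers en_sherlock,
        # en_sfu, en_bioscope and plain en, while "all_all" covers every
        # language/subset combination.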

        split_generators = []
        for split in [datasets.Split.TRAIN, datasets.Split.TEST, datasets.Split.VALIDATION]:
            filepaths = []
            for language in languages:
                for subset in subsets:
                    # Files follow data/<split>/<language><subset>_<split>.jsonl.xz;
                    # skip combinations that have no file for this split.
                    try:
                        filepaths.append(dl_manager.download(f'data/{split}/{language}{subset}_{split}.jsonl.xz'))
                    except Exception:
                        continue
            split_generators.append(datasets.SplitGenerator(name=split, gen_kwargs={'filepaths': filepaths}))

        return split_generators

    def _generate_examples(self, filepaths):
        id_ = 0
        for filepath in filepaths:
            if filepath:
                logger.info("Generating examples from = %s", filepath)
                try:
                    # Stream the xz-compressed JSONL file line by line rather
                    # than reading it fully into memory.
                    with xz.open(filepath, 'rt', encoding='utf-8') as f:
                        for json_str in f:
                            example = json.loads(json_str)
                            if example is not None and isinstance(example, dict):
                                yield id_, example
                                id_ += 1
                except Exception:
                    logger.exception("Error while processing file %s", filepath)
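

# Minimal usage sketch (a hypothetical example; assumes this file is the
# repository's loading script and the data follows the data/<split>/ layout
# referenced above):
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("path/to/this/script.py", "en_sherlock")
#     print(dataset["train"][0]["text"])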