import json
import os

import datasets

_OPEN_SLU_CITATION = """\
xxx"""

_OPEN_SLU_DESCRIPTION = """\
xxx"""

# NOTE: this file appears to have been adapted from the SuperGLUE loading
# script; `_ATIS_DESCRIPTION` currently holds the SuperGLUE citation rather
# than a description of ATIS, and `_BOOLQ_CITATION` is unused.
_ATIS_DESCRIPTION = """\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}
Note that each SuperGLUE dataset has its own citation. Please see the source to
get the correct citation for each contained dataset.
"""

_BOOLQ_CITATION = """\
@inproceedings{clark2019boolq,
  title={BoolQ: Exploring the Surprising Difficulty of Natural Yes/No Questions},
  author={Clark, Christopher and Lee, Kenton and Chang, Ming-Wei and Kwiatkowski, Tom and Collins, Michael and Toutanova, Kristina},
  booktitle={NAACL},
  year={2019}
}"""


class OpenSLUConfig(datasets.BuilderConfig):
    """BuilderConfig for OpenSLU."""

    def __init__(self, features, data_url, citation, url, intent_label_classes=None, slot_label_classes=None, **kwargs):
        """BuilderConfig for OpenSLU.

        Args:
            features: `list[string]`, list of the features that will appear in the
                feature dict. Should not include "label".
            data_url: `string`, url to download the tar.gz archive from.
            citation: `string`, citation for the data set.
            url: `string`, url for information about the data set.
            intent_label_classes: `list[string]`, the list of classes for the intent label.
            slot_label_classes: `list[string]`, the list of classes for the slot label.
            **kwargs: keyword arguments forwarded to super.
        """
        # Version history:
        # 0.0.1: Initial version.
        super(OpenSLUConfig, self).__init__(version=datasets.Version("0.0.1"), **kwargs)
        self.features = features
        self.intent_label_classes = intent_label_classes
        self.slot_label_classes = slot_label_classes
        self.data_url = data_url
        self.citation = citation
        self.url = url
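
# Each extracted archive is expected to contain `train.jsonl`, `dev.jsonl` and
# `test.jsonl`, where every line is a standalone JSON object whose keys match
# the features declared in `_info` below. An illustrative (hypothetical) row:
#
#     {"text": ["show", "me", "flights", "to", "boston"],
#      "slot": ["O", "O", "O", "O", "B-toloc.city_name"],
#      "intent": "atis_flight"}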

class OpenSLU(datasets.GeneratorBasedBuilder):
    """The OpenSLU benchmark."""

    # All four configs currently reuse `_ATIS_DESCRIPTION`; see the NOTE above.
    BUILDER_CONFIGS = [
        OpenSLUConfig(
            name="atis",
            description=_ATIS_DESCRIPTION,
            features=["text"],
            data_url="https://huggingface.co/datasets/LightChen2333/OpenSLU/resolve/main/atis.tar.gz",
            citation="",
            url="",
        ),
        OpenSLUConfig(
            name="snips",
            description=_ATIS_DESCRIPTION,
            features=["text"],
            data_url="https://huggingface.co/datasets/LightChen2333/OpenSLU/resolve/main/snips.tar.gz",
            citation="",
            url="",
        ),
        OpenSLUConfig(
            name="mix-atis",
            description=_ATIS_DESCRIPTION,
            features=["text"],
            data_url="https://huggingface.co/datasets/LightChen2333/OpenSLU/resolve/main/mix-atis.tar.gz",
            citation="",
            url="",
        ),
        OpenSLUConfig(
            name="mix-snips",
            description=_ATIS_DESCRIPTION,
            features=["text"],
            data_url="https://huggingface.co/datasets/LightChen2333/OpenSLU/resolve/main/mix-snips.tar.gz",
            citation="",
            url="",
        ),
    ]

    def _info(self):
        # Token-level features: every configured feature plus "slot" is a
        # sequence of strings; "intent" is a single utterance-level label.
        features = {feature: datasets.Sequence(datasets.Value("string")) for feature in self.config.features}
        features["slot"] = datasets.Sequence(datasets.Value("string"))
        features["intent"] = datasets.Value("string")
        return datasets.DatasetInfo(
            description=_OPEN_SLU_DESCRIPTION + self.config.description,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation + "\n" + _OPEN_SLU_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download and extract the per-task archive, then point each split at
        # its JSON Lines file inside the extracted `<task_name>/` directory.
        dl_dir = dl_manager.download_and_extract(self.config.data_url) or ""
        task_name = _get_task_name_from_data_url(self.config.data_url)
        dl_dir = os.path.join(dl_dir, task_name)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "train.jsonl"),
                    "split": datasets.Split.TRAIN,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "dev.jsonl"),
                    "split": datasets.Split.VALIDATION,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "test.jsonl"),
                    "split": datasets.Split.TEST,
                },
            ),
        ]

    def _generate_examples(self, data_file, split):
        # One JSON object per line; the row is yielded as-is and cast to the
        # features declared in `_info`.
        with open(data_file, encoding="utf-8") as f:
            for index, line in enumerate(f):
                row = json.loads(line)
                yield index, row


# The helpers below are not referenced by this builder; they appear to be
# carried over from the SuperGLUE loading script.
def _cast_label(label):
    """Converts the label into the appropriate string version."""
    if isinstance(label, str):
        return label
    elif isinstance(label, bool):
        return "True" if label else "False"
    elif isinstance(label, int):
        assert label in (0, 1)
        return str(label)
    else:
        raise ValueError("Invalid label format.")


def _get_record_entities(passage):
    """Returns the unique set of entities."""
    text = passage["text"]
    entity_spans = list()
    for entity in passage["entities"]:
        entity_text = text[entity["start"]: entity["end"] + 1]
        entity_spans.append({"text": entity_text, "start": entity["start"], "end": entity["end"] + 1})
    entity_spans = sorted(entity_spans, key=lambda e: e["start"])  # sort by start index
    entity_texts = set(e["text"] for e in entity_spans)  # for backward compatibility
    return entity_texts, entity_spans


def _get_record_answers(qa):
    """Returns the unique set of answers."""
    if "answers" not in qa:
        return []
    answers = set()
    for answer in qa["answers"]:
        answers.add(answer["text"])
    return sorted(answers)


def _get_task_name_from_data_url(data_url):
    # e.g. ".../resolve/main/atis.tar.gz" -> "atis"
    return data_url.split("/")[-1].split(".")[0]
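
# Minimal usage sketch (illustrative; assumes this script is the loading
# script of the `LightChen2333/OpenSLU` dataset repository on the Hugging
# Face Hub, and that your `datasets` version still supports script-based
# loading; recent releases may additionally require `trust_remote_code=True`):
#
#     from datasets import load_dataset
#
#     atis = load_dataset("LightChen2333/OpenSLU", "atis")
#     example = atis["train"][0]
#     example["text"]    # token sequence of the utterance
#     example["slot"]    # one slot label per token, aligned with `text`
#     example["intent"]  # utterance-level intent label string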