"""TODO: Add a description here."""

import json

import datasets

# TODO: Add BibTeX citation
_CITATION = """\
"""

# TODO: Add description of the dataset here
_DESCRIPTION = """\
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

# Base URL of the dataset files on the Hugging Face Hub.
_URL = "https://huggingface.co/datasets/alexjercan/bugnet/resolve/main/"


def _mk_urls(language):
    """Return the download URLs for one language's splits plus the shared
    problem-descriptions file.

    Args:
        language: Config name used as the file prefix (e.g. "Python", "C++").

    Returns:
        Dict mapping "train"/"validation"/"test"/"descriptions" to full URLs.
    """
    return {
        "train": _URL + language + "_train.jsonl",
        "validation": _URL + language + "_validation.jsonl",
        "test": _URL + language + "_test.jsonl",
        "descriptions": _URL + "problem_descriptions.json",
    }


class Bugnet(datasets.GeneratorBasedBuilder):
    """TODO: Short description of my dataset."""

    VERSION = datasets.Version("1.4.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="Python",
            version=VERSION,
            description="This part of bugnet contains Python bugs",
        ),
        datasets.BuilderConfig(
            name="C++",
            version=VERSION,
            description="This part of bugnet contains C++ bugs",
        ),
    ]

    DEFAULT_CONFIG_NAME = "Python"

    def _info(self):
        """Declare the example schema: source pair, diff span indices (i1/i2
        into the original, j1/j2 into the changed source — TODO confirm against
        the data generator), error info, and the joined problem description."""
        features = datasets.Features(
            {
                "problem_id": datasets.Value("string"),
                "original_status": datasets.Value("string"),
                "original_src": datasets.Value("string"),
                "changed_src": datasets.Value("string"),
                "change": datasets.Value("string"),
                "i1": datasets.Value("uint32"),
                "i2": datasets.Value("uint32"),
                "j1": datasets.Value("uint32"),
                "j2": datasets.Value("uint32"),
                "error": datasets.Value("string"),
                "stderr": datasets.Value("string"),
                "description": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the files for the selected config and build one
        SplitGenerator per split; every split shares the same
        problem-descriptions file."""
        urls = _mk_urls(self.config.name)
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "filepath": data_dir[key],
                    "descriptions": data_dir["descriptions"],
                },
            )
            for split, key in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "validation"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, filepath, descriptions):
        """Yield (index, example) pairs for one split.

        Args:
            filepath: Path to the split's JSONL file (one example per line).
            descriptions: Path to the JSON file of problem descriptions.

        Raises:
            KeyError: If a row's problem_id has no entry in the descriptions
                file (kept as a hard failure so bad joins surface loudly).
        """
        with open(descriptions, encoding="utf-8") as file:
            # Map problem_id -> description once, for the per-row join below.
            id_to_description = {
                item["problem_id"]: item["description"] for item in json.load(file)
            }

        with open(filepath, encoding="utf-8") as file:
            for key, row in enumerate(file):
                data = json.loads(row)
                yield key, {
                    "problem_id": data["problem_id"],
                    "original_status": data["original_status"],
                    "original_src": data["original_src"],
                    "changed_src": data["changed_src"],
                    "change": data["change"],
                    "i1": data["i1"],
                    "i2": data["i2"],
                    "j1": data["j1"],
                    "j2": data["j2"],
                    "error": data["error"],
                    "stderr": data["stderr"],
                    "description": id_to_description[data["problem_id"]],
                }