"""Loading script for the BugNet dataset: pairs of buggy and fixed code submissions in Python and C++."""

import json

import datasets
|

_CITATION = """\
"""

_DESCRIPTION = """\
BugNet pairs buggy (failing) and fixed (passing) code submissions to
programming problems in Python and C++. Each example records the change
between the two versions and its location, the runtime error with its
stderr/stdout output, the problem description, and example input/output tests.
"""

_HOMEPAGE = "https://huggingface.co/datasets/alexjercan/bugnet"

_LICENSE = ""

_URL = "https://huggingface.co/datasets/alexjercan/bugnet/resolve/main/"


def _mk_urls(language):
    """Build the download URLs for a language config's splits and shared metadata."""
    return {
        "train": _URL + language + "_train.jsonl",
        "validation": _URL + language + "_validation.jsonl",
        "test": _URL + language + "_test.jsonl",
        "descriptions": _URL + "problem_descriptions.json",
        "tests": _URL + "problem_tests.json",
    }
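
# For example, _mk_urls("Python") points at Python_train.jsonl,
# Python_validation.jsonl, and Python_test.jsonl under _URL, plus the
# problem_descriptions.json and problem_tests.json metadata files that are
# shared across both language configs.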


class Bugnet(datasets.GeneratorBasedBuilder):
    """BugNet: pairs of buggy and fixed code submissions in Python and C++."""

    VERSION = datasets.Version("3.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="Python", version=VERSION, description="This part of bugnet contains Python bugs"),
        datasets.BuilderConfig(name="C++", version=VERSION, description="This part of bugnet contains C++ bugs"),
    ]

    DEFAULT_CONFIG_NAME = "Python"

    def _info(self):
        features = datasets.Features(
            {
                "problem_id": datasets.Value("string"),
                "language": datasets.Value("string"),
                "original_status": datasets.Value("string"),
                "fail": datasets.Value("string"),
                "pass": datasets.Value("string"),
                "change": datasets.Value("string"),
                "i1": datasets.Value("uint32"),
                "i2": datasets.Value("uint32"),
                "j1": datasets.Value("uint32"),
                "j2": datasets.Value("uint32"),
                "error": datasets.Value("string"),
                "stderr": datasets.Value("string"),
                "stdout": datasets.Value("string"),
                "description": datasets.Value("string"),
                "input": datasets.Value("string"),
                "output": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _mk_urls(self.config.name)
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir["train"],
                    "descriptions": data_dir["descriptions"],
                    "tests": data_dir["tests"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": data_dir["validation"],
                    "descriptions": data_dir["descriptions"],
                    "tests": data_dir["tests"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_dir["test"],
                    "descriptions": data_dir["descriptions"],
                    "tests": data_dir["tests"],
                },
            ),
        ]

    def _generate_examples(self, filepath, descriptions, tests):
        # Build a problem_id -> description lookup from the shared metadata file.
        with open(descriptions, encoding="utf-8") as file:
            description_data = json.load(file)

        descriptions = {}
        for item in description_data:
            key = item["problem_id"]
            value = item["description"]
            descriptions[key] = value

        # Build problem_id -> input/output lookups for the example tests.
        with open(tests, encoding="utf-8") as file:
            tests_data = json.load(file)

        inputs = {}
        outputs = {}
        for item in tests_data:
            key = item["problem_id"]
            inputs[key] = item["input"]
            outputs[key] = item["output"]

        # Each line of the split file is one JSON record; join it with the
        # per-problem description and tests collected above.
        with open(filepath, encoding="utf-8") as file:
            for key, row in enumerate(file):
                data = json.loads(row)

                yield key, {
                    "problem_id": data["problem_id"],
                    "language": data["language"],
                    "original_status": data["original_status"],
                    "fail": data["fail"],
                    "pass": data["pass"],
                    "change": data["change"],
                    "i1": data["i1"],
                    "i2": data["i2"],
                    "j1": data["j1"],
                    "j2": data["j2"],
                    "error": data["error"],
                    "stderr": data["stderr"],
                    "stdout": data["stdout"],
                    "description": descriptions[data["problem_id"]],
                    "input": inputs[data["problem_id"]],
                    "output": outputs[data["problem_id"]],
                }
|
|
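# A minimal usage sketch (assuming the dataset is published on the Hugging
# Face Hub as "alexjercan/bugnet"; pass "Python" or "C++" to pick a config):
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("alexjercan/bugnet", "Python", split="train")
#     print(dataset[0]["fail"])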