import json
import os

import datasets

# Define the different configurations (languages)
_LANGUAGES = ["java", "kt", "py"]


class BugLocalizationConfig(datasets.BuilderConfig):
    """BuilderConfig for the BugLocalization dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for BugLocalization.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class BugLocalization(datasets.GeneratorBasedBuilder):
    """BugLocalization dataset."""

    BUILDER_CONFIGS = [
        BugLocalizationConfig(name=lang, description=f"BugLocalization dataset for {lang}.")
        for lang in _LANGUAGES
    ]

    def _info(self):
        # Infer the feature schema from the JSONL file that ships next to this script.
        sample_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), f"{self.config.name}.jsonl")
        schema = self._infer_schema(sample_file)
        return datasets.DatasetInfo(
            features=datasets.Features(schema),
        )

    def _infer_schema(self, sample_file, num_samples=100):
        """Infer a feature schema from the first `num_samples` lines of a JSONL file."""
        schema = {}
        with open(sample_file, encoding="utf-8") as f:
            for i, line in enumerate(f):
                if i >= num_samples:
                    break
                data = json.loads(line)
                for key in data:
                    if key not in schema:
                        # Assume all values are strings; adjust if the data holds other types.
                        schema[key] = datasets.Value("string")
        return schema

    def _split_generators(self, dl_manager):
        # Directory where this script and the JSONL data files are located.
        data_dir = os.path.abspath(os.path.dirname(__file__))
        # One JSONL file per configuration, named after the config (java.jsonl, kt.jsonl, py.jsonl),
        # matching the file used for schema inference in _info.
        filepath = os.path.join(data_dir, f"{self.config.name}.jsonl")
        # All data goes into a single train split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": filepath},
            )
        ]

    def _generate_examples(self, filepath):
        """Generate examples from a JSONL file, one example per line."""
        with open(filepath, encoding="utf-8") as f:
            for id_, line in enumerate(f):
                data = json.loads(line)
                yield id_, data
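

# Minimal usage sketch (not part of the builder API). Assuming this script is saved as
# dataset/dataset.py next to java.jsonl, kt.jsonl, and py.jsonl, each language
# configuration can be loaded by name with `datasets.load_dataset`. Passing
# `trust_remote_code=True` may be required on recent versions of the `datasets`
# library, since this is a script-based dataset.
if __name__ == "__main__":
    ds = datasets.load_dataset(
        "dataset/dataset.py",   # path to this loading script (or a hub repo id hosting it)
        name="py",              # one of the configs: "java", "kt", "py"
        split="train",
        trust_remote_code=True,
    )
    print(ds)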