from pathlib import Path
from typing import Dict, Iterator, List, Tuple
import datasets
import pandas as pd
from datasets.download.download_manager import DownloadManager
from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks
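
# The `seacrowd` helpers imported above (schemas, SEACrowdConfig, Licenses,
# Tasks) come from the SEACrowd datahub package:
# https://github.com/SEACrowd/seacrowd-datahub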
# No paper citation found.
_CITATION = ""
_LOCAL = False
_LANGUAGES = ["tha"]
_DATASETNAME = "thai_databricks_dolly"
_DESCRIPTION = """\
This is a Thai instruction-following dataset translated from databricks-dolly-15k using
Google Cloud Translation. databricks-dolly-15k is an open-source dataset of
instruction-following records generated by thousands of Databricks employees in
several behavioral categories outlined in the InstructGPT paper, including
brainstorming, classification, closed QA, generation, information extraction,
open QA, and summarization.
"""
_HOMEPAGE = "https://huggingface.co/datasets/Thaweewat/databricks-dolly-15k-th"
_LICENSE = Licenses.CC_BY_SA_3_0.value
_URL = "https://huggingface.co/datasets/Thaweewat/databricks-dolly-15k-th/resolve/main/databricks-dolly-15k-th.parquet"
_SUPPORTED_TASKS = [Tasks.INSTRUCTION_TUNING]
_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"
class ThaiDatabricksDollyDataset(datasets.GeneratorBasedBuilder):
"""Thai Databricks Dolly Dataset"""
SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
SEACROWD_SCHEMA_NAME = "t2t"
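
    # Two configs are exposed: the raw source schema and the shared SEACrowd
    # text-to-text ("t2t") schema.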
BUILDER_CONFIGS = [
SEACrowdConfig(
name=f"{_DATASETNAME}_source",
version=SOURCE_VERSION,
description=f"{_DATASETNAME} source schema",
schema="source",
subset_id=_DATASETNAME,
),
SEACrowdConfig(
name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
version=SEACROWD_VERSION,
description=f"{_DATASETNAME} SEACrowd schema",
schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
subset_id=_DATASETNAME,
),
]
DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
def _info(self) -> datasets.DatasetInfo:
if self.config.schema == "source":
features = datasets.Features(
{
"instruction": datasets.Value("string"),
"context": datasets.Value("string"),
"response": datasets.Value("string"),
"category": datasets.Value("string"),
}
)
elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
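            # SEACrowd t2t features: id, text_1, text_2, text_1_name, text_2_name.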
            features = schemas.text2text_features
        else:
            raise ValueError(f"Invalid config schema: {self.config.schema}")
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager: DownloadManager) -> List[datasets.SplitGenerator]:
"""Returns SplitGenerators."""
data_file = Path(dl_manager.download_and_extract(_URL))
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_file})]
    def _generate_examples(self, filepath: Path) -> Iterator[Tuple[int, Dict]]:
        """Yield examples as (key, example) tuples."""
# pyarrow is an implicit dependency to load the parquet files
        df = pd.read_parquet(filepath, engine="pyarrow")
        # Replace missing values with empty strings so the .strip() calls
        # below cannot fail on NaN.
        df = df.fillna("")
for idx, row in df.iterrows():
instruction = row.get("instruction").strip()
context = row.get("context").strip()
response = row.get("response").strip()
category = row.get("category").strip()
if self.config.schema == "source":
example = {
"instruction": instruction,
"context": context,
"response": response,
"category": category,
}
elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
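                # Fold the optional translated context into text_1; the
                # response becomes text_2.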
text_1 = f"Context: {context}\n\n{instruction}" if context else instruction
text_2 = response
example = {
"id": str(idx),
"text_1": text_1,
"text_2": text_2,
"text_1_name": "context_and_instruction",
"text_2_name": "response",
                }
            else:
                raise ValueError(f"Invalid config schema: {self.config.schema}")
yield idx, example
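

# Minimal usage sketch (assumptions, not part of this file: the script is saved
# locally as thai_databricks_dolly.py and the seacrowd package is importable):
#
#   import datasets
#
#   ds = datasets.load_dataset(
#       "thai_databricks_dolly.py",
#       name="thai_databricks_dolly_source",  # or "thai_databricks_dolly_seacrowd_t2t"
#       split="train",
#       trust_remote_code=True,
#   )
#   print(ds[0]["instruction"])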