import json

import datasets
import pandas as pd

_DESCRIPTION = """
BIRD SQL Dataset with complete database content.
Total rows: 366,787,649
Total chunks: 3660
"""

class BirdSQLDatabase(datasets.GeneratorBasedBuilder):
    """Dataset builder exposing the BIRD SQL database content and table metadata."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="database_content", description="Complete database content"),
        datasets.BuilderConfig(name="table_metadata", description="Table metadata"),
    ]

    DEFAULT_CONFIG_NAME = "database_content"
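
    # Either config can be selected by name at load time; a minimal sketch,
    # with a placeholder repo id:
    #   load_dataset("<user>/bird-sql-database", name="table_metadata")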

    def _info(self):
        # One schema per config: individual table rows vs. per-table metadata.
        if self.config.name == "database_content":
            features = datasets.Features({
                "db_id": datasets.Value("string"),
                "table_name": datasets.Value("string"),
                "row_index": datasets.Value("int64"),
                "row_data": datasets.Value("string"),
                "split": datasets.Value("string"),
            })
        else:
            features = datasets.Features({
                "db_id": datasets.Value("string"),
                "table_name": datasets.Value("string"),
                "columns": datasets.Value("string"),
                "column_types": datasets.Value("string"),
                "primary_keys": datasets.Value("string"),
                "total_rows": datasets.Value("int64"),
                "split": datasets.Value("string"),
            })

        return datasets.DatasetInfo(description=_DESCRIPTION, features=features)
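
    # Illustrative records for each config (all values below are placeholders;
    # only the field names and types come from the schema above):
    #   database_content: {"db_id": "example_db", "table_name": "example_table",
    #                      "row_index": 0, "row_data": "<serialized row>",
    #                      "split": "train"}
    #   table_metadata:   {"db_id": "example_db", "table_name": "example_table",
    #                      "columns": "<serialized list>", "column_types": "<serialized list>",
    #                      "primary_keys": "<serialized list>", "total_rows": 1000,
    #                      "split": "train"}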

    def _split_generators(self, dl_manager):
        if self.config.name == "database_content":
            # The content is sharded into parquet chunks; a manifest lists them all.
            manifest_file = dl_manager.download("database_content_manifest.json")
            with open(manifest_file) as f:
                manifest = json.load(f)

            chunk_files = [dl_manager.download(name) for name in manifest["files"]]

            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"chunk_files": chunk_files})]
        else:
            metadata_file = dl_manager.download("table_metadata.parquet")
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": metadata_file})]
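
    # The manifest is read only for its "files" list; a minimal sketch of the
    # expected JSON (the chunk file names are assumptions, not the real ones):
    #   {"files": ["database_content_chunk_0000.parquet",
    #              "database_content_chunk_0001.parquet", "..."]}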

    def _generate_examples(self, filepath=None, chunk_files=None):
        if self.config.name == "database_content":
            # Read one chunk at a time so only a single parquet file is in memory.
            idx = 0
            for chunk_file in chunk_files:
                df = pd.read_parquet(chunk_file)
                for _, row in df.iterrows():
                    yield idx, {
                        "db_id": row["db_id"],
                        "table_name": row["table_name"],
                        "row_index": row["row_index"],
                        "row_data": row["row_data"],
                        "split": row["split"],
                    }
                    idx += 1
        else:
            df = pd.read_parquet(filepath)
            for idx, row in df.iterrows():
                yield idx, {
                    "db_id": row["db_id"],
                    "table_name": row["table_name"],
                    "columns": row["columns"],
                    "column_types": row["column_types"],
                    "primary_keys": row["primary_keys"],
                    "total_rows": row["total_rows"],
                    "split": row["split"],
                }
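

# Usage sketch (not part of the loading script). The repo id is a placeholder,
# streaming is assumed to be preferable given the ~367M rows, and decoding
# row_data as JSON is an assumption the script itself does not confirm.
if __name__ == "__main__":
    import itertools

    from datasets import load_dataset

    ds = load_dataset(
        "<user>/bird-sql-database",  # placeholder repo id
        name="database_content",
        split="train",
        streaming=True,  # avoid materializing 366,787,649 rows locally
        trust_remote_code=True,  # needed for script-based datasets in recent versions
    )
    for record in itertools.islice(ds, 3):
        row = json.loads(record["row_data"])  # assumption: JSON-serialized row
        print(record["db_id"], record["table_name"], row)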