import datasets
import pandas as pd
import json

_DESCRIPTION = """
BIRD SQL Dataset with complete database content.
Total rows: 366,787,649
Total chunks: 3660
"""

class BirdSQLDatabase(datasets.GeneratorBasedBuilder):
    """Loading script for the BIRD SQL databases, exposing raw row content
    ("database_content") and per-table schema metadata ("table_metadata")."""

    VERSION = datasets.Version("1.0.0")
    
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="database_content", description="Complete database content"),
        datasets.BuilderConfig(name="table_metadata", description="Table metadata"),
    ]
    
    DEFAULT_CONFIG_NAME = "database_content"
    
    def _info(self):
        # Each config exposes its own feature schema.
        if self.config.name == "database_content":
            features = datasets.Features({
                "db_id": datasets.Value("string"),
                "table_name": datasets.Value("string"), 
                "row_index": datasets.Value("int64"),
                "row_data": datasets.Value("string"),
                "split": datasets.Value("string")
            })
        else:  # table_metadata
            features = datasets.Features({
                "db_id": datasets.Value("string"),
                "table_name": datasets.Value("string"),
                "columns": datasets.Value("string"),
                "column_types": datasets.Value("string"),
                "primary_keys": datasets.Value("string"),
                "total_rows": datasets.Value("int64"),
                "split": datasets.Value("string")
            })
        
        return datasets.DatasetInfo(description=_DESCRIPTION, features=features)
    
    def _split_generators(self, dl_manager):
        if self.config.name == "database_content":
            # The manifest enumerates every Parquet chunk that makes up the content.
            manifest_file = dl_manager.download("database_content_manifest.json")
            with open(manifest_file) as f:
                manifest = json.load(f)
            
            # download() accepts a list, so all chunks can be resolved in one call.
            chunk_files = dl_manager.download(manifest["files"])
            
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"chunk_files": chunk_files})]
        else:
            metadata_file = dl_manager.download("table_metadata.parquet")
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": metadata_file})]
    
    def _generate_examples(self, filepath=None, chunk_files=None):
        if self.config.name == "database_content":
            # Keep one running index across all chunks so example keys stay unique.
            idx = 0
            for chunk_file in chunk_files:
                df = pd.read_parquet(chunk_file)
                for _, row in df.iterrows():
                    yield idx, {
                        "db_id": row["db_id"],
                        "table_name": row["table_name"],
                        "row_index": row["row_index"],
                        "row_data": row["row_data"],
                        "split": row["split"]
                    }
                    idx += 1
        else:
            # Metadata fits in a single Parquet file; the row index serves as the key.
            df = pd.read_parquet(filepath)
            for idx, row in df.iterrows():
                yield idx, {
                    "db_id": row["db_id"],
                    "table_name": row["table_name"],
                    "columns": row["columns"],
                    "column_types": row["column_types"],
                    "primary_keys": row["primary_keys"],
                    "total_rows": row["total_rows"],
                    "split": row["split"]
                }
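
# Usage sketch -- the repo id below is a placeholder for wherever this script
# and its data files are hosted on the Hugging Face Hub; depending on your
# `datasets` version you may need trust_remote_code=True to run a loading script.
#
#   from datasets import load_dataset
#
#   # ~367M rows: streaming avoids materializing the full dataset on disk.
#   content = load_dataset("user/bird-sql-database", "database_content",
#                          streaming=True, trust_remote_code=True)
#   metadata = load_dataset("user/bird-sql-database", "table_metadata",
#                           trust_remote_code=True)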