Sudnya committed
Commit 6b56185 · verified · 1 Parent(s): c2c9957

Upload bird_sql_database.py with huggingface_hub

Files changed (1):
  1. bird_sql_database.py +84 -0
bird_sql_database.py ADDED
@@ -0,0 +1,84 @@
+ import datasets
+ import pandas as pd
+ import json
+
+ _DESCRIPTION = """
+ BIRD SQL Dataset with complete database content.
+ Total rows: 366,787,649
+ Total chunks: 3660
+ """
+
+ class BirdSQLDatabase(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="database_content", description="Complete database content"),
+         datasets.BuilderConfig(name="table_metadata", description="Table metadata"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "database_content"
+
+     def _info(self):
+         if self.config.name == "database_content":
+             features = datasets.Features({
+                 "db_id": datasets.Value("string"),
+                 "table_name": datasets.Value("string"),
+                 "row_index": datasets.Value("int64"),
+                 "row_data": datasets.Value("string"),
+                 "split": datasets.Value("string")
+             })
+         else:  # table_metadata
+             features = datasets.Features({
+                 "db_id": datasets.Value("string"),
+                 "table_name": datasets.Value("string"),
+                 "columns": datasets.Value("string"),
+                 "column_types": datasets.Value("string"),
+                 "primary_keys": datasets.Value("string"),
+                 "total_rows": datasets.Value("int64"),
+                 "split": datasets.Value("string")
+             })
+
+         return datasets.DatasetInfo(description=_DESCRIPTION, features=features)
+
+     def _split_generators(self, dl_manager):
+         if self.config.name == "database_content":
+             manifest_file = dl_manager.download("database_content_manifest.json")
+             with open(manifest_file) as f:
+                 manifest = json.load(f)
+
+             chunk_files = []
+             for chunk_name in manifest["files"]:
+                 chunk_file = dl_manager.download(chunk_name)
+                 chunk_files.append(chunk_file)
+
+             return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"chunk_files": chunk_files})]
+         else:
+             metadata_file = dl_manager.download("table_metadata.parquet")
+             return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": metadata_file})]
+
+     def _generate_examples(self, filepath=None, chunk_files=None):
+         if self.config.name == "database_content":
+             idx = 0
+             for chunk_file in chunk_files:
+                 df = pd.read_parquet(chunk_file)
+                 for _, row in df.iterrows():
+                     yield idx, {
+                         "db_id": row["db_id"],
+                         "table_name": row["table_name"],
+                         "row_index": row["row_index"],
+                         "row_data": row["row_data"],
+                         "split": row["split"]
+                     }
+                     idx += 1
+         else:
+             df = pd.read_parquet(filepath)
+             for idx, row in df.iterrows():
+                 yield idx, {
+                     "db_id": row["db_id"],
+                     "table_name": row["table_name"],
+                     "columns": row["columns"],
+                     "column_types": row["column_types"],
+                     "primary_keys": row["primary_keys"],
+                     "total_rows": row["total_rows"],
+                     "split": row["split"]
+                 }
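
The database_content branch of _split_generators expects a database_content_manifest.json alongside the data files. The manifest's exact schema is not part of this commit; judging from the manifest["files"] lookup above, it is presumably a JSON object with a "files" array naming the parquet chunks, roughly like this sketch (chunk file names are illustrative, not taken from the commit):

    # Presumed shape of database_content_manifest.json, inferred from the
    # manifest["files"] lookup in _split_generators. Chunk names below are
    # illustrative placeholders.
    manifest = {
        "files": [
            "database_content_chunk_0000.parquet",
            "database_content_chunk_0001.parquet",
            # ... one entry per chunk, 3660 in total per _DESCRIPTION
        ]
    }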
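A minimal loading sketch, assuming the script is published as a Hub dataset repository. The repository id below is hypothetical (the commit shows the author Sudnya but not the repository name), and recent versions of the datasets library require trust_remote_code=True to execute loader scripts like this one:

    import datasets

    # Hypothetical repository id -- substitute the actual dataset repo.
    ds = datasets.load_dataset(
        "Sudnya/bird-sql-database",
        "database_content",        # or "table_metadata"
        split="train",
        streaming=True,            # ~366M rows; streaming avoids a full download-and-build
        trust_remote_code=True,    # required for script-based datasets
    )

    for example in ds.take(3):
        print(example["db_id"], example["table_name"], example["row_index"])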
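Note that row_data (and, in the table_metadata config, columns, column_types, and primary_keys) is typed as a plain string feature. Assuming these strings are JSON-encoded -- the commit does not show how the parquet chunks were produced, so this is an inference -- a consumer would decode them per example:

    import json

    # Illustrative records; the actual string contents are not shown in the commit.
    content_example = {"row_data": '{"customer_id": 3, "name": "Alice"}'}
    row = json.loads(content_example["row_data"])  # assumes JSON-encoded row values

    meta_example = {"columns": '["customer_id", "name"]', "column_types": '["INTEGER", "TEXT"]'}
    schema = dict(zip(json.loads(meta_example["columns"]),
                      json.loads(meta_example["column_types"])))
    # -> {"customer_id": "INTEGER", "name": "TEXT"}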