Josh Cole committed
Commit · ed3e9f5 · 1 Parent(s): 6f319f3
tryign this new thing
Files changed:
- Generator.ipynb +76 -9
- test/dataset.arrow +3 -0
- test/dataset_info.json +35 -0
- test/state.json +14 -0
- training/dataset.arrow +3 -0
- training/dataset_info.json +35 -0
- training/state.json +14 -0
Generator.ipynb
CHANGED
@@ -2,14 +2,14 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": 2,
    "id": "bbd1b7a1-dbb7-4243-99e0-70a6cd47d573",
    "metadata": {},
    "outputs": [
     {
      "data": {
       "application/vnd.jupyter.widget-view+json": {
-       "model_id": "
+       "model_id": "bcc2f5482d8342a7915cecf9e7855531",
        "version_major": 2,
        "version_minor": 0
       },
@@ -28,7 +28,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": 13,
    "id": "306958c8-4603-4b9b-b941-6a824777164d",
    "metadata": {},
    "outputs": [],
@@ -42,7 +42,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": 14,
    "id": "4ac69d3b-38c6-49af-aefe-63755bf3f0e9",
    "metadata": {},
    "outputs": [],
@@ -59,7 +59,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": 15,
    "id": "9192b631-388f-4306-b975-9ba770b9dc4d",
    "metadata": {},
    "outputs": [],
@@ -73,17 +73,84 @@
     "tbl = table.InMemoryTable(\n",
     " pa.Table.from_pandas(df)\n",
     ")\n",
-    "ds = Dataset(tbl)"
+    "ds = Dataset(tbl, split=[\"test\", \"training\"])"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count":
-   "id": "
+   "execution_count": 16,
+   "id": "f37d68ea-cbe7-4dd1-8215-f9449fe047f4",
    "metadata": {},
    "outputs": [],
    "source": [
-    "ds.save_to_disk(
+    "ds.save_to_disk(\"test/\")\n",
+    "ds.save_to_disk(\"training/\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "id": "bac1a601-a7a1-434e-917d-0e372684f56b",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Resuming upload of the dataset shards.\n"
+     ]
+    },
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "61cfa14ccb514ff4961072752bc3d4da",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "Pushing dataset shards to the dataset hub: 0%| | 0/1 [00:00<?, ?it/s]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "5803c7d37ce1426794af8ad65f618275",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "Downloading metadata: 0%| | 0.00/1.20k [00:00<?, ?B/s]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Updating downloaded metadata with the new split.\n"
+     ]
+    },
+    {
+     "ename": "ValueError",
+     "evalue": "Split ['test', 'training'] already present",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
+      "Input \u001b[0;32mIn [12]\u001b[0m, in \u001b[0;36m<cell line: 1>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mds\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpush_to_hub\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43msharpcoder/bjorn_training\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n",
+      "File \u001b[0;32m~/.local/lib/python3.10/site-packages/datasets/arrow_dataset.py:4342\u001b[0m, in \u001b[0;36mDataset.push_to_hub\u001b[0;34m(self, repo_id, split, private, token, branch, max_shard_size, shard_size, embed_external_files)\u001b[0m\n\u001b[1;32m 4340\u001b[0m repo_info\u001b[38;5;241m.\u001b[39mdataset_size \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m dataset_nbytes\n\u001b[1;32m 4341\u001b[0m repo_info\u001b[38;5;241m.\u001b[39msize_in_bytes \u001b[38;5;241m=\u001b[39m repo_info\u001b[38;5;241m.\u001b[39mdownload_size \u001b[38;5;241m+\u001b[39m repo_info\u001b[38;5;241m.\u001b[39mdataset_size\n\u001b[0;32m-> 4342\u001b[0m repo_info\u001b[38;5;241m.\u001b[39msplits[split] \u001b[38;5;241m=\u001b[39m SplitInfo(\n\u001b[1;32m 4343\u001b[0m split, num_bytes\u001b[38;5;241m=\u001b[39mdataset_nbytes, num_examples\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mlen\u001b[39m(\u001b[38;5;28mself\u001b[39m), dataset_name\u001b[38;5;241m=\u001b[39mdataset_name\n\u001b[1;32m 4344\u001b[0m )\n\u001b[1;32m 4345\u001b[0m info_to_dump \u001b[38;5;241m=\u001b[39m repo_info\n\u001b[1;32m 4346\u001b[0m buffer \u001b[38;5;241m=\u001b[39m BytesIO()\n",
+      "File \u001b[0;32m~/.local/lib/python3.10/site-packages/datasets/splits.py:523\u001b[0m, in \u001b[0;36mSplitDict.__setitem__\u001b[0;34m(self, key, value)\u001b[0m\n\u001b[1;32m 521\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCannot add elem. (key mismatch: \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mkey\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m != \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mvalue\u001b[38;5;241m.\u001b[39mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m)\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 522\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m key \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m:\n\u001b[0;32m--> 523\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mSplit \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mkey\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m already present\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 524\u001b[0m \u001b[38;5;28msuper\u001b[39m()\u001b[38;5;241m.\u001b[39m\u001b[38;5;21m__setitem__\u001b[39m(key, value)\n",
+      "\u001b[0;31mValueError\u001b[0m: Split ['test', 'training'] already present"
+     ]
+    }
+   ],
+   "source": [
+    "# ds.push_to_hub(\"sharpcoder/bjorn_training\")"
    ]
   },
   {
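Note on the failing cell above: ds.push_to_hub("sharpcoder/bjorn_training") raises ValueError: Split ['test', 'training'] already present because the whole table is built with a list as its split name, saved twice, and then pushed against hub metadata that already records that split. A more conventional flow with the datasets library is to divide the data into real splits and work with a DatasetDict. The sketch below is illustrative only, not the notebook's code; the toy DataFrame, the 0.25 test size, and the commented-out repo id are assumptions.

import pandas as pd
from datasets import Dataset

# Toy frame matching the schema in dataset_info.json: a float32 sequence plus a string.
df = pd.DataFrame({
    "audio": [[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [0.6, 0.7]],
    "text": ["a", "b", "c", "d"],
})

ds = Dataset.from_pandas(df)                  # same result as wrapping an InMemoryTable by hand
splits = ds.train_test_split(test_size=0.25)  # DatasetDict with "train" and "test" splits

splits["train"].save_to_disk("training/")     # one directory per split, as committed here
splits["test"].save_to_disk("test/")

# Pushing the DatasetDict uploads each split under its own name and avoids
# the "Split ... already present" collision shown in the traceback above.
# splits.push_to_hub("sharpcoder/bjorn_training")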
test/dataset.arrow
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4f04ab2df5a7149d88530b5620ac60fc1a7f12e48e51b2abcfe5e8528d808b5
+size 102032
test/dataset_info.json
ADDED
@@ -0,0 +1,35 @@
+{
+ "builder_name": null,
+ "citation": "",
+ "config_name": null,
+ "dataset_size": null,
+ "description": "",
+ "download_checksums": null,
+ "download_size": null,
+ "features": {
+  "audio": {
+   "feature": {
+    "dtype": "float32",
+    "id": null,
+    "_type": "Value"
+   },
+   "length": -1,
+   "id": null,
+   "_type": "Sequence"
+  },
+  "text": {
+   "dtype": "string",
+   "id": null,
+   "_type": "Value"
+  }
+ },
+ "homepage": "",
+ "license": "",
+ "post_processed": null,
+ "post_processing_size": null,
+ "size_in_bytes": null,
+ "splits": null,
+ "supervised_keys": null,
+ "task_templates": null,
+ "version": null
+}
test/state.json
ADDED
@@ -0,0 +1,14 @@
+{
+ "_data_files": [
+  {
+   "filename": "dataset.arrow"
+  }
+ ],
+ "_fingerprint": "34ff31483e0b21bc",
+ "_format_columns": null,
+ "_format_kwargs": {},
+ "_format_type": null,
+ "_indexes": {},
+ "_output_all_columns": false,
+ "_split": "['test', 'training']"
+}
training/dataset.arrow
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4f04ab2df5a7149d88530b5620ac60fc1a7f12e48e51b2abcfe5e8528d808b5
+size 102032
training/dataset_info.json
ADDED
@@ -0,0 +1,35 @@
+{
+ "builder_name": null,
+ "citation": "",
+ "config_name": null,
+ "dataset_size": null,
+ "description": "",
+ "download_checksums": null,
+ "download_size": null,
+ "features": {
+  "audio": {
+   "feature": {
+    "dtype": "float32",
+    "id": null,
+    "_type": "Value"
+   },
+   "length": -1,
+   "id": null,
+   "_type": "Sequence"
+  },
+  "text": {
+   "dtype": "string",
+   "id": null,
+   "_type": "Value"
+  }
+ },
+ "homepage": "",
+ "license": "",
+ "post_processed": null,
+ "post_processing_size": null,
+ "size_in_bytes": null,
+ "splits": null,
+ "supervised_keys": null,
+ "task_templates": null,
+ "version": null
+}
training/state.json
ADDED
@@ -0,0 +1,14 @@
+{
+ "_data_files": [
+  {
+   "filename": "dataset.arrow"
+  }
+ ],
+ "_fingerprint": "34ff31483e0b21bc",
+ "_format_columns": null,
+ "_format_kwargs": {},
+ "_format_type": null,
+ "_indexes": {},
+ "_output_all_columns": false,
+ "_split": "['test', 'training']"
+}
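Note: the test/ and training/ directories added above are exactly what Dataset.save_to_disk writes: the Arrow data file (tracked via Git LFS), the feature schema in dataset_info.json, and the fingerprint/split bookkeeping in state.json. For reference, they can be reloaded directly; a minimal sketch, assuming the datasets library is installed and the paths are as committed here:

from datasets import load_from_disk

# Reload the directories written by save_to_disk in the notebook above.
test_ds = load_from_disk("test/")
train_ds = load_from_disk("training/")

# Features come back per dataset_info.json: "audio" as a Sequence of float32, "text" as a string.
print(test_ds.features)
print(len(train_ds))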