Upload dataset
Files changed:
- README.md: +34 -38
- data/OneStory/train/train-00000-of-00001.parquet: +3 -0

README.md CHANGED
@@ -1,6 +1,6 @@
 ---
 language:
-
+- ar
 license: cc-by-4.0
 dataset_info:
   features:
@@ -13,28 +13,18 @@ dataset_info:
   - name: end
     dtype: float64
   - name: start
-    dtype:
+    dtype: float64
   - name: transcript
     dtype: string
   - name: transcript
     dtype: string
-configs:
-- config_name: default
-  default: True
-  data_files:
-  - split: train
-    path: data/*/train/train-*
-  - split: test
-    path: data/*/test/test-*
   splits:
   - name: train
-    num_bytes:
-    num_examples:
-
-
-
-  download_size: 8450773572
-  dataset_size: 8894461272.875
+    num_bytes: 65241850.0
+    num_examples: 36
+  download_size: 59587235
+  dataset_size: 65241850.0
+configs:
 - config_name: AmenyKH
   data_files:
   - split: train
@@ -80,16 +70,6 @@ configs:
     num_examples: 3
   download_size: 550807181
   dataset_size: 559811332.0
-- config_name: Tunisian_dataset_STT-TTS15s_filtred1.0
-  data_files:
-  - split: train
-    path: data/Tunisian_dataset_STT-TTS15s_filtred1.0/train/train-*
-  splits:
-  - name: train
-    num_bytes: 1334823662.375
-    num_examples: 1029
-  download_size: 1192677319
-  dataset_size: 1334823662.375
 - config_name: TunSwitchCS
   data_files:
   - split: train
@@ -118,6 +98,16 @@ configs:
     num_examples: 344
   download_size: 1001771927
   dataset_size: 1307984322.625
+- config_name: Tunisian_dataset_STT-TTS15s_filtred1.0
+  data_files:
+  - split: train
+    path: data/Tunisian_dataset_STT-TTS15s_filtred1.0/train/train-*
+  splits:
+  - name: train
+    num_bytes: 1334823662.375
+    num_examples: 1029
+  download_size: 1192677319
+  dataset_size: 1334823662.375
 - config_name: Wav2Vec-tunisian-Darja
   data_files:
   - split: train
@@ -188,16 +178,6 @@ configs:
     num_examples: 53
   download_size: 289952550
   dataset_size: 291350485.0
-- config_name: Youtube_TN_Shorts
-  data_files:
-  - split: train
-    path: data/Youtube_TN_Shorts/train/train-*
-  splits:
-  - name: train
-    num_bytes: 435317340.0
-    num_examples: 135
-  download_size: 433946877
-  dataset_size: 435317340.0
 - config_name: Youtube_TNScrapped_V1
   data_files:
   - split: train
@@ -213,6 +193,16 @@ configs:
     num_examples: 5
   download_size: 509649276
   dataset_size: 517133844
+- config_name: Youtube_TN_Shorts
+  data_files:
+  - split: train
+    path: data/Youtube_TN_Shorts/train/train-*
+  splits:
+  - name: train
+    num_bytes: 435317340.0
+    num_examples: 135
+  download_size: 433946877
+  dataset_size: 435317340.0
 - config_name: Youtube_TV
   data_files:
   - split: train
@@ -223,5 +213,11 @@ configs:
     num_examples: 4
  download_size: 69480960
   dataset_size: 70380078.0
+- config_name: default
+  data_files:
+  - split: train
+    path: data/OneStory/train/train-*
+  - split: test
+    path: data/*/test/test-*
 version: 1.0
----
+---
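The regenerated metadata above lists one entry per subset under configs:, each with its own data_files glob (for example data/Youtube_TN_Shorts/train/train-*), and adds a default config whose train split resolves to the new data/OneStory/train/train-* shard and whose test split uses the shared data/*/test/test-* pattern. Below is a minimal sketch of loading these configs with the Hugging Face datasets library; the repo id username/tunisian-speech is a hypothetical placeholder, since the actual repository name is not visible in this commit view.

```python
# Minimal sketch, assuming the Hugging Face `datasets` library.
# "username/tunisian-speech" is a hypothetical placeholder repo id;
# the real repository name is not shown in this commit view.
from datasets import load_dataset

# The "default" config added in this commit resolves its train split from
# data/OneStory/train/train-* and its test split from data/*/test/test-*.
one_story = load_dataset("username/tunisian-speech", "default", split="train")

# Any named config (AmenyKH, TunSwitchCS, Youtube_TV, ...) can be requested
# the same way and pulls only that subset's parquet shards.
youtube_tv = load_dataset("username/tunisian-speech", "Youtube_TV", split="train")

print(one_story.num_rows)   # 36 examples according to the card metadata
print(youtube_tv.num_rows)  # 4 examples according to the card metadata
```

Passing the config name explicitly avoids depending on any default-config resolution, which may matter here because the old default: True flag was dropped when the card was regenerated.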
data/OneStory/train/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d5fa25573d764c882a561772a6d4f8d179e0e00041f26439074ae9368545af2c
+size 59587235
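The new parquet shard is tracked with Git LFS, so the committed blob is only a three-line pointer recording the LFS spec version, the SHA-256 of the actual file, and its size in bytes. The sketch below, assuming the shard has already been fetched locally (for example via git lfs pull or a hub download), checks a local copy against those two recorded values:

```python
# Minimal sketch for verifying a locally fetched copy of the parquet shard
# against the Git LFS pointer committed above; the path is simply the
# repo-relative path from this commit and assumes the file exists locally.
import hashlib
from pathlib import Path

path = Path("data/OneStory/train/train-00000-of-00001.parquet")
blob = path.read_bytes()

# The pointer records the real file's byte size and SHA-256 digest.
assert len(blob) == 59587235
assert hashlib.sha256(blob).hexdigest() == (
    "d5fa25573d764c882a561772a6d4f8d179e0e00041f26439074ae9368545af2c"
)
print("parquet matches its LFS pointer")
```

The 59587235-byte size in the pointer matches the download_size written into the new default config's metadata in README.md, consistent with the OneStory train split consisting of this single shard.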