subatomicseer committed
Commit f9bd15a · verified · 1 Parent(s): 4e1527a

Create text_speech_codes_v2.py

Files changed (1)
  1. text_speech_codes_v2.py +98 -0
text_speech_codes_v2.py ADDED
@@ -0,0 +1,98 @@
+ # Lint as: python3
+ """Semantic and acoustic codes dataset with text."""
+
+
+ import glob
+ import os
+
+ import datasets
+ import torch
+
+
+ class TextSpeechCodesDatasetConfig(datasets.BuilderConfig):
+     """BuilderConfig for the Text-SpeechCodes dataset."""
+
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+
+
+ class TextSpeechCodesDataset(datasets.GeneratorBasedBuilder):
+     """Codes dataset."""
+
+     BUILDER_CONFIGS = [
+         TextSpeechCodesDatasetConfig(name="all", description="TextSpeechCodes dataset"),
+     ]
+
+     @property
+     def manual_download_instructions(self):
+         return (
+             "Codes should be computed before using this dataset. "
+             "`datasets.load_dataset('/path/to/this/script', name='all', data_dir='path/to/folder/folder_name/of/codes')`"
+         )
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "id": datasets.Value("string"),
+                 "length": datasets.Value("int32"),
+                 "transcription": datasets.Value("string"),
+                 "acoustic_tokens": datasets.Array2D(shape=(None, 12), dtype="int16"),
+                 "semantic_tokens": datasets.Array2D(shape=(None, 1), dtype="int16"),
+                 "transcription_bytes": datasets.Sequence(datasets.Value("uint8")),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             features=features,
+         )
+
+     def _split_generators(self, dl_manager):
+         base_data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
+         if not os.path.exists(base_data_dir):
+             raise FileNotFoundError(
+                 f"{base_data_dir} does not exist. Make sure you pass a manual dir via "
+                 f"`datasets.load_dataset('/this/script', data_dir=...)` "
+                 f"that points to the folder containing the .pt code files. "
+                 f"Manual download instructions: {self.manual_download_instructions}"
+             )
+
+         # Recursively collect every .pt shard under the manual data dir.
+         train_data_dirs = glob.glob(os.path.join(base_data_dir, "**", "*.pt"), recursive=True)
+         print(f"Found {len(train_data_dirs)} files")
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"data_dirs": train_data_dirs},
+             ),
+         ]
+
+     def _generate_examples(self, data_dirs):
+         for path in data_dirs:
+             id_ = os.path.splitext(os.path.basename(path))[0]
+
+             data = torch.load(path, map_location="cpu", weights_only=False)
+             for i, (k, v) in enumerate(data.items()):
+                 acoustic_tokens = v["acoustic_codes"]
+                 semantic_tokens = v["semantic_codes"]
+
+                 # Normalize both token tensors to (time, codebooks).
+                 if acoustic_tokens.ndim == 3:
+                     acoustic_tokens = acoustic_tokens.squeeze(0).transpose(0, 1)
+                 else:
+                     acoustic_tokens = acoustic_tokens.transpose(0, 1)
+                 if semantic_tokens.ndim == 2:
+                     semantic_tokens = semantic_tokens.transpose(0, 1)
+                 else:
+                     semantic_tokens = semantic_tokens.unsqueeze(1)
+
+                 transcription = v["transcription"]
+                 transcription_bytes = list(transcription.encode("utf-8"))
+
+                 yield f"{id_}_{i}", {
+                     "id": str(k),
+                     "length": semantic_tokens.shape[0] + len(transcription_bytes),
+                     "transcription": transcription,
+                     "transcription_bytes": transcription_bytes,
+                     "acoustic_tokens": acoustic_tokens,
+                     "semantic_tokens": semantic_tokens,
+                 }
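
For reference, a minimal sketch of the .pt payload that `_generate_examples` expects: each file is a dict mapping utterance ids to dicts with `acoustic_codes`, `semantic_codes`, and `transcription`. The folder name, shard name, id keys, vocabulary sizes, and sequence lengths below are made-up placeholders; only the field names, dtypes, and tensor ranks come from the loader above.

import os

import torch

os.makedirs("codes", exist_ok=True)

# Shapes follow the two branches in _generate_examples:
#   acoustic_codes: (1, 12, T) or (12, T)  ->  normalized to (T, 12)
#   semantic_codes: (1, T)     or (T,)     ->  normalized to (T, 1)
payload = {
    "utt_0001": {  # hypothetical utterance id
        "acoustic_codes": torch.randint(0, 1024, (1, 12, 250), dtype=torch.int16),
        "semantic_codes": torch.randint(0, 512, (1, 250), dtype=torch.int16),
        "transcription": "hello world",
    },
    "utt_0002": {
        "acoustic_codes": torch.randint(0, 1024, (12, 180), dtype=torch.int16),
        "semantic_codes": torch.randint(0, 512, (180,), dtype=torch.int16),
        "transcription": "another utterance",
    },
}
torch.save(payload, os.path.join("codes", "shard_0000.pt"))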
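
And a usage sketch matching `manual_download_instructions`, assuming a `datasets` version that still supports script-based loaders; the script path is a placeholder, and `trust_remote_code=True` is required on recent versions:

import datasets

ds = datasets.load_dataset(
    "/path/to/text_speech_codes_v2.py",  # placeholder path to this script
    name="all",
    data_dir="codes",
    trust_remote_code=True,
)
print(ds["train"][0]["transcription"])         # e.g. "hello world" with the synthetic shard above
print(len(ds["train"][0]["semantic_tokens"]))  # number of semantic frames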