akshansh36 committed
Commit 215fc5b · verified · 1 Parent(s): 0a4efd4

Upload 16 files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+model.index filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,12 +1,14 @@
----
-title: Vodex AI
-emoji: 🏃
-colorFrom: blue
-colorTo: red
-sdk: gradio
-sdk_version: 4.41.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+---
+title: RVC⚡ZERO
+emoji:
+colorFrom: gray
+colorTo: indigo
+sdk: gradio
+sdk_version: 4.28.3
+app_file: app.py
+license: mit
+pinned: true
+short_description: Voice conversion framework based on VITS
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,360 @@
import os
import gradio as gr
import spaces
from infer_rvc_python import BaseLoader
import random
import logging
import time
import soundfile as sf
from infer_rvc_python.main import download_manager
import zipfile
import edge_tts
import asyncio
import librosa
import traceback
from pedalboard import Pedalboard, Reverb, Compressor, HighpassFilter
from pedalboard.io import AudioFile
from pydub import AudioSegment
import noisereduce as nr
import numpy as np
import urllib.request
import shutil
import threading

logging.getLogger("infer_rvc_python").setLevel(logging.ERROR)

converter = BaseLoader(only_cpu=False, hubert_path=None, rmvpe_path=None)

title = "<center><strong><font size='7'>Vodex AI</font></strong></center>"
theme = "aliabid94/new-theme"

def find_files(directory):
    # Collect model-related files (.pth, .zip, .index) in a directory.
    file_paths = []
    for filename in os.listdir(directory):
        if filename.endswith('.pth') or filename.endswith('.zip') or filename.endswith('.index'):
            file_paths.append(os.path.join(directory, filename))
    return file_paths

def unzip_in_folder(my_zip, my_dir):
    # Extract an archive flat into my_dir (internal directory structure is dropped).
    with zipfile.ZipFile(my_zip) as zf:
        for zip_info in zf.infolist():
            if zip_info.is_dir():
                continue
            zip_info.filename = os.path.basename(zip_info.filename)
            zf.extract(zip_info, my_dir)

def find_my_model(a_, b_):
    # Resolve a model/index pair. If a .txt "link file" is given, download the
    # target next to it and pick up the resulting .pth/.index files.
    if a_ is None or a_.endswith(".pth"):
        return a_, b_

    txt_files = []
    for base_file in [a_, b_]:
        if base_file is not None and base_file.endswith(".txt"):
            txt_files.append(base_file)

    directory = os.path.dirname(a_)

    for txt in txt_files:
        with open(txt, 'r') as file:
            first_line = file.readline()

        download_manager(
            url=first_line.strip(),
            path=directory,
            extension="",
        )

    for f in find_files(directory):
        if f.endswith(".zip"):
            unzip_in_folder(f, directory)

    model = None
    index = None
    end_files = find_files(directory)

    for ff in end_files:
        if ff.endswith(".pth"):
            model = ff  # find_files() already returns the joined path
            gr.Info(f"Model found: {ff}")
        if ff.endswith(".index"):
            index = ff
            gr.Info(f"Index found: {ff}")

    if not model:
        gr.Error(f"Model not found in: {end_files}")

    if not index:
        gr.Warning("Index not found")

    return model, index

def get_file_size(url):
    # Only allow Hugging Face downloads, capped at 500 MB.
    if "huggingface" not in url:
        raise ValueError("Only downloads from Hugging Face are allowed")

    try:
        with urllib.request.urlopen(url) as response:
            info = response.info()
            content_length = info.get("Content-Length")

            file_size = int(content_length)
            if file_size > 500000000:
                raise ValueError("The file is too large. You can only download files up to 500 MB in size.")

    except Exception as e:
        raise e

def clear_files(directory):
    # Delayed cleanup of a temporary download directory.
    time.sleep(15)
    print(f"Clearing files: {directory}.")
    shutil.rmtree(directory)

def get_my_model(url_data):
    # Download a model (and optional index) given "model_url" or "model_url, index_url".
    if not url_data:
        return None, None

    if "," in url_data:
        a_, b_ = url_data.split(",")  # split on the comma separating model and index URLs
        a_, b_ = a_.strip().replace("/blob/", "/resolve/"), b_.strip().replace("/blob/", "/resolve/")
    else:
        a_, b_ = url_data.strip().replace("/blob/", "/resolve/"), None

    out_dir = "downloads"
    folder_download = str(random.randint(1000, 9999))
    directory = os.path.join(out_dir, folder_download)
    os.makedirs(directory, exist_ok=True)

    try:
        get_file_size(a_)
        if b_:
            get_file_size(b_)

        valid_url = [a_] if not b_ else [a_, b_]
        for link in valid_url:
            download_manager(
                url=link,
                path=directory,
                extension="",
            )

        for f in find_files(directory):
            if f.endswith(".zip"):
                unzip_in_folder(f, directory)

        model = None
        index = None
        end_files = find_files(directory)

        for ff in end_files:
            if ff.endswith(".pth"):
                model = ff
                gr.Info(f"Model found: {ff}")
            if ff.endswith(".index"):
                index = ff
                gr.Info(f"Index found: {ff}")

        if not model:
            raise ValueError(f"Model not found in: {end_files}")

        if not index:
            gr.Warning("Index not found")
        else:
            index = os.path.abspath(index)

        return os.path.abspath(model), index

    except Exception as e:
        raise e
    finally:
        t = threading.Thread(target=clear_files, args=(directory,))
        t.start()

def convert_now(audio_files, random_tag, converter):
    return converter(
        audio_files,
        random_tag,
        overwrite=False,
        parallel_workers=8
    )

def apply_noisereduce(audio_list):
    print("Applying noise reduction")

    result = []
    for audio_path in audio_list:
        out_path = f'{os.path.splitext(audio_path)[0]}_noisereduce.wav'

        try:
            # Load audio file
            audio = AudioSegment.from_file(audio_path)

            # Convert audio to numpy array
            samples = np.array(audio.get_array_of_samples())

            # Reduce noise
            reduced_noise = nr.reduce_noise(y=samples, sr=audio.frame_rate, prop_decrease=0.6)

            # Convert reduced noise signal back to audio
            reduced_audio = AudioSegment(
                reduced_noise.tobytes(),
                frame_rate=audio.frame_rate,
                sample_width=audio.sample_width,
                channels=audio.channels
            )

            # Save reduced audio to file
            reduced_audio.export(out_path, format="wav")
            result.append(out_path)

        except Exception as e:
            traceback.print_exc()
            print(f"Error in noise reduction: {str(e)}")
            result.append(audio_path)

    return result

def run(audio_files, file_m, file_index):
    # Full inference pipeline: resolve the model files, configure the converter,
    # run voice conversion, then denoise the results.
    if not audio_files:
        raise ValueError("Please provide an audio file.")

    if isinstance(audio_files, str):
        audio_files = [audio_files]

    try:
        duration_base = librosa.get_duration(filename=audio_files[0])
        print("Duration:", duration_base)
    except Exception as e:
        print(e)

    if file_m is not None and file_m.endswith(".txt"):
        file_m, file_index = find_my_model(file_m, file_index)
        print(file_m, file_index)

    random_tag = "USER_" + str(random.randint(10000000, 99999999))

    # Hardcoding pitch algorithm and other parameters
    pitch_alg = "rmvpe+"
    pitch_lvl = 0
    index_inf = 0.75
    r_m_f = 3
    e_r = 0.25
    c_b_p = 0.5

    converter.apply_conf(
        tag=random_tag,
        file_model=file_m,
        pitch_algo=pitch_alg,
        pitch_lvl=pitch_lvl,
        file_index=file_index,
        index_influence=index_inf,
        respiration_median_filtering=r_m_f,
        envelope_ratio=e_r,
        consonant_breath_protection=c_b_p,
        resample_sr=44100 if audio_files[0].endswith('.mp3') else 0,
    )
    time.sleep(0.1)

    result = convert_now(audio_files, random_tag, converter)

    # Always apply noise reduction
    result = apply_noisereduce(result)

    return result

def model_conf():
    # Dropdown of .pth files found in the local "models" directory.
    # Note: values are bare filenames and resolve relative to the working directory.
    model_files = [f for f in os.listdir("models") if f.endswith(".pth")]
    return gr.Dropdown(
        label="Select Model File",
        choices=model_files,
        value=model_files[0] if model_files else None,
        interactive=True,
    )

def index_conf():
    # Dropdown of .index files found in the local "models" directory.
    index_files = [f for f in os.listdir("models") if f.endswith(".index")]
    return gr.Dropdown(
        label="Select Index File",
        choices=index_files,
        value=index_files[0] if index_files else None,
        interactive=True,
    )

def audio_conf():
    return gr.File(
        label="Audio files",
        file_count="multiple",
        type="filepath",
        container=True,
    )

def button_conf():
    return gr.Button(
        "Inference",
        variant="primary",
    )

def output_conf():
    return gr.File(
        label="Result",
        file_count="multiple",
        interactive=False,
    )

def get_gui(theme):
    with gr.Blocks(theme=theme, delete_cache=(3200, 3200)) as app:
        gr.Markdown(title)

        aud = audio_conf()

        model = model_conf()
        indx = index_conf()
        button_base = button_conf()
        output_base = output_conf()

        button_base.click(
            run,
            inputs=[
                aud,
                model,
                indx,
            ],
            outputs=[output_base],
        )

        gr.Examples(
            examples=[
                [
                    ["./test.ogg"],
                    "./model.pth",
                    "./model.index",
                ],
                [
                    ["./example2/test2.ogg"],
                    "./example2/model.pth",
                    "./example2/model.index",
                ],
            ],
            fn=run,
            inputs=[
                aud,
                model,
                indx,
            ],
            outputs=[output_base],
            cache_examples=False,
        )

    return app

if __name__ == "__main__":
    app = get_gui(theme)
    app.queue(default_concurrency_limit=40)
    app.launch(
        max_threads=40,
        share=False,
        show_error=True,
        quiet=False,
        debug=False,
        allowed_paths=["./downloads/"],
    )
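
For context, the conversion path in run() above reduces to three infer_rvc_python calls: construct BaseLoader, configure it with apply_conf(), then call the loader on the audio paths (what convert_now() wraps). A minimal standalone sketch of that flow, assuming the bundled model.pth, model.index, and test.ogg sit in the working directory; only_cpu=True is just a cautious choice for machines without a GPU:

from infer_rvc_python import BaseLoader

# Sketch only: mirrors what run() does, without the Gradio UI or denoising step.
converter = BaseLoader(only_cpu=True, hubert_path=None, rmvpe_path=None)
converter.apply_conf(
    tag="cli_demo",
    file_model="model.pth",      # bundled RVC model (repo root)
    pitch_algo="rmvpe+",
    pitch_lvl=0,
    file_index="model.index",    # bundled index (repo root)
    index_influence=0.75,
    respiration_median_filtering=3,
    envelope_ratio=0.25,
    consonant_breath_protection=0.5,
)
# Same call convert_now() makes; returns the paths of the converted files.
result = converter(["test.ogg"], "cli_demo", overwrite=False, parallel_workers=8)
print(result)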
example2/index_link.txt ADDED
@@ -0,0 +1 @@
https://huggingface.co/sail-rvc/ayaka-jp/resolve/main/model.index?download=true
example2/model_link.txt ADDED
@@ -0,0 +1 @@
https://huggingface.co/sail-rvc/ayaka-jp/resolve/main/model.pth?download=true
example2/test2.ogg ADDED
Binary file (118 kB).
 
example3/test3.wav ADDED
Binary file (12.2 kB).
 
example3/zip_link.txt ADDED
@@ -0,0 +1 @@
https://huggingface.co/Stevenojob/furina_JP/resolve/main/furina_jp.zip?download=true
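
These one-line link files feed the .txt branch of run()/find_my_model() in app.py: the URL on the first line is downloaded next to the .txt via download_manager, any .zip is unpacked flat, and the resulting .pth/.index files are used for conversion. A hypothetical local invocation of that path, assuming the app.py above is importable and its dependencies are installed:

# Sketch only: exercise the .txt "link file" flow the same way the UI would.
from app import run  # app.py shown above

outputs = run(
    ["example2/test2.ogg"],     # sample audio bundled with the Space
    "example2/model_link.txt",  # resolved by find_my_model()
    "example2/index_link.txt",
)
print(outputs)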
gitattributes ADDED
@@ -0,0 +1,36 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
model.index filter=lfs diff=lfs merge=lfs -text
model.index ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:af434a9142b070f7091dcdbbf957b7a01bbc96294add99d186ef1e0d4b226eac
size 83987395
model.pth ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:896fcee182ecdcea6645a691366ac50153bc63015f43c981da135a8cabe2f088
size 55028048
models/metadata.json ADDED
@@ -0,0 +1,152 @@
{
  "title": "US Ascent",
  "author": {
    "name": "mayank dubey",
    "discordUserId": null
  },
  "md5": "b0a77398fc88806fda285f4ecd6a5839",
  "uploadedAt": "2024-07-05T07:38:18.500Z",
  "weightsLink": "https://www.weights.gg/models/cly8dvwn8000cagr4ohpzo4q2",
  "id": "cly8dvwn8000cagr4ohpzo4q2",
  "type": "v2",
  "tags": [],
  "description": "US Ascent",
  "samples": [],
  "files": [
    {
      "name": "model.index",
      "size": 101587779,
      "md5": "61a545d9b5bb380bed408a51708b210e"
    },
    {
      "name": "model.pth",
      "size": 57577722,
      "md5": "b0a77398fc88806fda285f4ecd6a5839"
    }
  ],
  "torchMetadata": {
    "config": {
      "spec_channels": 1025,
      "segment_size": 32,
      "inter_channels": 192,
      "hidden_channels": 192,
      "filter_channels": 768,
      "n_heads": 2,
      "n_layers": 6,
      "kernel_size": 3,
      "p_dropout": 0,
      "resblock": "1",
      "resblock_kernel_sizes": [
        3,
        7,
        11
      ],
      "resblock_dilation_sizes": [
        [
          1,
          3,
          5
        ],
        [
          1,
          3,
          5
        ],
        [
          1,
          3,
          5
        ]
      ],
      "upsample_rates": [
        12,
        10,
        2,
        2
      ],
      "upsample_initial_channel": 512,
      "upsample_kernel_sizes": [
        24,
        20,
        4,
        4
      ],
      "emb_channels": null,
      "spk_embed_dim": 109,
      "gin_channels": 256,
      "sr": 48000
    },
    "f0": 1,
    "version": "v2",
    "extra_info": {
      "config": [
        1025,
        32,
        192,
        192,
        768,
        2,
        6,
        3,
        0,
        "1",
        [
          3,
          7,
          11
        ],
        [
          [
            1,
            3,
            5
          ],
          [
            1,
            3,
            5
          ],
          [
            1,
            3,
            5
          ]
        ],
        [
          12,
          10,
          2,
          2
        ],
        512,
        [
          24,
          20,
          4,
          4
        ],
        109,
        256,
        48000
      ],
      "epoch": 233,
      "step": 6291,
      "sr": 48000,
      "f0": 1,
      "version": "v2",
      "creation_date": "2024-07-05T07:01:09.035229",
      "model_hash": "7c335d1650be63dea6409d741859c137dc6827d9945d6afba08867ab4281e056"
    },
    "epochs": 233,
    "step": 6291,
    "creation_date": "2024-07-05T07:01:09.035229",
    "model_hash": "7c335d1650be63dea6409d741859c137dc6827d9945d6afba08867ab4281e056"
  },
  "url": "https://models.weights.gg/cly79hr6d1211hlpr4obj48ab.zip",
  "urls": [],
  "epochs": 233,
  "originalFileList": [
    "model.index",
    "model.pth"
  ]
}
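
The fields above lend themselves to a quick sanity check of the bundled model before inference; a small sketch that reads only keys present in models/metadata.json:

import json

# Sketch: inspect the bundled model's metadata (file added in this commit).
with open("models/metadata.json") as f:
    meta = json.load(f)

cfg = meta["torchMetadata"]["config"]
print(meta["title"], meta["type"])        # "US Ascent", "v2"
print("target sample rate:", cfg["sr"])   # 48000
print("epochs:", meta["epochs"], "steps:", meta["torchMetadata"]["step"])  # 233 and 6291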
models/model.index ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a9cc11461add817f1964dfac11c37033a20037d28fe2935038d884196f556590
size 101587779
models/model.pth ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:595287a521b83cbd8cf8372e1a8c3200081e88ce8c0b7866ebc9db7e66be9512
size 57577722
packages.txt ADDED
@@ -0,0 +1 @@
ffmpeg
requirements.txt ADDED
@@ -0,0 +1,6 @@
torch==2.2.0
infer-rvc-python==1.1.0
edge-tts
pedalboard
noisereduce
numpy==1.23.5
test.ogg ADDED
Binary file (73.4 kB).