Update app.py
app.py CHANGED
@@ -97,7 +97,7 @@ def wipe_npz_file(folder_path):
     print("YO")

 def split_process(audio, chosen_out_track):
-    gr.Info("Cleaning your audio sample")
+    gr.Info("Cleaning your audio sample...")
     os.makedirs("out", exist_ok=True)
     write('test.wav', audio[0], audio[1])
     os.system("python3 -m demucs.separate -n mdx_extra_q -j 4 test.wav -o out")
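A note on the split_process context above: the demucs command separates test.wav into stems. Below is a minimal sketch of how the requested stem could then be located on disk, assuming demucs' default output layout of out/<model>/<track>/<stem>.wav; the helper pick_stem is illustrative and not part of app.py.

import os

def pick_stem(chosen_out_track, model="mdx_extra_q", track="test"):
    # e.g. out/mdx_extra_q/test/vocals.wav under demucs' default layout (assumption)
    stem_path = os.path.join("out", model, track, f"{chosen_out_track}.wav")
    if not os.path.isfile(stem_path):
        raise FileNotFoundError(f"demucs did not produce {stem_path}")
    return stem_path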
@@ -123,7 +123,11 @@ def update_selection(selected_state: gr.SelectData):


 def infer(prompt, input_wav_file, clean_audio, hidden_numpy_audio):
-
+    print("""
+    ─────
+    NEW INFERENCE:
+    ───────
+    """)
     if clean_audio is True :
         print("We want to clean audio sample")
         # Extract the file name without the extension
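Several of the additions in this commit (the "Cleaning your audio sample..." message above and the "Preparing final waveform video ..." / "Generating audio from prompt..." messages below) use gr.Info, which surfaces a transient toast in the Gradio UI while a handler runs. A standalone sketch of the pattern, separate from app.py; queue() is enabled here on the assumption that status messages are delivered through the event queue.

import time
import gradio as gr

def slow_task():
    gr.Info("Working on it...")  # transient toast shown in the browser
    time.sleep(2)
    return "done"

with gr.Blocks() as demo:
    btn = gr.Button("Run")
    out = gr.Textbox()
    btn.click(slow_task, outputs=out)

demo.queue().launch()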
@@ -184,12 +188,17 @@ def infer(prompt, input_wav_file, clean_audio, hidden_numpy_audio):
     # Print the contents
     for item in contents:
         print(item)
-
+    gr.Info("Preparing final waveform video ...")
     tts_video = gr.make_waveform(audio="output.wav")
     print("FINISHED")
     return "output.wav", tts_video, gr.update(value=f"bark_voices/{file_name}/{contents[1]}", visible=True), gr.Group.update(visible=True), destination_path

 def infer_from_c(prompt, c_name):
+    print("""
+    ─────
+    NEW INFERENCE:
+    ───────
+    """)
     print(f"USING VOICE LIBRARY: {c_name}")
     # Split the text into sentences based on common punctuation marks
     sentences = re.split(r'(?<=[.!?])\s+', prompt)
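gr.make_waveform, which both infer and infer_from_c call right after the new "Preparing final waveform video ..." notification, renders an MP4 with an animated waveform for the given audio file and returns the video's path. A quick illustration, assuming output.wav already exists in the working directory:

import gradio as gr

# Renders an animated-waveform MP4 for output.wav and returns the video's file path,
# which is what the handlers hand back as the tts_video output.
video_path = gr.make_waveform(audio="output.wav")
print(video_path)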
@@ -200,11 +209,13 @@ def infer_from_c(prompt, c_name):
     # Join the selected sentences back into a single string
     limited_prompt = ' '.join(first_nb_sentences)

+    gr.Info(f"Generating audio from prompt with {c_name} ;)")
     tts.tts_to_file(text=limited_prompt,
                     file_path="output.wav",
                     voice_dir="examples/library/",
                     speaker=f"{c_name}")
-
+
+    gr.Info("Preparing final waveform video ...")
     tts_video = gr.make_waveform(audio="output.wav")
     print("FINISHED")
     return "output.wav", tts_video, gr.update(value=f"examples/library/{c_name}/{c_name}.npz", visible=True), gr.Group.update(visible=True)
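The surrounding infer_from_c code splits the prompt on whitespace that follows ., ! or ? (the lookbehind keeps the punctuation attached to its sentence), keeps the first few sentences, and rejoins them before synthesis. An isolated sketch of that trimming step; the 2-sentence cap is an example value, not necessarily the one app.py uses.

import re

prompt = "Hello there! This is sentence two. And a third one? Plus a fourth."
sentences = re.split(r'(?<=[.!?])\s+', prompt)  # split after sentence-ending punctuation
first_nb_sentences = sentences[:2]              # keep only the first N sentences
limited_prompt = ' '.join(first_nb_sentences)
print(limited_prompt)                           # -> "Hello there! This is sentence two."

The trimmed prompt is then passed to tts.tts_to_file on the tts object configured elsewhere in app.py, with voice_dir and speaker selecting the stored voice from the library folder.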