Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -1,27 +1,22 @@
|
|
|
|
1 |
import os
|
2 |
import requests
|
3 |
import torch
|
4 |
import zipfile
|
5 |
from TTS.api import TTS
|
6 |
from pydub import AudioSegment
|
7 |
-
import gradio as gr
|
8 |
|
9 |
-
# Environment variable
|
10 |
os.environ["COQUI_TOS_AGREED"] = "1"
|
11 |
|
12 |
-
# Constants
|
13 |
MODEL_PATH = "tts_models/multilingual/multi-dataset/xtts_v2"
|
14 |
LANGUAGES = ["en", "es", "fr", "de", "it", "pt", "pl", "tr", "ru", "nl", "cs", "ar", "zh-cn", "ja", "hu", "ko", "hi"]
|
15 |
AUDIO_FORMATS = [".wav", ".mp3", ".flac", ".mp4"]
|
16 |
|
17 |
-
# Device setup
|
18 |
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
19 |
print(f"Using device: {device}")
|
20 |
|
21 |
-
# Initialize TTS model
|
22 |
tts = TTS(MODEL_PATH).to(device)
|
23 |
|
24 |
-
# Function to download audio file
|
25 |
def download_audio_file(url):
|
26 |
try:
|
27 |
response = requests.get(url)
|
@@ -34,7 +29,6 @@ def download_audio_file(url):
|
|
34 |
print(f"Error downloading audio file: {e}")
|
35 |
return None
|
36 |
|
37 |
-
# Function to extract zip file
|
38 |
def extract_zip_file(zip_file):
|
39 |
try:
|
40 |
with zipfile.ZipFile(zip_file, 'r') as zip_ref:
|
@@ -44,7 +38,6 @@ def extract_zip_file(zip_file):
|
|
44 |
print(f"Error extracting zip file: {e}")
|
45 |
return False
|
46 |
|
47 |
-
# Function to convert audio file to WAV
|
48 |
def convert_to_wav(input_audio_file):
|
49 |
file_extension = os.path.splitext(input_audio_file)[-1].lower()
|
50 |
if file_extension!= ".wav":
|
@@ -54,13 +47,11 @@ def convert_to_wav(input_audio_file):
|
|
54 |
return "temp.wav"
|
55 |
return input_audio_file
|
56 |
|
57 |
-
# Function to synthesize text
|
58 |
def synthesize_text(text, input_audio_file, language, output_path="./output.wav"):
    """Clone the voice in *input_audio_file* and speak *text* in *language*.

    Args:
        text: The text to synthesize.
        input_audio_file: Path to the reference speaker audio; non-WAV
            inputs are converted via convert_to_wav() first.
        language: Language code passed to the model (e.g. "en").
        output_path: Destination file for the synthesized audio.
            Defaults to "./output.wav", preserving the original behavior.

    Returns:
        The path of the written audio file.
    """
    # XTTS expects a WAV reference clip for speaker conditioning.
    input_audio_file = convert_to_wav(input_audio_file)
    # NOTE(review): concurrent requests sharing the default output_path
    # will overwrite each other's result — pass distinct paths if needed.
    tts.tts_to_file(
        text=text,
        speaker_wav=input_audio_file,
        language=language,
        file_path=output_path,
    )
    return output_path
|
62 |
|
63 |
-
# Function to clone audio
|
64 |
def clone(text, input_file, language, url=None, use_url=False):
|
65 |
if use_url:
|
66 |
if url is None:
|
@@ -81,15 +72,9 @@ def clone(text, input_file, language, url=None, use_url=False):
|
|
81 |
else:
|
82 |
input_audio_file = input_file.name
|
83 |
|
84 |
-
|
85 |
-
audio_cache = {}
|
86 |
-
if input_audio_file not in audio_cache:
|
87 |
-
audio_cache[input_audio_file] = convert_to_wav(input_audio_file)
|
88 |
-
|
89 |
-
output_file_path = synthesize_text(text, audio_cache[input_audio_file], language)
|
90 |
return output_file_path
|
91 |
|
92 |
-
# Gradio interface
|
93 |
iface = gr.Interface(
|
94 |
fn=clone,
|
95 |
inputs=["text", gr.File(label="Input File", file_types=[".zip", *AUDIO_FORMATS]), gr.Dropdown(choices=LANGUAGES, label="Language"), gr.Text(label="URL"), gr.Checkbox(label="Use URL", value=False)],
|
|
|
1 |
+
import gradio as gr
|
2 |
import os
|
3 |
import requests
|
4 |
import torch
|
5 |
import zipfile
|
6 |
from TTS.api import TTS
|
7 |
from pydub import AudioSegment
|
|
|
8 |
|
|
|
9 |
# Coqui XTTS requires explicit TOS agreement; must be set before model init.
os.environ["COQUI_TOS_AGREED"] = "1"
|
10 |
|
|
|
11 |
# Constants
MODEL_PATH = "tts_models/multilingual/multi-dataset/xtts_v2"  # XTTS v2 multilingual voice-cloning model
LANGUAGES = ["en", "es", "fr", "de", "it", "pt", "pl", "tr", "ru", "nl", "cs", "ar", "zh-cn", "ja", "hu", "ko", "hi"]  # language codes offered in the UI dropdown
AUDIO_FORMATS = [".wav", ".mp3", ".flac", ".mp4"]  # reference-audio extensions accepted by the file picker
|
14 |
|
|
|
15 |
# Prefer the first CUDA GPU when available; otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
|
17 |
|
|
|
18 |
# Load the XTTS model once at startup and move it to the chosen device.
tts = TTS(MODEL_PATH).to(device)
|
19 |
|
|
|
20 |
def download_audio_file(url):
|
21 |
try:
|
22 |
response = requests.get(url)
|
|
|
29 |
print(f"Error downloading audio file: {e}")
|
30 |
return None
|
31 |
|
|
|
32 |
def extract_zip_file(zip_file):
|
33 |
try:
|
34 |
with zipfile.ZipFile(zip_file, 'r') as zip_ref:
|
|
|
38 |
print(f"Error extracting zip file: {e}")
|
39 |
return False
|
40 |
|
|
|
41 |
def convert_to_wav(input_audio_file):
|
42 |
file_extension = os.path.splitext(input_audio_file)[-1].lower()
|
43 |
if file_extension!= ".wav":
|
|
|
47 |
return "temp.wav"
|
48 |
return input_audio_file
|
49 |
|
|
|
50 |
def synthesize_text(text, input_audio_file, language, output_path="./output.wav"):
    """Clone the voice in *input_audio_file* and speak *text* in *language*.

    Args:
        text: The text to synthesize.
        input_audio_file: Path to the reference speaker audio; non-WAV
            inputs are converted via convert_to_wav() first.
        language: Language code passed to the model (e.g. "en").
        output_path: Destination file for the synthesized audio.
            Defaults to "./output.wav", preserving the original behavior.

    Returns:
        The path of the written audio file.
    """
    # XTTS expects a WAV reference clip for speaker conditioning.
    input_audio_file = convert_to_wav(input_audio_file)
    # NOTE(review): concurrent requests sharing the default output_path
    # will overwrite each other's result — pass distinct paths if needed.
    tts.tts_to_file(
        text=text,
        speaker_wav=input_audio_file,
        language=language,
        file_path=output_path,
    )
    return output_path
|
54 |
|
|
|
55 |
def clone(text, input_file, language, url=None, use_url=False):
|
56 |
if use_url:
|
57 |
if url is None:
|
|
|
72 |
else:
|
73 |
input_audio_file = input_file.name
|
74 |
|
75 |
+
output_file_path = synthesize_text(text, input_audio_file, language)
|
|
|
|
|
|
|
|
|
|
|
76 |
return output_file_path
|
77 |
|
|
|
78 |
iface = gr.Interface(
|
79 |
fn=clone,
|
80 |
inputs=["text", gr.File(label="Input File", file_types=[".zip", *AUDIO_FORMATS]), gr.Dropdown(choices=LANGUAGES, label="Language"), gr.Text(label="URL"), gr.Checkbox(label="Use URL", value=False)],
|