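"""Gradio demo for Auralis text-to-speech.

Converts text or ebook files to speech, cloning the voice from uploaded
reference audio or a microphone recording. Long inputs are split into
fixed-size word chunks, synthesized chunk by chunk, and concatenated into
a single wav file with ffmpeg.

External tools invoked via subprocess (assumed to be on PATH):
  * Calibre's ebook-convert, for ebook-to-text conversion
  * ffmpeg, for concatenating the per-chunk wav files

Run this file directly and open http://localhost:7860 to use the UI.
"""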
import base64
import hashlib
import shutil
import subprocess
import time
import uuid
from pathlib import Path
from typing import List, Optional, Tuple

import gradio as gr
import numpy as np
import torch
import torchaudio

from auralis import TTS, TTSRequest, TTSOutput, AudioPreprocessingConfig, setup_logger

logger = setup_logger(__file__)

tts = TTS()
model_path = "AstraMindAI/xttsv2"  # change this if you have a different model
gpt_model = "AstraMindAI/xtts2-gpt"
try:
    tts = tts.from_pretrained(model_path, gpt_model=gpt_model)
    logger.info(f"Successfully loaded model {model_path}")
except Exception as e:
    logger.error(f"Failed to load model: {e}. Ensure that the model exists at {model_path}")

# Create a temporary directory to store short-named files
temp_dir = Path("/tmp/auralis")
temp_dir.mkdir(exist_ok=True)

def convert_ebook_to_txt(input_path: str) -> str:
    """
    Convert any ebook format to txt using calibre's ebook-convert
    Returns the path to the converted txt file
    """
    output_path = str(temp_dir / f"{uuid.uuid4().hex[:8]}.txt")
    try:
        subprocess.run(['ebook-convert', input_path, output_path], 
                      check=True, capture_output=True, text=True)
        return output_path
    except subprocess.CalledProcessError as e:
        logger.error(f"Conversion failed: {e.stderr}")
        raise RuntimeError(f"Failed to convert ebook: {e.stderr}")

def shorten_filename(original_path: str) -> str:
    """Copies the given file to a temporary directory with a shorter, random filename."""
    ext = Path(original_path).suffix
    short_name = "file_" + uuid.uuid4().hex[:8] + ext
    short_path = temp_dir / short_name
    shutil.copyfile(original_path, short_path)
    return str(short_path)

def text_from_file(file_path: str) -> str:
    """Read text from a file, converting if necessary."""
    file_ext = Path(file_path).suffix.lower()
    
    if file_ext == '.txt':
        with open(file_path, 'r', encoding='utf-8') as f:
            return f.read()
    else:
        # Convert other formats to txt first
        txt_path = convert_ebook_to_txt(file_path)
        with open(txt_path, 'r', encoding='utf-8') as f:
            return f.read()

def clone_voice(audio_path: str):
    """Clone a voice from an audio path."""
    audio_short_path = shorten_filename(audio_path)
    with open(audio_short_path, "rb") as f:
        audio_data = base64.b64encode(f.read()).decode('utf-8')
    return audio_data
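
# NOTE: clone_voice is currently unused; the demo below passes reference audio
# directly to TTSRequest via its speaker_files argument instead.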

def chunk_text(text: str, max_words: int = 300) -> List[str]:
    """
    Splits the input text into chunks with a maximum of `max_words` per chunk.
    """
    words = text.split()
    chunks = []
    for i in range(0, len(words), max_words):
        chunk = ' '.join(words[i:i + max_words])
        chunks.append(chunk)
    return chunks
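
# Example: chunk_text("the quick brown fox jumps", max_words=2)
# -> ["the quick", "brown fox", "jumps"]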

def generate_audio_from_chunks(
    chunks: List[str],
    ref_audio_files: List[str],
    speed: float,
    enhance_speech: bool,
    temperature: float,
    top_p: float,
    top_k: int,
    repetition_penalty: float,
    language: str
) -> Tuple[Optional[str], str]:
    """
    Generates audio for each text chunk and combines them into a single audio file.
    Returns the path to the combined audio file and a log message.
    """
    audio_files = []
    log_messages = ""

    for idx, chunk in enumerate(chunks):
        result, log = process_text_and_generate(
            chunk, ref_audio_files, speed, enhance_speech, temperature,
            top_p, top_k, repetition_penalty, language
        )
        if result:
            sample_rate, audio_array = result
            # Save audio array to temp file
            audio_path = temp_dir / f"chunk_{uuid.uuid4().hex[:8]}_{idx}.wav"
            audio_tensor = torch.from_numpy(audio_array)
            torchaudio.save(str(audio_path), audio_tensor.unsqueeze(0), sample_rate)
            audio_files.append(str(audio_path))
            log_messages += f"✅ Generated audio for chunk {idx + 1}/{len(chunks)}\n"
        else:
            logger.error(f"Failed to generate audio for chunk {idx}: {log}")
            log_messages += f"❌ Failed to generate audio for chunk {idx + 1}: {log}\n"
            return None, log_messages

    # Create a list file for ffmpeg
    list_file = temp_dir / f"list_{uuid.uuid4().hex[:8]}.txt"
    with open(list_file, 'w') as f:
        for audio_file in audio_files:
            f.write(f"file '{audio_file}'\n")

    # Define the output combined audio path
    combined_audio_path = temp_dir / f"combined_{uuid.uuid4().hex[:8]}.wav"

    try:
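        # ffmpeg's concat demuxer joins the listed wav files without re-encoding
        # ('-c copy'); '-safe 0' allows the absolute paths used in the list file.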
        subprocess.run(
            [
                'ffmpeg', '-y', '-f', 'concat', '-safe', '0',
                '-i', str(list_file),
                '-c', 'copy',
                str(combined_audio_path)
            ],
            check=True,
            capture_output=True,
            text=True
        )
        log_messages += "✅ Successfully combined all audio chunks."
        return str(combined_audio_path), log_messages
    except subprocess.CalledProcessError as e:
        logger.error(f"Failed to combine audio files: {e.stderr}")
        log_messages += f"❌ Failed to combine audio files: {e.stderr}"
        return None, log_messages

def process_text_and_generate(
    input_text: str,
    ref_audio_files: List[str],
    speed: float,
    enhance_speech: bool,
    temperature: float,
    top_p: float,
    top_k: int,
    repetition_penalty: float,
    language: str
) -> Tuple[Optional[Tuple[int, np.ndarray]], str]:
    """Process text and generate audio."""
    log_messages = ""
    if not ref_audio_files:
        log_messages += "Please provide at least one reference audio!\n"
        return None, log_messages

    # Use up to five reference audio files as speaker references
    speaker_files = ref_audio_files[:5]

    request = TTSRequest(
        text=input_text,
        speaker_files=speaker_files,
        stream=False,
        enhance_speech=enhance_speech,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
        language=language,
    )

    try:
        with torch.no_grad():
            output = tts.generate_speech(request)
            if output:
                if speed != 1:
                    output.change_speed(speed)
                log_messages += "✅ Successfully generated audio\n"
                return (output.sample_rate, output.array), log_messages
            else:
                log_messages += "❌ No output was generated. Check that the model was correctly loaded\n"
                return None, log_messages
    except Exception as e:
        logger.error(f"Error: {e}")
        log_messages += f"❌ An error occurred: {e}\n"
        return None, log_messages

def build_gradio_ui():
    """Builds and launches the Gradio UI for Auralis."""
    with gr.Blocks(title="Auralis TTS Demo", theme="soft") as ui:
        gr.Markdown(
            """
            # Auralis Text-to-Speech Demo 🌌
            Convert text or ebooks to speech with advanced voice cloning and enhancement.
            """
        )

        with gr.Tab("File to Speech"):
            with gr.Row():
                with gr.Column():
                    file_input = gr.File(
                        label="Upload Book/Text File", 
                        file_types=[
                            ".txt", ".epub", ".mobi", ".azw3", ".fb2", 
                            ".htmlz", ".lit", ".pdb", ".pdf", ".rtf"
                        ]
                    )
                    ref_audio_files = gr.Files(
                        label="Reference Audio Files", 
                        file_types=["audio"]
                    )
                    with gr.Accordion("Advanced settings", open=False):
                        speed = gr.Slider(
                            label="Playback speed", 
                            minimum=0.5, 
                            maximum=2.0, 
                            value=1.0, 
                            step=0.1
                        )
                        enhance_speech = gr.Checkbox(
                            label="Enhance Reference Speech", 
                            value=False
                        )
                        temperature = gr.Slider(
                            label="Temperature", 
                            minimum=0.5, 
                            maximum=1.0, 
                            value=0.75, 
                            step=0.05
                        )
                        top_p = gr.Slider(
                            label="Top P", 
                            minimum=0.5, 
                            maximum=1.0, 
                            value=0.85, 
                            step=0.05
                        )
                        top_k = gr.Slider(
                            label="Top K", 
                            minimum=0, 
                            maximum=100, 
                            value=50, 
                            step=10
                        )
                        repetition_penalty = gr.Slider(
                            label="Repetition penalty", 
                            minimum=1.0, 
                            maximum=10.0, 
                            value=5.0, 
                            step=0.5
                        )
                        language = gr.Dropdown(
                            label="Target Language", 
                            choices=[
                                "en", "es", "fr", "de", "it", "pt", "pl", "tr", "ru",
                                "nl", "cs", "ar", "zh-cn", "hu", "ko", "ja", "hi", "auto",
                            ], 
                            value="auto"
                        )
                    generate_button = gr.Button("Generate Speech")
                with gr.Column():
                    audio_output = gr.Audio(label="Generated Audio")
                    log_output = gr.Textbox(label="Log Output", lines=10)

            def process_file_and_generate(
                file_input, ref_audio_files, speed, enhance_speech,
                temperature, top_p, top_k, repetition_penalty, language
            ):
                if not file_input:
                    return None, "❌ Please provide an input file!"

                try:
                    # Convert input file to text
                    input_text = text_from_file(file_input.name)
                    
                    # Chunk the text
                    chunks = chunk_text(input_text, max_words=300)
                    
                    # Generate audio from chunks and combine
                    combined_audio_path, log = generate_audio_from_chunks(
                        chunks, ref_audio_files, speed, enhance_speech, temperature, top_p,
                        top_k, repetition_penalty, language
                    )
                    
                    if combined_audio_path:
                        # Load the combined audio; Gradio expects (sample_rate, array)
                        # with shape (samples,) or (samples, channels), so transpose
                        waveform, sr = torchaudio.load(combined_audio_path)
                        return (sr, waveform.numpy().T), log
                    else:
                        return None, log
                except Exception as e:
                    logger.error(f"Error processing file: {e}")
                    return None, f"❌ Error processing file: {str(e)}"

            generate_button.click(
                process_file_and_generate,
                inputs=[
                    file_input, ref_audio_files, speed, enhance_speech,
                    temperature, top_p, top_k, repetition_penalty, language
                ],
                outputs=[audio_output, log_output],
            )

        with gr.Tab("Clone With Microphone"):
            with gr.Row():
                with gr.Column():
                    file_input_mic = gr.File(
                        label="Upload Book/Text File",
                        file_types=[
                            ".txt", ".epub", ".mobi", ".azw3", ".fb2",
                            ".htmlz", ".lit", ".pdb", ".pdf", ".rtf"
                        ]
                    )
                    mic_ref_audio = gr.Audio(
                        label="Record Reference Audio",
                        source="microphone",
                        type="numpy"
                    )

                    with gr.Accordion("Advanced settings", open=False):
                        speed_mic = gr.Slider(
                            label="Playback speed",
                            minimum=0.5,
                            maximum=2.0,
                            value=1.0,
                            step=0.1
                        )
                        enhance_speech_mic = gr.Checkbox(
                            label="Enhance Reference Speech",
                            value=True
                        )
                        temperature_mic = gr.Slider(
                            label="Temperature",
                            minimum=0.5,
                            maximum=1.0,
                            value=0.75,
                            step=0.05
                        )
                        top_p_mic = gr.Slider(
                            label="Top P",
                            minimum=0.5,
                            maximum=1.0,
                            value=0.85,
                            step=0.05
                        )
                        top_k_mic = gr.Slider(
                            label="Top K",
                            minimum=0,
                            maximum=100,
                            value=50,
                            step=10
                        )
                        repetition_penalty_mic = gr.Slider(
                            label="Repetition penalty",
                            minimum=1.0,
                            maximum=10.0,
                            value=5.0,
                            step=0.5
                        )
                        language_mic = gr.Dropdown(
                            label="Target Language",
                            choices=[
                                "en", "es", "fr", "de", "it", "pt", "pl", "tr", "ru",
                                "nl", "cs", "ar", "zh-cn", "hu", "ko", "ja", "hi", "auto",
                            ],
                            value="auto"
                        )
                    generate_button_mic = gr.Button("Generate Speech")
                with gr.Column():
                    audio_output_mic = gr.Audio(label="Generated Audio")
                    log_output_mic = gr.Textbox(label="Log Output", lines=10)

            def process_mic_and_generate(
                file_input, mic_ref_audio, speed_mic, enhance_speech_mic,
                temperature_mic, top_p_mic, top_k_mic, repetition_penalty_mic, language_mic
            ):
                if mic_ref_audio is None:
                    return None, "❌ Please record a reference audio clip!"
                if not file_input:
                    return None, "❌ Please provide an input file!"

                try:
                    # Convert input file to text
                    input_text = text_from_file(file_input.name)

                    # Save the microphone recording to a temporary wav file
                    mic_hash = hashlib.sha1(str(time.time()).encode("utf-8")).hexdigest()[:10]
                    output_path = temp_dir / f"mic_{mic_hash}.wav"

                    # Gradio returns microphone audio as (sample_rate, numpy_array)
                    if isinstance(mic_ref_audio, tuple):
                        mic_sr, mic_waveform = mic_ref_audio
                        mic_waveform = np.asarray(mic_waveform)
                        # Normalize integer PCM to float32 in [-1, 1] before saving
                        if np.issubdtype(mic_waveform.dtype, np.integer):
                            mic_waveform = mic_waveform.astype(np.float32) / np.iinfo(mic_waveform.dtype).max
                        else:
                            mic_waveform = mic_waveform.astype(np.float32)
                        torch_audio = torch.from_numpy(mic_waveform)
                        # torchaudio.save expects a (channels, samples) tensor
                        if torch_audio.ndim == 1:
                            torch_audio = torch_audio.unsqueeze(0)
                        else:
                            torch_audio = torch_audio.T
                        torchaudio.save(str(output_path), torch_audio, mic_sr)
                    else:
                        logger.error("Invalid microphone audio format.")
                        return None, "❌ Invalid microphone audio format."

                    # Use the saved microphone recording as the reference audio
                    ref_audio_files = [str(output_path)]

                    # Chunk the text
                    chunks = chunk_text(input_text, max_words=300)

                    # Generate audio from chunks and combine
                    combined_audio_path, log = generate_audio_from_chunks(
                        chunks, ref_audio_files, speed_mic, enhance_speech_mic,
                        temperature_mic, top_p_mic, top_k_mic, repetition_penalty_mic,
                        language_mic
                    )

                    if combined_audio_path:
                        # Load the combined audio; transpose to (samples, channels) for Gradio
                        waveform, sr = torchaudio.load(combined_audio_path)
                        return (sr, waveform.numpy().T), log
                    else:
                        return None, log
                except Exception as e:
                    logger.error(f"Error processing input: {e}")
                    return None, f"❌ Error processing input: {str(e)}"

            generate_button_mic.click(
                process_mic_and_generate,
                inputs=[
                    file_input_mic, mic_ref_audio, speed_mic,
                    enhance_speech_mic, temperature_mic, top_p_mic,
                    top_k_mic, repetition_penalty_mic, language_mic
                ],
                outputs=[audio_output_mic, log_output_mic],
            )

    return ui

if __name__ == "__main__":
    ui = build_gradio_ui()
    ui.launch(debug=True, server_name="0.0.0.0", server_port=7860)