gnosticdev committed on
Commit fd43dfa · verified · 1 Parent(s): ae22f5b

Update app.py

Files changed (1):
  app.py +48 -181
app.py CHANGED
@@ -1,187 +1,54 @@
- from dataclasses import dataclass
- from typing import List, Tuple, Dict
  import os
- import re
- import httpx
- import json
- from openai import OpenAI
- import edge_tts
- import tempfile
- from pydub import AudioSegment
- import base64
- from pathlib import Path
- import shutil  # Import shutil for directory handling

- @dataclass
- class ConversationConfig:
-     max_words: int = 3000
-     prefix_url: str = "https://r.jina.ai/"
-     model_name: str = "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo"

- class URLToAudioConverter:
-     def __init__(self, config: ConversationConfig, llm_api_key: str):
-         self.config = config
-         self.llm_client = OpenAI(api_key=llm_api_key, base_url="https://api.together.xyz/v1")
-         self.llm_out = None

-     def fetch_text(self, url: str) -> str:
-         if not url:
-             raise ValueError("URL cannot be empty")
-
-         full_url = f"{self.config.prefix_url}{url}"
-         try:
-             response = httpx.get(full_url, timeout=60.0)
-             response.raise_for_status()
-             return response.text
-         except httpx.HTTPError as e:
-             raise RuntimeError(f"Failed to fetch URL: {e}")
-
-     def extract_conversation(self, text: str) -> Dict:
-         if not text:
-             raise ValueError("Input text cannot be empty")
-
-         try:
-             # Improved prompt to get consistent JSON
-             prompt = (
-                 f"{text}\nConvert the provided text into a short informative podcast conversation "
-                 f"between two experts. Return ONLY a JSON object with the following structure:\n"
-                 '{"conversation": [{"speaker": "Speaker1", "text": "..."}, {"speaker": "Speaker2", "text": "..."}]}'
-             )
-
-             chat_completion = self.llm_client.chat.completions.create(
-                 messages=[{"role": "user", "content": prompt}],
-                 model=self.config.model_name,
-                 response_format={"type": "json_object"}  # Force JSON output
-             )
-
-             # Robust JSON extraction
-             response_content = chat_completion.choices[0].message.content
-             json_str = response_content.strip()
-
-             # Strip any text surrounding the JSON
-             if not json_str.startswith('{'):
-                 start = json_str.find('{')
-                 if start != -1:
-                     json_str = json_str[start:]
-
-             if not json_str.endswith('}'):
-                 end = json_str.rfind('}')
-                 if end != -1:
-                     json_str = json_str[:end+1]
-
-             return json.loads(json_str)
-         except Exception as e:
-             # Debug: print the model response for diagnosis
-             print(f"Error en extract_conversation: {str(e)}")
-             print(f"Respuesta del modelo: {response_content}")
-             raise RuntimeError(f"Failed to extract conversation: {str(e)}")
-
-     async def text_to_speech(self, conversation_json: Dict, voice_1: str, voice_2: str) -> Tuple[List[str], str]:
-         output_dir = Path(self._create_output_directory())
-         filenames = []

-         try:
-             for i, turn in enumerate(conversation_json["conversation"]):
-                 filename = output_dir / f"output_{i}.mp3"
-                 voice = voice_1 if i % 2 == 0 else voice_2
-
-                 tmp_path, error = await self._generate_audio(turn["text"], voice)
-                 if error:
-                     raise RuntimeError(f"Text-to-speech failed: {error}")
-
-                 os.rename(tmp_path, filename)
-                 filenames.append(str(filename))
-
-             return filenames, str(output_dir)
-         except Exception as e:
-             raise RuntimeError(f"Failed to convert text to speech: {e}")
-
-     async def _generate_audio(self, text: str, voice: str, rate: int = 0, pitch: int = 0) -> Tuple[str, str]:
-         if not text.strip():
-             return None, "Text cannot be empty"
-         if not voice:
-             return None, "Voice cannot be empty"
-
-         voice_short_name = voice.split(" - ")[0]
-         rate_str = f"{rate:+d}%"
-         pitch_str = f"{pitch:+d}Hz"
-         communicate = edge_tts.Communicate(text, voice_short_name, rate=rate_str, pitch=pitch_str)
-
-         with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
-             tmp_path = tmp_file.name
-             await communicate.save(tmp_path)
-
-         return tmp_path, None
-
-     def _create_output_directory(self) -> str:
-         random_bytes = os.urandom(8)
-         folder_name = base64.urlsafe_b64encode(random_bytes).decode("utf-8")
-         os.makedirs(folder_name, exist_ok=True)
-         return folder_name
-
-     def combine_audio_files(self, filenames: List[str], output_file: str) -> None:
-         if not filenames:
-             raise ValueError("No input files provided")
-
-         try:
-             combined = AudioSegment.empty()
-             for filename in filenames:
-                 audio_segment = AudioSegment.from_file(filename, format="mp3")
-                 combined += audio_segment
-
-             combined.export(output_file, format="mp3")
-
-             # Improved, more robust cleanup
-             dir_path = os.path.dirname(filenames[0])

-             # Remove every file in the directory
-             for file in os.listdir(dir_path):
-                 file_path = os.path.join(dir_path, file)
-                 if os.path.isfile(file_path):
-                     try:
-                         os.remove(file_path)
-                     except Exception as e:
-                         print(f"Warning: Could not remove file {file_path}: {str(e)}")
-
-             # Try to remove the directory (not critical if it fails)
-             try:
-                 os.rmdir(dir_path)
-             except OSError as e:
-                 print(f"Info: Could not remove directory {dir_path}: {str(e)}")
-                 # Not critical; the Space can keep running
-
-         except Exception as e:
-             raise RuntimeError(f"Failed to combine audio files: {e}")
-
-     async def url_to_audio(self, url: str, voice_1: str, voice_2: str) -> Tuple[str, str]:
-         text = self.fetch_text(url)
-
-         words = text.split()
-         if len(words) > self.config.max_words:
-             text = " ".join(words[:self.config.max_words])
-
-         conversation_json = self.extract_conversation(text)
-         conversation_text = "\n".join(
-             f"{turn['speaker']}: {turn['text']}" for turn in conversation_json["conversation"]
-         )
-         self.llm_out = conversation_json
-         audio_files, folder_name = await self.text_to_speech(
-             conversation_json, voice_1, voice_2
-         )
-
-         final_output = os.path.join(folder_name, "combined_output.mp3")
-         self.combine_audio_files(audio_files, final_output)
-         return final_output, conversation_text
-
-     async def text_to_audio(self, text: str, voice_1: str, voice_2: str) -> Tuple[str, str]:
-         """Process raw text directly."""
-         conversation_json = self.extract_conversation(text)
-         conversation_text = "\n".join(
-             f"{turn['speaker']}: {turn['text']}" for turn in conversation_json["conversation"]
-         )
-         audio_files, folder_name = await self.text_to_speech(
-             conversation_json, voice_1, voice_2
-         )
-         final_output = os.path.join(folder_name, "combined_output.mp3")
-         self.combine_audio_files(audio_files, final_output)
-         return final_output, conversation_text
 
+ import gradio as gr
  import os
+ import asyncio
+ from conver import ConversationConfig, URLToAudioConverter
+ from dotenv import load_dotenv

+ load_dotenv()

+ async def synthesize(article_url, text_input, language="en"):
+     if not article_url and not text_input:
+         return "Error: Ingresa una URL o texto", None

+     try:
+         config = ConversationConfig()
+         converter = URLToAudioConverter(config, llm_api_key=os.environ.get("TOGETHER_API_KEY"))

+         # Humanized voices
+         voices = {
+             "en": ("en-US-AvaMultilingualNeural", "en-US-AndrewMultilingualNeural"),
+             "es": ("es-ES-AlvaroNeural", "es-ES-ElviraNeural")
+         }
+         voice1, voice2 = voices.get(language, voices["en"])
+
+         if text_input:
+             output_file, conversation = await converter.text_to_audio(text_input, voice1, voice2)
+         else:
+             output_file, conversation = await converter.url_to_audio(article_url, voice1, voice2)

+         return conversation, output_file
+     except Exception as e:
+         return f"Error: {str(e)}", None
+
+ def synthesize_sync(article_url, text_input, language):
+     return asyncio.run(synthesize(article_url, text_input, language))
+
+ with gr.Blocks(theme='gstaff/sketch') as demo:
+     gr.Markdown("# 🎙 Podcast Converter (Human Voices)")
+     with gr.Group():
+         text_url = gr.Textbox(label="URL (opcional)", placeholder="https://...")
+         text_input = gr.Textbox(label="O texto directo", lines=5)
+         language = gr.Dropdown(["en", "es"], label="Idioma", value="en")
+         btn = gr.Button("Generar Podcast", variant="primary")
+
+     with gr.Row():
+         conv_display = gr.Textbox(label="Conversación", interactive=False, lines=10)
+         aud = gr.Audio(label="Audio Generado", interactive=False)
+
+     btn.click(
+         synthesize_sync,
+         inputs=[text_url, text_input, language],
+         outputs=[conv_display, aud]
+     )
+
+ demo.launch()
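
For reference, the Gradio UI in the new app.py is a thin wrapper around synthesize_sync. Below is a minimal sketch of calling it directly, assuming the definitions above are in scope, TOGETHER_API_KEY is available (for example via a .env file read by load_dotenv), and conver.py provides ConversationConfig and URLToAudioConverter as imported; the URL is a hypothetical placeholder.

# Sketch only: exercises synthesize_sync as defined in the app.py above.
conversation, audio_path = synthesize_sync(
    article_url="https://example.com/some-article",  # hypothetical placeholder URL
    text_input="",
    language="es",
)
print(conversation)  # "Speaker1: ... / Speaker2: ..." transcript returned by the converter
print(audio_path)    # path to the combined MP3 produced by combine_audio_files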