gnosticdev committed
Commit 1bb4376 · verified · 1 Parent(s): 90d12be

Update app.py

Files changed (1)
  1. app.py +41 -16
app.py CHANGED
@@ -1,6 +1,7 @@
 import gradio as gr
 import os
 import asyncio
+import json
 from conver import ConversationConfig, URLToAudioConverter
 from dotenv import load_dotenv
 from pydub import AudioSegment
@@ -24,7 +25,7 @@ def mezclar_musica_y_tags(audio_path: str, custom_music_path: str = None) -> str
     musica_fondo_loop = musica_fondo_loop[:duracion_podcast]
 
     mezcla = musica_fondo_loop.overlay(podcast_audio)
-    mezcla = mezcla + tag_outro  # tag.mp3 como outro
+    mezcla = mezcla + tag_outro
 
     silent_ranges = []
     for i in range(0, len(podcast_audio) - 500, 100):
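The hunk above touches mezclar_musica_y_tags, which loops background music under the podcast, overlays the voices on top, and appends tag.mp3 as an outro. A minimal pydub sketch of that mixing pattern, assuming hypothetical input paths and an assumed attenuation level; the real file names and levels live elsewhere in app.py:

from pydub import AudioSegment

def mezclar_fondo_sketch(audio_path: str, music_path: str, tag_path: str) -> str:
    # Load the spoken podcast and the background music.
    podcast = AudioSegment.from_file(audio_path)
    musica = AudioSegment.from_file(music_path) - 15  # attenuate the music (assumed level)

    # Repeat the music until it covers the podcast, then trim to the podcast length (pydub lengths are in ms).
    repeticiones = len(podcast) // len(musica) + 1
    fondo = (musica * repeticiones)[:len(podcast)]

    # Overlay the voices on the looped background and append the outro tag.
    mezcla = fondo.overlay(podcast)
    mezcla = mezcla + AudioSegment.from_file(tag_path)

    out_path = "podcast_con_musica.mp3"  # hypothetical output name
    mezcla.export(out_path, format="mp3")
    return out_path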
@@ -39,16 +40,30 @@ def mezclar_musica_y_tags(audio_path: str, custom_music_path: str = None) -> str
     mezcla.export(output_path, format="mp3")
     return output_path
 
-def synthesize_sync(article_url, text_input, language, skip_llm, agregar_musica, custom_music, custom_prompt):
-    return asyncio.run(synthesize(article_url, text_input, language, skip_llm, agregar_musica, custom_music, custom_prompt))
-
-async def synthesize(article_url, text_input, language="en", skip_llm=False, agregar_musica=False, custom_music=None, custom_prompt=None):
+async def generate_dialogue(article_url, text_input, language, skip_llm, custom_prompt):
     if not article_url and not text_input:
         return "Error: Ingresa una URL o texto", None
 
     try:
         config = ConversationConfig(custom_prompt_template=custom_prompt)
         converter = URLToAudioConverter(config, llm_api_key=os.environ.get("TOGETHER_API_KEY"))
+
+        if skip_llm and text_input:
+            dialogue = {"conversation": [{"speaker": "Anfitrión1", "text": text_input}]}
+        elif text_input:
+            dialogue = converter.extract_conversation(text_input)
+        else:
+            dialogue = converter.extract_conversation(await converter.fetch_text(article_url))
+
+        return json.dumps(dialogue, indent=2, ensure_ascii=False), dialogue
+    except Exception as e:
+        return f"Error: {str(e)}", None
+
+async def generate_audio(dialogue_json, language, agregar_musica, custom_music):
+    try:
+        dialogue = json.loads(dialogue_json)
+        config = ConversationConfig()
+        converter = URLToAudioConverter(config, llm_api_key=os.environ.get("TOGETHER_API_KEY"))
 
         voices = {
             "en": ("en-US-AvaMultilingualNeural", "en-US-AndrewMultilingualNeural"),
@@ -56,12 +71,7 @@ async def synthesize(article_url, text_input, language="en", skip_llm=False, agr
         }
         voice1, voice2 = voices.get(language, voices["en"])
 
-        if skip_llm and text_input:
-            output_file, conversation = await converter.raw_text_to_audio(text_input, voice1, voice2, custom_music)
-        elif text_input:
-            output_file, conversation = await converter.text_to_audio(text_input, voice1, voice2, custom_music)
-        else:
-            output_file, conversation = await converter.url_to_audio(article_url, voice1, voice2, custom_music)
+        output_file, conversation = await converter._process_to_audio(dialogue, voice1, voice2, custom_music)
 
         if agregar_musica:
             output_file = mezclar_musica_y_tags(output_file, custom_music)
@@ -70,6 +80,12 @@ async def synthesize(article_url, text_input, language="en", skip_llm=False, agr
     except Exception as e:
         return f"Error: {str(e)}", None
 
+def synthesize_sync(article_url, text_input, language, skip_llm, custom_prompt):
+    return asyncio.run(generate_dialogue(article_url, text_input, language, skip_llm, custom_prompt))
+
+def generate_audio_sync(dialogue_json, language, agregar_musica, custom_music):
+    return asyncio.run(generate_audio(dialogue_json, language, agregar_musica, custom_music))
+
 with gr.Blocks(theme='gstaff/sketch') as demo:
     gr.Markdown("# 🎙 Podcast Converter")
     with gr.Group():
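The wrappers added in this hunk drive the async pipeline functions from plain synchronous callbacks with asyncio.run, one event loop per click. A minimal, self-contained sketch of that pattern; the coroutine body here is illustrative, not the app's:

import asyncio

async def generar(texto: str) -> str:
    # Stand-in for real async work (LLM call, TTS synthesis, etc.).
    await asyncio.sleep(0)
    return texto.upper()

def generar_sync(texto: str) -> str:
    # Runs the coroutine to completion and returns its result to the caller.
    return asyncio.run(generar(texto))

print(generar_sync("hola"))  # HOLA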
@@ -77,21 +93,30 @@ with gr.Blocks(theme='gstaff/sketch') as demo:
         text_input = gr.Textbox(label="Texto manual", lines=5, placeholder="Pega tu texto aquí...")
         language = gr.Dropdown(["en", "es"], label="Idioma", value="en")
         skip_llm = gr.Checkbox(label="🔴 Modo libre (sin filtros LLM)", value=False)
-        agregar_musica = gr.Checkbox(label="🎵 Agregar música de fondo y cortinillas", value=False)
-        custom_music = gr.File(label="Subir música de fondo (opcional)", file_types=[".mp3"])
         custom_prompt = gr.Textbox(
             label="Prompt personalizado (opcional)",
             placeholder="{text}\nCrea un diálogo de podcast en español entre Anfitrión1 y Anfitrión2. Usa un tono informal y genera al menos 6 intercambios por hablante. Devuelve SOLO un objeto JSON: {\"conversation\": [{\"speaker\": \"Anfitrión1\", \"text\": \"...\"}, {\"speaker\": \"Anfitrión2\", \"text\": \"...\"}]}"
         )
-        btn = gr.Button("Generar Podcast", variant="primary")
+        btn_dialogue = gr.Button("Generar Diálogo", variant="primary")
+
+    with gr.Group():
+        dialogue_json = gr.Textbox(label="Diálogo JSON (editable)", lines=10, interactive=True)
+        agregar_musica = gr.Checkbox(label="🎵 Agregar música de fondo y cortinillas", value=False)
+        custom_music = gr.File(label="Subir música de fondo (opcional)", file_types=[".mp3"])
+        btn_audio = gr.Button("Generar Audio", variant="primary")
 
     with gr.Row():
         conv_display = gr.Textbox(label="Conversación", interactive=False, lines=10)
         aud = gr.Audio(label="Audio Generado", interactive=False)
 
-    btn.click(
+    btn_dialogue.click(
         synthesize_sync,
-        inputs=[text_url, text_input, language, skip_llm, agregar_musica, custom_music, custom_prompt],
+        inputs=[text_url, text_input, language, skip_llm, custom_prompt],
+        outputs=[dialogue_json, dialogue_json]
+    )
+    btn_audio.click(
+        generate_audio_sync,
+        inputs=[dialogue_json, language, agregar_musica, custom_music],
         outputs=[conv_display, aud]
     )
 
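Taken together, the UI now works in two stages: "Generar Diálogo" fills the editable dialogue_json textbox, and "Generar Audio" reads it back (after any manual edits) and produces the audio. A self-contained sketch of that two-button Gradio pattern, with illustrative component names and stand-in functions rather than the app's own:

import gradio as gr

def paso1(texto: str) -> str:
    # Step 1: turn the input into an editable intermediate (JSON in the real app).
    return texto.strip()

def paso2(intermedio: str) -> str:
    # Step 2: consume the (possibly edited) intermediate.
    return f"Resultado generado a partir de: {intermedio}"

with gr.Blocks() as demo:
    entrada = gr.Textbox(label="Entrada")
    btn1 = gr.Button("Paso 1")
    intermedio = gr.Textbox(label="Intermedio (editable)", interactive=True)
    btn2 = gr.Button("Paso 2")
    salida = gr.Textbox(label="Salida")

    btn1.click(paso1, inputs=[entrada], outputs=[intermedio])
    btn2.click(paso2, inputs=[intermedio], outputs=[salida])

demo.launch()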