from dataclasses import dataclass
from typing import List, Tuple, Dict
import os
import shutil
import httpx
from openai import OpenAI
import edge_tts
import tempfile
from pydub import AudioSegment
import base64
from pathlib import Path

@dataclass
class ConversationConfig:
    max_words: int = 3000
    prefix_url: str = "https://r.jina.ai/"
    model_name: str = "meta-llama/Llama-3-8b-chat-hf"  # serverless model on Together AI

class URLToAudioConverter:
    def __init__(self, config: ConversationConfig, llm_api_key: str):
        self.config = config
        self.llm_client = OpenAI(api_key=llm_api_key, base_url="https://api.together.xyz/v1")
        self.llm_out = None

    def fetch_text(self, url: str) -> str:
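        """Fetch page content as plain text through the r.jina.ai reader proxy."""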
        if not url:
            raise ValueError("URL cannot be empty")
        full_url = f"{self.config.prefix_url}{url}"
        try:
            response = httpx.get(full_url, timeout=60.0)
            response.raise_for_status()
            return response.text
        except httpx.HTTPError as e:
            raise RuntimeError(f"Failed to fetch URL: {e}")

    def extract_conversation(self, text: str) -> Dict:
        """Versión que parsea 'Host1: texto' -> JSON"""
        if not text:
            raise ValueError("Input text cannot be empty")
        
        prompt = (
            f"{text}\nCreate a podcast dialogue between Host1 and Host2. "
            "Use EXACTLY this format:\n\n"
            "Host1: [message]\nHost2: [reply]\nHost1: [response]..."
        )
        
        try:
            response = self.llm_client.chat.completions.create(
                messages=[{"role": "user", "content": prompt}],
                model=self.config.model_name,
                temperature=0.7
            )
            raw_dialogue = response.choices[0].message.content
            
            # Parse the expected "Speaker: text" format defensively
            conversation = {"conversation": []}
            for line in raw_dialogue.split('\n'):
                if ':' in line:
                    speaker, _, content = line.partition(':')
                    if speaker.strip() in ("Host1", "Host2"):
                        conversation["conversation"].append({
                            "speaker": speaker.strip(),
                            "text": content.strip()
                        })
            
            return conversation
            
        except Exception as e:
            raise RuntimeError(f"Dialogue generation failed: {e}")

    async def text_to_speech(self, conversation_json: Dict, voice_1: str, voice_2: str) -> Tuple[List[str], str]:
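        """Synthesize one MP3 per turn, voice_1 for Host1 and voice_2 for Host2; returns (filenames, folder)."""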
        output_dir = Path(self._create_output_directory())
        filenames = []
        try:
            for i, turn in enumerate(conversation_json["conversation"]):
                filename = output_dir / f"segment_{i}.mp3"
                voice = voice_1 if turn["speaker"] == "Host1" else voice_2
                tmp_path = await self._generate_audio(turn["text"], voice)
                shutil.move(tmp_path, filename)  # move, not rename: the temp dir may be on another filesystem
                filenames.append(str(filename))
            return filenames, str(output_dir)
        except Exception as e:
            raise RuntimeError(f"Text-to-speech failed: {e}")

    async def _generate_audio(self, text: str, voice: str) -> str:
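        """Render text to a temporary MP3 via edge-tts; `voice` may be 'ShortName - label', so only the short name is passed on."""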
        if not text.strip():
            raise ValueError("Text cannot be empty")
        communicate = edge_tts.Communicate(
            text,
            voice.split(" - ")[0],
            rate="+0%",
            pitch="+0Hz"
        )
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
            await communicate.save(tmp_file.name)
            return tmp_file.name

    def _create_output_directory(self) -> str:
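        """Create an output directory with a random URL-safe name and return it."""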
        folder_name = base64.urlsafe_b64encode(os.urandom(8)).decode("utf-8")
        os.makedirs(folder_name, exist_ok=True)
        return folder_name

    def combine_audio_files(self, filenames: List[str]) -> AudioSegment:
        if not filenames:
            raise ValueError("No audio files provided")
        combined = AudioSegment.empty()
        for filename in filenames:
            combined += AudioSegment.from_file(filename, format="mp3")
        return combined

    def add_background_music_and_tags(
        self,
        speech_audio: AudioSegment,
        music_path: str,
        tags_paths: List[str]
    ) -> AudioSegment:
        """Mix looped background music under the speech and drop audio tags into long silences."""
        music = AudioSegment.from_file(music_path).fade_out(2000) - 25  # duck the music by 25 dB
        if len(music) < len(speech_audio):
            # Loop the music enough times to cover the whole speech track
            music = music * (len(speech_audio) // len(music) + 1)
        music = music[:len(speech_audio)]
        mixed = speech_audio.overlay(music)
        
        tag_intro = AudioSegment.from_file(tags_paths[0]) - 10  # intro tag, 10 dB quieter
        tag_trans = AudioSegment.from_file(tags_paths[1]) - 10  # transition tag
        final_audio = tag_intro + mixed
        
        # Find silences: scan in 100 ms steps and flag 500 ms windows quieter
        # than -40 dBFS, merging overlapping windows into single ranges
        silent_ranges = []
        for i in range(0, len(speech_audio) - 500, 100):
            chunk = speech_audio[i:i+500]
            if chunk.dBFS < -40:
                if silent_ranges and i <= silent_ranges[-1][1]:
                    silent_ranges[-1] = (silent_ranges[-1][0], i + 500)
                else:
                    silent_ranges.append((i, i + 500))
        
        for start, end in silent_ranges:
            if (end - start) >= len(tag_trans):
                # Offset by the intro tag length, since final_audio starts with tag_intro
                final_audio = final_audio.overlay(tag_trans, position=len(tag_intro) + start + 50)
        
        return final_audio

    async def url_to_audio(self, url: str, voice_1: str, voice_2: str) -> Tuple[str, str]:
        text = self.fetch_text(url)
        if len(words := text.split()) > self.config.max_words:
            text = " ".join(words[:self.config.max_words])
        conversation = self.extract_conversation(text)
        return await self._process_to_audio(conversation, voice_1, voice_2)

    async def text_to_audio(self, text: str, voice_1: str, voice_2: str) -> Tuple[str, str]:
        conversation = self.extract_conversation(text)
        return await self._process_to_audio(conversation, voice_1, voice_2)

    async def raw_text_to_audio(self, text: str, voice_1: str, voice_2: str) -> Tuple[str, str]:
        conversation = {"conversation": [{"speaker": "Host1", "text": text}]}
        return await self._process_to_audio(conversation, voice_1, voice_2)

    async def _process_to_audio(
        self,
        conversation: Dict,
        voice_1: str,
        voice_2: str
    ) -> Tuple[str, str]:
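        """Shared pipeline: TTS per turn, concatenate, add music and tags, export, clean up segments."""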
        audio_files, folder_name = await self.text_to_speech(conversation, voice_1, voice_2)
        combined = self.combine_audio_files(audio_files)
        final_audio = self.add_background_music_and_tags(
            combined,
            "musica.mp3",               # background music, expected in the working directory
            ["tag.mp3", "tag2.mp3"]     # intro and transition tags
        )
        output_path = os.path.join(folder_name, "podcast_final.mp3")
        final_audio.export(output_path, format="mp3")
        
        for f in audio_files:
            os.remove(f)
            
        text_output = "\n".join(
            f"{turn['speaker']}: {turn['text']}" 
            for turn in conversation["conversation"]
        )
        return output_path, text_output
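

# Minimal usage sketch (assumptions: a Together AI key in the TOGETHER_API_KEY
# environment variable, two valid edge-tts voice names, and the hardcoded asset
# files musica.mp3 / tag.mp3 / tag2.mp3 present in the working directory).
if __name__ == "__main__":
    import asyncio

    converter = URLToAudioConverter(
        ConversationConfig(),
        llm_api_key=os.environ["TOGETHER_API_KEY"],
    )
    podcast_path, transcript = asyncio.run(
        converter.url_to_audio(
            "https://example.com/article",  # hypothetical source URL
            voice_1="en-US-GuyNeural",
            voice_2="en-US-JennyNeural",
        )
    )
    print(f"Podcast saved to: {podcast_path}")
    print(transcript)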