from dataclasses import dataclass
from typing import List, Tuple, Dict
import os
import json
import httpx
from openai import OpenAI
import edge_tts
import tempfile
from pydub import AudioSegment
import base64
from pathlib import Path
import time
from threading import Thread


@dataclass
class ConversationConfig:
    max_words: int = 3000
    prefix_url: str = "https://r.jina.ai/"
    model_name: str = "meta-llama/Llama-3-8b-chat-hf"


class URLToAudioConverter:
    def __init__(self, config: ConversationConfig, llm_api_key: str):
        self.config = config
        self.llm_client = OpenAI(api_key=llm_api_key, base_url="https://api.together.xyz/v1")
        self.llm_out = None
        self._start_cleaner()  # Start the automatic cleanup thread

    def _start_cleaner(self, max_age_hours: int = 24):
        """Background thread that periodically deletes old generated audio files."""
        def cleaner():
            while True:
                now = time.time()
                for root, _, files in os.walk("."):
                    for file in files:
                        if file.endswith((".mp3", ".wav")):  # Formats to clean up
                            filepath = os.path.join(root, file)
                            try:
                                file_age = now - os.path.getmtime(filepath)
                                if file_age > max_age_hours * 3600:
                                    os.remove(filepath)
                            except OSError:
                                continue
                time.sleep(3600)  # Check once per hour
        Thread(target=cleaner, daemon=True).start()
    # ... [ALL ORIGINAL METHODS REMAIN UNCHANGED FROM HERE ON] ...
    # fetch_text, extract_conversation, text_to_speech, etc.
    # ...

    # add_background_music_and_tags with the missing parenthesis fixed (no other changes)
    def add_background_music_and_tags(
        self,
        speech_audio: AudioSegment,
        music_path: str,
        tags_paths: List[str]
    ) -> AudioSegment:
        # Background music: 2 s fade-out, lowered by 25 dB so it sits under the speech
        music = AudioSegment.from_file(music_path).fade_out(2000) - 25
        if len(music) < len(speech_audio):
            music = music * ((len(speech_audio) // len(music)) + 1)  # Fixed: closing parenthesis added
        music = music[:len(speech_audio)]
        mixed = speech_audio.overlay(music)
        # Audio tags: intro jingle at the start, transition jingle over silent gaps
        tag_intro = AudioSegment.from_file(tags_paths[0]) - 10
        tag_trans = AudioSegment.from_file(tags_paths[1]) - 10
        final_audio = tag_intro + mixed
        # Scan the speech in 500 ms windows (100 ms step) for silence below -40 dBFS
        silent_ranges = []
        for i in range(0, len(speech_audio) - 500, 100):
            chunk = speech_audio[i:i + 500]
            if chunk.dBFS < -40:
                silent_ranges.append((i, i + 500))
        # Drop the transition tag into gaps long enough to hold it
        for start, end in reversed(silent_ranges):
            if (end - start) >= len(tag_trans):
                final_audio = final_audio.overlay(tag_trans, position=start + 50)
        return final_audio
    # ... [THE REST OF THE METHODS (url_to_audio, text_to_audio, etc.) UNCHANGED] ...
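

# Minimal usage sketch (illustrative only): builds the converter and mixes a
# pre-generated speech track with background music and tags. The environment
# variable name and the local file names below are assumptions for the example,
# not part of the original module.
if __name__ == "__main__":
    converter = URLToAudioConverter(ConversationConfig(), os.environ["TOGETHER_API_KEY"])
    speech = AudioSegment.from_file("speech.mp3")  # hypothetical pre-generated speech track
    podcast = converter.add_background_music_and_tags(
        speech, "music.mp3", ["tag_intro.mp3", "tag_transition.mp3"]
    )
    podcast.export("final_podcast.mp3", format="mp3")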