Create app.py
app.py
ADDED
@@ -0,0 +1,114 @@
import gradio as gr
import moviepy.editor as mp
import numpy as np
import librosa
from PIL import Image, ImageDraw
import tempfile
import os
import logging

# Logging configuration
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[logging.StreamHandler()]
)
logger = logging.getLogger("audio_to_video")

def generate_video(audio_file, image_file):
    try:
        # 1. Load audio
        y, sr = librosa.load(audio_file)
        duration = librosa.get_duration(y=y, sr=sr)
        logger.info(f"Audio loaded: {duration:.1f} seconds")

        # 2. Load image
        img = Image.open(image_file).convert('RGB')
        img_w, img_h = img.size
        logger.info(f"Image loaded: {img_w}x{img_h}")

        # 3. Analyze audio
        audio_envelope = np.abs(y) / np.max(np.abs(y))  # Normalize to 0..1
        audio_envelope_zoom = audio_envelope * 0.2 + 1.0  # Scale for zoom (1.0x to 1.2x) so the crop never leaves the frame
        audio_envelope_wave = audio_envelope * (img_h // 6)  # Waveform amplitude in pixels

        # 4. Generate frames with zoom and waveform
        def make_frame(t):
            # Sample index in the audio corresponding to this frame
            time_idx = int(t * sr)

            # --- Zoom effect ---
            zoom_factor = audio_envelope_zoom[time_idx] if time_idx < len(audio_envelope_zoom) else 1.0
            new_size = (int(img_w * zoom_factor), int(img_h * zoom_factor))
            zoomed_img = img.resize(new_size, Image.LANCZOS)

            # Crop the enlarged image back to the original size, centered
            x_offset = (new_size[0] - img_w) // 2
            y_offset = (new_size[1] - img_h) // 2
            cropped_img = zoomed_img.crop((
                x_offset,
                y_offset,
                x_offset + img_w,
                y_offset + img_h
            ))

            # --- Draw waveform ---
            frame = ImageDraw.Draw(cropped_img)

            # Vertical position of the waveform (75% of the way down)
            start_y = int(img_h * 0.75)

            # Extract a slice of audio around the current time (roughly +/- 0.1 s)
            start = max(0, time_idx - sr // 10)
            end = min(len(audio_envelope_wave), time_idx + sr // 10)
            wave_slice = audio_envelope_wave[start:end]

            # Draw the wave: top edge left to right, then bottom edge right to left,
            # so the polygon outlines the waveform instead of self-intersecting
            top_points, bottom_points = [], []
            for i, val in enumerate(wave_slice):
                x = int((i / len(wave_slice)) * img_w)
                top_points.append((x, start_y - int(val)))
                bottom_points.append((x, start_y + int(val)))
            points = top_points + bottom_points[::-1]

            if len(points) > 2:
                frame.polygon(points, fill=(255, 0, 0))  # Red (RGB frames do not support alpha)

            return np.array(cropped_img)

        # 5. Create video
        video = mp.VideoClip(make_frame, duration=duration)
        video.fps = 24
        video = video.set_audio(mp.AudioFileClip(audio_file))

        # 6. Save video
        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
            video.write_videofile(
                tmpfile.name,
                codec="libx264",
                audio_codec="aac",
                fps=24,
                logger=None
            )

        logger.info(f"Video saved: {tmpfile.name}")
        return tmpfile.name

    except Exception as e:
        logger.error(f"Critical error: {str(e)}")
        raise gr.Error(f"Error: {str(e)}")

# Gradio interface
iface = gr.Interface(
    fn=generate_video,
    inputs=[
        gr.Audio(type="filepath", label="Audio (WAV/MP3)"),
        gr.Image(type="filepath", label="Background Image")
    ],
    outputs=gr.File(label="Download Video"),
    title="Music Video Generator",
    description="Creates videos with automatic zoom and audio-synced effects"
)

if __name__ == "__main__":
    iface.queue().launch()
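For quick local testing outside the Gradio UI, generate_video can be called directly. A minimal sketch, assuming placeholder input files audio.wav and cover.jpg in the working directory (both names are hypothetical) and a MoviePy version below 2.x, since the moviepy.editor import used above was removed in MoviePy 2:

# Local smoke test for generate_video (hypothetical file names; adjust to real inputs).
# Assumed dependencies: pip install gradio "moviepy<2" librosa numpy pillow
import shutil

output_path = generate_video("audio.wav", "cover.jpg")  # returns the path of a temporary .mp4
shutil.copy(output_path, "output.mp4")                  # keep a copy next to the inputs
print(f"Video written to output.mp4 (temp file: {output_path})")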