import gradio as gr
import moviepy.editor as mp
import numpy as np
import librosa
import matplotlib.pyplot as plt
import io
from PIL import Image  # required for Image.open() inside the frame renderer
def audio_to_video(audio_file, image_file, effect_type="waveform"):
    try:
        # Load the audio
        y, sr = librosa.load(audio_file)
        duration = librosa.get_duration(y=y, sr=sr)

        # Load the image and hold it for the full length of the audio
        img_clip = mp.ImageClip(image_file).set_duration(duration)

        # Generate the visual effect (waveform)
        if effect_type == "waveform":
            audio_envelope = np.abs(y)  # amplitude envelope of the audio
            audio_envelope = (audio_envelope / np.max(audio_envelope)) * (img_clip.h / 2)  # scale to half the image height
            def make_frame_rgba(t):
                # Render the waveform around time t as an RGBA image matching the photo's size
                fig, ax = plt.subplots(figsize=(img_clip.w / 100, img_clip.h / 100), dpi=100)
                fig.subplots_adjust(left=0, bottom=0, right=1, top=1)  # full-bleed axes so every frame has identical dimensions
                ax.set_xlim(0, duration)
                ax.set_ylim(-img_clip.h / 2, img_clip.h / 2)
                ax.axis('off')
                # Take a +/- 0.1 s slice of the envelope centred on the current time
                time_index = int(t * sr)
                wave_slice = audio_envelope[max(0, time_index - sr // 10):min(len(audio_envelope), time_index + sr // 10)]
                ax.plot(np.linspace(t - 0.1, t + 0.1, len(wave_slice)), wave_slice - img_clip.h / 4, color='red')
                ax.plot(np.linspace(t - 0.1, t + 0.1, len(wave_slice)), -wave_slice + img_clip.h / 4, color='red')
                buf = io.BytesIO()
                fig.savefig(buf, format='png', transparent=True)  # keep the figure background transparent for compositing
                plt.close(fig)
                buf.seek(0)  # rewind the buffer before PIL reads it
                return np.array(Image.open(buf))  # RGBA array

            # Split the RGBA frames into a colour clip plus an alpha mask so only the waveform overlays the image
            effect_clip = mp.VideoClip(lambda t: make_frame_rgba(t)[:, :, :3], duration=duration).set_fps(24)
            mask_clip = mp.VideoClip(lambda t: make_frame_rgba(t)[:, :, 3] / 255.0, ismask=True, duration=duration)
            effect_clip = effect_clip.set_mask(mask_clip)
            final_clip = mp.CompositeVideoClip([img_clip, effect_clip.set_pos("center")])
        else:
            return "Error: unsupported effect."

        # Add the original audio track to the video
        final_clip = final_clip.set_audio(mp.AudioFileClip(audio_file))
        output_path = "output.mp4"
        final_clip.write_videofile(output_path, fps=24, codec="libx264", audio_codec="aac")
        return output_path
    except Exception as e:
        return f"Error: {str(e)}"
# Gradio interface
iface = gr.Interface(
    fn=audio_to_video,
    inputs=[
        gr.Audio(type="filepath", label="Upload Audio (WAV/MP3)"),
        gr.Image(type="filepath", label="Upload Image"),
        gr.Radio(["waveform"], value="waveform", label="Visual Effect")
    ],
    outputs=gr.Video(label="Generated Video"),
    title="Audio + Image → Video with Synchronized Effect",
    description="Upload an audio file and an image to generate a video with a synchronized visual effect (waveform)."
)
if __name__ == "__main__":
    iface.queue().launch()
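
# Example usage (hypothetical local smoke test that bypasses the Gradio UI;
# "sample.wav" and "cover.png" are placeholder files you would provide yourself,
# not assets shipped with this Space):
#   result = audio_to_video("sample.wav", "cover.png", effect_type="waveform")
#   print(result)  # "output.mp4" on success, otherwise an "Error: ..." string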