feat: Add dynamic image backgrounds and spectrogram opacity
This commit introduces a major feature: users can now add image backgrounds to the generated spectrogram video. It also enhances the visualizer by rendering it semi-transparent whenever a background is present.
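In practice, the feature layers the uploaded images underneath the spectrogram and dims the visualizer so the artwork shows through. A minimal sketch of that layering, assuming the moviepy 2.x API that app.py imports (the names here are illustrative, not the app's actual variables):

```python
from moviepy import CompositeVideoClip, ImageClip, VideoClip

def layer_background(spectrogram: VideoClip, image_path: str,
                     size=(1920, 1080)) -> CompositeVideoClip:
    # Background image stretched over the whole timeline.
    background = ImageClip(image_path).with_duration(spectrogram.duration)
    # The visualizer is dimmed to 50% opacity so the image shows through.
    foreground = spectrogram.with_opacity(0.5)
    # Later list entries render on top of earlier ones.
    return CompositeVideoClip([background, foreground], size=size)
```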
feat: Add custom resolution and improve audio quality
This commit introduces two significant user-facing enhancements: the ability to customize the output video dimensions and a major improvement in audio fidelity for transcoded files.
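The audio-fidelity change amounts to a copy-first strategy: pass the source audio stream through untouched when possible, and only re-encode, at a high bitrate, when the container rejects it. A sketch of the pattern as it appears in the diff below; `write_with_best_audio` is an illustrative helper, not a function in app.py:

```python
def write_with_best_audio(final_clip, out_path: str, fps: int = 1) -> None:
    try:
        # Best case: the source audio stream is copied bit-for-bit.
        final_clip.write_videofile(out_path, codec="libx264",
                                   audio_codec="copy", fps=fps)
    except Exception:
        # Fallback: re-encode to AAC at 320 kbps when copying fails
        # (e.g., FLAC audio cannot normally live in an MP4 container).
        final_clip.write_videofile(out_path, codec="libx264",
                                   audio_codec="aac", audio_bitrate="320k",
                                   fps=fps)
```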
app.py
CHANGED
@@ -9,7 +9,7 @@ import subprocess
 import matplotlib.font_manager as fm
 from typing import Tuple, List, Dict
 from mutagen.flac import FLAC
-from moviepy import CompositeVideoClip, TextClip, VideoClip, AudioFileClip
+from moviepy import CompositeVideoClip, TextClip, VideoClip, AudioFileClip, ImageClip

 # --- Font Scanning and Management ---
 def get_font_display_name(font_path: str) -> Tuple[str, str]:
@@ -128,7 +128,7 @@ SYSTEM_FONTS_MAP, FONT_DISPLAY_NAMES = get_font_data()
 print(f"Scan complete. Found {len(FONT_DISPLAY_NAMES)} available fonts.")


-# --- CUE Sheet Parsing Logic
+# --- CUE Sheet Parsing Logic ---
 def cue_time_to_seconds(time_str: str) -> float:
     try:
         minutes, seconds, frames = map(int, time_str.split(':'))
@@ -160,7 +160,7 @@ def parse_cue_sheet_manually(cue_data: str) -> List[Dict[str, any]]:
     return tracks


-# ---
+# --- FFmpeg Framerate Conversion ---
 def increase_video_framerate(input_path: str, output_path: str, target_fps: int = 24):
     """
     Uses FFmpeg to increase the video's framerate without re-encoding.
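The body of `increase_video_framerate` is unchanged and not shown in this diff. For context, a common way to retime H.264 video without re-encoding is to extract the raw bitstream and remux it while declaring the new framerate on input; a sketch under that assumption (not necessarily the app's exact flags):

```python
import subprocess

def remux_at_fps(input_path: str, output_path: str, target_fps: int = 24) -> None:
    raw_stream = "temp_stream.h264"
    # 1. Copy the H.264 elementary stream out of the container (no re-encode).
    subprocess.run(["ffmpeg", "-y", "-i", input_path,
                    "-map", "0:v:0", "-c:v", "copy", "-f", "h264", raw_stream],
                   check=True)
    # 2. Remux at the target framerate; video and audio are still stream-copied.
    subprocess.run(["ffmpeg", "-y", "-r", str(target_fps), "-i", raw_stream,
                    "-i", input_path, "-map", "0:v", "-map", "1:a?",
                    "-c", "copy", output_path],
                   check=True)
```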
@@ -203,7 +203,9 @@ def increase_video_framerate(input_path: str, output_path: str, target_fps: int

 # --- Main Processing Function ---
 def process_audio_to_video(
-    audio_path: str,
+    audio_path: str, image_paths: List[str],
+    video_width: int, video_height: int,
+    spec_fg_color: str, spec_bg_color: str,
     font_name: str, font_size: int, font_color: str,
     font_bg_color: str, font_bg_alpha: float,
     pos_h: str, pos_v: str
@@ -216,7 +218,8 @@ def process_audio_to_video(
     temp_fps1_path = f"temp_{timestamp}_fps1.mp4"
     final_output_path = f"final_video_{timestamp}_fps24.mp4"

-    WIDTH, HEIGHT
+    WIDTH, HEIGHT = int(video_width), int(video_height)
+    RENDER_FPS = 1     # Render at 1 FPS
     PLAYBACK_FPS = 24  # Final playback framerate

     # --- A robust color parser for hex and rgb() strings ---
@@ -252,17 +255,102 @@ def process_audio_to_video(
     y, sr = librosa.load(audio_path, sr=None, mono=True)
     duration = librosa.get_duration(y=y, sr=sr)

+    # --- Image Processing Logic ---
+    image_clips = []
+    # Check if any images were uploaded.
+    if image_paths and len(image_paths) > 0:
+        print(f"Found {len(image_paths)} images to process.")
+
+        # First, try to parse the CUE sheet from the audio file.
+        tracks = []
+        if audio_path.lower().endswith('.flac'):
+            try:
+                audio_meta = FLAC(audio_path)
+                if 'cuesheet' in audio_meta.tags:
+                    tracks = parse_cue_sheet_manually(audio_meta.tags['cuesheet'][0])
+                    print(f"Successfully parsed {len(tracks)} tracks from CUE sheet.")
+            except Exception as e: print(f"Warning: Could not read or parse CUE sheet: {e}")
+
+        # --- HELPER FUNCTION FOR ROBUST IMAGE CLIPS ---
+        def create_image_layer(img_path, start, dur):
+            """
+            Creates an image layer that fits entirely within the video frame.
+            It scales the image down to fit and centers it on a transparent background.
+            """
+            # This function implements a "contain" scaling mode: the whole image
+            # stays visible inside the frame, centered on a transparent canvas.
+            try:
+                img_clip_raw = ImageClip(img_path)
+
+                # 1. Calculate scaling factor to "contain" the image (fit inside).
+                #    We use min() to find the ratio that requires the most shrinkage,
+                #    ensuring the whole image fits without being cropped.
+                scale_factor = min(WIDTH / img_clip_raw.w, HEIGHT / img_clip_raw.h)

+                # 2. Resize the image so it fits perfectly within the video dimensions.
+                resized_image_clip = img_clip_raw.resized(scale_factor)
+
+                # 3. Create a composite clip to position the resized image on a
+                #    correctly-sized transparent canvas. This is the key to preventing overflow.
+                final_layer = CompositeVideoClip(
+                    [resized_image_clip.with_position("center")],
+                    size=(WIDTH, HEIGHT)
+                )
+
+                # 4. Set the timing on the final composite layer.
+                return final_layer.with_duration(dur).with_start(start)
+            except Exception as e:
+                print(f"Warning: Failed to process image '{img_path}'. Skipping. Error: {e}")
+                return None
+        # --- END OF HELPER FUNCTION ---
+
+        # Mode 1: If CUE tracks match the number of images, align them.
+        if tracks and len(tracks) == len(image_paths):
+            print("Image count matches track count. Aligning images with tracks.")
+            for i, (track, img_path) in enumerate(zip(tracks, image_paths)):
+                start_time = track.get('start_time', 0)
+                # The end time of a track is the start time of the next, or the total duration for the last track.
+                end_time = tracks[i+1].get('start_time', duration) if i + 1 < len(tracks) else duration
+                img_duration = end_time - start_time
+                if img_duration <= 0: continue
+
+                # Create an ImageClip for the duration of the track.
+                clip = create_image_layer(img_path, start_time, img_duration)
+                if clip:
+                    image_clips.append(clip)
+
+        # Mode 2: If no CUE or mismatch, distribute images evenly across the audio duration.
+        else:
+            if tracks: print("Image count does not match track count. Distributing images evenly.")
+            else: print("No CUE sheet found. Distributing images evenly.")
+
+            img_duration = duration / len(image_paths)
+            for i, img_path in enumerate(image_paths):
+                start_time = i * img_duration
+                # Create an ImageClip for a calculated segment of time.
+                clip = create_image_layer(img_path, start_time, img_duration)
+                if clip:
+                    image_clips.append(clip)
+
     # Spectrogram calculation
     N_FFT, HOP_LENGTH, N_BANDS = 2048, 512, 32
     MIN_DB, MAX_DB = -80.0, 0.0
     S_mel = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=N_FFT, hop_length=HOP_LENGTH, n_mels=N_BANDS, fmax=sr/2)
     S_mel_db = librosa.power_to_db(S_mel, ref=np.max)

-    # Frame generation logic
+    # Frame generation logic for the spectrogram
     def frame_generator(t):
-        frame = np.full((HEIGHT, WIDTH, 3), bg_rgb, dtype=np.uint8)
-        for i in range(1, 9):
-            y_pos = int(i * (HEIGHT / 9)); frame[y_pos-1:y_pos, :] = grid_rgb
+        # If images are used as background, the spectrogram's own background should be transparent.
+        # Otherwise, use the selected background color.
+        # Here, we will use a simple opacity setting on the final clip, so we always generate the frame.
+        frame_bg = bg_rgb if not image_clips else (0,0,0)  # Use black if it will be made transparent later
+        frame = np.full((HEIGHT, WIDTH, 3), frame_bg, dtype=np.uint8)
+
+        # Draw the grid lines only if no images are being used.
+        if not image_clips:
+            for i in range(1, 9):
+                y_pos = int(i * (HEIGHT / 9)); frame[y_pos-1:y_pos, :] = grid_rgb
+
         time_idx = int((t / duration) * (S_mel_db.shape[1] - 1))
         bar_width = WIDTH / N_BANDS
         for i in range(N_BANDS):
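To make the `create_image_layer` math concrete: `min()` picks the tighter of the two scale ratios, so the whole image always survives. A worked example with a hypothetical 4000×3000 photo in a 1920×1080 frame:

```python
WIDTH, HEIGHT = 1920, 1080   # video frame
w, h = 4000, 3000            # hypothetical source image

scale_factor = min(WIDTH / w, HEIGHT / h)   # min(0.48, 0.36) = 0.36
scaled = (round(w * scale_factor), round(h * scale_factor))
print(scale_factor, scaled)  # 0.36 (1440, 1080)
# The image fills the frame's height; the 240 px left over on each side
# stays on the transparent canvas, so lower layers show through there.
```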
@@ -279,6 +367,13 @@ def process_audio_to_video(
         return frame

     video_clip = VideoClip(frame_function=frame_generator, duration=duration)
+
+    # --- NEW: Set Spectrogram Opacity ---
+    # If image clips were created, make the spectrogram layer 50% transparent.
+    if image_clips:
+        print("Applying 50% opacity to spectrogram layer.")
+        video_clip = video_clip.with_opacity(0.5)
+
     audio_clip = AudioFileClip(audio_path)

     # CUE Sheet title overlay logic
@@ -286,10 +381,11 @@ def process_audio_to_video(
     tracks = []
     if audio_path.lower().endswith('.flac'):
         try:
-            audio_meta = FLAC(audio_path)
-            if 'cuesheet' in audio_meta.tags:
-                tracks = parse_cue_sheet_manually(audio_meta.tags['cuesheet'][0])
-        except Exception as e: print(f"Warning: Could not read or parse CUE sheet: {e}")
+            audio_meta = FLAC(audio_path)
+            if 'cuesheet' in audio_meta.tags:
+                tracks = parse_cue_sheet_manually(audio_meta.tags['cuesheet'][0])
+        except Exception:
+            pass  # Already handled above

     if tracks:
         font_path = SYSTEM_FONTS_MAP.get(font_name)
@@ -297,26 +393,41 @@ def process_audio_to_video(

         # Use the robust parser for text colors as well
         font_bg_rgb = parse_color_to_rgb(font_bg_color)
-        font_bg_rgba = (*font_bg_rgb, int(font_bg_alpha * 255))

         position = (pos_h.lower(), pos_v.lower())

         print(f"Using font: {font_name}, Size: {font_size}, Position: {position}")
+
+        # Create the RGBA tuple for the background color.
+        # The alpha value is converted from a 0.0-1.0 float to a 0-255 integer.
+        bg_color_tuple = (font_bg_rgb[0], font_bg_rgb[1], font_bg_rgb[2], int(font_bg_alpha * 255))
+
+        # 1. Define a maximum width for the caption. 90% of the video width is a good choice.
+        caption_width = int(WIDTH * 0.9)

         for i, track in enumerate(tracks):
-            start_time = track.get('start_time', 0)
-            end_time = tracks[i+1].get('start_time', duration) if i + 1 < len(tracks) else duration
+            start_time, title = track.get('start_time', 0), track.get('title', 'Unknown Track')
+            end_time = tracks[i+1].get('start_time', duration) if i + 1 < len(tracks) else duration
             text_duration = end_time - start_time
             if text_duration <= 0: continue

-            txt_clip = (TextClip(text=f"{i+1}. {title}", font_size=font_size, color=font_color,
-                                 font=font_path, bg_color=font_bg_rgba)
+            txt_clip = (TextClip(text=f"{i+1}. {title}",
+                                 font_size=font_size,
+                                 color=font_color,
+                                 font=font_path,
+                                 bg_color=bg_color_tuple,
+                                 method='caption',            # <-- Set method to caption
+                                 size=(caption_width, None))  # <-- Provide size for wrapping
                     .with_position(position)
                     .with_duration(text_duration)
                     .with_start(start_time))
             text_clips.append(txt_clip)

-    final_clip = CompositeVideoClip([video_clip] + text_clips).with_audio(audio_clip)
+    # --- Clip Composition ---
+    # The final composition order is important: images at the bottom, then spectrogram, then text.
+    # The base layer is now the list of image clips.
+    final_layers = image_clips + [video_clip] + text_clips
+    final_clip = CompositeVideoClip(final_layers, size=(WIDTH, HEIGHT)).with_audio(audio_clip)

     # Step 1: Render the slow, 1 FPS intermediate file
     print(f"Step 1/2: Rendering base video at {RENDER_FPS} FPS...")
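The switch to `method='caption'` is what makes long CUE titles wrap instead of running off the frame: moviepy breaks the text to fit the supplied width, and passing `None` for the height lets the clip grow vertically as lines are added. An isolated example with illustrative values (moviepy 2.x expects a path to a font file; the path below is an assumption):

```python
from moviepy import TextClip

txt = TextClip(text="1. A Very Long Track Title That Would Otherwise Overflow",
               font="/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",  # any .ttf
               font_size=80, color="white",
               method="caption", size=(int(1920 * 0.9), None))
```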
@@ -324,18 +435,18 @@ def process_audio_to_video(
         # Attempt to copy audio stream directly
         print("Attempting to copy audio stream directly...")
         final_clip.write_videofile(
-            temp_fps1_path, codec="libx264", audio_codec="copy", fps=RENDER_FPS,
+            temp_fps1_path, codec="libx264", audio_codec="copy", fps=RENDER_FPS,
             logger='bar', threads=os.cpu_count(), preset='ultrafast'
         )
         print("Audio stream successfully copied!")
     except Exception:
         # Fallback to AAC encoding if copy fails
-        print("Direct audio copy failed, falling back to AAC encoding...")
+        print("Direct audio copy failed, falling back to high-quality AAC encoding...")
         final_clip.write_videofile(
-            temp_fps1_path, codec="libx264", audio_codec="aac",
-            fps=RENDER_FPS,
-            logger='bar', threads=os.cpu_count(), preset='ultrafast')
-        print("AAC audio encoding complete.")
+            temp_fps1_path, codec="libx264", audio_codec="aac",
+            audio_bitrate="320k", fps=RENDER_FPS,
+            logger='bar', threads=os.cpu_count(), preset='ultrafast')
+        print("High-quality AAC audio encoding complete.")

     final_clip.close()
@@ -361,25 +472,46 @@ with gr.Blocks(title="Spectrogram Video Generator") as iface:
         with gr.Column(scale=1):
             audio_input = gr.Audio(type="filepath", label="Upload Audio File")

+            # --- Image Upload Component ---
+            gr.Markdown(
+                """
+                ### Background Image Options (Optional)
+
+                Upload one or more images to create a dynamic background for the video. The display behavior changes based on your audio file and the number of images provided.
+
+                * **Mode 1: CUE Sheet Synchronization**
+                If your audio file contains an embedded CUE sheet AND the number of images you upload **exactly matches** the number of tracks, the images will be synchronized with the tracks. The first image will appear during the first track, the second during the second, and so on.
+
+                * **Mode 2: Even Time Distribution**
+                In all other cases (e.g., the audio has no CUE sheet, or the number of images and tracks do not match), the images will be displayed sequentially. The total duration of the video will be divided equally among all uploaded images.
+
+                **Note:** When any image is used as a background, the spectrogram visualizer will automatically become **semi-transparent** to ensure the background is clearly visible.
+                """
+            )
+            image_uploads = gr.File(
+                label="Upload Background Images",
+                file_count="multiple",  # Allow multiple files
+                file_types=["image"]    # Accept only image formats
+            )
+
             with gr.Accordion("Visualizer Options", open=True):
+                with gr.Row():
+                    width_input = gr.Number(value=1920, label="Video Width (px)", precision=0)
+                    height_input = gr.Number(value=1080, label="Video Height (px)", precision=0)
                 fg_color = gr.ColorPicker(value="#71808c", label="Spectrogram Bar Top Color")
-                bg_color = gr.ColorPicker(value="#2C3E50", label="Background Color")
+                bg_color = gr.ColorPicker(value="#2C3E50", label="Background Color (if no images)")

             with gr.Accordion("Text Overlay Options", open=True):
-
-                # --- CORE CORRECTION: Add clarification text ---
                 gr.Markdown(
                     "**Note:** These options only take effect if the input audio file has an embedded CUE sheet."
                 )
-                gr.Markdown("---")
-                # --- CORRECTION END ---
-
+                gr.Markdown("---")
                 gr.Markdown("If your CUE sheet contains non-English characters, please select a compatible font.")
                 default_font = "Microsoft JhengHei" if "Microsoft JhengHei" in FONT_DISPLAY_NAMES else ("Arial" if "Arial" in FONT_DISPLAY_NAMES else (FONT_DISPLAY_NAMES[0] if FONT_DISPLAY_NAMES else None))
                 font_name_dd = gr.Dropdown(choices=FONT_DISPLAY_NAMES, value=default_font, label="Font Family")

                 with gr.Row():
-                    font_size_slider = gr.Slider(minimum=12, maximum=
+                    font_size_slider = gr.Slider(minimum=12, maximum=256, value=80, step=1, label="Font Size")
                     font_color_picker = gr.ColorPicker(value="#FFFFFF", label="Font Color")

                 with gr.Row():
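The two modes described in the new help text mirror the scheduling logic added earlier in `process_audio_to_video`. Mode 2's arithmetic is just an even split of the timeline; for a hypothetical 300-second file and four images:

```python
duration = 300.0
image_paths = ["a.png", "b.png", "c.png", "d.png"]

img_duration = duration / len(image_paths)          # 75.0 s per image
schedule = [(path, i * img_duration, img_duration)
            for i, path in enumerate(image_paths)]
# [('a.png', 0.0, 75.0), ('b.png', 75.0, 75.0),
#  ('c.png', 150.0, 75.0), ('d.png', 225.0, 75.0)]
```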
@@ -396,10 +528,13 @@ with gr.Blocks(title="Spectrogram Video Generator") as iface:
         with gr.Column(scale=2):
             video_output = gr.Video(label="Generated Video")

+    # --- Add image_uploads to the inputs list ---
     submit_btn.click(
         fn=process_audio_to_video,
         inputs=[
-            audio_input,
+            audio_input, image_uploads,
+            width_input, height_input,
+            fg_color, bg_color,
             font_name_dd, font_size_slider, font_color_picker,
             font_bg_color_picker, font_bg_alpha_slider,
             pos_h_radio, pos_v_radio