# image-to-vector-video / app_exp2.py
import gradio as gr
import vtracer
import os
import pandas as pd
from io import BytesIO
from PIL import Image
import cairosvg
import cv2
import numpy as np
import tempfile
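
# Pipeline overview (summary of the functions below): the uploaded raster image
# is traced to an SVG with vtracer, progressively longer prefixes of the SVG's
# path rows are rasterized with cairosvg, and the resulting frames are stitched
# into an MP4 with OpenCV.
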
def clean_svg(svg_string):
    """Optional function to clean SVG if needed"""
    return svg_string

def rasterize_svg(svg_string, width, height, dpi=128, scale=1):
    """Convert SVG string to PNG image while maintaining aspect ratio"""
    try:
        svg_raster_bytes = cairosvg.svg2png(
            bytestring=svg_string,
            background_color='white',
            output_width=width,
            output_height=height,
            dpi=dpi,
            scale=scale)
        svg_raster = Image.open(BytesIO(svg_raster_bytes))
    except Exception:
        try:
            # Retry after passing the string through the (optional) cleaner
            svg = clean_svg(svg_string)
            svg_raster_bytes = cairosvg.svg2png(
                bytestring=svg,
                background_color='white',
                output_width=width,
                output_height=height,
                dpi=dpi,
                scale=scale)
            svg_raster = Image.open(BytesIO(svg_raster_bytes))
        except Exception:
            svg_raster = Image.new('RGB', (width, height), color='white')
    return svg_raster

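# Note on rasterize_svg(): the nested try/except deliberately degrades to a
# blank white canvas when cairosvg cannot parse a (possibly truncated) SVG
# string, so a single unparsable intermediate frame does not abort the video.
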
def create_video_from_frames(frame_files, output_path, duration_seconds, width, height):
    """Create video from sequence of frames with specified duration"""
    # Calculate frame rate based on desired duration
    num_frames = len(frame_files)
    fps = max(1, num_frames / duration_seconds)  # Ensure at least 1 fps

    # Initialize video writer
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    video = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

    # Read each frame and write to video
    for frame_file in frame_files:
        # Read image with PIL and convert to OpenCV format
        pil_img = Image.open(frame_file)
        cv_img = cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)
        video.write(cv_img)

    # Add last frame to fill remaining time if needed
    if num_frames > 0:
        remaining_frames = max(0, int(fps * duration_seconds) - num_frames)
        for _ in range(remaining_frames):
            video.write(cv_img)

    video.release()

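# Note on create_video_from_frames(): fps is derived from num_frames / duration
# and clamped to at least 1, so when there are fewer frames than seconds the
# trailing loop repeats the last frame to pad the clip out to roughly the
# requested duration.
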
def process_svg_to_video(input_svg_path, original_width, original_height, video_duration_seconds=10, chunk_size=30):
    """Process SVG file and create a video with specified duration using exponential row slicing"""
    # Read SVG file as a table to maintain exact row slicing logic
    df = pd.read_table(input_svg_path, header=None)
    df_head = df.head(3)
    df_tail = df.tail(1)
    df_middle = df.iloc[3:-1, :]

    # Use the original image dimensions
    width, height = original_width, original_height

    # If chunk_size is 0, use automatic calculation (start with 1)
    total_rows = len(df_middle)
    if chunk_size == 0:
        initial_chunk = 1  # Start with 1 path element
    else:
        initial_chunk = max(1, min(chunk_size, total_rows))  # Ensure it's within valid range

    # Create a temporary directory for images
    temp_dir = tempfile.mkdtemp()
    frame_files = []

    # Process with exponential chunk sizes
    current_chunk_size = initial_chunk
    processed_rows = 0
    while processed_rows < total_rows:
        # Calculate end index for this chunk
        end_idx = min(processed_rows + current_chunk_size, total_rows)
        current_chunk = df_middle.iloc[:end_idx]

        # Combine with head and tail
        combined_df = pd.concat([df_head, current_chunk, df_tail])
        svg_content = "\n".join(combined_df[0].astype(str).values.tolist())

        # Convert to image using original dimensions
        img = rasterize_svg(svg_content, width, height)
        img_filename = os.path.join(temp_dir, f"frame_{processed_rows:04d}.png")
        img.save(img_filename)
        frame_files.append(img_filename)

        # Update counters
        processed_rows = end_idx
        current_chunk_size *= 2  # Double the chunk size for next iteration

    # Create output video path
    output_video_path = os.path.join(temp_dir, "output_video.mp4")

    # Create video from frames
    create_video_from_frames(frame_files, output_video_path, video_duration_seconds, width, height)

    # Clean up temporary files (except the video)
    for file in frame_files:
        os.remove(file)

    return output_video_path, temp_dir

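# Rough illustration of the exponential slicing in process_svg_to_video()
# (assuming chunk_size=0, i.e. an initial chunk of 1): successive frames show
# 1, 3, 7, 15, ... path rows, so an SVG with N middle rows yields on the order
# of log2(N) frames instead of N.
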
def convert_to_vector_and_video(
    image,
    video_duration=10,
    chunk_size=30,
    colormode="color",
    hierarchical="stacked",
    mode="spline",
    filter_speckle=4,
    color_precision=6,
    layer_difference=16,
    corner_threshold=60,
    length_threshold=4.0,
    max_iterations=10,
    splice_threshold=45,
    path_precision=3
):
    # Create temporary directory
    temp_dir = tempfile.mkdtemp()
    input_path = os.path.join(temp_dir, "temp_input.jpg")
    output_svg_path = os.path.join(temp_dir, "svg_output.svg")

    # Save the input image to a temporary file (convert to RGB first, since
    # JPEG cannot store the alpha channel of an uploaded RGBA image)
    image.convert("RGB").save(input_path)

    # Get original dimensions from the uploaded image
    original_width, original_height = image.size

    # Convert the image to SVG using VTracer
    vtracer.convert_image_to_svg_py(
        input_path,
        output_svg_path,
        colormode=colormode,
        hierarchical=hierarchical,
        mode=mode,
        filter_speckle=int(filter_speckle),
        color_precision=int(color_precision),
        layer_difference=int(layer_difference),
        corner_threshold=int(corner_threshold),
        length_threshold=float(length_threshold),
        max_iterations=int(max_iterations),
        splice_threshold=int(splice_threshold),
        path_precision=int(path_precision)
    )

    # Process SVG to create video using the original dimensions
    video_path, video_temp_dir = process_svg_to_video(
        output_svg_path,
        original_width,
        original_height,
        video_duration_seconds=video_duration,
        chunk_size=chunk_size
    )

    # Read the SVG output
    with open(output_svg_path, "r") as f:
        svg_content = f.read()

    # Return the SVG preview, SVG file, and video file
    return (
        gr.HTML(f'<svg viewBox="0 0 {original_width} {original_height}">{svg_content}</svg>'),
        output_svg_path,
        video_path
    )

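# Minimal programmatic usage sketch (hypothetical "input.png"; not part of the
# Gradio flow defined below):
#
#   preview_html, svg_path, mp4_path = convert_to_vector_and_video(
#       Image.open("input.png"), video_duration=5, chunk_size=0
#   )
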
def handle_color_mode(value):
    return value


def clear_inputs():
    # Reset every control to the same defaults used when the UI is built below.
    return (
        gr.Image(value=None),
        gr.Slider(value=10),
        gr.Slider(value=300),
        gr.Radio(value="color"),
        gr.Radio(value="stacked"),
        gr.Radio(value="spline"),
        gr.Slider(value=4),
        gr.Slider(value=6),
        gr.Slider(value=16),
        gr.Slider(value=60),
        gr.Slider(value=4.0),
        gr.Slider(value=10),
        gr.Slider(value=45),
        gr.Slider(value=3)
    )

def update_interactivity_and_visibility(colormode, color_precision_value, layer_difference_value):
    is_color_mode = colormode == "color"
    return (
        gr.update(interactive=is_color_mode),
        gr.update(interactive=is_color_mode),
        gr.update(visible=is_color_mode)
    )


def update_interactivity_and_visibility_for_mode(mode):
    is_spline_mode = mode == "spline"
    return (
        gr.update(interactive=is_spline_mode),
        gr.update(interactive=is_spline_mode),
        gr.update(interactive=is_spline_mode)
    )

css = """
#col-container {
    margin: 0 auto;
    max-width: 960px;
}
.generate-btn {
    background: linear-gradient(90deg, #4B79A1 0%, #283E51 100%) !important;
    border: none !important;
    color: white !important;
}
.generate-btn:hover {
    transform: translateY(-2px);
    box-shadow: 0 5px 15px rgba(0,0,0,0.2);
}
"""

examples = [
    "examples/玉子.jpg",
    "examples/异闻录.jpg",
    "examples/化物语封面.jpeg",
    "examples/01.jpg",
    "examples/02.jpg",
    "examples/03.jpg",
]

# Define the Gradio interface
with gr.Blocks(css=css) as app:
    with gr.Column(elem_id="col-container"):
        gr.HTML("""
        <div style="text-align: center;">
            <h2>Image to Vector Video Converter ⚡</h2>
            <p>Converts raster images to vector graphics and creates progressive rendering videos.</p>
        </div>
        """)
        with gr.Row():
            with gr.Column():
                image_input = gr.Image(type="pil", label="Upload Image")
                video_duration = gr.Slider(1, 60, value=10, step=1, label="Video Duration (seconds)")
                chunk_size = gr.Slider(0, 1000, value=300, step=1, label="Chunk Size (0=auto)",
                                       info="Number of SVG path elements to add per frame (0 for automatic calculation)")
                with gr.Accordion("Advanced Settings", open=False):
                    with gr.Accordion("Clustering", open=False):
                        colormode = gr.Radio([("COLOR", "color"), ("B/W", "binary")], value="color", label="Color Mode", show_label=False)
                        filter_speckle = gr.Slider(0, 128, value=4, step=1, label="Filter Speckle", info="Cleaner")
                        color_precision = gr.Slider(1, 8, value=6, step=1, label="Color Precision", info="More accurate")
                        layer_difference = gr.Slider(0, 128, value=16, step=1, label="Gradient Step", info="Less layers")
                        hierarchical = gr.Radio([("STACKED", "stacked"), ("CUTOUT", "cutout")], value="stacked", label="Hierarchical Mode", show_label=False)
                    with gr.Accordion("Curve Fitting", open=False):
                        mode = gr.Radio([("SPLINE", "spline"), ("POLYGON", "polygon"), ("PIXEL", "none")], value="spline", label="Mode", show_label=False)
                        corner_threshold = gr.Slider(0, 180, value=60, step=1, label="Corner Threshold", info="Smoother")
                        length_threshold = gr.Slider(3.5, 10, value=4.0, step=0.1, label="Segment Length", info="More coarse")
                        splice_threshold = gr.Slider(0, 180, value=45, step=1, label="Splice Threshold", info="Less accurate")
                        max_iterations = gr.Slider(1, 20, value=10, step=1, label="Max Iterations", visible=False)
                        path_precision = gr.Slider(1, 10, value=3, step=1, label="Path Precision", visible=False)
                output_text = gr.Textbox(label="Selected Mode", visible=False)
                with gr.Row():
                    clear_button = gr.Button("Clear")
                    convert_button = gr.Button("✨ Convert to Video", variant='primary', elem_classes=["generate-btn"])
            with gr.Column():
                html = gr.HTML(label="SVG Preview")
                svg_output = gr.File(label="Download SVG")
                video_output = gr.Video(label="Rendering Video")
                gr.Examples(
                    examples=examples,
                    fn=convert_to_vector_and_video,
                    inputs=[image_input],
                    outputs=[html, svg_output, video_output],
                    cache_examples=False,
                    run_on_click=True
                )

    # Event handlers
    colormode.change(handle_color_mode, inputs=colormode, outputs=output_text)
    hierarchical.change(handle_color_mode, inputs=hierarchical, outputs=output_text)
    mode.change(handle_color_mode, inputs=mode, outputs=output_text)

    colormode.change(
        update_interactivity_and_visibility,
        inputs=[colormode, color_precision, layer_difference],
        outputs=[color_precision, layer_difference, hierarchical]
    )

    mode.change(
        update_interactivity_and_visibility_for_mode,
        inputs=[mode],
        outputs=[corner_threshold, length_threshold, splice_threshold]
    )

    clear_button.click(
        clear_inputs,
        outputs=[
            image_input,
            video_duration,
            chunk_size,
            colormode,
            hierarchical,
            mode,
            filter_speckle,
            color_precision,
            layer_difference,
            corner_threshold,
            length_threshold,
            max_iterations,
            splice_threshold,
            path_precision
        ]
    )

    convert_button.click(
        convert_to_vector_and_video,
        inputs=[
            image_input,
            video_duration,
            chunk_size,
            colormode,
            hierarchical,
            mode,
            filter_speckle,
            color_precision,
            layer_difference,
            corner_threshold,
            length_threshold,
            max_iterations,
            splice_threshold,
            path_precision
        ],
        outputs=[html, svg_output, video_output]
    )

# Launch the app
if __name__ == "__main__":
    app.launch(share=True)