# ControlVideo — app.py (Hugging Face Space by fffiloni, commit c93a0cb)
import gradio as gr
import os
import subprocess
import cv2
from moviepy.editor import VideoFileClip, concatenate_videoclips
import math
from huggingface_hub import snapshot_download
# Checkpoints required by inference.py: the base SD 1.5 model plus the
# three ControlNet conditioning models (depth / canny / openpose).
model_ids = [
    'runwayml/stable-diffusion-v1-5',
    'lllyasviel/sd-controlnet-depth',
    'lllyasviel/sd-controlnet-canny',
    'lllyasviel/sd-controlnet-openpose',
]

# Fetch each repo into checkpoints/<repo-name> at startup.
for repo_id in model_ids:
    repo_name = repo_id.split('/')[-1]
    snapshot_download(repo_id, local_dir=f'checkpoints/{repo_name}')
def get_frame_count_in_duration(filepath):
    """Probe an uploaded video and cap the "video length" slider at its frame count.

    Wired to ``video_path.change`` in the UI below.

    Args:
        filepath: Path to the uploaded video file (Gradio ``type="filepath"``).

    Returns:
        A ``gr.update`` setting the slider's ``maximum`` to the clip's total
        frame count as reported by OpenCV.
    """
    video = cv2.VideoCapture(filepath)
    frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    # Release the capture handle promptly; we only needed the metadata.
    # (The original also computed fps/duration/width/height but never used
    # them — and duration divided by fps would raise ZeroDivisionError when
    # the capture fails to open, since fps is then 0.)
    video.release()
    return gr.update(maximum=frame_count)
# Function to split video into chunks
def split_video_into_chunks(video_path, chunk_size):
    """Split a video into consecutive subclips of at most ``chunk_size`` frames.

    Args:
        video_path: Path to the source video file.
        chunk_size: Maximum number of frames per chunk.

    Returns:
        A list of moviepy subclips covering the whole video in order; the
        final chunk may be shorter than ``chunk_size``.
    """
    video = VideoFileClip(video_path)
    total_frames = int(video.duration * video.fps)
    num_chunks = math.ceil(total_frames / chunk_size)

    chunks = []
    for i in range(num_chunks):
        start_frame = i * chunk_size
        # Clamp the last chunk to the clip's end — this already guarantees a
        # correctly-sized final chunk. (The original additionally tried to
        # "fix up" the last chunk with len(chunks[-1]), which raises
        # TypeError because moviepy clips define no __len__.)
        end_frame = min((i + 1) * chunk_size, total_frames)
        # subclip takes times in seconds, so convert frame indices via fps.
        chunks.append(video.subclip(start_frame / video.fps, end_frame / video.fps))
    return chunks
def run_inference(prompt, video_path, condition, video_length):
    """Run ControlVideo inference chunk-by-chunk and stitch the results.

    Splits the input video into <=12-frame chunks (the per-call limit of
    inference.py), runs inference.py on each chunk, concatenates the
    processed chunks, and writes ``final_video.mp4``.

    Args:
        prompt: Text prompt passed to inference.py.
        video_path: Path to the uploaded source video.
        condition: ControlNet condition name (e.g. "depth").
        video_length: Slider value from the UI (currently unused; the actual
            per-chunk length is derived from each chunk — kept for interface
            compatibility with the Gradio click handler).

    Returns:
        Tuple of (status string, path to the final stitched video).
    """
    output_path = 'output/'
    os.makedirs(output_path, exist_ok=True)

    final_path = 'final_video.mp4'
    # Remove any stale result from a previous run.
    if os.path.exists(final_path):
        os.remove(final_path)

    chunk_size = 12  # maximum number of frames inference.py handles per call
    video_chunks = split_video_into_chunks(video_path, chunk_size)

    processed_chunk_filenames = []
    for i, chunk in enumerate(video_chunks):
        # moviepy clips have no __len__ (len(chunk) raised TypeError);
        # derive the frame count from duration * fps instead.
        frame_count = int(chunk.duration * chunk.fps)

        # inference.py expects a file path; the original interpolated the
        # VideoFileClip object itself into the command. Write the chunk to
        # a temporary file and pass that path.
        chunk_path = os.path.join(output_path, f"chunk_{i}.mp4")
        chunk.write_videofile(chunk_path)

        # Argument-list form (shell=False) handles spaces/quotes in the
        # user-supplied prompt safely, unlike the original shell string.
        command = [
            "python", "inference.py",
            "--prompt", prompt,
            "--condition", condition,
            "--video_path", chunk_path,
            "--output_path", output_path,
            "--video_length", str(frame_count),
            "--smoother_steps", "19", "20",
        ]
        subprocess.run(command)
        os.remove(chunk_path)  # temporary chunk input no longer needed

        # Presumably inference.py names its output after the prompt; the
        # "{prompt}_{i}.mp4" pattern is preserved from the original code —
        # TODO confirm against inference.py.
        processed_chunk_filenames.append(os.path.join(output_path, f"{prompt}_{i}.mp4"))

    # Stitch the processed chunks back together in order.
    processed_chunks = [VideoFileClip(name) for name in processed_chunk_filenames]
    final_video = concatenate_videoclips(processed_chunks)
    final_video.write_videofile(final_path)

    # Clean up the intermediate per-chunk outputs.
    for filename in processed_chunk_filenames:
        os.remove(filename)

    return "done", final_path
# ---- Gradio UI -----------------------------------------------------------
with gr.Blocks() as demo:
    with gr.Column():
        # Inputs
        prompt = gr.Textbox(label="prompt")
        video_path = gr.Video(source="upload", type="filepath")
        condition = gr.Textbox(label="Condition", value="depth")
        video_length = gr.Slider(label="video length", minimum=1, maximum=15, step=1, value=2)
        submit_btn = gr.Button("Submit")
        # Outputs
        video_res = gr.Video(label="result")
        status = gr.Textbox(label="result")

    # Re-cap the length slider whenever a new video is uploaded.
    video_path.change(
        fn=get_frame_count_in_duration,
        inputs=[video_path],
        outputs=[video_length],
    )
    # Kick off chunked inference on submit.
    submit_btn.click(
        fn=run_inference,
        inputs=[prompt, video_path, condition, video_length],
        outputs=[status, video_res],
    )

demo.queue(max_size=12).launch()