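"""DimensionX Gradio demo.

Creates 3D/4D scene videos from a single image using the CogVideoX-5b-I2V
pipeline with DimensionX orbit LoRA weights (left / up camera orbits).
Paper: https://arxiv.org/abs/2411.04928
"""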
import gradio as gr
import os
import torch
import gc
from diffusers import AutoencoderKLCogVideoX, CogVideoXImageToVideoPipeline, CogVideoXTransformer3DModel
from diffusers.utils import export_to_video, load_image
from transformers import T5EncoderModel, T5Tokenizer
from datetime import datetime
import random
from huggingface_hub import hf_hub_download

# Ensure the 'checkpoints' directory exists, then fetch the orbit LoRA weights.
os.makedirs("checkpoints", exist_ok=True)

hf_hub_download(
    repo_id="wenqsun/DimensionX",
    filename="orbit_left_lora_weights.safetensors",
    local_dir="checkpoints",
)
hf_hub_download(
    repo_id="wenqsun/DimensionX",
    filename="orbit_up_lora_weights.safetensors",
    local_dir="checkpoints",
)
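
# Load the CogVideoX-5b-I2V components individually, all in float16 to keep
# VRAM usage down.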
model_id = "THUDM/CogVideoX-5b-I2V"
transformer = CogVideoXTransformer3DModel.from_pretrained(model_id, subfolder="transformer", torch_dtype=torch.float16)
text_encoder = T5EncoderModel.from_pretrained(model_id, subfolder="text_encoder", torch_dtype=torch.float16)
vae = AutoencoderKLCogVideoX.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float16)
tokenizer = T5Tokenizer.from_pretrained(model_id, subfolder="tokenizer")
pipe = CogVideoXImageToVideoPipeline.from_pretrained(
    model_id,
    tokenizer=tokenizer,
    text_encoder=text_encoder,
    transformer=transformer,
    vae=vae,
    torch_dtype=torch.float16,
)
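
# Memory management: after each generation, any torch module still resident on
# CUDA is swept back to the CPU via the garbage collector, and the CUDA cache is
# emptied, so the GPU is fully released between requests.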
def find_and_move_object_to_cpu():
    """Scan all live Python objects and move any CUDA-resident torch modules to CPU."""
    for obj in gc.get_objects():
        try:
            # Only consider PyTorch modules.
            if isinstance(obj, torch.nn.Module):
                # Move the module to CPU if any of its parameters or buffers live on CUDA.
                if any(param.is_cuda for param in obj.parameters()) or any(
                    buf.is_cuda for buf in obj.buffers()
                ):
                    print(f"Found PyTorch module on CUDA: {type(obj).__name__}")
                    obj.to("cpu")
                    print(f"Moved {type(obj).__name__} to CPU.")
        except Exception:
            # gc.get_objects() can yield objects that raise on attribute access; skip them.
            pass
def clear_gpu():
    """Release cached GPU memory and report usage before and after."""
    if not torch.cuda.is_available():
        return
    print(f"Memory allocated before clearing: {torch.cuda.memory_allocated() / (1024 ** 2):.1f} MB")
    print(f"Memory reserved before clearing: {torch.cuda.memory_reserved() / (1024 ** 2):.1f} MB")
    torch.cuda.empty_cache()
    torch.cuda.synchronize()  # Ensure all pending CUDA operations have completed
    print("GPU memory cleared.")
    print(f"Memory allocated after clearing: {torch.cuda.memory_allocated() / (1024 ** 2):.1f} MB")
    print(f"Memory reserved after clearing: {torch.cuda.memory_reserved() / (1024 ** 2):.1f} MB")
def infer(image_path, prompt, orbit_type, progress=gr.Progress(track_tqdm=True)):
    lora_path = "checkpoints/"
    if orbit_type == "Left":
        weight_name = "orbit_left_lora_weights.safetensors"
    elif orbit_type == "Up":
        weight_name = "orbit_up_lora_weights.safetensors"
    else:
        raise gr.Error(f"Unknown orbit type: {orbit_type}")
    lora_rank = 256

    # Use a timestamped adapter name so repeated calls don't collide.
    adapter_timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    adapter_name = f"adapter_{adapter_timestamp}"
    pipe.load_lora_weights(lora_path, weight_name=weight_name, adapter_name=adapter_name)
    # The LoRA weights are rank 256; scale down accordingly when fusing.
    pipe.fuse_lora(lora_scale=1 / lora_rank)
    pipe.to("cuda")

    prompt = f"{prompt}. High quality, ultrarealistic detail and breath-taking movie-like camera shot."
    image = load_image(image_path)
    seed = random.randint(0, 2**8 - 1)

    video = pipe(
        image,
        prompt,
        num_inference_steps=50,
        guidance_scale=7.0,
        use_dynamic_cfg=True,
        generator=torch.Generator(device="cpu").manual_seed(seed),
    )

    # Undo the fused LoRA and drop the adapter so weights don't stack across
    # calls, then move everything back to CPU and release cached GPU memory.
    pipe.unfuse_lora()
    pipe.unload_lora_weights()
    find_and_move_object_to_cpu()
    clear_gpu()

    # Timestamp the output filename to avoid overwriting previous results.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    output_path = f"output_{timestamp}.mp4"
    export_to_video(video.frames[0], output_path, fps=8)
    return output_path

with gr.Blocks() as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# DimensionX")
        gr.Markdown("### Create Any 3D and 4D Scenes from a Single Image with Controllable Video Diffusion")
        gr.HTML("""
            <div style="display:flex;column-gap:4px;">
                <a href="https://github.com/wenqsun/DimensionX">
                    <img src='https://img.shields.io/badge/GitHub-Repo-blue'>
                </a>
                <a href="https://chenshuo20.github.io/DimensionX/">
                    <img src='https://img.shields.io/badge/Project-Page-green'>
                </a>
                <a href="https://arxiv.org/abs/2411.04928">
                    <img src='https://img.shields.io/badge/ArXiv-Paper-red'>
                </a>
                <a href="https://huggingface.co/spaces/fffiloni/DimensionX?duplicate=true">
                    <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-sm.svg" alt="Duplicate this Space">
                </a>
                <a href="https://huggingface.co/fffiloni">
                    <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/follow-me-on-HF-sm-dark.svg" alt="Follow me on HF">
                </a>
            </div>
        """)
        with gr.Row():
            with gr.Column():
                image_in = gr.Image(label="Image Input", type="filepath")
                prompt = gr.Textbox(label="Prompt")
                orbit_type = gr.Radio(label="Orbit type", choices=["Left", "Up"], value="Left", interactive=True)
                submit_btn = gr.Button("Submit")
            with gr.Column():
                video_out = gr.Video(label="Video output")
                examples = gr.Examples(
                    examples=[
                        [
                            "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/astronaut.jpg",
                            "An astronaut hatching from an egg, on the surface of the moon, the darkness and depth of space realised in the background.",
                            "Left",
                            "./examples/output_astronaut_left.mp4",
                        ],
                        [
                            "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/astronaut.jpg",
                            "An astronaut hatching from an egg, on the surface of the moon, the darkness and depth of space realised in the background.",
                            "Up",
                            "./examples/output_astronaut_up.mp4",
                        ],
                    ],
                    # video_out is listed so example rows also prefill the output video.
                    inputs=[image_in, prompt, orbit_type, video_out],
                )

    submit_btn.click(
        fn=infer,
        inputs=[image_in, prompt, orbit_type],
        outputs=[video_out],
    )

# queue() enables request queuing, so generations run one at a time by default.
demo.queue().launch(show_error=True, show_api=False)