import os
import glob
import time
import pathlib
import shlex
import subprocess
import gradio as gr
from huggingface_hub import snapshot_download
from datasets import load_from_disk
import random
# Download the dataset
repo_id = "svjack/3DitScene_cache"
folder_path = "Genshin-Impact-Couple-with-Tags-IID-Gender-Only-Two-Joy-Caption_Head10"
local_dir = snapshot_download(
    repo_id=repo_id,
    repo_type="dataset",
    allow_patterns=f"{folder_path}/*",
    cache_dir=os.getcwd(),
    local_dir="."
)
# Load the dataset
dataset = load_from_disk(folder_path)
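# A small sanity check (an addition, not in the original script): the extraction loop
# below assumes every record exposes 'image' and 'joy-caption' columns, so fail early
# with a clear message if the cached dataset ever changes shape.
missing_columns = {'image', 'joy-caption'} - set(dataset.column_names)
if missing_columns:
    raise ValueError(f"Dataset at {folder_path} is missing expected columns: {missing_columns}")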
# Extract the image and joy-caption fields
examples = []
for example in dataset:
    examples.append({
        'image': example['image'],
        'joy-caption': example['joy-caption']
    })
# Available gen_camerapath and render_camerapath options
gen_camerapath_options = ['lookaround', 'lookdown', 'rotate360']
render_camerapath_options = ['back_and_forth', 'llff', 'headbanging']
# Randomly assign a gen_camerapath and a render_camerapath to each example
examples_with_combinations = []
for example in examples:
    gen_camerapath = random.choice(gen_camerapath_options)
    render_camerapath = random.choice(render_camerapath_options)
    examples_with_combinations.append({
        'image': example['image'],
        'joy-caption': example['joy-caption'],
        'gen_camerapath': gen_camerapath,
        'render_camerapath': render_camerapath
    })
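# Optional debugging aid (an addition, not part of the original app): print the first
# caption and its randomly drawn camera paths so the example pool is easy to inspect
# in the logs.
if examples_with_combinations:
    _first = examples_with_combinations[0]
    print('Example 0:', _first['joy-caption'][:80], '|', _first['gen_camerapath'], '->', _first['render_camerapath'])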
root = pathlib.Path(__file__).parent
example_root = os.path.join(root, 'examples')
ckpt_root = os.path.join(root, 'stablediffusion')
d = example_root
if len(glob.glob(os.path.join(d, '*.ply'))) < 8:
    snapshot_download(repo_id="ironjr/LucidDreamerDemo", repo_type="model", local_dir=d)
d = os.path.join(ckpt_root, 'Blazing Drive V11m')
if not os.path.exists(d):
    snapshot_download(repo_id="ironjr/BlazingDriveV11m", repo_type="model", local_dir=d)
d = os.path.join(ckpt_root, 'RealCartoon-Pixar V5')
if not os.path.exists(d):
    snapshot_download(repo_id="ironjr/RealCartoon-PixarV5", repo_type="model", local_dir=d)
d = os.path.join(ckpt_root, 'Realistic Vision V5.1')
if not os.path.exists(d):
    snapshot_download(repo_id="ironjr/RealisticVisionV5-1", repo_type="model", local_dir=d)
d = os.path.join(ckpt_root, 'SD1-5')
if not os.path.exists(d):
    snapshot_download(repo_id="runwayml/stable-diffusion-inpainting", repo_type="model", local_dir=d)
try:
    import simple_knn
except ModuleNotFoundError:
    subprocess.run(shlex.split(f'pip install {root}/dist/simple_knn-0.0.0-cp39-cp39-linux_x86_64.whl'))
try:
    import depth_diff_gaussian_rasterization_min
except ModuleNotFoundError:
    subprocess.run(shlex.split(f'pip install {root}/dist/depth_diff_gaussian_rasterization_min-0.0.0-cp39-cp39-linux_x86_64.whl'))
from luciddreamer import LucidDreamer
css = """
#run-button {
    background: coral;
    color: white;
}
"""
save_dir = "local_save"
os.makedirs(save_dir, exist_ok=True)
ld = LucidDreamer(save_dir=save_dir)
with gr.Blocks(css=css) as demo:
    gr.HTML(
        """
<div style="display: flex; justify-content: center; align-items: center; text-align: center;">
<div>
<h1>LucidDreamer: Domain-free Generation of 3D Gaussian Splatting Scenes - Genshin Impact Couple</h1>
<h5 style="margin: 0;">If you like our project, please visit our Github, too! ✨✨✨ More features are waiting!</h5>
</br>
<div style="display: flex; justify-content: center; align-items: center; text-align: center;">
<a href='https://arxiv.org/abs/2311.13384'>
<img src="https://img.shields.io/badge/Arxiv-2311.13384-red">
</a>
&nbsp;
<a href='https://luciddreamer-cvlab.github.io'>
<img src='https://img.shields.io/badge/Project-LucidDreamer-green' alt='Project Page'>
</a>
&nbsp;
<a href='https://github.com/luciddreamer-cvlab/LucidDreamer'>
<img src='https://img.shields.io/github/stars/luciddreamer-cvlab/LucidDreamer?label=Github&color=blue'>
</a>
&nbsp;
<a href='https://twitter.com/_ironjr_'>
<img src='https://img.shields.io/twitter/url?label=_ironjr_&url=https%3A%2F%2Ftwitter.com%2F_ironjr_'>
</a>
</div>
<div style="display: flex; justify-content: center; align-items: center; text-align: left; border: 1px solid lightgray; padding: 10px; margin-top: 20px; margin-left: 100px; margin-right: 100px; border-radius: 10px">
<p style="align-items: center;">
<a style="display:inline-block" target="_blank" href="https://huggingface.co/spaces/ironjr/LucidDreamer-mini"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg"></a>
<a class="duplicate-button" style="display:inline-block" target="_blank" href="https://huggingface.co/spaces/ironjr/LucidDreamer?duplicate=true"><img style="margin-top:0;margin-bottom:0" src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&amp;style=flat&amp;logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&amp;logoWidth=14" alt="Duplicate Space"></a>
</p>
<p style="margin-left: 15px">
<b>Attention</b>: In case of high traffic, you can alternatively use our backup server (first button: without custom SD support) or clone this repository to run on your own machine (second button). We gratefully welcome contributions of any kind!
</p>
</div>
</div>
</div>
"""
    )
    with gr.Row():
        result_gallery = gr.Video(label='RGB Video', show_label=True, autoplay=True, format='mp4')
        result_depth = gr.Video(label='Depth Video', show_label=True, autoplay=True, format='mp4')
        result_ply_file = gr.File(label='Gaussian splatting PLY', show_label=True)
    with gr.Row():
        input_image = gr.Image(
            label='Image prompt',
            sources='upload',
            type='pil',
        )
        with gr.Column():
            model_name = gr.Radio(
                label='SD checkpoint',
                choices=['SD1.5 (default)', 'Blazing Drive V11m', 'Realistic Vision V5.1', 'RealCartoon-Pixar V5',],
                value='SD1.5 (default)'
            )
            prompt = gr.Textbox(
                label='Text prompt',
                value='A cozy livingroom',
            )
            n_prompt = gr.Textbox(
                label='Negative prompt',
                value='photo frame, frame, boarder, simple color, inconsistent, humans, people',
            )
            gen_camerapath = gr.Radio(
                label='Camera trajectory for generation (STEP 1)',
                choices=['lookaround', 'lookdown', 'rotate360'],
                value='lookaround',
            )
            with gr.Row():
                seed = gr.Slider(
                    label='Seed',
                    minimum=1,
                    maximum=2147483647,
                    step=1,
                    randomize=True,
                )
                diff_steps = gr.Slider(
                    label='SD inpainting steps',
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=30,
                )
            render_camerapath = gr.Radio(
                label='Camera trajectory for rendering (STEP 2)',
                choices=['back_and_forth', 'llff', 'headbanging'],
                value='llff',
            )
        with gr.Column():
            run_button = gr.Button(value='Run! (it may take a while)', elem_id='run-button')
            gr.HTML(
                """
<div style="display: flex; justify-content: center; align-items: center; text-align: center;">
<div>
<h3>...or you can run in two steps</h3>
<h5>(hint: press STEP 2 if you have already baked Gaussians in STEP 1).</h5>
</div>
</div>
"""
            )
            with gr.Row():
                gaussian_button = gr.Button(value='STEP 1: Generate Gaussians')
                render_button = gr.Button(value='STEP 2: Render A Video')
            gr.HTML(
                """
<div style="display: flex; justify-content: center; align-items: center; text-align: center;">
<div>
<h5>...or you can just watch a quick preload we have baked already.</h5>
</div>
</div>
"""
            )
            example_name = gr.Radio(
                label='Quick load',
                choices=['DON\'T'],
                value='DON\'T',
            )
    ips = [example_name, input_image, prompt, n_prompt, gen_camerapath, seed, diff_steps, render_camerapath, model_name]
    run_button.click(fn=ld.run, inputs=ips[1:] + ips[:1], outputs=[result_ply_file, result_gallery, result_depth])
    gaussian_button.click(fn=ld.create, inputs=ips[1:-2] + ips[-1:] + ips[:1], outputs=[result_ply_file])
    render_button.click(fn=ld.render_video, inputs=ips[-2:-1] + ips[:1], outputs=[result_gallery, result_depth])
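    # For reference (inferred from the click wiring above, not a documented API):
    # ld.run is called positionally with
    #   image, prompt, negative prompt, gen_camerapath, seed, diff_steps,
    #   render_camerapath, model_name, example_name,
    # i.e. ips with example_name rotated to the end. STEP 1 (ld.create) receives the
    # same ordering minus render_camerapath, and STEP 2 (ld.render_video) receives
    # only render_camerapath and example_name.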
    # Replace the examples with entries from the Genshin Impact couple dataset
    gr.Examples(
        examples=[
            [
                'DON\'T',
                example['image'],
                example['joy-caption'],
                'photo frame, frame, boarder, simple color, inconsistent, humans, people',
                example['gen_camerapath'],  # randomly assigned gen_camerapath
                10,  # seed
                25,  # diff_steps
                example['render_camerapath'],  # randomly assigned render_camerapath
                'RealCartoon-Pixar V5',
            ] for example in examples_with_combinations
        ],
        inputs=ips,
        outputs=[result_ply_file, result_gallery, result_depth],
        fn=ld.run,
        cache_examples=False,
    )
    gr.HTML(
        """
<div style="display: flex; justify-content: center; align-items: center; text-align: left;">
<br>
<div>
<h5 style="margin: 0;">Acknowledgement and Disclaimer</h5>
<br>
<p>We deeply thank <a href="https://twitter.com/br_d">br_d</a>, <a href="https://ko-fi.com/7whitefire7">7whitefire7</a>, and <a href="https://huggingface.co/SG161222">SG161222</a> for their awesome Stable Diffusion models. We also appreciate <a href="https://twitter.com/ai_pictures21">ai_pictures21</a> and <a href="https://twitter.com/recatm">recatm</a> for the beautiful illustrations used in the examples. Please note that the authors of this work do not own the model checkpoints or the illustrations in this demo. The LucidDreamer algorithm cannot be used for commercial purposes. Please contact the authors for permission requests.</p>
</div>
</div>
"""
    )
if __name__ == '__main__':
    demo.launch(share=True)