"""Gradio demo: LucidDreamer — 3D Gaussian-splatting scenes seeded from
Genshin-Impact couple images (svjack/3DitScene_cache examples)."""
import os
import glob
import time
import pathlib
import shlex
import subprocess
import sys
import random

import gradio as gr
from huggingface_hub import snapshot_download
from datasets import load_from_disk

# Fix the random seed so the per-example camera-path assignment below is
# reproducible across restarts.
random.seed(42)

# (generation path, rendering path) pairs, ordered by how often they occur.
common_combinations = [
    ("lookaround", "back_and_forth"),  # Most common
    ("lookdown", "back_and_forth"),    # Less common
    ("rotate360", "headbanging"),      # Rare
]

# Download the example dataset from the Hub.
repo_id = "svjack/3DitScene_cache"
folder_path = "Genshin-Impact-Couple-with-Tags-IID-Gender-Only-Two-Joy-Caption_Head10"
local_dir = snapshot_download(
    repo_id=repo_id,
    repo_type="dataset",
    allow_patterns=f"{folder_path}/*",
    cache_dir=os.getcwd(),
    local_dir=".",
)

# Load the dataset and assign each example a camera-path combination in a
# single pass (the random.choice call sequence matches the seeded order).
dataset = load_from_disk(folder_path)
examples_with_combinations = []
for example in dataset:
    gen_path, render_path = random.choice(common_combinations)
    examples_with_combinations.append({
        'image': example['image'],
        'joy-caption': example['joy_caption_surrounding'],
        'gen_camerapath': gen_path,
        'render_camerapath': render_path,
    })

root = pathlib.Path(__file__).parent
example_root = os.path.join(root, 'examples')
ckpt_root = os.path.join(root, 'stablediffusion')

# Fetch the baked demo assets if fewer than 8 PLY scenes are present locally.
d = example_root
if len(glob.glob(os.path.join(d, '*.ply'))) < 8:
    snapshot_download(repo_id="ironjr/LucidDreamerDemo", repo_type="model", local_dir=d)

# Fetch each SD checkpoint that is not already on disk.
_CHECKPOINTS = [
    ("ironjr/BlazingDriveV11m", 'Blazing Drive V11m'),
    ("ironjr/RealCartoon-PixarV5", 'RealCartoon-Pixar V5'),
    ("ironjr/RealisticVisionV5-1", 'Realistic Vision V5.1'),
    ("runwayml/stable-diffusion-inpainting", 'SD1-5'),
]
for _ckpt_repo, _ckpt_dir in _CHECKPOINTS:
    d = os.path.join(ckpt_root, _ckpt_dir)
    if not os.path.exists(d):
        snapshot_download(repo_id=_ckpt_repo, repo_type="model", local_dir=d)


def _ensure_wheel(module_name, wheel_name):
    """Install the bundled wheel from ./dist if *module_name* is not importable.

    Uses the current interpreter (`sys.executable -m pip`) with list-form
    arguments instead of a shell string, and `check=True` so a failed install
    raises instead of being silently ignored.
    """
    try:
        __import__(module_name)
    except ModuleNotFoundError:
        subprocess.run(
            [sys.executable, '-m', 'pip', 'install',
             os.path.join(root, 'dist', wheel_name)],
            check=True,
        )


_ensure_wheel('simple_knn',
              'simple_knn-0.0.0-cp39-cp39-linux_x86_64.whl')
_ensure_wheel('depth_diff_gaussian_rasterization_min',
              'depth_diff_gaussian_rasterization_min-0.0.0-cp39-cp39-linux_x86_64.whl')

from luciddreamer import LucidDreamer


css = """
#run-button {
    background: coral;
    color: white;
}
"""

save_dir = "local_save"
os.makedirs(save_dir, exist_ok=True)
ld = LucidDreamer(save_dir=save_dir)

with gr.Blocks(css=css) as demo:
    gr.HTML(
        """

LucidDreamer: Domain-free Generation of 3D Gaussian Splatting Scenes - Genshin Impact Couple

If you like our project, please visit our Github, too! ✨✨✨ More features are waiting!

  Project Page    

Duplicate Space

Attention: In case of high traffic, you can alternatively use our backup server (first button: without custom SD support) or clone this repository to run on your own machine (second button). We gratefully welcome any type of your contributions!

"""
    )
    with gr.Row():
        result_gallery = gr.Video(label='RGB Video', show_label=True, autoplay=True, format='mp4')
        result_depth = gr.Video(label='Depth Video', show_label=True, autoplay=True, format='mp4')
        result_ply_file = gr.File(label='Gaussian splatting PLY', show_label=True)
    with gr.Row():
        input_image = gr.Image(
            label='Image prompt',
            sources='upload',
            type='pil',
        )
        with gr.Column():
            model_name = gr.Radio(
                label='SD checkpoint',
                choices=['SD1.5 (default)', 'Blazing Drive V11m', 'Realistic Vision V5.1', 'RealCartoon-Pixar V5',],
                value='SD1.5 (default)'
            )
            prompt = gr.Textbox(
                label='Text prompt',
                value='A cozy livingroom',
            )
            n_prompt = gr.Textbox(
                label='Negative prompt',
                value='photo frame, frame, boarder, simple color, inconsistent, humans, people',
            )
            gen_camerapath = gr.Radio(
                label='Camera trajectory for generation (STEP 1)',
                choices=['lookaround', 'lookdown', 'rotate360'],
                value='lookaround',
            )
            with gr.Row():
                seed = gr.Slider(
                    label='Seed',
                    minimum=1,
                    maximum=2147483647,
                    step=1,
                    randomize=True,
                )
                diff_steps = gr.Slider(
                    label='SD inpainting steps',
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=30,
                )
            render_camerapath = gr.Radio(
                label='Camera trajectory for rendering (STEP 2)',
                choices=['back_and_forth', 'llff', 'headbanging'],
                value='llff',
            )
        with gr.Column():
            run_button = gr.Button(value='Run! (it may take a while)', elem_id='run-button')
            gr.HTML(
                """

...or you can run in two steps

(hint: press STEP 2 if you have already baked Gaussians in STEP 1).
"""
            )
            with gr.Row():
                gaussian_button = gr.Button(value='STEP 1: Generate Gaussians')
                render_button = gr.Button(value='STEP 2: Render A Video')
            gr.HTML(
                """
...or you can just watch a quick preload we have baked already.
"""
            )
            example_name = gr.Radio(
                label='Quick load',
                choices=['DON\'T'],
                value='DON\'T',
            )

    # Callback input lists: each handler expects `example_name` rotated to the
    # end, hence the `ips[1:] + ips[:1]`-style slicing below.
    ips = [example_name, input_image, prompt, n_prompt, gen_camerapath, seed, diff_steps, render_camerapath, model_name]
    run_button.click(fn=ld.run, inputs=ips[1:] + ips[:1], outputs=[result_ply_file, result_gallery, result_depth])
    gaussian_button.click(fn=ld.create, inputs=ips[1:-2] + ips[-1:] + ips[:1], outputs=[result_ply_file])
    render_button.click(fn=ld.render_video, inputs=ips[-2:-1] + ips[:1], outputs=[result_gallery, result_depth])

    # Dataset-driven examples; rows are ordered to match `ips`, with the
    # dataset caption used as the text prompt.
    gr.Examples(
        examples=[
            [
                'DON\'T',
                example['image'],
                example['joy-caption'],
                'photo frame, frame, boarder, simple color, inconsistent, humans, people',
                example['gen_camerapath'],  # randomly assigned gen_camerapath
                10,  # seed
                25,  # diff_steps
                example['render_camerapath'],  # randomly assigned render_camerapath
                'RealCartoon-Pixar V5',
            ]
            for example in examples_with_combinations
        ],
        inputs=ips,
        outputs=[result_ply_file, result_gallery, result_depth],
        fn=ld.run,
        cache_examples=False,
    )
    gr.HTML(
        """

Acknowledgement and Disclaimer

We deeply thank br_d, 7whitefire7, and SG161222 for their awesome Stable Diffusion models. We also appreciate ai_pictures21 and recatm for the beautiful illustrations used in the examples. Please note that the authors of this work do not own the model checkpoints and the illustrations in this demo. LucidDreamer algorithm cannot be used for commercial purpose. Please contact the authors for permission requests.

"""
    )

if __name__ == '__main__':
    demo.launch(share=True)