fffiloni committed
Commit 3aca8ee
Parent(s): c705408

Create gradio_app.py

Files changed (1)
  1. gradio_app.py +130 -0
gradio_app.py ADDED
@@ -0,0 +1,130 @@
+ import os
+ import sys
+ import shutil
+ import uuid
+ import subprocess
+ import gradio as gr
+ from glob import glob
+
+ from huggingface_hub import snapshot_download, hf_hub_download
+
+ # Download models
+ os.makedirs("pretrained_weights", exist_ok=True)
+
+ # List of subdirectories to create inside "pretrained_weights"
+ subfolders = [
+     "stable-video-diffusion-img2vid-xt"
+ ]
+
+ # Create each subdirectory
+ for subfolder in subfolders:
+     os.makedirs(os.path.join("pretrained_weights", subfolder), exist_ok=True)
+
+ snapshot_download(
+     repo_id="stabilityai/stable-video-diffusion-img2vid",
+     local_dir="./pretrained_weights/stable-video-diffusion-img2vid-xt"
+ )
+
+ snapshot_download(
+     repo_id="Yhmeng1106/anidoc",
+     local_dir="./pretrained_weights"
+ )
+
+ # CoTracker checkpoint
+ hf_hub_download(
+     repo_id="facebook/cotracker",
+     filename="cotracker2.pth",
+     local_dir="./pretrained_weights"
+ )
+
+ def generate(control_sequence, ref_image):
+     control_image = control_sequence  # e.g. "data_test/sample4_2.mp4"
+     ref_image = ref_image  # e.g. "data_test/sample4.png"
+     unique_id = str(uuid.uuid4())
+     output_dir = f"results_{unique_id}"
+
+     try:
+         # Run the inference command
+         subprocess.run(
+             [
+                 "python", "scripts_infer/anidoc_inference.py",
+                 "--config",
+                 "--all_sketch",
+                 "--matching",
+                 "--tracking",
+                 "--control_image", f"{control_image}",
+                 "--ref_image", f"{ref_image}",
+                 "--output_dir", f"{output_dir}",
+                 "--max_point", "10",
+             ],
+             check=True
+         )
+
+         # Search for the mp4 file in a subfolder of output_dir
+         output_video = glob(os.path.join(output_dir, "*", "*.mp4"))  # glob() is imported from the glob module above
+         print(output_video)
+
+         if output_video:
+             output_video_path = output_video[0]  # Get the first match
+         else:
+             output_video_path = None
+
+         print(output_video_path)
+         return output_video_path
+
+     except subprocess.CalledProcessError as e:
+         raise gr.Error(f"Error during inference: {str(e)}")
+
+ css = """
+ div#col-container{
+     margin: 0 auto;
+     max-width: 982px;
+ }
+ """
+ with gr.Blocks(css=css) as demo:
+     with gr.Column(elem_id="col-container"):
+         gr.Markdown("# AniDoc: Animation Creation Made Easier")
+         gr.Markdown("AniDoc colorizes a sequence of sketches based on a character design reference with high fidelity, even when the sketches significantly differ in pose and scale.")
+         gr.HTML("""
+         <div style="display:flex;column-gap:4px;">
+             <a href="https://github.com/yihao-meng/AniDoc">
+                 <img src='https://img.shields.io/badge/GitHub-Repo-blue'>
+             </a>
+             <a href="https://yihao-meng.github.io/AniDoc_demo/">
+                 <img src='https://img.shields.io/badge/Project-Page-green'>
+             </a>
+             <a href="https://arxiv.org/pdf/2412.14173">
+                 <img src='https://img.shields.io/badge/ArXiv-Paper-red'>
+             </a>
+             <a href="https://huggingface.co/spaces/fffiloni/AniDoc?duplicate=true">
+                 <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-sm.svg" alt="Duplicate this Space">
+             </a>
+             <a href="https://huggingface.co/fffiloni">
+                 <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/follow-me-on-HF-sm-dark.svg" alt="Follow me on HF">
+             </a>
+         </div>
+         """)
+         with gr.Row():
+             with gr.Column():
+                 control_sequence = gr.Video(label="Control Sequence")
+                 ref_image = gr.Image(label="Reference Image", type="filepath")
+                 submit_btn = gr.Button("Submit")
+             with gr.Column():
+                 video_result = gr.Video(label="Result")
+
+         gr.Examples(
+             examples=[
+                 ["data_test/sample4_2.mp4", "data_test/sample4.png"]
+             ],
+             inputs=[control_sequence, ref_image]
+         )
+
+         submit_btn.click(
+             fn=generate,
+             inputs=[control_sequence, ref_image],
+             outputs=[video_result]
+         )
+
+ demo.queue().launch(show_api=False, show_error=True)
+
+
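
As a usage note, the generate() wrapper above can also be exercised outside the Gradio UI. A minimal sketch, assuming the function is available in the current Python session, that the data_test/ example assets from the AniDoc repo are present, and that the weight downloads have completed:

    # Minimal smoke test of the generate() wrapper, bypassing the Gradio UI.
    # Uses the same example assets wired into gr.Examples above.
    result_path = generate("data_test/sample4_2.mp4", "data_test/sample4.png")
    print("Result video:", result_path)  # first .mp4 found under results_<uuid>/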