# Pin numpy at runtime, before torch/diffsynth import it; check=True aborts
# startup early if the install fails.
import subprocess

subprocess.run(
    "pip install numpy==1.26.4",
    shell=True,
    check=True,
)

import os
import gradio as gr
import torch
import spaces
import random
from PIL import Image
import numpy as np

from glob import glob

# Core pipelines and utilities from https://github.com/modelscope/DiffSynth-Studio
from diffsynth import save_video, ModelManager, SVDVideoPipeline
from diffsynth import SDVideoPipeline, ControlNetConfigUnit, VideoData
from diffsynth.extensions.RIFE import RIFESmoother

import requests


def download_model(url, file_path):
    # Download a checkpoint and write it to file_path, creating the target
    # directory first and failing loudly on HTTP errors.
    os.makedirs(os.path.dirname(file_path), exist_ok=True)
    response = requests.get(url, allow_redirects=True)
    response.raise_for_status()
    with open(file_path, "wb") as f:
        f.write(response.content)

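# Checkpoints for the Diffutoon pipeline: a flat-2D anime SD 1.5 base model,
# the AnimateDiff motion module, lineart and tile ControlNets with their
# annotators, and a negative textual inversion.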
download_model("https://civitai.com/api/download/models/266360?type=Model&format=SafeTensor&size=pruned&fp=fp16", "models/stable_diffusion/flat2DAnimerge_v45Sharp.safetensors")
download_model("https://huggingface.co/guoyww/animatediff/resolve/main/mm_sd_v15_v2.ckpt", "models/AnimateDiff/mm_sd_v15_v2.ckpt")
download_model("https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_lineart.pth", "models/ControlNet/control_v11p_sd15_lineart.pth")
download_model("https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11f1e_sd15_tile.pth", "models/ControlNet/control_v11f1e_sd15_tile.pth")
download_model("https://huggingface.co/lllyasviel/Annotators/resolve/main/sk_model.pth", "models/Annotators/sk_model.pth")
download_model("https://huggingface.co/lllyasviel/Annotators/resolve/main/sk_model2.pth", "models/Annotators/sk_model2.pth")
download_model("https://civitai.com/api/download/models/25820?type=Model&format=PickleTensor&size=full&fp=fp16", "models/textual_inversion/verybadimagenegative_v1.3.pt")

HF_TOKEN = os.environ.get("HF_TOKEN", None)
# Constants
MAX_SEED = np.iinfo(np.int32).max
CSS = """
footer {
    visibility: hidden;
}
"""

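# Redirect to the dark theme on first load by appending ?__theme=dark.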
JS = """function () {
  gradioURL = window.location.href
  if (!gradioURL.endsWith('?__theme=dark')) {
    window.location.replace(gradioURL + '?__theme=dark');
  }
}"""


# Load both pipelines once at startup. On ZeroGPU, the `spaces` package makes
# torch.cuda.is_available() report True in the main process, so this block
# runs at import time.
if torch.cuda.is_available():
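    # ExVideo: Stable Video Diffusion img2vid-xt, extended to 128 frames by ExVideo-SVD-128f-v1.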
    model_manager = ModelManager(
        torch_dtype=torch.float16, 
        device="cuda", 
        model_id_list=["stable-video-diffusion-img2vid-xt", "ExVideo-SVD-128f-v1"],
        downloading_priority=["HuggingFace"])
    pipe = SVDVideoPipeline.from_model_manager(model_manager)
        

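    # Diffutoon: SD 1.5 toon shading with AnimateDiff motion, lineart/tile ControlNets, and RIFE smoothing.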
    model_manager2 = ModelManager(torch_dtype=torch.float16, device="cuda")
    model_manager2.load_textual_inversions("models/textual_inversion")
    model_manager2.load_models([
            "models/stable_diffusion/flat2DAnimerge_v45Sharp.safetensors",
            "models/AnimateDiff/mm_sd_v15_v2.ckpt",
            "models/ControlNet/control_v11p_sd15_lineart.pth",
            "models/ControlNet/control_v11f1e_sd15_tile.pth",
            "models/RIFE/flownet.pkl"
    ])
    pipe2 = SDVideoPipeline.from_model_manager(
        model_manager2,
        [
            ControlNetConfigUnit(
                processor_id="lineart",
                model_path="models/ControlNet/control_v11p_sd15_lineart.pth",
                scale=0.5
            ),
            ControlNetConfigUnit(
                processor_id="tile",
                model_path="models/ControlNet/control_v11f1e_sd15_tile.pth",
                scale=0.5
            )
        ]
    )
    smoother = RIFESmoother.from_model_manager(model_manager2)


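# Toggle input visibility: ExVideo takes an image; Diffutoon takes a video and a prompt.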
def change_media(image_in, video_in, selected):
    if selected == "ExVideo":
        return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)
    elif selected == "Diffutoon":
        return gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)

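# Cap the "Num frames" slider at the uploaded clip's frame count.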
def update_frames(video_in):
    up_video = VideoData(
            video_file=video_in)
    frame_len = len(up_video)
    return gr.update(maximum=frame_len)

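# ZeroGPU: allocate a GPU for this call, budgeted at 120 seconds.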
@spaces.GPU(duration=120)
def generate(
    video_in, 
    image_in,
    selected,
    prompt: str = "best quality",
    seed: int = -1,
    num_inference_steps: int = 10,
    num_frames: int = 30,
    height: int = 512,
    width: int = 512,
    animatediff_batch_size: int = 32,
    animatediff_stride: int = 16,
    motion_bucket_id: int = 127,
    fps_id: int = 25,
    output_folder: str = "outputs",
    progress=gr.Progress(track_tqdm=True)):
    
    video = None
    if seed == -1:
        seed = random.randint(0, MAX_SEED)

    torch.manual_seed(seed)
    
    os.makedirs(output_folder, exist_ok=True)
    base_count = len(glob(os.path.join(output_folder, "*.mp4")))
    video_path = os.path.join(output_folder, f"{base_count:06d}.mp4")
    
    if selected == "ExVideo" and image_in:
        image = Image.open(image_in)
        video = pipe(
            input_image=image.resize((width, height)), 
            num_frames=num_frames, 
            fps=fps_id, 
            height=height, 
            width=width,
            motion_bucket_id=motion_bucket_id,
            num_inference_steps=num_inference_steps,
            min_cfg_scale=2, 
            max_cfg_scale=2, 
            contrast_enhance_scale=1.2
        )
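        # Move the SVD weights off the GPU once generation finishes.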
        model_manager.to("cpu")
    elif selected == "Diffutoon" and video_in:
        up_video = VideoData(
            video_file=video_in,
            height=height, width=width)
        # Use the first num_frames frames, clamped to the clip length.
        input_video = [up_video[i] for i in range(min(num_frames, len(up_video)))]

        video = pipe2(
            prompt=prompt,
            negative_prompt="verybadimagenegative_v1.3",
            cfg_scale=3, 
            clip_skip=2,
            controlnet_frames=input_video,
            num_frames=len(input_video),
            num_inference_steps=num_inference_steps, 
            height=height, 
            width=width,
            animatediff_batch_size=animatediff_batch_size, 
            animatediff_stride=animatediff_stride,
            vram_limit_level=0,
        )
        video = smoother(video)

    if video is None:
        raise gr.Error("Upload an image for ExVideo or a video for Diffutoon first.")
    save_video(video, video_path, fps=fps_id)
    
    return video_path, seed


examples = [
        ['./walking.mp4', None, "Diffutoon", "A woman walking on the street"],
        ['./smilegirl.mp4', None, "Diffutoon", "A girl standing on the grass"],
        ['./working.mp4', None, "Diffutoon", "A woman is doing the dishes"],
        [None, "./train.jpg", "ExVideo", ""],
        [None, "./girl.webp", "ExVideo", ""],
        [None, "./robo.jpg", "ExVideo", ""],
    ]


# Gradio Interface

with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
    gr.HTML("<h1><center>Exvideo📽️Diffutoon</center></h1>")
    gr.HTML("""
        <p><center>ExVideo and Diffutoon video generation.
        <br><b>Update</b>: output resizing and frame-count control.
        <br><b>Note</b>: ZeroGPU time is limited; set the parameters accordingly.</center></p>
        """)
    with gr.Row():
        video_in = gr.Video(label='Upload Video', height=600, scale=2)
        image_in = gr.Image(label='Upload Image', height=600, scale=2, image_mode="RGB", type="filepath", visible=False)
        video = gr.Video(label="Generated Video", height=600, scale=2)
        with gr.Column(scale=1):
            selected = gr.Radio(
                label="Select App",
                choices=["ExVideo", "Diffutoon"],
                value="Diffutoon"
            )
            seed = gr.Slider(
                label="Seed (-1 Random)",
                minimum=-1,
                maximum=MAX_SEED,
                step=1,
                value=-1,
                )
            num_inference_steps = gr.Slider(
                label="Inference steps",
                info="Denoising steps per frame; more steps are slower",
                step=1,
                value=10,
                minimum=1,
                maximum=50,
                )
            num_frames = gr.Slider(
                label="Num frames", 
                info="Output Frames",
                step=1,
                value=30,
                minimum=1, 
                maximum=128,
            )
            with gr.Row():
                height = gr.Slider(
                    label="Height", 
                    step=8,
                    value=512, 
                    minimum=256, 
                    maximum=2560,
                    )
                width = gr.Slider(
                    label="Width", 
                    step=8,
                    value=512, 
                    minimum=256, 
                    maximum=2560,
                    )
            with gr.Accordion("Diffutoon Options", open=False):
                animatediff_batch_size = gr.Slider(
                    label="Animatediff batch size",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=32,
                )
                animatediff_stride = gr.Slider(
                    label="Animatediff stride",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=16,
                )
            with gr.Accordion("ExVideo Options", open=False):
                motion_bucket_id = gr.Slider(
                    label="Motion bucket id", 
                    info="Controls how much motion to add/remove from the image", 
                    value=127, 
                    step=1,
                    minimum=1, 
                    maximum=255,
                )
                fps_id = gr.Slider(
                    label="Frames per second",
                    info="Output frame rate; duration is roughly num_frames / fps",
                    value=6,
                    step=1,
                    minimum=5,
                    maximum=30,
                )
    prompt = gr.Textbox(label="Prompt", value="best quality")
    with gr.Row():
        submit_btn = gr.Button(value="Generate")
        #stop_btn = gr.Button(value="Stop", variant="stop")
        clear_btn = gr.ClearButton([video_in, image_in, seed, video])
        
    gr.Examples(
        examples=examples,
        fn=generate,
        inputs=[video_in, image_in, selected, prompt],
        outputs=[video, seed],
        cache_examples="lazy",
        examples_per_page=4,
    )
    selected.change(change_media, inputs=[image_in, video_in, selected], outputs=[image_in, video_in, prompt])
    video_in.upload(update_frames, inputs=[video_in], outputs=[num_frames])
    submit_event = submit_btn.click(
        fn=generate,
        inputs=[
            video_in, image_in, selected, prompt, seed, num_inference_steps,
            num_frames, height, width, animatediff_batch_size,
            animatediff_stride, motion_bucket_id, fps_id,
        ],
        outputs=[video, seed],
        api_name="video",
    )
    #stop_btn.click(fn=None, inputs=None, outputs=None, cancels=[submit_event])
    
demo.queue().launch()
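
# A minimal client-side sketch (assumes a recent `gradio_client`; the Space URL
# is a placeholder, and the 13 positional args follow the order wired to the
# "video" endpoint above):
#
#   from gradio_client import Client, handle_file
#
#   client = Client("<your-space-url>")
#   out_video, out_seed = client.predict(
#       handle_file("./walking.mp4"),     # video_in
#       None,                             # image_in
#       "Diffutoon",                      # selected
#       "A woman walking on the street",  # prompt
#       -1, 10, 30, 512, 512,             # seed, steps, frames, height, width
#       32, 16,                           # animatediff batch size / stride
#       127, 25,                          # motion_bucket_id, fps_id
#       api_name="/video",
#   )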