# Pin the numpy version at startup, before any module that links against it
# is imported.
import subprocess
subprocess.run(
    'pip install numpy==1.26.4',
    shell=True,
    check=True,
)

import os
import gradio as gr
import torch
import spaces
import random
from PIL import Image
import numpy as np

from glob import glob
from pathlib import Path
from typing import Optional

from diffsynth import (
    save_video, ModelManager, SVDVideoPipeline,
    SDVideoPipeline, ControlNetConfigUnit, VideoData,
)
from diffsynth.extensions.RIFE import RIFESmoother

import requests


def download_model(url, file_path):
    """Download a checkpoint to file_path, creating parent directories as needed."""
    os.makedirs(os.path.dirname(file_path), exist_ok=True)
    model_file = requests.get(url, allow_redirects=True)
    with open(file_path, "wb") as f:
        f.write(model_file.content)

download_model("https://civitai.com/api/download/models/266360?type=Model&format=SafeTensor&size=pruned&fp=fp16", "models/stable_diffusion/flat2DAnimerge_v45Sharp.safetensors")
download_model("https://huggingface.co/guoyww/animatediff/resolve/main/mm_sd_v15_v2.ckpt", "models/AnimateDiff/mm_sd_v15_v2.ckpt")
download_model("https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_lineart.pth", "models/ControlNet/control_v11p_sd15_lineart.pth")
download_model("https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11f1e_sd15_tile.pth", "models/ControlNet/control_v11f1e_sd15_tile.pth")
download_model("https://huggingface.co/lllyasviel/Annotators/resolve/main/sk_model.pth", "models/Annotators/sk_model.pth")
download_model("https://huggingface.co/lllyasviel/Annotators/resolve/main/sk_model2.pth", "models/Annotators/sk_model2.pth")
download_model("https://civitai.com/api/download/models/25820?type=Model&format=PickleTensor&size=full&fp=fp16", "models/textual_inversion/verybadimagenegative_v1.3.pt")

HF_TOKEN = os.environ.get("HF_TOKEN", None)
# Constants
MAX_SEED = np.iinfo(np.int32).max
CSS = """
footer {
    visibility: hidden;
}
"""

# Force the dark theme by redirecting once with Gradio's theme query parameter.
JS = """function () {
  const gradioURL = window.location.href;
  if (!gradioURL.endsWith('?__theme=dark')) {
    window.location.replace(gradioURL + '?__theme=dark');
  }
}"""




# Load both pipelines once at startup when CUDA is available; inference itself
# runs in the @spaces.GPU-decorated generate() below.
if torch.cuda.is_available():
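    # Pipeline 1 (ExVideo): Stable Video Diffusion img2vid-xt, extended to
    # longer clips by the ExVideo-SVD-128f-v1 checkpoint.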
    model_manager = ModelManager(
        torch_dtype=torch.float16, 
        device="cuda", 
        model_id_list=["stable-video-diffusion-img2vid-xt", "ExVideo-SVD-128f-v1"],
        downloading_priority=["HuggingFace"])
    pipe = SVDVideoPipeline.from_model_manager(model_manager)
        

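    # Pipeline 2 (Diffutoon): SD 1.5 toon checkpoint + AnimateDiff motion
    # module, guided by lineart/tile ControlNets; RIFE smooths the result.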
    model_manager2 = ModelManager(torch_dtype=torch.float16, device="cuda")
    model_manager2.load_textual_inversions("models/textual_inversion")
    model_manager2.load_models([
            "models/stable_diffusion/flat2DAnimerge_v45Sharp.safetensors",
            "models/AnimateDiff/mm_sd_v15_v2.ckpt",
            "models/ControlNet/control_v11p_sd15_lineart.pth",
            "models/ControlNet/control_v11f1e_sd15_tile.pth",
            "models/RIFE/flownet.pkl"
    ])
    pipe2 = SDVideoPipeline.from_model_manager(
        model_manager2,
        [
            ControlNetConfigUnit(
                processor_id="lineart",
                model_path="models/ControlNet/control_v11p_sd15_lineart.pth",
                scale=0.5
            ),
            ControlNetConfigUnit(
                processor_id="tile",
                model_path="models/ControlNet/control_v11f1e_sd15_tile.pth",
                scale=0.5
            )
        ]
    )
    smoother = RIFESmoother.from_model_manager(model_manager2)


def change_media(image_in, video_in, selected):
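    # Swap which upload widget is visible and return the active widget's
    # value as the third output (routed into the shared `media` component).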
    if selected == "ExVideo":
        return gr.update(visible=True), gr.update(visible=False), image_in
    elif selected == "Diffutoon":
        return gr.update(visible=False), gr.update(visible=True), video_in



@spaces.GPU(duration=120)
def generate(
    media,
    selected,
    seed: Optional[int] = -1,
    num_inference_steps: int = 10,
    animatediff_batch_size: int = 32,
    animatediff_stride: int = 16,
    motion_bucket_id: int = 127,
    fps_id: int = 25,
    num_frames: int = 50,
    prompt: str = "best quality",
    output_folder: str = "outputs",
    progress=gr.Progress(track_tqdm=True)):
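
    # Dispatch to the ExVideo (image-to-video) or Diffutoon (video-to-video)
    # pipeline depending on the radio selection.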

    print(media)
    
    if seed == -1:
        seed = random.randint(0, MAX_SEED)

    torch.manual_seed(seed)
    
    os.makedirs(output_folder, exist_ok=True)
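    # Name the output sequentially after the mp4 files already in the folder.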
    base_count = len(glob(os.path.join(output_folder, "*.mp4")))
    video_path = os.path.join(output_folder, f"{base_count:06d}.mp4")

    if selected == "ExVideo":
        image = Image.open(media)
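        # ExVideo branch: animate a single image, resized to 512x512.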
        video = pipe(
            input_image=image.resize((512, 512)), 
            num_frames=num_frames, 
            fps=fps_id, 
            height=512, 
            width=512,
            motion_bucket_id=motion_bucket_id,
            num_inference_steps=num_inference_steps,
            min_cfg_scale=2, 
            max_cfg_scale=2, 
            contrast_enhance_scale=1.2
        )
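        # Offload the SVD weights to the CPU afterwards to free GPU memory.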
        model_manager.to("cpu")
    else:
        up_video = VideoData(
            video_file=media,
            height=1024, width=1024)
        # Keep the clip short for ZeroGPU: use at most the first 60 frames
        # (the original hard-coded range(40*60, 41*60) assumed a long video).
        input_video = [up_video[i] for i in range(min(60, len(up_video)))]

        # The Diffutoon branch must use the SD video pipeline, not the SVD one.
        video = pipe2(
            prompt=prompt,
            negative_prompt="verybadimagenegative_v1.3",
            cfg_scale=3, 
            clip_skip=2,
            controlnet_frames=input_video, num_frames=len(input_video),
            num_inference_steps=num_inference_steps, 
            height=1024, 
            width=1024,
            animatediff_batch_size=animatediff_batch_size, 
            animatediff_stride=animatediff_stride,
            vram_limit_level=0,
        )
        video = smoother(video)

    
    save_video(video, video_path, fps=fps_id)
    
    return video_path, seed


# Example media files (for reference; not currently wired to a gr.Examples block).
examples = [
        "./train.jpg",
        "./girl.webp",
        "./robo.jpg",
        './working.mp4',
    ]



# Gradio Interface

with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
    gr.HTML("<h1><center>Exvideo📽️Diffutoon</center></h1>")
    gr.HTML("<p><center>Exvideo and Diffutoon video generation<br><b>Update</b>: first version<br><b>Note</b>: ZeroGPU limited, Set the parameters appropriately.</center></p>")
    with gr.Row():
        video_in = gr.Video(label='Upload Video', height=600, scale=2)
        image_in = gr.Image(label='Upload Image', height=600, scale=2, image_mode="RGB", type="filepath", visible=False)
        media = video_in  # `media` aliases whichever input widget is active (video by default)
        video = gr.Video(label="Generated Video", height=600, scale=2)
        with gr.Column(scale=1):
            selected = gr.Radio(
                label="Selected App",
                    choices=["ExVideo", "Diffutoon"],
                    value="Diffutoon"
            )
            seed = gr.Slider(
                label="Seed (-1 Random)",
                minimum=-1,
                maximum=MAX_SEED,
                step=1,
                value=-1,
                )
            num_inference_steps = gr.Slider(
                label="Inference steps", 
                info="Inference steps",
                step=1,
                value=10, 
                minimum=1, 
                maximum=50
                )
            with gr.Accordion("Diffutoon Options", open=False):
                animatediff_batch_size = gr.Slider(
                    label="Animatediff batch size",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=32,
                )
                animatediff_stride = gr.Slider(
                    label="Animatediff stride",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=16,
                ) 
            with gr.Accordion("ExVideo Options", open=False):
                motion_bucket_id = gr.Slider(
                    label="Motion bucket id", 
                    info="Controls how much motion to add/remove from the image", 
                    value=127, 
                    step=1,
                    minimum=1, 
                    maximum=255
                )
                fps_id = gr.Slider(
                    label="Frames per second", 
                    info="The length of your video in seconds will be 25/fps", 
                    value=6,
                    step=1,
                    minimum=5, 
                    maximum=30
                )
                num_frames = gr.Slider(
                    label="Frames num", 
                    info="Frames num",
                    step=1,
                    value=50, 
                    minimum=1, 
                    maximum=128
                )
    prompt = gr.Textbox(label="Prompt")
    with gr.Row():
        submit_btn = gr.Button(value="Generate")
        #stop_btn = gr.Button(value="Stop", variant="stop")
        clear_btn = gr.ClearButton([media, seed, video])

    selected.change(change_media, inputs=[image_in, video_in, selected], outputs=[image_in, video_in, media])
    submit_event = submit_btn.click(fn=generate, inputs=[media, selected, seed, num_inference_steps, animatediff_batch_size, animatediff_stride, motion_bucket_id, fps_id, num_frames, prompt], outputs=[video, seed], api_name="video")
    #stop_btn.click(fn=None, inputs=None, outputs=None, cancels=[submit_event])
    
demo.queue().launch()