import os
import sys

import gradio as gr

from videocrafter_test import Text2Video

# Allow modules under ./lvdm (relative to this script) to be imported directly.
sys.path.insert(1, os.path.join(sys.path[0], 'lvdm'))

def videocrafter_demo(result_dir='./tmp/'):
    # Build a Gradio Blocks UI around a single Text2Video sampler instance.
    text2video = Text2Video(result_dir)
    with gr.Blocks(analytics_enabled=False) as videocrafter_iface:
        gr.Markdown("<div align='center'> <h2> VideoCrafter: A Toolkit for Text-to-Video Generation and Editing </h2> \
                     <a style='font-size:18px;color: #efefef' href='https://github.com/VideoCrafter/VideoCrafter'> Github </a> </div>")
        with gr.Row().style(equal_height=False):
            with gr.Tab(label="VideoCrafter"):
                input_text = gr.Text(label='Prompts')
                # Base model ('origin') plus the selectable style variants; the
                # dropdown reports the chosen index (type="index") to the sampler.
                model_choices = ['origin', 'vangogh', 'frozen', 'yourname', 'coco']

                with gr.Row():
                    model_index = gr.Dropdown(label='Models', elem_id="model", choices=model_choices, value=model_choices[0], type="index", interactive=True)
                    
                with gr.Row():
                    # Number of diffusion sampling steps and sampler eta.
                    steps = gr.Slider(minimum=1, maximum=200, step=1, elem_id="steps", label="Sampling steps", value=50)
                    eta = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, label='ETA', value=1.0, elem_id="eta")

                with gr.Row():
                    # LoRA blending weight and classifier-free guidance (CFG) scale.
                    lora_scale = gr.Slider(minimum=0.0, maximum=2.0, step=0.1, label='Lora Scale', value=1.0, elem_id="lora_scale")
                    cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=15.0, elem_id="cfg_scale")

                send_btn = gr.Button("Send")

            with gr.Column():
                # The generated clip is shown in a playable video widget.
                output_video_1 = gr.PlayableVideo()

        with gr.Row():
            # Each example row follows the input order of the handlers below:
            # [prompt, sampling steps, model, eta, cfg_scale, lora_scale].
            examples = [
                [
                    'an elephant is walking under the sea, 4K, high definition',
                    50,
                    'origin',
                    1,
                    15,
                    1,
                ],
                [
                    'an astronaut riding a horse in outer space',
                    25,
                    'origin',
                    1,
                    15,
                    1,
                ],
                [
                    'a monkey is playing a piano',
                    25,
                    'vangogh',
                    1,
                    15,
                    1,
                ],
                [
                    'A fire is burning on a candle',
                    25,
                    'frozen',
                    1,
                    15,
                    1,
                ],
                [
                    'a horse is drinking in the river',
                    25,
                    'yourname',
                    1,
                    15,
                    1,
                ],
                [
                    'Robot dancing in times square',
                    25,
                    'coco',
                    1,
                    15,
                    1,
                ],
            ]
            gr.Examples(examples=examples,
                        inputs=[
                            input_text,
                            steps,
                            model_index,
                            eta,
                            cfg_scale,
                            lora_scale,
                        ],
                        outputs=[output_video_1],
                        fn=text2video.get_prompt,
                        cache_examples=False)
                        # cache_examples=os.getenv('SYSTEM') == 'spaces')

            # The Send button calls the same sampler; the input order matches
            # the Examples list above.
            send_btn.click(
                fn=text2video.get_prompt,
                inputs=[
                    input_text,
                    steps,
                    model_index,
                    eta,
                    cfg_scale,
                    lora_scale,
                ],
                outputs=[output_video_1],
            )
    return videocrafter_iface

if __name__ == "__main__":
    # Store results under ./results when run as a script.
    result_dir = os.path.join('./', 'results')
    videocrafter_iface = videocrafter_demo(result_dir)
    videocrafter_iface.launch()
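
# A minimal sketch (not part of the original script) of how the same interface
# could be exposed for remote access. It assumes only the standard Gradio Blocks
# API (queue() and launch() keyword arguments); adjust for your Gradio version:
#
#   iface = videocrafter_demo('./results')
#   iface.queue()                                    # serialize GPU-bound jobs
#   iface.launch(server_name="0.0.0.0", share=True)  # LAN access + temporary public link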