import gradio as gr
from share_btn import community_icon_html, loading_icon_html, share_js
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video

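# Load the Zeroscope v2 576w text-to-video pipeline in half precision, swap in the
# multistep DPM-Solver scheduler, and offload model components to the CPU to reduce VRAM use.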
pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

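# Generate 24 frames at 576x320 over 40 denoising steps with a fixed negative prompt,
# write them to a video file via export_to_video, and return the file path together with
# an update that makes the hidden share group visible.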
def infer(prompt):
    negative_prompt = "text, watermark, copyright, blurry, nsfw"
    video_frames = pipe(prompt, negative_prompt=negative_prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames
    video_path = export_to_video(video_frames)
    print(video_path)
    return video_path, gr.Group.update(visible=True)

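# Page styling: centered column layout, share-button appearance, centered badge image, and footer.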
css = """
#col-container {max-width: 510px; margin-left: auto; margin-right: auto;}
a {text-decoration-line: underline; font-weight: 600;}
.animate-spin {
  animation: spin 1s linear infinite;
}

@keyframes spin {
  from {
      transform: rotate(0deg);
  }
  to {
      transform: rotate(360deg);
  }
}

#share-btn-container {
  display: flex; 
  padding-left: 0.5rem !important; 
  padding-right: 0.5rem !important; 
  background-color: #000000; 
  justify-content: center; 
  align-items: center; 
  border-radius: 9999px !important; 
  max-width: 13rem;
}

#share-btn-container:hover {
  background-color: #060606;
}

#share-btn {
  all: initial; 
  color: #ffffff;
  font-weight: 600; 
  cursor:pointer; 
  font-family: 'IBM Plex Sans', sans-serif; 
  margin-left: 0.5rem !important; 
  padding-top: 0.5rem !important; 
  padding-bottom: 0.5rem !important;
  right:0;
}

#share-btn * {
  all: unset;
}

#share-btn-container div:nth-child(-n+2){
  width: auto !important;
  min-height: 0px !important;
}

#share-btn-container .wrap {
  display: none !important;
}

#share-btn-container.hidden {
  display: none!important;
}
img[src*='#center'] { 
    display: block;
    margin: auto;
}

.footer {
        margin-bottom: 45px;
        margin-top: 10px;
        text-align: center;
        border-bottom: 1px solid #e5e5e5;
    }
    .footer>p {
        font-size: .8rem;
        display: inline-block;
        padding: 0 10px;
        transform: translateY(10px);
        background: white;
    }
    .dark .footer {
        border-color: #303030;
    }
    .dark .footer>p {
        background: #0b0f19;
    }
"""

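# UI layout: a centered column with the prompt box, submit button, video output, and a
# share group that stays hidden until a video has been generated.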
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(
            """
            <h1 style="text-align: center;">HedgehogAI</h1>
            <p style="text-align: center;">
            This is a demo version of HedgehogAI, a neural network for fast text-to-video generation, developed by the CofAI team!<br />
            </p>
            
            [![Duplicate this Space](https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-sm.svg#center)](https://huggingface.co/spaces/CofAI/hedgehog?duplicate=true)
            
            """
        )

        prompt_in = gr.Textbox(label="Prompt", placeholder="Darth Vader is surfing on waves", elem_id="prompt-in")
        #neg_prompt = gr.Textbox(label="Negative prompt", value="text, watermark, copyright, blurry, nsfw", elem_id="neg-prompt-in")
        #inference_steps = gr.Slider(label="Inference Steps", minimum=10, maximum=100, step=1, value=40, interactive=False)
        submit_btn = gr.Button("Submit")
        video_result = gr.Video(label="Video Output", elem_id="video-output")

        with gr.Group(elem_id="share-btn-container", visible=False) as share_group:
            community_icon = gr.HTML(community_icon_html)
            loading_icon = gr.HTML(loading_icon_html)
            share_button = gr.Button("Share to community", elem_id="share-btn")

        gr.HTML("""
            <div class="footer">
                <p>
                <a href="https://huggingface.co/cerspense/zeroscope_v2_576w" target="_blank">🦔 HedgehogAI Text2Video V1.2 model</a> by ☕ CofAI.
                Demo by <a href="https://hf.co/OptorAI" target="_blank">🐻‍❄️ OptorAI</a>
                </p>
            </div>
            <div id="may-like-container" style="display: flex;justify-content: center;flex-direction: column;align-items: center;">
                <p style="font-size: 0.8em;margin-bottom: 4px;">Powered by: </p>
                <div id="may-like" style="display:flex; align-items:center; justify-content: center;height:20px;">
                    <svg height="20" width="148" style="margin-left:4px">       
                        <a href="https://huggingface.co/spaces/fffiloni/zeroscope-XL" target="_blank">
                            <image href="https://img.shields.io/badge/🤗 Spaces-Zeroscope XL-blue" height="20"/>
                        </a>
                    </svg>
                </div>
            </div>
        """)
        
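    # Run inference on submit; the second output reveals the hidden share group.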
    submit_btn.click(fn=infer,
                    inputs=[prompt_in],
                    outputs=[video_result, share_group])
    
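    # Note: _js is the Gradio 3.x keyword for attaching client-side JavaScript (newer Gradio versions use js=).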
    share_button.click(None, [], [], _js=share_js)

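# Queue incoming requests (up to 12 waiting) and launch the demo.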
demo.queue(max_size=12).launch()