Update app.py
app.py CHANGED
@@ -16,19 +16,15 @@ import random
 from huggingface_hub import login, hf_hub_download
 import spaces
 
-#gradio.helpers.CACHED_FOLDER = '/data/cache'
-
-# SECRET_TOKEN = os.getenv('SECRET_TOKEN', 'default_secret')
-
-# HF_API_KEY = os.getenv('HF_API_KEY', '')
-# login(token=HF_API_KEY)
-
 pipe = StableVideoDiffusionPipeline.from_pretrained(
     # "stabilityai/stable-video-diffusion-img2vid-xt-1-1",
     "vdo/stable-video-diffusion-img2vid-xt-1-1",
     torch_dtype=torch.float16,
     variant="fp16"
 )
+
+pipe.save_pretrained("model", variant="fp16")
+
 pipe.to("cuda")
 pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
 #pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)
@@ -37,7 +33,6 @@ max_64_bit_int = 2**63 - 1
 
 @spaces.GPU(enable_queue=True)
 def generate_video(
-    secret_token: str,
     image: Image,
     seed: int,
     motion_bucket_id: int = 127,
@@ -48,11 +43,6 @@ def generate_video(
     device: str = "cuda",
     output_folder: str = "outputs",
 ):
-    # if secret_token != SECRET_TOKEN:
-    #     raise gr.Error(
-    #         f'Invalid secret token. Please fork the original space if you want to use it for yourself.')
-
-
     # note julian: normally we should resize input images, but normally they are already in 1024x576, so..
 
     # also, I would like to experiment with vertical videos, and 1024x512 videos
@@ -116,11 +106,6 @@ def resize_image(image, output_size=(1024, 576)):
     return cropped_image
 
 with gr.Blocks() as demo:
-    # secret_token = gr.Text(
-    #     label='Secret Token',
-    #     max_lines=1,
-    #     placeholder='Enter your secret token')
-
     image = gr.Image(label="Upload your image", type="pil")
     generate_btn = gr.Button("Generate")
     base64_out = gr.Textbox(label="Base64 Video")
@@ -130,7 +115,6 @@ with gr.Blocks() as demo:
 
     generate_btn.click(
         fn=generate_video,
-        # inputs=[secret_token, image, seed, motion_bucket_id, fps_id],
         inputs=[image, seed, motion_bucket_id, fps_id],
         outputs=base64_out,
         api_name="run"
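
The new pipe.save_pretrained("model", variant="fp16") call writes the downloaded weights into a local model/ folder, presumably so that later runs can load from disk instead of pulling from the Hub again. A minimal sketch of such a reload, assuming model/ holds the complete fp16 pipeline saved by the line above (the path and dtype settings come from the diff, the rest is an assumption):

# Hedged sketch: reload the pipeline from the folder written by save_pretrained().
# Assumes "model/" contains the full fp16 pipeline saved in the diff above.
import torch
from diffusers import StableVideoDiffusionPipeline

pipe = StableVideoDiffusionPipeline.from_pretrained(
    "model",                    # local folder created by pipe.save_pretrained("model", ...)
    torch_dtype=torch.float16,
    variant="fp16",
)
pipe.to("cuda")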
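
With the secret_token parameter gone and the endpoint still published under api_name="run", the Space can presumably be driven straight from the Gradio API. A hedged sketch of a client-side call, assuming a recent gradio_client: the Space id, input image, seed and fps_id are placeholders, and only motion_bucket_id=127 is a default taken from the diff. The final lines decode the returned "Base64 Video" string to an .mp4, assuming it is plain base64 (a possible data-URI prefix is stripped just in case):

# Hypothetical client-side call; "your-username/your-space" is a placeholder Space id.
import base64
from gradio_client import Client, handle_file

client = Client("your-username/your-space")
b64_video = client.predict(
    handle_file("input.png"),   # image; the in-code comment suggests ~1024x576 works best
    42,                         # seed (placeholder)
    127,                        # motion_bucket_id (default shown in the diff)
    6,                          # fps_id (placeholder; its default is not shown in the diff)
    api_name="/run",
)

# Decode the base64 string returned in the "Base64 Video" textbox into a file.
if "," in b64_video:            # strip a data-URI prefix if the app adds one
    b64_video = b64_video.split(",", 1)[1]
with open("generated.mp4", "wb") as f:
    f.write(base64.b64decode(b64_video))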