bugfix
app.py CHANGED
@@ -3,18 +3,19 @@ import torch
 import re
 import gradio as gr
 import random
+import time
 from diffusers import AutoPipelineForText2Image
 from diffusers import AutoPipelineForImage2Image
 from diffusers.utils import load_image, export_to_video
 from diffusers import StableVideoDiffusionPipeline
 
 
-
-
-
-pipeline_text2image = pipeline_text2image.to("cuda")
+
+
+
 
 def img2video(image,seed="",fps=7,outfile=""):
+    pipelineVideo = StableVideoDiffusionPipeline.from_pretrained("stabilityai/stable-video-diffusion-img2vid-xt",).to("cuda")
     if seed=="":
         seed=random.randint(0, 5000)
 
@@ -30,13 +31,17 @@ def img2video(image,seed="",fps=7,outfile=""):
     generator = torch.manual_seed(seed)
     frames = pipelineVideo(image, decode_chunk_size=8, generator=generator).frames[0]
     export_to_video(frames, outfile, fps=fps)
+    time.time(30)
     return outfile
 
 def text2img(prompt = "A cinematic shot of a baby racoon wearing an intricate italian priest robe.",guidance_scale=0.0, num_inference_steps=1):
+    pipeline_text2image = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo").to("cuda")
     image = pipeline_text2image(prompt=prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0]
     return image
 
 def img2img(image,prompt="A cinematic shot of a baby racoon wearing an intricate italian priest robe.", guidance_scale=0.0, num_inference_steps=1,strength=0.5):
+    pipeline_text2image = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo").to("cuda")
+    pipeline_image2image = AutoPipelineForImage2Image.from_pipe(pipeline_text2image).to("cuda")
     init_image = load_image(image)
     init_image = init_image.resize((512, 512))
     image = pipeline_image2image(prompt, image=init_image, strength=strength, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0]
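
The pattern this commit introduces, building each diffusers pipeline inside the function that uses it rather than once at import time, keeps startup memory low at the cost of reconstructing the pipeline on every call. Below is a minimal sketch of the same idea with an added in-process cache so the SDXL-Turbo pipeline is only built on the first call; the cache dict, the get_text2image_pipeline helper, and torch_dtype=torch.float16 are illustrative assumptions, not part of this commit.

import torch
from diffusers import AutoPipelineForText2Image

_PIPELINES = {}  # simple in-process cache keyed by model id (assumption, not in the commit)

def get_text2image_pipeline(model_id="stabilityai/sdxl-turbo"):
    # Build the pipeline on first use, then reuse the cached instance on later calls.
    if model_id not in _PIPELINES:
        pipe = AutoPipelineForText2Image.from_pretrained(model_id, torch_dtype=torch.float16)
        _PIPELINES[model_id] = pipe.to("cuda")
    return _PIPELINES[model_id]

def text2img(prompt="A cinematic shot of a baby racoon wearing an intricate italian priest robe.",
             guidance_scale=0.0, num_inference_steps=1):
    pipeline_text2image = get_text2image_pipeline()
    image = pipeline_text2image(prompt=prompt,
                                guidance_scale=guidance_scale,
                                num_inference_steps=num_inference_steps).images[0]
    return image

Compared with the function bodies in the diff, this avoids re-initializing the weights on every invocation while keeping the lazy, call-time loading that the commit appears to be after.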