import gradio
import torch
from PIL import Image
from diffusers import StableDiffusionInpaintPipeline
# Load the fp16 inpainting weights; the safety checker is replaced with a
# pass-through so generated images are returned unfiltered. Downstream code
# iterates the NSFW flags per image, so the stub must return a list.
pipeline = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    revision="fp16",
    torch_dtype=torch.float16,
    safety_checker=lambda images, **kwargs: (images, [False] * len(images)))
pipeline.to("cuda")  # assumes a CUDA-capable GPU is available
#generator = torch.Generator(device).manual_seed(seed)
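# A sketch (not in the original) of how the commented-out generator above
# could be wired in for reproducible outputs: seed it once and pass it to
# the pipeline call, e.g.
#   generator = torch.Generator("cuda").manual_seed(42)
#   pipeline(..., generator=generator)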
def diffuse(prompt, negativePrompt, inputImage, mask, guidanceScale, numInferenceSteps):
    # White areas of the mask are repainted by the model; black areas are kept.
    return pipeline(prompt=prompt,
                    negative_prompt=negativePrompt,
                    image=inputImage,
                    mask_image=mask,
                    guidance_scale=guidanceScale,
                    num_inference_steps=numInferenceSteps).images[0]
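# Hypothetical helper (not part of the original file) sketching how the
# "Extrapolated Field of View" inputs could be built: the source image is
# centered on a larger canvas and only the new border is marked white, so
# the pipeline paints just the added margin.
def makeOutpaintingInputs(image, margin=64):
    w, h = image.size
    canvas = Image.new("RGB", (w + 2 * margin, h + 2 * margin))
    canvas.paste(image, (margin, margin))
    mask = Image.new("L", canvas.size, 255)                  # white: repaint
    mask.paste(Image.new("L", (w, h), 0), (margin, margin))  # black: keep
    return canvas, mask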
prompt = gradio.Textbox(label="Prompt", placeholder="A person in a room", lines=3)
negativePrompt = gradio.Textbox(label="Negative Prompt", placeholder="Things to keep out of the image", lines=3)
inputImage = gradio.Image(label="Input Image", type="pil")
#inputFeed = gradio.Image(label="Input Feed", source="webcam", streaming=True)
mask = gradio.Image(label="Mask", type="pil")
outputImage = gradio.Image(label="Extrapolated Field of View")
# Classifier-free guidance for Stable Diffusion is usually run in the 1-20
# range, with 7.5 as the common default.
guidanceScale = gradio.Slider(label="Guidance Scale", minimum=1, maximum=20, value=7.5)
# At least one denoising step is required; 50 is the pipeline's usual default.
numInferenceSteps = gradio.Slider(label="Number of Inference Steps", minimum=1, maximum=100, value=50, step=1)
ux = gradio.Interface(fn=diffuse,
                      title="View Diffusion",
                      inputs=[prompt, negativePrompt, inputImage, mask, guidanceScale, numInferenceSteps],
                      outputs=outputImage,
                      live=True)  # live=True re-runs diffuse on every input change
ux.launch()
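# On a GPU Space launching with the defaults is enough; outside Spaces,
# ux.launch(share=True) would serve the demo through a public Gradio link.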