# Hugging Face Spaces page header ("Spaces: Paused") captured by extraction —
# not part of the program.
import gradio
import torch
import PIL
from torchvision import transforms
from diffusers import StableDiffusionInpaintPipeline

# Load the Stable Diffusion inpainting pipeline (weights are downloaded on
# first run). The safety checker is deliberately disabled; diffusers expects
# the checker to return one NSFW flag PER image (a list), not a single bool —
# a scalar breaks the pipeline's per-image post-processing.
pipeline = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    # revision="fp16",
    # torch_dtype=torch.float16,
    safety_checker=lambda images, **kwargs: (images, [False] * len(images)),
)
# pipeline.to("cuda")
# generator = torch.Generator(device).manual_seed(seed)
def diffuse(prompt, negativePrompt, inputImage, mask, guidanceScale, numInferenceSteps):
    """Run one inpainting pass and return the first generated PIL image.

    Thin wrapper around the module-level ``pipeline``; the Gradio UI wires
    its widget values straight into these parameters.
    """
    result = pipeline(
        prompt=prompt,
        negative_prompt=negativePrompt,
        image=inputImage,
        mask_image=mask,
        guidance_scale=guidanceScale,
        num_inference_steps=numInferenceSteps,
    )
    return result.images[0]
# --- Gradio UI ---
prompt = gradio.Textbox(label="Prompt", placeholder="A person in a room", lines=3)
negativePrompt = gradio.Textbox(label="Negative Prompt", placeholder="Text", lines=3)
inputImage = gradio.Image(label="Input Image", type="pil")
# inputFeed = gradio.Image(label="Input Feed", source="webcam", streaming=True)
mask = gradio.Image(label="Mask", type="pil")
outputImage = gradio.Image(label="Extrapolated Field of View")
# Stable Diffusion guidance scale is conventionally 1-20 with ~7.5 as the
# usual default; the previous maximum of 1 made the slider useless.
guidanceScale = gradio.Slider(label="Guidance Scale", minimum=1, maximum=20, value=7.5)
# A default of 0 inference steps performs no denoising at all; start at 1
# and default to a practical step count.
numInferenceSteps = gradio.Slider(label="Number of Inference Steps", minimum=1, maximum=100, value=50, step=1)
ux = gradio.Interface(
    fn=diffuse,
    title="View Diffusion",
    inputs=[prompt, negativePrompt, inputImage, mask, guidanceScale, numInferenceSteps],
    outputs=outputImage,
    live=True,
)
ux.launch()