# NOTE: Hugging Face Spaces page residue (status lines, commit hashes, and a
# line-number gutter) was removed here so this file parses as Python.
import gradio as gr
import torch
from diffusers import DiffusionPipeline
from PIL import Image
import numpy as np
from torchvision import transforms
# Prefer the GPU when one is visible to torch; otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Initialize the SDXL inpainting pipeline. Default to None so the rest of the
# app can detect a failed load instead of crashing at import time.
inpaint_model = None
try:
    inpaint_model = DiffusionPipeline.from_pretrained(
        "diffusers/stable-diffusion-xl-1.0-inpainting-0.1"
    ).to(device)
except Exception as e:
    print(f"Error initializing model: {e}")
def load_test_image(path="/absolute/path/to/your/test_image.png"):
    """Load a fallback test image from disk.

    Generalized from a hard-coded absolute path: callers may now pass any
    filesystem path; the old placeholder remains the default so existing
    call sites keep their behavior.

    Args:
        path: Filesystem path of the image to open.

    Returns:
        A ``PIL.Image.Image`` on success, or ``None`` if the file cannot
        be opened (the error is printed, not raised, so the UI keeps running).
    """
    try:
        return Image.open(path)
    except Exception as e:
        print(f"Error loading test image: {e}")
        return None
def process_image(prompt, image, style, upscale_factor, inpaint):
    """Normalize the incoming image to PIL and echo it back (placeholder).

    Falls back to the bundled test image when no image is supplied. The
    actual generation/inpainting logic is not implemented yet; the function
    currently returns the normalized input unchanged.

    Returns:
        ``(image, None)`` on success, or ``(None, error_message)`` when the
        input is missing, of an unsupported type, or conversion fails.
    """
    # No upload: try the on-disk fallback before giving up.
    if image is None:
        image = load_test_image()
    if image is None:
        return None, "No image received and failed to load test image."
    try:
        # Coerce the supported array-like inputs into a PIL image. The
        # branches are disjoint types, so order does not affect behavior.
        if isinstance(image, torch.Tensor):
            image = transforms.ToPILImage()(image)
        elif isinstance(image, np.ndarray):
            image = Image.fromarray(image)
        elif not isinstance(image, Image.Image):
            return None, f"Unsupported image format: {type(image)}."
        print(f"Received image: {image.size}")
        # Placeholder for processing logic
        return image, None
    except Exception as e:
        msg = f"Error in process_image function: {e}"
        print(msg)
        return None, msg
# --- Gradio UI -------------------------------------------------------------
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            # Inputs
            prompt_box = gr.Textbox(label="Enter your prompt")
            source_image = gr.Image(label="Image (for inpainting)", type="pil")
            style_choice = gr.Dropdown(
                choices=["Fooocus Style", "SAI Anime"], label="Select Style"
            )
            upscale_slider = gr.Slider(
                minimum=1, maximum=4, step=1, label="Upscale Factor"
            )
            inpaint_toggle = gr.Checkbox(label="Enable Inpainting")
            # Outputs
            result_image = gr.Image(label="Generated Image", type="pil")
            error_box = gr.Textbox(
                label="Error Details",
                lines=4,
                placeholder="Error details will appear here",
            )
            run_button = gr.Button("Generate Image")

    # Route the button press through the processing callback.
    run_button.click(
        fn=process_image,
        inputs=[prompt_box, source_image, style_choice, upscale_slider, inpaint_toggle],
        outputs=[result_image, error_box],
    )

demo.launch()