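# app.py: a small Gradio demo that turns a text prompt into a chatbot icon
# using a Stable Diffusion img2img pipeline seeded with a random noise image.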
import gradio as gr
import numpy as np
import torch
from diffusers import StableDiffusionImg2ImgPipeline
from PIL import Image

# Load a lightweight img2img pipeline that works well on CPU
def load_image_generator():
    try:
        # "stabilityai/stable-diffusion-2-1" is a plain Stable Diffusion
        # checkpoint, so it is loaded with StableDiffusionImg2ImgPipeline
        # rather than the SDXL pipeline class. float32 is used because
        # half precision is not supported for inference on CPU.
        model = StableDiffusionImg2ImgPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1",
            torch_dtype=torch.float32,
            use_safetensors=True
        )
        # Ensure it runs on CPU
        model = model.to("cpu")
        return model
    except Exception as e:
        print(f"Error loading model: {e}")
        return None

# Generate chatbot icon
def generate_chatbot_icon(
    prompt,
    negative_prompt="low quality, bad composition, blurry",
    num_inference_steps=20,
    guidance_scale=7.5,
    strength=0.75
):
    # Load the model (reloaded on every call; loading it once at module
    # level would be faster for repeated generations)
    model = load_image_generator()
    if model is None:
        return None

    # Default init image: random RGB noise as a PIL image, since the
    # img2img pipeline needs an input image even when the prompt does
    # all the work
    default_init_image = Image.fromarray(
        (np.random.rand(512, 512, 3) * 255).astype("uint8")
    )

    try:
        # Generate the image
        image = model(
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            strength=strength,
            image=default_init_image
        ).images[0]
        return image
    except Exception as e:
        print(f"Error generating image: {e}")
        return None

# Create Gradio interface
def create_gradio_interface():
    with gr.Blocks() as demo:
        gr.Markdown("# 🤖 Chatbot Icon Generator")

        with gr.Row():
            with gr.Column():
                # Prompt input
                prompt = gr.Textbox(
                    label="Icon Description",
                    value="Cute minimalist chatbot avatar, clean design, friendly expression, cartoon style"
                )
                # Generate button
                generate_btn = gr.Button("Generate Icon")

            with gr.Column():
                # Output image
                output_image = gr.Image(label="Generated Chatbot Icon")

        # Connect generate button to function
        generate_btn.click(
            fn=generate_chatbot_icon,
            inputs=[prompt],
            outputs=[output_image]
        )

    return demo

# Launch the app
if __name__ == "__main__":
    demo = create_gradio_interface()
    demo.launch()