|
from functools import lru_cache

import gradio as gr
import torch
from diffusers import StableDiffusionPipeline
|
|
|
|
|
def load_model(base_model_id, adapter_model_id=None):
    """Load a Stable Diffusion pipeline, optionally applying a LoRA adapter.

    Args:
        base_model_id: Hugging Face Hub ID of the base pipeline
            (e.g. "CompVis/stable-diffusion-v1-4").
        adapter_model_id: Optional Hub ID of LoRA attention weights to load
            into the pipeline's UNet; falsy values (None, "") skip this step.

    Returns:
        (pipe, info): the pipeline moved to the selected device, and a
        human-readable status string describing device and adapter.
    """
    if torch.cuda.is_available():
        device = "cuda"
        info = "Running on GPU (CUDA) 🔥"
    else:
        device = "cpu"
        info = "Running on CPU 🥶"

    # fp16 is only safe/supported on GPU; CPU inference needs fp32.
    dtype = torch.float16 if device == "cuda" else torch.float32

    pipe = StableDiffusionPipeline.from_pretrained(base_model_id, torch_dtype=dtype)
    pipe = pipe.to(device)

    if adapter_model_id:
        # Load only the LoRA attention weights into the UNet. The previous
        # version also downloaded the adapter as a full StableDiffusionPipeline
        # into an unused variable, wasting download time and memory.
        pipe.unet.load_attn_procs(adapter_model_id)
        info += f" with Adapter Model: {adapter_model_id}"

    return pipe, info
|
|
|
|
|
@lru_cache(maxsize=2)
def _load_model_cached(base_model_id, adapter_model_id):
    """Memoized wrapper around load_model.

    The previous version reloaded the full pipeline from disk/network on
    every generation; caching on the two model IDs makes repeated
    generations with the same models reuse the loaded pipeline.
    """
    return load_model(base_model_id, adapter_model_id)


def generate_image(base_model_id, adapter_model_id, prompt):
    """Generate one image from *prompt* with the given base model and
    optional LoRA adapter.

    Returns:
        (image, info): the first generated image and a status string
        (device + adapter) for display in the UI.
    """
    pipe, info = _load_model_cached(base_model_id, adapter_model_id)
    image = pipe(prompt).images[0]
    return image, info
|
|
|
|
|
# Detect the compute device once at startup so the UI header can show it.
device = "cuda" if torch.cuda.is_available() else "cpu"
info = "Running on GPU (CUDA) 🔥" if device == "cuda" else "Running on CPU 🥶"
|
|
|
|
|
# ---- Gradio UI ---------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("## Custom Text-to-Image Generator with Adapter Support")
    gr.Markdown(f"**{info}**")

    with gr.Row():
        # Left column: model/prompt inputs and the trigger button.
        with gr.Column():
            base_model_box = gr.Textbox(
                label="Enter Base Model ID (e.g., CompVis/stable-diffusion-v1-4)",
                placeholder="Base Model ID",
            )
            adapter_model_box = gr.Textbox(
                label="Enter Adapter Model ID (optional, e.g., nevreal/vMurderDrones-Lora)",
                placeholder="Adapter Model ID (optional)",
                value="",
            )
            prompt_box = gr.Textbox(
                label="Enter your prompt",
                placeholder="Describe the image you want to generate",
            )
            run_button = gr.Button("Generate Image")

        # Right column: generated image plus device/adapter status.
        with gr.Column():
            result_image = gr.Image(label="Generated Image")
            status_markdown = gr.Markdown()

    run_button.click(
        fn=generate_image,
        inputs=[base_model_box, adapter_model_box, prompt_box],
        outputs=[result_image, status_markdown],
    )

demo.launch()
|
|