|
import gradio as gr |
|
from diffusers import DiffusionPipeline |
|
|
|
|
|
|
|
|
|
def generate_image(basem, model, prompt): |
|
|
|
|
|
pipeline = DiffusionPipeline.from_pretrained(basem) |
|
pipeline.load_lora_weights(model) |
|
|
|
|
|
image = pipeline(prompt).images[0] |
|
return image |
|
|
|
|
|
with gr.Blocks() as demo: |
|
|
|
gr.Markdown("# Text-to-Image Generation WebUI") |
|
|
|
with gr.Row(): |
|
base_model = gr.Textbox(label="Enter your base model here", placeholder="John6666/mala-anime-mix-nsfw-pony-xl-v3-sdxl") |
|
main_model = gr.Textbox(label="Enter your main model here", placeholder="nevreal/vMurderDrones") |
|
|
|
|
|
|
|
with gr.Row(): |
|
prompt = gr.Textbox(label="Enter your prompt here", placeholder="Type your text prompt...") |
|
|
|
|
|
with gr.Column(scale=4): |
|
output_image = gr.Image(label="Generated Image") |
|
|
|
|
|
generate_button = gr.Button("Generate Image") |
|
|
|
|
|
generate_button.click(fn=generate_image, inputs=[base_model, main_model, prompt], outputs=output_image) |
|
|
|
|
|
demo.launch() |
|
|