File size: 1,336 Bytes
c2a8649
216062e
6031dc7
78f6a44
 
216062e
f698509
216062e
78f6a44
f698509
 
c2a8649
216062e
 
 
9af81fd
216062e
6031dc7
216062e
 
f698509
 
 
 
 
216062e
 
c2a8649
216062e
6031dc7
216062e
7416a17
f698509
216062e
 
 
 
 
9d52d71
c2a8649
216062e
6031dc7
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
import gradio as gr
from diffusers import DiffusionPipeline



# Cache of already-loaded pipelines keyed by (base model, LoRA weights) so a
# repeated generation with the same models does not re-download and rebuild
# the whole pipeline on every button click (the original reloaded each call).
_PIPELINE_CACHE = {}


# Function to generate image based on input text
def generate_image(basem, model, prompt):
    """Generate an image from *prompt* using a base model plus LoRA weights.

    Parameters
    ----------
    basem : str
        Hugging Face repo id (or local path) of the base diffusion model.
    model : str
        Repo id (or local path) of the LoRA weights loaded on top of *basem*.
    prompt : str
        Text prompt describing the image to generate.

    Returns
    -------
    The first image produced by the pipeline (PIL.Image.Image per diffusers'
    pipeline output convention).
    """
    key = (basem, model)
    pipeline = _PIPELINE_CACHE.get(key)
    if pipeline is None:
        # First request for this model combination: load the base pipeline,
        # apply the LoRA weights, and keep it for subsequent generations.
        pipeline = DiffusionPipeline.from_pretrained(basem)
        pipeline.load_lora_weights(model)
        _PIPELINE_CACHE[key] = pipeline

    # Generate the image using the text prompt
    image = pipeline(prompt).images[0]
    return image

# Assemble the Gradio Blocks app. Widget creation order defines the page
# layout, so it matches the original top-to-bottom arrangement exactly.
with gr.Blocks() as demo:
    # Page heading
    gr.Markdown("# Text-to-Image Generation WebUI")

    # Model selection: base checkpoint and LoRA weights, side by side
    with gr.Row():
        base_model_box = gr.Textbox(
            label="Enter your base model here",
            placeholder="John6666/mala-anime-mix-nsfw-pony-xl-v3-sdxl",
        )
        lora_model_box = gr.Textbox(
            label="Enter your main model here",
            placeholder="nevreal/vMurderDrones",
        )

    # Text prompt input
    with gr.Row():
        prompt_box = gr.Textbox(
            label="Enter your prompt here",
            placeholder="Type your text prompt...",
        )

    # Result display area
    with gr.Column(scale=4):
        result_image = gr.Image(label="Generated Image")

    # Clicking the button feeds the three textboxes to generate_image and
    # routes its return value into the image component.
    run_button = gr.Button("Generate Image")
    run_button.click(
        fn=generate_image,
        inputs=[base_model_box, lora_model_box, prompt_box],
        outputs=result_image,
    )

# Launch the interface
demo.launch()