import gradio as gr
from diffusers import DiffusionPipeline
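# Note: besides gradio and diffusers, this app also needs torch installed, and
# loading LoRA weights typically requires peft as well; the exact requirements
# depend on the diffusers version in use.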

# Initialize the pipeline variable globally
pipeline = None

# Load the pipeline and LoRA weights
def load_cust(base_model, models_sec):
    global pipeline
    pipeline = DiffusionPipeline.from_pretrained(base_model)
    pipeline.load_lora_weights(models_sec)
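    # Optionally move the pipeline to the GPU for much faster generation
    # (a sketch, assuming a CUDA-capable device is available):
    # import torch
    # if torch.cuda.is_available():
    #     pipeline.to("cuda")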

def generate_image(prompt, negative_prompt):
    global pipeline
    # Make sure the models were loaded before trying to generate;
    # raising gr.Error shows the message in the UI instead of sending a
    # plain string to the Image output, which would fail.
    if pipeline is None:
        raise gr.Error("Pipeline not loaded. Please load the models first.")
    # Generate the image with the provided prompts
    image = pipeline(prompt, negative_prompt=negative_prompt).images[0]
    return image

# Define the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Text-to-Image Generation with Custom Models Demo")
    prompt = gr.Textbox(label="Prompt", placeholder="Enter your text prompt here")
    negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="Enter your negative prompt here")
    submit_button = gr.Button("Generate Image")
    with gr.Accordion("Load your custom models first"):
        basem = gr.Textbox(label="Your base model")
        secondm = gr.Textbox(label="Your LoRA model")
        exports = gr.Button("Load your models")
        exports.click(load_cust, inputs=[basem, secondm], outputs=[])
    output_image = gr.Image(label="Generated Image")
    submit_button.click(generate_image, inputs=[prompt, negative_prompt], outputs=output_image)

# Launch the demo
demo.launch()