File size: 1,480 Bytes
f7235bd
 
62d9bf1
 
 
f7235bd
62d9bf1
f7235bd
 
 
62d9bf1
 
 
 
 
 
 
 
f7235bd
 
 
 
 
62d9bf1
 
 
 
 
 
 
f7235bd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
import gradio as gr
from huggingface_hub import InferenceClient
import base64
from io import BytesIO
from PIL import Image

# Candidate text-to-image models shown in the UI dropdown.
# Each entry is a Hugging Face Hub repo id, passed verbatim to
# InferenceClient in generate_image.
# NOTE(review): availability of these repos on the serverless Inference API
# varies over time — entries may need pruning; verify before deploying.
models = [
    "CompVis/stable-diffusion-v1-4",
    "runwayml/stable-diffusion-v1-5",
    "stabilityai/stable-diffusion-2-1-base",
    "stabilityai/stable-diffusion-2-1",
    "CompVis/ldm-text2im-large-256",
    "lambdalabs/sd-text2img-base-2-0",
    "ZB-Tech/Text-to-Image",
    "cloudqi/cqi_text_to_image_pt_v0",
    "kothariyashhh/GenAi-Texttoimage",
    "sairajg/Text_To_Image"
]

def generate_image(prompt, model_name):
    """Generate an image from a text prompt via the HF Inference API.

    Parameters
    ----------
    prompt : str
        Text description of the desired image.
    model_name : str
        Hugging Face Hub repo id handed to ``InferenceClient``.

    Returns
    -------
    PIL.Image.Image
        The generated image, ready for display in a ``gr.Image`` component.

    Raises
    ------
    gr.Error
        If the API response is not in any recognized format. Raising
        (instead of returning a plain string, as the original code did)
        lets Gradio show the failure in the UI; a string fed to a
        ``gr.Image`` output would be misinterpreted as a file path.
    """
    client = InferenceClient(model_name)
    response = client.text_to_image(prompt)

    # Current huggingface_hub returns a PIL image directly — the common path.
    if isinstance(response, Image.Image):
        return response

    # Legacy payload shape: a list of dicts carrying base64-encoded bytes.
    if isinstance(response, list) and response:
        image_data = response[0]['image']
        return Image.open(BytesIO(base64.b64decode(image_data)))

    raise gr.Error("Failed to generate image.")

# Assemble the Gradio UI: model picker and prompt on the left, the
# generated image on the right.
with gr.Blocks() as demo:
    gr.Markdown("## Text-to-Image Generation with Hugging Face Models")
    with gr.Row():
        with gr.Column():
            selected_model = gr.Dropdown(models, label="Select Model")
            user_prompt = gr.Textbox(label="Enter Text Prompt")
            run_button = gr.Button("Generate Image")
        with gr.Column():
            result_image = gr.Image(label="Generated Image")

    # Wire the button; input order (prompt, then model) must match
    # generate_image's parameter order.
    run_button.click(
        generate_image,
        inputs=[user_prompt, selected_model],
        outputs=result_image,
    )

# Start the Gradio server.
demo.launch()