import gradio as gr
import torch
from diffusers import DiffusionPipeline

print("Starting to load the model to memory")
# Load the diffusion model and image processor
model = DiffusionModel.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
processor = DiffusionImageProcessor.from_model(model)
tokenizer = AutoTokenizer.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")

print("Successfully loaded the model to memory")

def generate_image(text):
    # Generate an image from the given text prompt; the pipeline handles
    # tokenization, the denoising loop, and VAE decoding internally
    result = pipe(prompt=text)

    # Return the generated PIL image
    return result.images[0]

# Define a function to handle user input and generate images
def image_generator(text):
    generated_image = generate_image(text)
    return generated_image

# Create a Gradio interface for the image generation
interface = gr.Interface(
    fn=image_generator,
    inputs="text",
    outputs="image",
    title="Image Generation from Text",
    description="Enter a text prompt to generate an image.",
    examples=["a cat sitting on a couch"]
)

# Launch the interface; share=True also creates a temporary public link
interface.launch(share=True)