import gradio as gr
import torch
from torch import autocast
from diffusers import StableDiffusionPipeline
# Load the Stable Diffusion v1.4 pipeline from the Hugging Face Hub.
model_id = "CompVis/stable-diffusion-v1-4"
pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    use_auth_token='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',
    torch_dtype=torch.float32,
    low_cpu_mem_usage=True,
)

# Run on GPU when available, otherwise fall back to CPU.
has_cuda = torch.cuda.is_available()
device = torch.device('cuda' if has_cuda else 'cpu')
pipe = pipe.to(device)
def convert(prompt):
    samples = 4
    generator = torch.Generator(device=device)

    # Free cached GPU memory between requests; only meaningful on CUDA.
    if has_cuda:
        torch.cuda.empty_cache()

    # Mixed precision (autocast) is only valid on CUDA; on CPU just disable grads.
    context = autocast("cuda") if has_cuda else torch.no_grad()
    with context:
        output = pipe(
            [prompt] * samples,
            height=256, width=384,
            num_inference_steps=50,
            generator=generator,
        )

    # Newer diffusers releases return an output object with an `images` attribute;
    # older releases returned a dict keyed by "sample".
    images = output.images if hasattr(output, "images") else output["sample"]
    return images
# convert() returns a list of images, so display them in a Gallery component.
gr.Interface(convert,
             inputs=[gr.Textbox(label="Enter text")],
             outputs=[gr.Gallery(label="Generated Images")],
             title="Text to Image Generation").launch()