import torch
from torch import autocast
from diffusers import StableDiffusionPipeline
import gradio as gr

model_id = "CompVis/stable-diffusion-v1-4"
# Authenticate with `huggingface-cli login` beforehand instead of hardcoding a token.
pipe = StableDiffusionPipeline.from_pretrained(
    model_id, use_auth_token=True, revision="fp16", torch_dtype=torch.float16
)
device = "cuda"  # the fp16 weights require a CUDA device
pipe = pipe.to(device)

def convert(prompt):
    # Run inference under autocast so the fp16 weights are used on the GPU.
    with autocast("cuda"):
        # Recent diffusers releases expose the result via .images instead of ["sample"].
        image = pipe(prompt).images[0]
    return image

gr.Interface(convert,
             inputs=gr.Textbox(label="Enter text"),
             outputs=gr.Image(label="Generated Image"),
             title="Text to Image Generation").launch()