import random

import torch
from torch import autocast
from diffusers import StableDiffusionPipeline

import gradio as gr
from gradio.components import Textbox, Image


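# Load the fine-tuned Stable Diffusion pipeline from the Hugging Face Hub in bfloat16.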
repo_name = 'mohansathya/twosd' # YOUR REPO NAME
pipe2 = StableDiffusionPipeline.from_pretrained(repo_name, torch_dtype=torch.bfloat16)



def generate_query_response(prompt):
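    """Generate an image from a text prompt using the fine-tuned pipeline."""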
    negative_prompt = "bad anatomy, ugly, deformed, disfigured, distorted, poorly drawn, blurry, low quality, low definition, lowres, out of frame, out of image, cropped, cut off, signature, watermark"
    num_samples = 5
    guidance_scale = 7.5
    num_inference_steps = 6
    height = 512
    width = 512

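    # Draw a fresh random seed on every call so repeated prompts produce varied results.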
    seed = random.randint(0, 2147483647)
    print(f"Seed: {seed}")
    generator = torch.Generator(device='cpu').manual_seed(seed)

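    # Run the pipeline under bfloat16 autocast and inference mode to keep CPU memory and compute low.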
    with autocast("cpu", dtype=torch.bfloat16), torch.inference_mode():
        imgs = pipe2(
            prompt,
            negative_prompt=negative_prompt,
            height=height, width=width,
            num_images_per_prompt=num_samples,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            generator=generator
        ).images

    # The UI displays a single image, so return only the first of the generated batch.
    return imgs[0]

    
# Input from user
in_prompt = Textbox(label="Enter a prompt:")

# Output response
out_response = Image(label="Generated image:")
    
# Gradio interface to generate UI link
iface = gr.Interface(
    fn=generate_query_response, inputs=in_prompt, outputs=out_response
)

# Launch the interface to generate UI
iface.launch()