import gradio as gr
import torch
import modin.pandas as pd
from diffusers import DiffusionPipeline 

device = "cuda" if torch.cuda.is_available() else "cpu"
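# The CUDA branch loads fp16 weights, enables xformers attention, and compiles the UNets;
# the CPU branch falls back to full-precision weights without compilation. The refiner is
# loaded on GPU but its call inside genie() is currently commented out.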
if torch.cuda.is_available():
    #PYTORCH_CUDA_ALLOC_CONF={'max_split_size_mb': 6000}
    torch.cuda.max_memory_allocated(device=device)
    torch.cuda.empty_cache()
    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
    pipe.enable_xformers_memory_efficient_attention()
    pipe = pipe.to(device)
    pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
    torch.cuda.empty_cache()
    refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16")
    refiner.enable_xformers_memory_efficient_attention()
    refiner.enable_sequential_cpu_offload()
    refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)
else: 
    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
    pipe = pipe.to(device)
    #pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
    #refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True)
    #refiner = refiner.to(device)
    #refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)

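# genie: generate one image with SDXL Turbo from the prompt, step count, and seed
# (guidance is disabled, as Turbo expects).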
def genie(prompt, steps, seed):
    # A seed of 0 means "random": pass no generator so the pipeline draws its own seed.
    generator = None if seed == 0 else torch.manual_seed(seed)
    int_image = pipe(prompt=prompt, generator=generator, num_inference_steps=steps, guidance_scale=0.0).images[0]
    #image = refiner(prompt=prompt, prompt_2=prompt_2, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, image=int_image, denoising_start=high_noise_frac).images[0]   
    return int_image
    
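# Gradio UI: the three active inputs (prompt, iteration slider, seed slider) map to genie's
# three parameters; the commented-out controls belong to the unused refiner path.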
gr.Interface(fn=genie, inputs=[gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'), 
    #gr.Textbox(label='What you Do Not want the AI to generate.'), 
    #gr.Slider(512, 1024, 768, step=128, label='Height'),
    #gr.Slider(512, 1024, 768, step=128, label='Width'),
    #gr.Slider(1, 15, 10, label='Guidance Scale'), 
    gr.Slider(1, maximum=5, value=2, step=1, label='Number of Iterations'), 
    gr.Slider(minimum=1, step=1, maximum=999999999999999999, randomize=True, label='Seed'),
    #gr.Textbox(label='Embedded Prompt'),
    #gr.Textbox(label='Embedded Negative Prompt'),
    #gr.Slider(minimum=.7, maximum=.99, value=.95, step=.01, label='Refiner Denoise Start %')
    ],
    outputs='image', 
    title="Stable Diffusion Turbo CPU or GPU", 
    description="SDXL Turbo CPU or GPU. Currently running on CPU. <br><br><b>WARNING:</b> Extremely Slow. 65s/Iteration. Expect 25-50mins an image for 25-50 iterations respectively. This model is capable of producing NSFW (Softcore) images.", 
    article = "If You Enjoyed this Demo and would like to Donate, you can send to any of these Wallets. <br>BTC: bc1qzdm9j73mj8ucwwtsjx4x4ylyfvr6kp7svzjn84 <br>3LWRoKYx6bCLnUrKEdnPo3FCSPQUSFDjFP <br>DOGE: DK6LRc4gfefdCTRk9xPD239N31jh9GjKez <br>SHIB (BEP20): 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891 <br>PayPal: https://www.paypal.me/ManjushriBodhisattva <br>ETH: 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891 <br>Code Monkey: <a href=\"https://huggingface.co/Manjushri\">Manjushri</a>").launch(debug=True, max_threads=80)