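# app.py -- Gradio text-to-image demo: Stable Diffusion 3 Medium or FusionXL
# (followed by the SDXL refiner pass), running on GPU when available.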
import gradio as gr
import torch
from diffusers import DiffusionPipeline, StableDiffusion3Pipeline
# Run on GPU when available, otherwise fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.cuda.is_available():
    torch.cuda.empty_cache()
def genie(Model, Prompt, negative_prompt, height, width, scale, steps, seed):
    # Seed the global torch RNG; a seed of 0 means "use a random seed".
    generator = torch.manual_seed(torch.seed()) if seed == 0 else torch.manual_seed(seed)
if Model == "SD3":
#torch.cuda.max_memory_allocated(device=device)
torch.cuda.empty_cache()
SD3 = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16).to(device)
torch.cuda.empty_cache()
image=SD3(
prompt=Prompt,
height=height,
width=width,
negative_prompt=negative_prompt,
guidance_scale=scale,
num_images_per_prompt=1,
num_inference_steps=steps).images[0]
if Model == "FXL":
torch.cuda.empty_cache()
#torch.cuda.max_memory_allocated(device=device)
pipe = DiffusionPipeline.from_pretrained("circulus/canvers-fusionXL-v1", torch_dtype=torch.float16)
pipe.enable_xformers_memory_efficient_attention()
pipe = pipe.to(device)
torch.cuda.empty_cache()
#torch.cuda.max_memory_allocated(device=device)
int_image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, output_type="latent").images
pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
pipe.enable_xformers_memory_efficient_attention()
pipe = pipe.to(device)
torch.cuda.empty_cache()
image = pipe(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=.99).images[0]
torch.cuda.empty_cache()
return image
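# Gradio UI: model selector, prompt and negative prompt, image size, guidance scale, step count, and seed.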
gr.Interface(fn=genie,
             inputs=[gr.Radio(["SD3", "FXL"], value='SD3', label='Choose Model'),
                     gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
                     gr.Textbox(label='What you do NOT want the AI to generate. 77 Token Limit.'),
                     gr.Slider(512, 1536, 1024, step=128, label='Height'),
                     gr.Slider(512, 1536, 1024, step=128, label='Width'),
                     gr.Slider(.5, maximum=15, value=7, step=.25, label='Guidance Scale'),
                     gr.Slider(10, maximum=50, value=25, step=5, label='Number of Inference Steps'),
                     gr.Slider(minimum=0, step=1, maximum=9999999999999999, randomize=True, label='Seed: 0 is Random')],
             outputs=gr.Image(label='Generated Image'),
             title="Manju Dream Booth V2.4 with Stable Diffusion 3 & Fusion XL - GPU",
             description="<br><br><b>Warning: This Demo is capable of producing NSFW content.</b>",
             article="If You Enjoyed this Demo and would like to Donate, you can send any amount to any of these Wallets. <br><br>SHIB (BEP20): 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891 <br>PayPal: https://www.paypal.me/ManjushriBodhisattva <br>ETH: 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891 <br>DOGE: D9QdVPtcU1EFH8jDC8jhU9uBcSTqUiA8h6<br><br>Code Monkey: <a href=\"https://huggingface.co/Manjushri\">Manjushri</a>").launch(debug=True)