import gradio as gr
import torch
import numpy as np
import modin.pandas as pd
from PIL import Image
from diffusers import StableCascadeCombinedPipeline
from huggingface_hub import hf_hub_download
# --- Device & pipeline setup (module-level side effects) ---
# Hard-coded to CUDA: this demo is intended for a GPU host. The commented-out
# fallback is kept to show the original intent; on a CPU-only machine the
# cuda calls below would raise.
device = 'cuda'  # if torch.cuda.is_available() else 'cpu'
torch.cuda.empty_cache()

# Load Stable Cascade (prior + decoder combined) in bfloat16 to halve memory.
pipe = StableCascadeCombinedPipeline.from_pretrained(
    "stabilityai/stable-cascade", variant="bf16", torch_dtype=torch.bfloat16
)
# Memory-efficient attention must be enabled before moving to the device.
pipe.enable_xformers_memory_efficient_attention()
pipe = pipe.to(device)
torch.cuda.empty_cache()
def genie(Prompt, negative_prompt, height, width, scale, steps, seed, upscale=False):
    """Generate one image with the Stable Cascade pipeline.

    Args:
        Prompt: positive text prompt (77-token limit per the UI).
        negative_prompt: text the model should avoid.
        height, width: output resolution in pixels.
        scale: prior guidance scale.
        steps: prior inference step count.
        seed: RNG seed; 0 means "draw a fresh random seed".
        upscale: accepted for interface compatibility; currently unused.
            Defaulted so the 7-input Gradio Interface can call this
            8-parameter function without a missing-argument error.

    Returns:
        A PIL.Image.Image produced by the pipeline.
    """
    # Fix: the original called np.random.seed(0) (returns None and
    # deterministically seeds numpy) and never passed the generator to the
    # pipeline. Build a proper per-call generator and hand it to pipe().
    if seed == 0:
        generator = None  # pipeline draws its own random seed
    else:
        generator = torch.Generator(device=device).manual_seed(int(seed))
    torch.cuda.empty_cache()
    image = pipe(
        prompt=Prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=15,  # decoder steps (fixed); `steps` drives the prior
        prior_num_inference_steps=steps,
        prior_guidance_scale=scale,
        width=width,
        height=height,
        generator=generator,
    ).images[0]
    return image
# Build and launch the web UI.
# Fix: the original `description` and `article` string literals were broken
# across physical lines (a SyntaxError); they are rebuilt here as valid
# strings with explicit newlines, preserving the original text.
gr.Interface(
    fn=genie,
    inputs=[
        gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
        gr.Textbox(label='What you Do Not want the AI to generate. 77 Token Limit'),
        gr.Slider(512, 2048, 768, step=128, label='Height'),
        gr.Slider(512, 2048, 768, step=128, label='Width'),
        gr.Slider(1, maximum=15, value=5, step=.25, label='Guidance Scale'),
        gr.Slider(5, maximum=100, value=50, step=5, label='Number of Iterations'),
        gr.Slider(minimum=0, step=1, maximum=9999999999999999,
                  randomize=True, label='Seed: 0 is Random'),
    ],
    outputs=gr.Image(label='Generated Image'),
    title="Manju Dream Booth V1.9 with SDXL 1.0 Refiner and SD X2 Latent Upscaler - GPU",
    description="Warning: This Demo is capable of producing NSFW content.",
    article=(
        "If You Enjoyed this Demo and would like to Donate, "
        "you can send any amount to any of these Wallets.\n"
        "SHIB (BEP20): 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891\n"
        "PayPal: https://www.paypal.me/ManjushriBodhisattva\n"
        "ETH: 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891\n"
        "DOGE: D9QdVPtcU1EFH8jDC8jhU9uBcSTqUiA8h6\n"
        "Code Monkey: Manjushri"
    ),
).launch(debug=True, max_threads=80)