import os
import random

import gradio as gr
import spaces
import torch
import tqdm
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler

# Disable the tqdm monitor thread (monitor_interval is a class attribute of tqdm.tqdm)
tqdm.tqdm.monitor_interval = 0

# Load the diffusion pipeline
pipe = StableDiffusionXLPipeline.from_pretrained(
    "kayfahaarukku/UrangDiffusion-1.0",
    torch_dtype=torch.float16,
    custom_pipeline="lpw_stable_diffusion_xl",
)
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)


# Function to generate an image
@spaces.GPU
def generate_image(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress=gr.Progress()):
    pipe.to('cuda')

    # Optionally pick a fresh random seed for this generation
    if randomize_seed:
        seed = random.randint(0, 99999999)

    # Keep the user's raw prompts for the details report
    original_prompt = prompt
    original_negative_prompt = negative_prompt

    # Append the default quality tags and default negative prompt if requested
    if use_defaults:
        prompt = f"{prompt}, masterpiece, best quality"
        negative_prompt = f"nsfw, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name, {negative_prompt}"

    generator = torch.manual_seed(seed)

    # Report denoising progress back to the Gradio progress bar
    def callback(step, timestep, latents):
        progress(step / num_inference_steps)
        return

    width, height = map(int, resolution.split('x'))

    image = pipe(
        prompt,
        negative_prompt=negative_prompt,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        callback=callback,
        callback_steps=1,
    ).images[0]

    torch.cuda.empty_cache()
    return image, seed, original_prompt, original_negative_prompt


# Define Gradio interface
def interface_fn(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress=gr.Progress()):
    image, seed, original_prompt, original_negative_prompt = generate_image(
        prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress
    )
    details = f"""Prompt: {original_prompt}
Negative prompt: {original_negative_prompt}
Steps: {num_inference_steps}, CFG scale: {guidance_scale}, Seed: {seed}, Size: {resolution}
Default quality tags: {"Enabled" if use_defaults else "Disabled"}"""

    if use_defaults:
        details += """
Default prompt addition: , masterpiece, best quality
Default negative prompt addition: nsfw, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name"""

    return image, seed, gr.update(value=seed), details


def reset_inputs():
    return (
        gr.update(value=''),
        gr.update(value=''),
        gr.update(value=True),
        gr.update(value='832x1216'),
        gr.update(value=7),
        gr.update(value=28),
        gr.update(value=0),
        gr.update(value=True),
    )


with gr.Blocks(title="UrangDiffusion 1.0 Demo", theme="NoCrypt/miku@1.2.1") as demo:
    gr.HTML(
        "