# DarkIdol-flux / app.py
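# Gradio demo that generates images from text prompts with the DarkIdol-flux v1.1 pipeline.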
import gradio as gr
import numpy as np
import random
import spaces
import torch
from diffusers import DiffusionPipeline, AutoencoderTiny
from huggingface_hub import hf_hub_download


def feifeimodload():
    # Load the DarkIdol-flux v1.1 pipeline in bfloat16, on the GPU when one is available.
    dtype = torch.bfloat16
    device = "cuda" if torch.cuda.is_available() else "cpu"
    pipe = DiffusionPipeline.from_pretrained(
        "aifeifei798/DarkIdol-flux-v1.1", torch_dtype=dtype
    ).to(device)
    # Slice and tile the VAE to keep peak memory low at large resolutions.
    pipe.vae.enable_slicing()
    pipe.vae.enable_tiling()
    # Drop any previously loaded LoRA weights and release cached GPU memory.
    pipe.unload_lora_weights()
    torch.cuda.empty_cache()
    return pipe
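

# Load the pipeline once at startup so every request reuses the same weights.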
pipe = feifeimodload()
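
# Upper bounds used by the seed and image-size sliders.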
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048


# On ZeroGPU Spaces, @spaces.GPU() allocates a GPU for the duration of each call.
@spaces.GPU()
def infer(
    prompt,
    seed=42,
    randomize_seed=False,
    width=1024,
    height=1024,
    num_inference_steps=4,
    progress=gr.Progress(track_tqdm=True),
):
    # Pick a fresh seed when requested so repeated runs produce different images.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)
    # prompt = f"{prompt}, slight smile, Master of Light and Shadow."
    image = pipe(
        prompt=prompt,
        width=width,
        height=height,
        num_inference_steps=num_inference_steps,
        generator=generator,
        guidance_scale=3.5,
    ).images[0]
    return image, seed
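

# Example prompts surfaced through gr.Examples in the UI below.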
examples = [
    "real model girl in real life",
]

css = """
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("""# DarkIdol-flux
DarkIdol-flux is a text-to-image model that creates aesthetic, detailed, and diverse images from text prompts in just 6-8 steps. It offers strong image quality, typography, complex-prompt understanding, and resource efficiency.
        """)
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=12,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run")
        result = gr.Image(label="Result", show_label=False, height=520)
        with gr.Accordion("Advanced Settings", open=False):
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=64,
                    value=832,
                )
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=64,
                    value=1280,
                )
            with gr.Row():
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=6,
                )
        gr.Examples(
            examples=examples,
            fn=infer,
            inputs=[prompt],
            outputs=[result, seed],
            cache_examples=False,
        )

    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[prompt, seed, randomize_seed, width, height, num_inference_steps],
        outputs=[result, seed],
    )
demo.launch()