Spaces: Model X demo
app.py
CHANGED
@@ -9,32 +9,55 @@ import tqdm
 # Enable TQDM progress tracking
 tqdm.monitor_interval = 0
 
-# Load the diffusion
-
+# Load the diffusion pipelines
+pipe1 = StableDiffusionXLPipeline.from_pretrained(
     "kayfahaarukku/UrangDiffusion-1.0",
-    torch_dtype=torch.
+    torch_dtype=torch.float16,
     custom_pipeline="lpw_stable_diffusion_xl",
 )
-
+pipe1.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe1.scheduler.config)
 
-
-
-
-
+pipe2 = StableDiffusionXLPipeline.from_pretrained(
+    "kayfahaarukku/model-x",
+    torch_dtype=torch.float16,
+    custom_pipeline="lpw_stable_diffusion_xl",
+)
+pipe2.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe2.scheduler.config)
+
+# Function to generate images from both models
+@spaces.GPU
+def generate_comparison(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress=gr.Progress()):
+    pipe1.to('cuda')
+    pipe2.to('cuda')
 
     if randomize_seed:
         seed = random.randint(0, 99999999)
     if use_defaults:
-        prompt = f"{prompt}, masterpiece, best quality"
-        negative_prompt = f"
+        prompt = f"{prompt}, masterpiece, best quality, amazing quality, very aesthetic"
+        negative_prompt = f"lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name, displeasing, {negative_prompt}"
     generator = torch.manual_seed(seed)
 
     def callback(step, timestep, latents):
-        progress(step / num_inference_steps)
+        progress(step / (2 * num_inference_steps))
         return
 
     width, height = map(int, resolution.split('x'))
-
+
+    # Generate image with UrangDiffusion-1.0
+    image1 = pipe1(
+        prompt,
+        negative_prompt=negative_prompt,
+        width=width,
+        height=height,
+        guidance_scale=guidance_scale,
+        num_inference_steps=num_inference_steps,
+        generator=generator,
+        callback=callback,
+        callback_steps=1
+    ).images[0]
+
+    # Generate image with model-x
+    image2 = pipe2(
         prompt,
         negative_prompt=negative_prompt,
         width=width,
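As a side note, the loading pattern introduced in this hunk (fp16 weights, the lpw_stable_diffusion_xl community pipeline, and an Euler Ancestral scheduler for each checkpoint) can be exercised outside the Space. The following is a minimal, hypothetical sketch of the same head-to-head run as a standalone script; the helper name load_pipe, the prompt, the seed, and the output filenames are illustrative, and it assumes a CUDA GPU and access to the two checkpoints named in the diff.

import torch
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler

def load_pipe(repo_id):
    # Mirror the app: fp16 weights, long-prompt-weighting community pipeline, Euler a scheduler.
    pipe = StableDiffusionXLPipeline.from_pretrained(
        repo_id,
        torch_dtype=torch.float16,
        custom_pipeline="lpw_stable_diffusion_xl",
    )
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
    return pipe.to("cuda")

pipe1 = load_pipe("kayfahaarukku/UrangDiffusion-1.0")
pipe2 = load_pipe("kayfahaarukku/model-x")

prompt = "1girl, masterpiece, best quality"  # illustrative prompt
images = []
for pipe in (pipe1, pipe2):
    # A fresh generator per call gives both models the same starting noise,
    # which keeps the comparison fair.
    generator = torch.manual_seed(42)
    images.append(pipe(prompt, width=896, height=1152, guidance_scale=7,
                       num_inference_steps=28, generator=generator).images[0])
images[0].save("urangdiffusion_1_0.png")  # output paths are illustrative
images[1].save("model_x.png")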
@@ -50,20 +73,20 @@ def generate_image(prompt, negative_prompt, use_defaults, resolution, guidance_s
 
     metadata_text = f"{prompt}\nNegative prompt: {negative_prompt}\nSteps: {num_inference_steps}, Sampler: Euler a, Size: {width}x{height}, Seed: {seed}, CFG scale: {guidance_scale}"
 
-    return
+    return image1, image2, seed, metadata_text
 
 # Define Gradio interface
 def interface_fn(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress=gr.Progress()):
-
-    return
+    image1, image2, seed, metadata_text = generate_comparison(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress)
+    return image1, image2, seed, gr.update(value=metadata_text)
 
 def reset_inputs():
-    return gr.update(value=''), gr.update(value=''), gr.update(value=True), gr.update(value='
+    return gr.update(value=''), gr.update(value=''), gr.update(value=True), gr.update(value='896x1152'), gr.update(value=7), gr.update(value=28), gr.update(value=0), gr.update(value=True), gr.update(value='')
 
-with gr.Blocks(title="
+with gr.Blocks(title="Model X Comparison Demo", theme="NoCrypt/[email protected]") as demo:
     gr.HTML(
-        "<h1>UrangDiffusion 1.0 Demo</h1>"
-        "This demo
+        "<h1>UrangDiffusion 1.0 vs Model X Comparison Demo</h1>"
+        "This demo showcases a comparison between UrangDiffusion 1.0 and Model X."
     )
     with gr.Row():
         with gr.Column():
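The metadata_text built above is a single string holding the prompt, the negative prompt, and a comma-separated settings line. Purely as an illustration of that format, the hypothetical helper below parses such a string back into a dict; it assumes the prompt and negative prompt contain no newlines, which matches how the f-string above assembles them (Python 3.9+ for removeprefix).

def parse_metadata(metadata_text: str) -> dict:
    # Split the three lines produced by the f-string: prompt, negative prompt, settings.
    prompt_line, negative_line, settings_line = metadata_text.split("\n")
    settings = dict(item.split(": ", 1) for item in settings_line.split(", "))
    return {
        "prompt": prompt_line,
        "negative_prompt": negative_line.removeprefix("Negative prompt: "),
        **settings,
    }

# Example:
# parse_metadata("1girl\nNegative prompt: lowres\nSteps: 28, Sampler: Euler a, Size: 896x1152, Seed: 42, CFG scale: 7")
# -> {'prompt': '1girl', 'negative_prompt': 'lowres', 'Steps': '28', 'Sampler': 'Euler a',
#     'Size': '896x1152', 'Seed': '42', 'CFG scale': '7'}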
@@ -76,17 +99,19 @@ with gr.Blocks(title="UrangDiffusion 1.0 Demo", theme="NoCrypt/[email protected]") as d
                     "1344x768", "768x1344", "1536x640", "640x1536"
                 ],
                 label="Resolution",
-                value="
+                value="896x1152"
             )
             guidance_scale_input = gr.Slider(minimum=1, maximum=20, step=0.5, label="Guidance Scale", value=7)
             num_inference_steps_input = gr.Slider(minimum=1, maximum=100, step=1, label="Number of Inference Steps", value=28)
             seed_input = gr.Slider(minimum=0, maximum=99999999, step=1, label="Seed", value=0, interactive=True)
             randomize_seed_input = gr.Checkbox(label="Randomize Seed", value=True)
-            generate_button = gr.Button("Generate")
+            generate_button = gr.Button("Generate Comparison")
             reset_button = gr.Button("Reset")
 
         with gr.Column():
-
+            with gr.Row():
+                output_image1 = gr.Image(type="pil", label="UrangDiffusion 1.0")
+                output_image2 = gr.Image(type="pil", label="Model X")
             with gr.Accordion("Parameters", open=False):
                 gr.Markdown(
                     """
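This hunk's main layout change is the gr.Row() holding the two gr.Image outputs inside the second column. Below is a stripped-down, hypothetical sketch of just that two-image arrangement, wired to a placeholder function so the layout can be tried without loading any model; the component names and dummy images are illustrative.

import gradio as gr
from PIL import Image

def fake_compare(prompt):
    # Stand-ins for the two model outputs; solid colours instead of real generations
    # (the prompt is ignored in this stub).
    return Image.new("RGB", (256, 256), "lightblue"), Image.new("RGB", (256, 256), "lightpink")

with gr.Blocks() as demo:
    prompt_box = gr.Textbox(label="Prompt")
    run = gr.Button("Generate Comparison")
    with gr.Row():
        left = gr.Image(type="pil", label="UrangDiffusion 1.0")
        right = gr.Image(type="pil", label="Model X")
    run.click(fake_compare, inputs=[prompt_box], outputs=[left, right])

if __name__ == "__main__":
    demo.launch()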
@@ -112,7 +137,7 @@ with gr.Blocks(title="UrangDiffusion 1.0 Demo", theme="NoCrypt/[email protected]") as d
         inputs=[
             prompt_input, negative_prompt_input, use_defaults_input, resolution_input, guidance_scale_input, num_inference_steps_input, seed_input, randomize_seed_input
         ],
-        outputs=[
+        outputs=[output_image1, output_image2, seed_input, metadata_textbox]
     )
 
     reset_button.click(
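The diff cuts off inside the reset_button.click( call, so the remaining wiring is not shown. A plausible, hypothetical completion, consistent with the nine gr.update values returned by reset_inputs above and with the components defined earlier in the file, could look like this (the actual app.py may differ):

    reset_button.click(
        reset_inputs,
        inputs=[],
        outputs=[
            prompt_input, negative_prompt_input, use_defaults_input, resolution_input,
            guidance_scale_input, num_inference_steps_input, seed_input,
            randomize_seed_input, metadata_textbox
        ]
    )

demo.queue().launch()  # assumed closing line for the Space; not visible in the diff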