Tony Lian committed
Commit 363907c • 1 Parent(s): 282616b
Update placeholders and fix defaults with low_memory
app.py
CHANGED
@@ -56,7 +56,7 @@ def get_ours_image(response, overall_prompt_override="", seed=0, num_inference_s
             # Both are empty so generate a placeholder
             response = layout_placeholder
         else:
-            raise gr.Error("
+            raise gr.Error("You entered a prompt for overall image but left the ChatGPT response empty. Please paste ChatGPT response or select an example below to get started.")
     gen_boxes, bg_prompt, neg_prompt = parse_input_with_negative(response, no_input=True)
     gen_boxes = filter_boxes(gen_boxes, scale_boxes=scale_boxes)
     spec = {
@@ -159,8 +159,8 @@ with gr.Blocks(
     with gr.Tab("Stage 2 (New). Layout to Image generation"):
         with gr.Row():
             with gr.Column(scale=1):
-                overall_prompt_override = gr.Textbox(lines=2, label="Prompt for the overall image (optional but recommended)", placeholder="You can put your input prompt for layout generation here, helpful if your scene cannot be represented by background prompt and boxes only, e.g., with object interactions. If left empty: background prompt with [objects].", value="")
-                response = gr.Textbox(lines=8, label="Paste ChatGPT response here (no original caption needed here)", placeholder=layout_placeholder)
+                overall_prompt_override = gr.Textbox(lines=2, label="Prompt for the overall image (optional but recommended)", placeholder="You can put your input prompt for layout generation here, helpful if your scene cannot be represented by background prompt and boxes only, e.g., with object interactions. If left empty, we will use: background prompt with [objects].", value="")
+                response = gr.Textbox(lines=8, label="Paste ChatGPT response here (no original caption needed here)", placeholder="Get started with some examples at the bottom of the page. If left empty, we will use the following: \n\n" + layout_placeholder)
                 num_inference_steps = gr.Slider(1, 100 if low_memory else 250, value=default_num_inference_steps, step=1, label="Number of denoising steps (set to >=50 for higher generation quality)")
                 # Using an environment variable allows setting the default to faster/fastest on low-end GPUs.
                 preset = gr.Radio(label="Guidance: apply less control for faster generation", choices=["Standard", "Faster (disable attention guidance, keep per-box guidance)", "Faster (disable per-box guidance, keep attention guidance)", "Fastest (disable both)"], value="Faster (disable attention guidance, keep per-box guidance)" if low_memory else "Standard")
@@ -169,8 +169,9 @@ with gr.Blocks(
                 with gr.Tab("Guidance"):
                     frozen_step_ratio = gr.Slider(0, 1, value=0.5, step=0.1, label="Foreground frozen steps ratio (higher: stronger attribute binding; lower: higher coherence)")
                     gligen_scheduled_sampling_beta = gr.Slider(0, 1, value=0.4, step=0.1, label="GLIGEN guidance steps ratio (the beta value, higher: stronger GLIGEN guidance)")
-
-
+                    # Since the default preset is "Faster (disable attention guidance, keep per-box guidance)" when `low_memory` is set, attention guidance is disabled here by default to match that preset; it can be re-enabled if the user selects another preset.
+                    attn_guidance_step_ratio = gr.Slider(0, 1, value=0.6 if not low_memory else 0, step=0.01, label="Attention guidance steps ratio (higher: stronger attention guidance; lower: faster and higher coherence)", interactive=not low_memory)
+                    attn_guidance_scale = gr.Slider(0, 50, value=20, step=0.5, label="Attention guidance scale: 0 means no attention guidance.", interactive=not low_memory)
                     use_ref_ca = gr.Checkbox(label="Using per-box attention to guide reference attention", show_label=False, value=True)
                 with gr.Tab("Generation"):
                     dpm_scheduler = gr.Checkbox(label="Use DPM scheduler (unchecked: DDIM scheduler, may have better coherence, recommend >=50 inference steps)", show_label=False, value=True)
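The diff reads `low_memory` and `default_num_inference_steps` but does not show where they come from; the in-code comment only notes that an environment variable sets the default to faster/fastest on low-end GPUs. Below is a minimal sketch of that pattern, not code from this commit: the variable names LOW_MEMORY and DEFAULT_NUM_INFERENCE_STEPS and the fallback step counts are illustrative assumptions, not the Space's actual configuration keys.

import os

# Hypothetical sketch (not from this commit): derive the flags the UI code above
# depends on from environment variables.
low_memory = os.environ.get("LOW_MEMORY", "").lower() not in ("", "0", "false")

# Fewer denoising steps by default on a low-end GPU; overridable via a second
# hypothetical environment variable. The fallback values are illustrative only.
default_num_inference_steps = int(
    os.environ.get("DEFAULT_NUM_INFERENCE_STEPS", "20" if low_memory else "50")
)

With `low_memory` set, the changes above then cap the step slider at 100, default the preset to "Faster (disable attention guidance, keep per-box guidance)", and disable the attention guidance sliders so the UI matches that preset.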