Spaces: Running on Zero
Update optimization.py (#3)

Commits:
- Update optimization.py (8dc6c106b60acd3e7a1089f80e31f02d7a789c3a)
- Update app.py (de0bec3971c17324a1f71c8e40b8e9da28546c6d)

Files changed:
- app.py +6 -15
- optimization.py +7 -0
app.py
CHANGED

```diff
@@ -299,7 +299,7 @@ def update_history(new_image, history):
 def use_history_as_input(evt: gr.SelectData, history):
     """Sets the selected history image as the new input image."""
     if history and evt.index < len(history):
-        return gr.update(value=history[evt.index])
+        return gr.update(value=history[evt.index][0])
     return gr.update()

 def use_output_as_input(output_image):
@@ -441,16 +441,7 @@ with gr.Blocks(css=css) as demo:
         <img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/qwen_image_edit_logo.png" alt="Qwen-Image Edit Logo" width="400" style="display: block; margin: 0 auto;">
     </div>
     """)
-    gr.Markdown("""
-    ## Qwen-Image Edit with Outpainting
-
-    Extend your images beyond their original boundaries with intelligent outpainting. The model will generate new content that seamlessly blends with your original image.
-
-    **Tips:**
-    - Use the preview button to see which areas will be generated before running
-    - Click on any image in the history to use it as a new input
-    - Try different alignments to expand your image in specific directions
-
+    gr.Markdown("""
     [Learn more](https://github.com/QwenLM/Qwen-Image) about the Qwen-Image series.
     Try on [Qwen Chat](https://chat.qwen.ai/), or [download model](https://huggingface.co/Qwen/Qwen-Image-Edit) to run locally.
     """)
@@ -478,9 +469,9 @@ with gr.Blocks(css=css) as demo:
                 label="Alignment"
             )

-            run_button = gr.Button("
+            run_button = gr.Button("run", variant="primary")

-            with gr.Accordion("
+            with gr.Accordion("Outpainting Settings", open=False) as settings_panel:
                 with gr.Row():
                     width_slider = gr.Slider(
                         label="Target Width",
@@ -555,9 +546,9 @@ with gr.Blocks(css=css) as demo:
                     num_inference_steps = gr.Slider(
                         label="Number of inference steps",
                         minimum=1,
-                        maximum=
+                        maximum=28,
                         step=1,
-                        value=
+                        value=8,
                     )

                     rewrite_prompt = gr.Checkbox(
```
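The `[0]` added in the first hunk suggests the history `gr.Gallery` now holds `(image, caption)` pairs rather than bare images, so the select handler has to unwrap the pair before handing it to `gr.update`. A minimal sketch of that pattern, assuming a gallery wired to an input `gr.Image` (the component names here are illustrative, not taken from the Space):

```python
import gradio as gr

def use_history_as_input(evt: gr.SelectData, history):
    """Copy the clicked gallery item back into the input image component."""
    if history and evt.index < len(history):
        item = history[evt.index]
        # Gallery entries may be bare images or (image, caption) pairs;
        # unwrap the pair so the Image component receives only the image.
        image = item[0] if isinstance(item, (tuple, list)) else item
        return gr.update(value=image)
    return gr.update()  # out-of-range selection: leave the input unchanged

with gr.Blocks() as demo:
    input_image = gr.Image(label="Input Image")
    history_gallery = gr.Gallery(label="History")
    history_gallery.select(
        use_history_as_input, inputs=[history_gallery], outputs=[input_image]
    )
```

Indexing `history[evt.index][0]` directly, as the commit does, is the compact form when every history entry is known to be a pair.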
optimization.py
CHANGED

```diff
@@ -49,6 +49,13 @@ def optimize_pipeline_(pipeline: Callable[P, Any], *args: P.args, **kwargs: P.kw

     @spaces.GPU(duration=1500)
     def compile_transformer():
+
+        pipeline.load_lora_weights(
+            "lightx2v/Qwen-Image-Lightning",
+            weight_name="Qwen-Image-Lightning-8steps-V1.1.safetensors"
+        )
+        pipeline.fuse_lora()
+        pipeline.unload_lora_weights()

         with spaces.aoti_capture(pipeline.transformer) as call:
             pipeline(*args, **kwargs)
```
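This added block also explains the slider change in app.py: the commit bakes the 8-step Lightning (step-distillation) LoRA into the transformer before `spaces.aoti_capture` runs, so the ahead-of-time compiled graph already carries the distilled weights and no LoRA hooks remain at capture time. A standalone sketch of the load → fuse → unload sequence, assuming the stock diffusers LoRA API and the `Qwen/Qwen-Image-Edit` checkpoint linked above (the base checkpoint choice is an assumption; a CUDA GPU is required):

```python
import torch
from diffusers import DiffusionPipeline

# Base checkpoint assumed from the Space's model link.
pipeline = DiffusionPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit", torch_dtype=torch.bfloat16
).to("cuda")

# 1. Load the 8-step Lightning LoRA named in the diff.
pipeline.load_lora_weights(
    "lightx2v/Qwen-Image-Lightning",
    weight_name="Qwen-Image-Lightning-8steps-V1.1.safetensors",
)
# 2. Merge the LoRA deltas into the base transformer weights in place.
pipeline.fuse_lora()
# 3. Drop the adapter modules so later tracing or compilation sees a
#    plain transformer with no LoRA layers attached.
pipeline.unload_lora_weights()
```

With the distilled weights fused in, eight denoising steps is the intended operating point, which matches the new `maximum=28`, `value=8` bounds on the inference-steps slider.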