LPX55 committed on
Commit
ebcf420
·
verified ·
1 Parent(s): 3f6ec7e

Update app_local.py

Browse files
Files changed (1) hide show
  1. app_local.py +19 -15
app_local.py CHANGED
@@ -173,10 +173,10 @@ def polish_prompt(original_prompt: str) -> str:
173
  with torch.no_grad():
174
  generated_ids = rewriter_model.generate(
175
  **model_inputs,
176
- max_new_tokens=256,
177
  do_sample=True,
178
- temperature=0.7,
179
- top_p=0.8,
180
  repetition_penalty=1.1,
181
  no_repeat_ngram_size=3,
182
  pad_token_id=rewriter_tokenizer.eos_token_id
@@ -486,7 +486,7 @@ def infer(
486
  num_images_per_prompt=1
487
  ).images
488
  edited_images.extend(result)
489
- print(f"Generated image {i+1}/{len(batch_prompts)} with prompt: {current_prompt[:75]}...")
490
  # Clear cache after generation
491
  # if device == "cuda":
492
  # torch.cuda.empty_cache()
@@ -505,7 +505,7 @@ def infer(
505
  f"</div>"
506
  )
507
 
508
- with gr.Blocks(title="Qwen Image Edit - Fast Lightning Mode w/ Batch") as demo:
509
  preset_prompts_state = gr.State(value=[])
510
  # preset_prompts_state = gr.State(value=["", "", "", ""])
511
 
@@ -524,7 +524,7 @@ with gr.Blocks(title="Qwen Image Edit - Fast Lightning Mode w/ Batch") as demo:
524
  label="Source Image",
525
  type="pil",
526
  height=300
527
- )
528
 
529
  prompt = gr.Textbox(
530
  label="Edit Instructions / Base Prompt",
@@ -550,11 +550,7 @@ with gr.Blocks(title="Qwen Image Edit - Fast Lightning Mode w/ Batch") as demo:
550
  preset_prompt_4 = gr.Textbox(label="Prompt 4", lines=1, value="")
551
  update_preset_button = gr.Button("Update Preset", variant="secondary")
552
 
553
- rewrite_toggle = gr.Checkbox(
554
- label="Enable Prompt Enhancement",
555
- value=True,
556
- interactive=True
557
- )
558
 
559
  # Add prompt preview component
560
  prompt_preview = gr.Textbox(
@@ -565,6 +561,14 @@ with gr.Blocks(title="Qwen Image Edit - Fast Lightning Mode w/ Batch") as demo:
565
  value="Enter a base prompt and select a preset above to see how your prompt will be modified for batch generation.",
566
  placeholder="Prompt preview will appear here..."
567
  )
 
 
 
 
 
 
 
 
568
  run_button = gr.Button(
569
  "Generate Edit(s)",
570
  variant="primary"
@@ -588,14 +592,14 @@ with gr.Blocks(title="Qwen Image Edit - Fast Lightning Mode w/ Batch") as demo:
588
  minimum=1.0,
589
  maximum=10.0,
590
  step=0.1,
591
- value=1.0
592
  )
593
  num_inference_steps = gr.Slider(
594
  label="Inference Steps",
595
- minimum=2,
596
  maximum=16,
597
  step=1,
598
- value=4
599
  )
600
 
601
  num_images_per_prompt = gr.Slider(
@@ -618,7 +622,7 @@ with gr.Blocks(title="Qwen Image Edit - Fast Lightning Mode w/ Batch") as demo:
618
  )
619
  prompt_info = gr.HTML(
620
  value="<div style='padding:15px; margin-top:15px'>"
621
- "Prompt details will appear after generation. Ability to edit Preset Prompts on the fly will be implemented shortly.</div>"
622
  )
623
 
624
  # Fix the show_preset_editor function to use ORIGINAL_PRESETS:
 
173
  with torch.no_grad():
174
  generated_ids = rewriter_model.generate(
175
  **model_inputs,
176
+ max_new_tokens=512,
177
  do_sample=True,
178
+ temperature=0.75,
179
+ top_p=0.85,
180
  repetition_penalty=1.1,
181
  no_repeat_ngram_size=3,
182
  pad_token_id=rewriter_tokenizer.eos_token_id
 
486
  num_images_per_prompt=1
487
  ).images
488
  edited_images.extend(result)
489
+ print(f"Generated image {i+1}/{len(batch_prompts)} with prompt: {current_prompt}...")
490
  # Clear cache after generation
491
  # if device == "cuda":
492
  # torch.cuda.empty_cache()
 
505
  f"</div>"
506
  )
507
 
508
+ with gr.Blocks(title="'Qwen Image Edit' Model Playground & Showcase [4-Step Lightning Mode]") as demo:
509
  preset_prompts_state = gr.State(value=[])
510
  # preset_prompts_state = gr.State(value=["", "", "", ""])
511
 
 
524
  label="Source Image",
525
  type="pil",
526
  height=300
527
+ )
528
 
529
  prompt = gr.Textbox(
530
  label="Edit Instructions / Base Prompt",
 
550
  preset_prompt_4 = gr.Textbox(label="Prompt 4", lines=1, value="")
551
  update_preset_button = gr.Button("Update Preset", variant="secondary")
552
 
553
+
 
 
 
 
554
 
555
  # Add prompt preview component
556
  prompt_preview = gr.Textbox(
 
561
  value="Enter a base prompt and select a preset above to see how your prompt will be modified for batch generation.",
562
  placeholder="Prompt preview will appear here..."
563
  )
564
+
565
+ rewrite_toggle = gr.Checkbox(
566
+ label="Additional Prompt Enhancement",
567
+ info="Setting this to true will pass the basic prompt(s) generated via the static preset template to a secondary LLM tasked with improving the overall cohesiveness and details of the final generation prompt.",
568
+ value=True,
569
+ interactive=True
570
+ )
571
+
572
  run_button = gr.Button(
573
  "Generate Edit(s)",
574
  variant="primary"
 
592
  minimum=1.0,
593
  maximum=10.0,
594
  step=0.1,
595
+ value=1.1
596
  )
597
  num_inference_steps = gr.Slider(
598
  label="Inference Steps",
599
+ minimum=1,
600
  maximum=16,
601
  step=1,
602
+ value=3
603
  )
604
 
605
  num_images_per_prompt = gr.Slider(
 
622
  )
623
  prompt_info = gr.HTML(
624
  value="<div style='padding:15px; margin-top:15px'>"
625
+ "Hint: depending on the original image, prompt quality, and complexity, you can often get away with 3 steps, even 2 steps without much loss in quality. </div>"
626
  )
627
 
628
  # Fix the show_preset_editor function to use ORIGINAL_PRESETS: