Severian committed
Commit 04fdd69 · verified · 1 Parent(s): a04936b

Update app.py

Files changed (1)
  1. app.py +102 -127
app.py CHANGED
@@ -36,7 +36,6 @@ load_dotenv()
 USERNAME = os.getenv("USERNAME")
 PASSWORD = os.getenv("PASSWORD")
 
-
 qrcode_generator = qrcode.QRCode(
     version=1,
     error_correction=qrcode.ERROR_CORRECT_H,
@@ -75,7 +74,7 @@ def load_models_on_launch():
     loaded_controlnet = ControlNetModel.from_pretrained(
         controlnet_path,
         torch_dtype=torch.float16
-    ).to("cuda")
+    ).to("mps")
 
     diffusion_path = snapshot_download(DIFFUSION_MODELS["GhostMix"])
     loaded_pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
@@ -83,7 +82,7 @@ def load_models_on_launch():
         controlnet=loaded_controlnet,
         torch_dtype=torch.float16,
         safety_checker=None,
-    ).to("cuda")
+    ).to("mps")
     print("Models loaded successfully!")
 
 # Modify the load_models function to use global variables
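Note: both `.to(...)` calls now target "mps" unconditionally, which assumes an Apple-silicon host. A minimal sketch (not part of this commit) of a device fallback that keeps the app running on CUDA or CPU machines:

import torch

def pick_device() -> str:
    # Prefer CUDA, fall back to Apple MPS, then CPU.
    if torch.cuda.is_available():
        return "cuda"
    if torch.backends.mps.is_available():
        return "mps"
    return "cpu"

# Usage sketch: loaded_controlnet = loaded_controlnet.to(pick_device())
# float16 weights may need to be loaded as float32 when falling back to CPU.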
@@ -200,9 +199,9 @@ def inference(
     qr_code_content: str,
     prompt: str,
     negative_prompt: str,
-    guidance_scale: float = 15.0,
-    controlnet_conditioning_scale: float = 1.5,
-    strength: float = 0.6,
+    guidance_scale: float = 15.0,  # Increased from 10.0 to 15.0
+    controlnet_conditioning_scale: float = 1.5,  # Adjusted from 2.0 to 1.5
+    strength: float = 0.6,  # Reduced from 0.8 to 0.6
     seed: int = -1,
     init_image: Image.Image | None = None,
     qrcode_image: Image.Image | None = None,
@@ -215,14 +214,13 @@ def inference(
     controlnet_model: str = "QR Code Monster",
     diffusion_model: str = "GhostMix",
     reference_image_strength: float = 0.6,
-    progress: gr.Progress = gr.Progress()
 ):
     try:
-        progress(0, desc="Initializing...")
+        progress = gr.Progress()
         # Load models based on user selection
-        progress(0.1, desc="Loading models...")
+        progress(0, desc="Downloading models...")
         pipe = load_models(controlnet_model, diffusion_model)
-        progress(0.2, desc="Models loaded, preparing for inference...")
+        progress(0.5, desc="Models downloaded, preparing for inference...")
 
         if prompt is None or prompt == "":
             raise gr.Error("Prompt is required")
@@ -234,7 +232,7 @@ def inference(
         if count_tokens(prompt) > MAX_TOKENS:
             raise gr.Error(f"Prompt exceeds the maximum allowed tokens of {MAX_TOKENS}")
 
-        if negative_prompt and count_tokens(negative_prompt) > MAX_TOKENS:
+        if count_tokens(negative_prompt) > MAX_TOKENS:
             raise gr.Error(f"Negative prompt exceeds the maximum allowed tokens of {MAX_TOKENS}")
 
         pipe.scheduler = SAMPLER_MAP[sampler](pipe.scheduler.config)
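Note: dropping the `negative_prompt and` guard means `count_tokens` is now called even when the negative prompt is empty or `None`. A sketch of the None-safe form (`count_tokens` and `MAX_TOKENS` are the app's own helper and constant):

if negative_prompt and count_tokens(negative_prompt) > MAX_TOKENS:
    raise gr.Error(f"Negative prompt exceeds the maximum allowed tokens of {MAX_TOKENS}")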
@@ -243,7 +241,7 @@ def inference(
             seed = torch.seed()  # Generate a truly random seed
         generator = torch.manual_seed(seed)
 
-        if qr_code_content != "" or (qrcode_image is not None and qrcode_image.size == (1, 1)):
+        if qr_code_content != "" or qrcode_image.size == (1, 1):
             print("Generating QR Code from content")
             qr = qrcode.QRCode(
                 version=1,
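Note: the new condition reads `qrcode_image.size` before confirming an image was uploaded, so a missing upload raises `AttributeError` rather than a clear message. A sketch of the None-safe check:

if qr_code_content != "" or (qrcode_image is not None and qrcode_image.size == (1, 1)):
    print("Generating QR Code from content")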
@@ -256,11 +254,9 @@ def inference(
 
             qrcode_image = qr.make_image(fill_color=qr_color, back_color=bg_color)
             qrcode_image = resize_for_condition_image(qrcode_image, 1024)
-        elif qrcode_image is not None:
+        else:
             print("Using QR Code Image")
             qrcode_image = resize_for_condition_image(qrcode_image, 1024)
-        else:
-            raise gr.Error("No valid QR code content or image provided")
 
         # Determine which image to use as init_image and control_image
         if use_qr_code_as_init_image:
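For reference, the branch above builds the ControlNet conditioning image with the `qrcode` package. A standalone sketch of the same steps, with placeholder data and parameters, and a plain resize standing in for `resize_for_condition_image`:

import qrcode
from qrcode.constants import ERROR_CORRECT_H

qr = qrcode.QRCode(version=1, error_correction=ERROR_CORRECT_H, box_size=10, border=4)
qr.add_data("https://example.com")
qr.make(fit=True)
control_image = qr.make_image(fill_color="black", back_color="white").convert("RGB")
control_image = control_image.resize((1024, 1024))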
@@ -284,10 +280,10 @@ def inference(
         if invert_init_image and init_image is not None:
             init_image = invert_image(init_image)
 
-        progress(0.5, desc="Generating image...")
+        final_image = None
         out = pipe(
-            prompt=prompt,
-            negative_prompt=negative_prompt,
+            prompt=prompt,  # Use the full prompt
+            negative_prompt=negative_prompt,  # Use the full negative prompt
             image=init_image,
             control_image=control_image,
             width=1024,
@@ -297,18 +293,19 @@ def inference(
             generator=generator,
             strength=float(strength),
             num_inference_steps=50,
-            callback=lambda step, timestep, latents: progress(0.5 + step / 100, desc=f"Step {step}/50")
         )
-        final_image = out.images[0]
+        final_image = out.images[0] if final_image is None else final_image
 
         if invert_final_image:
             final_image = invert_image(final_image)
 
-        progress(1.0, desc="Done!")
-        return final_image, seed, gr.update(visible=False)  # Add the third output here
+        return final_image, seed
+    except gr.Error as e:
+        print(f"Gradio error in inference: {str(e)}")
+        return Image.new('RGB', (1024, 1024), color='white'), -1
     except Exception as e:
         print(f"Unexpected error in inference: {str(e)}")
-        return Image.new('RGB', (1024, 1024), color='white'), -1, gr.update(visible=False)
+        return Image.new('RGB', (1024, 1024), color='white'), -1
 
 
 
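Two behavioural notes on this hunk: the per-step `callback` is gone, so there is no step-by-step progress readout, and `gr.Error` is now caught and turned into a blank white image, so validation messages such as "Prompt is required" never reach the UI. A sketch of an exception block that keeps the placeholder for unexpected failures but re-raises Gradio's own errors (same surrounding `try` as in the diff):

    except gr.Error:
        # Let Gradio surface validation errors (missing prompt, token limits, ...)
        raise
    except Exception as e:
        print(f"Unexpected error in inference: {e}")
        return Image.new('RGB', (1024, 1024), color='white'), -1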
@@ -479,7 +476,6 @@ with gr.Blocks(theme='Hev832/Applio', css=css, fill_width=True, fill_height=True
 
         with gr.Column():
             result_image = gr.Image(label="Your Artistic QR Code", show_download_button=True, show_fullscreen_button=True)
-            result_image_loading = gr.Markdown("Generating image...", visible=False)
             scan_button = gr.Button("Verify QR Code Works", visible=False)
             scan_result = gr.Textbox(label="Validation Result of QR Code", interactive=False, visible=False)
             used_seed = gr.Number(label="Seed Used", interactive=False)
@@ -647,119 +643,98 @@ with gr.Blocks(theme='Hev832/Applio', css=css, fill_width=True, fill_height=True
         """
     )
 
-    def update_image_list(image, seed):
-        generated_images.value.append((image, seed))
-        return gr.Dropdown(choices=[f"Image {i+1} (Seed: {s})" for i, (_, s) in enumerate(generated_images.value)])
-
-    def load_image_for_editing(choice):
-        if not choice:
-            return None
-        index = int(choice.split()[1]) - 1
-        return generated_images.value[index][0]
-
-    def edit_image(image, brightness, contrast, saturation):
-        if image is None:
-            return None
-        img = Image.fromarray(image) if isinstance(image, np.ndarray) else image
-        img = ImageEnhance.Brightness(img).enhance(brightness)
-        img = ImageEnhance.Contrast(img).enhance(contrast)
-        img = ImageEnhance.Color(img).enhance(saturation)
-        return np.array(img)
-
-    def invert_image(image):
-        if image is None:
-            return None
-        return ImageOps.invert(Image.fromarray(image) if isinstance(image, np.ndarray) else image)
-
-    def scan_and_display(image):
-        if image is None:
-            return "No image to scan"
-
-        scanned_text = scan_qr_code(image)
-        if scanned_text:
-            return f"Scanned successfully: {scanned_text}"
-        else:
-            return "Failed to scan QR code. Try adjusting the settings for better visibility."
-
-    run_btn.click(
-        inference,
-        inputs=[
-            qr_code_content,
-            prompt,
-            negative_prompt,
-            guidance_scale,
-            controlnet_conditioning_scale,
-            strength,
-            seed,
-            init_image,
-            qr_code_image,
-            use_qr_code_as_init_image,
-            sampler,
-            bg_color,
-            qr_color,
-            invert_final_image,
-            controlnet_model_dropdown,
-            diffusion_model_dropdown,
-            reference_image_strength,
-        ],
-        outputs=[result_image, used_seed, result_image_loading],
-        show_progress=True
-    ).then(
-        update_image_list,
-        inputs=[result_image, used_seed],
-        outputs=[image_selector]
-    )
+    def scan_and_display(image):
+        if image is None:
+            return "No image to scan"
+
+        scanned_text = scan_qr_code(image)
+        if scanned_text:
+            return f"Scanned successfully: {scanned_text}"
+        else:
+            return "Failed to scan QR code. Try adjusting the settings for better visibility."
 
-    image_selector.change(
-        load_image_for_editing,
-        inputs=[image_selector],
-        outputs=[image_to_edit]
-    )
+    def invert_displayed_image(image):
+        if image is None:
+            return None
+        return invert_image(image)
 
-    brightness.change(
-        edit_image,
-        inputs=[image_to_edit, brightness, contrast, saturation],
-        outputs=[edited_image]
-    )
-    contrast.change(
-        edit_image,
-        inputs=[image_to_edit, brightness, contrast, saturation],
-        outputs=[edited_image]
-    )
-    saturation.change(
-        edit_image,
-        inputs=[image_to_edit, brightness, contrast, saturation],
-        outputs=[edited_image]
-    )
+    scan_button.click(
+        scan_and_display,
+        inputs=[result_image],
+        outputs=[scan_result]
+    )
 
-    invert_button.click(
-        invert_image,
-        inputs=[image_to_edit],
-        outputs=[edited_image]
-    )
+    invert_button.click(
+        invert_displayed_image,
+        inputs=[result_image],
+        outputs=[result_image]
+    )
 
-    scan_button.click(
-        scan_and_display,
-        inputs=[edited_image],
-        outputs=[scan_result]
-    )
+    invert_init_image_button.click(
+        invert_init_image_display,
+        inputs=[init_image],
+        outputs=[init_image]
+    )
+
+    brightness.change(
+        adjust_image,
+        inputs=[result_image, brightness, contrast, saturation],
+        outputs=[result_image]
+    )
+    contrast.change(
+        adjust_image,
+        inputs=[result_image, brightness, contrast, saturation],
+        outputs=[result_image]
+    )
+    saturation.change(
+        adjust_image,
+        inputs=[result_image, brightness, contrast, saturation],
+        outputs=[result_image]
+    )
+
+    # Add logic to show/hide the reference_image_strength slider
+    def update_reference_image_strength_visibility(init_image, use_qr_code_as_init_image):
+        return gr.update(visible=init_image is not None and not use_qr_code_as_init_image)
+
+    init_image.change(
+        update_reference_image_strength_visibility,
+        inputs=[init_image, use_qr_code_as_init_image],
+        outputs=[reference_image_strength]
+    )
 
-    # Define login button click behavior
-    login_button.click(
-        login,
-        inputs=[username, password],
-        outputs=[app_container, login_error, login_button]
+    use_qr_code_as_init_image.change(
+        update_reference_image_strength_visibility,
+        inputs=[init_image, use_qr_code_as_init_image],
+        outputs=[reference_image_strength]
     )
 
-    # Define password textbox submit behavior
-    password.submit(
-        login,
-        inputs=[username, password],
-        outputs=[app_container, login_error, login_button]
+    run_btn.click(
+        fn=inference,
+        inputs=[
+            qr_code_content,
+            prompt,
+            negative_prompt,
+            guidance_scale,
+            controlnet_conditioning_scale,
+            strength,
+            seed,
+            init_image,
+            qr_code_image,
+            use_qr_code_as_init_image,
+            sampler,
+            bg_color,
+            qr_color,
+            invert_final_image,
+            controlnet_model_dropdown,
+            diffusion_model_dropdown,
+            reference_image_strength,
+        ],
+        outputs=[result_image, used_seed],
+        concurrency_limit=20
     )
 
 # Load models on launch
 load_models_on_launch()
 
 blocks.queue(max_size=20)
-blocks.launch(share=False, show_api=True)
+blocks.launch(share=True, show_api=True)
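The rewired handlers reference `adjust_image` and `invert_init_image_display`, which are not defined anywhere in this diff (only `invert_displayed_image` is). A plausible `adjust_image`, modelled on the removed `edit_image` above; the name and exact behaviour are assumptions, not part of this commit:

import numpy as np
from PIL import Image, ImageEnhance

def adjust_image(image, brightness, contrast, saturation):
    # Assumed helper: same enhancement chain as the removed edit_image().
    if image is None:
        return None
    img = Image.fromarray(image) if isinstance(image, np.ndarray) else image
    img = ImageEnhance.Brightness(img).enhance(brightness)
    img = ImageEnhance.Contrast(img).enhance(contrast)
    img = ImageEnhance.Color(img).enhance(saturation)
    return np.array(img)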
 