Keltezaa committed on
Commit cd54ddb · verified · 1 Parent(s): d085fc1

Update app.py

Files changed (1)
  1. app.py +7 -1
app.py CHANGED
@@ -454,7 +454,7 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
             output_type="pil",
             good_vae=good_vae,
         ):
-            print(f"Debug: Yielding image of type {type(img)}") # Check type of each image
+            print(f"Debug: Yielding image of type {type(img)}") # Check the type of the image
             if isinstance(img, float):
                 print("Error: A float was returned instead of an image.") # Log if img is a float
                 raise ValueError("Expected an image, but got a float.") # Raise error if a float is found
@@ -463,6 +463,7 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
     return final_image
 
 def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, seed):
+    print("Generating image from input...")
     pipe_i2i.to("cuda")
     generator = torch.Generator(device="cuda").manual_seed(seed)
     image_input = load_image(image_input_path)
@@ -478,8 +479,12 @@ def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps
         joint_attention_kwargs={"scale": 1.0},
         output_type="pil",
     ).images[0]
+    if isinstance(final_image, float):
+        print("Error: Expected an image but got a float.")
+        raise ValueError("Expected an image, but got a float.")
     return final_image
 
+
 @spaces.GPU(duration=75)
 def run_lora(prompt, cfg_scale, steps, selected_info_1, selected_info_2, selected_info_3, selected_info_4, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, lora_scale_4, randomize_seed, seed, width, height, loras_state, image_input=None, progress=gr.Progress(track_tqdm=True)):
     print("run_lora function called.") # Debugging statement
@@ -580,6 +585,7 @@ def run_lora(prompt, cfg_scale, steps, selected_info_1, selected_info_2, selecte
 
 run_lora.zerogpu = True
 
+
 def get_huggingface_safetensors(link):
     split_link = link.split("/")
     if len(split_link) == 4:
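
The isinstance checks added in generate_image and generate_image_to_image share one defensive pattern: verify that the pipeline actually returned an image before handing it downstream. A minimal, self-contained sketch of that guard as a standalone helper (ensure_pil_image is a hypothetical name, not part of app.py):

from PIL import Image

def ensure_pil_image(result, context="pipeline output"):
    # Hypothetical helper mirroring the guard added in this commit:
    # fail fast if the pipeline returned something other than a PIL image
    # (e.g. a float), instead of passing the bad value along.
    if not isinstance(result, Image.Image):
        print(f"Error: expected a PIL image from {context}, got {type(result).__name__}.")
        raise ValueError(f"Expected an image, but got {type(result).__name__}.")
    return result

if __name__ == "__main__":
    img = Image.new("RGB", (64, 64))      # stand-in for the i2i pipeline output
    ensure_pil_image(img)                  # passes
    # ensure_pil_image(0.5)                # would raise ValueError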