Keltezaa committed
Commit 888e5ff · verified · 1 Parent(s): 25132ea

Update app.py

Files changed (1): app.py +35 -35
app.py CHANGED
@@ -322,22 +322,22 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
     ):
         yield img
 
-def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, seed):
-    pipe_i2i.to("cuda")
-    generator = torch.Generator(device="cuda").manual_seed(seed)
-    image_input = load_image(image_input_path)
-    final_image = pipe_i2i(
-        prompt=prompt_mash,
-        image=image_input,
-        strength=image_strength,
-        num_inference_steps=steps,
-        guidance_scale=cfg_scale,
-        width=width,
-        height=height,
-        generator=generator,
-        joint_attention_kwargs={"scale": 1.0},
-        output_type="pil",
-    ).images[0]
+#def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, seed):
+#    pipe_i2i.to("cuda")
+#    generator = torch.Generator(device="cuda").manual_seed(seed)
+#    image_input = load_image(image_input_path)
+#    final_image = pipe_i2i(
+#        prompt=prompt_mash,
+#        image=image_input,
+#        strength=image_strength,
+#        num_inference_steps=steps,
+#        guidance_scale=cfg_scale,
+#        width=width,
+#        height=height,
+#        generator=generator,
+#        joint_attention_kwargs={"scale": 1.0},
+#        output_type="pil",
+#    ).images[0]
     return final_image
 
 @spaces.GPU(duration=75)
@@ -362,7 +362,7 @@ def run_lora(prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scal
     # Unload previous LoRA weights
     with calculateDuration("Unloading LoRA"):
         pipe.unload_lora_weights()
-        pipe_i2i.unload_lora_weights()
+        # pipe_i2i.unload_lora_weights()
 
     print(pipe.get_active_adapters())
     # Load LoRA weights with respective scales
@@ -377,32 +377,32 @@ def run_lora(prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scal
             lora_path = lora['repo']
             weight_name = lora.get("weights")
             print(f"Lora Path: {lora_path}")
-            pipe_to_use = pipe_i2i if image_input is not None else pipe
-            pipe_to_use.load_lora_weights(
-                lora_path,
-                weight_name=weight_name if weight_name else None,
-                low_cpu_mem_usage=True,
-                adapter_name=lora_name
-            )
-            if image_input is not None: pipe_i2i = pipe_to_use
-            else: pipe = pipe_to_use
+            # pipe_to_use = pipe_i2i if image_input is not None else pipe
+            # pipe_to_use.load_lora_weights(
+            #     lora_path,
+            #     weight_name=weight_name if weight_name else None,
+            #     low_cpu_mem_usage=True,
+            #     adapter_name=lora_name
+            # )
+            # if image_input is not None: pipe_i2i = pipe_to_use
+            # else: pipe = pipe_to_use
     print("Loaded LoRAs:", lora_names)
     print("Adapter weights:", lora_weights)
-    if image_input is not None:
-        pipe_i2i.set_adapters(lora_names, adapter_weights=lora_weights)
-    else:
-        pipe.set_adapters(lora_names, adapter_weights=lora_weights)
-    print(pipe.get_active_adapters())
+    # if image_input is not None:
+    #     pipe_i2i.set_adapters(lora_names, adapter_weights=lora_weights)
+    # else:
+    #     pipe.set_adapters(lora_names, adapter_weights=lora_weights)
+    # print(pipe.get_active_adapters())
     # Set random seed for reproducibility
     with calculateDuration("Randomizing seed"):
         if randomize_seed:
             seed = random.randint(0, MAX_SEED)
 
     # Generate image
-    if image_input is not None:
-        final_image = generate_image_to_image(prompt_mash, steps, cfg_scale, width, height, seed)
-        yield final_image, seed, gr.update(visible=False)
-    else:
+    # if image_input is not None:
+    #     final_image = generate_image_to_image(prompt_mash, steps, cfg_scale, width, height, seed)
+    #     yield final_image, seed, gr.update(visible=False)
+    # else:
     image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
     # Consume the generator to get the final image
     final_image = None
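
For context, this commit disables the image-to-image branch: the generate_image_to_image helper, the pipe_i2i LoRA handling, and the image_input dispatch in run_lora are all commented out, so every request now goes through generate_image on the text-to-image pipe (the old call at line 403 also passed fewer arguments than the helper's signature expected). Below is a minimal, self-contained sketch of that remaining path, assuming a FLUX.1-dev base checkpoint, a placeholder LoRA repo id, and placeholder prompt/sampler settings; calculateDuration, the Gradio wiring, and the Space's actual globals are omitted.

    # Sketch only: values marked "assumption"/"placeholder" are not taken from the diff.
    import torch
    from diffusers import FluxPipeline

    # Assumption: FLUX.1-dev as the base checkpoint (the Space's real base model
    # is defined elsewhere in app.py and is not shown in this diff).
    pipe = FluxPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
    ).to("cuda")

    # LoRA handling kept by the commit, applied only to the text-to-image pipe:
    pipe.unload_lora_weights()                       # drop adapters from a previous run
    pipe.load_lora_weights(
        "some-user/some-flux-lora",                  # placeholder LoRA repo id
        weight_name=None,                            # or an explicit .safetensors filename
        low_cpu_mem_usage=True,
        adapter_name="lora_0",
    )
    pipe.set_adapters(["lora_0"], adapter_weights=[1.0])
    print(pipe.get_active_adapters())

    # Single text-to-image call with the same keyword arguments generate_image uses.
    generator = torch.Generator(device="cuda").manual_seed(42)
    image = pipe(
        prompt="a photo in the style of the loaded LoRA",  # placeholder prompt
        num_inference_steps=28,
        guidance_scale=3.5,
        width=1024,
        height=1024,
        generator=generator,
        joint_attention_kwargs={"scale": 1.0},
        output_type="pil",
    ).images[0]
    image.save("out.png")

Calling pipe.unload_lora_weights() before each load mirrors the kept line at 364; with pipe_i2i.unload_lora_weights() commented out, any adapters previously attached to the image-to-image pipeline would remain loaded if that path were re-enabled.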