Yuanshi committed on
Commit
91cb2de
·
1 Parent(s): ebe7629
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -21,7 +21,7 @@ pipe = FluxPipeline.from_pretrained(bfl_repo, transformer=None, torch_dtype=dtyp
21
  pipe.transformer = transformer
22
  pipe.scheduler.config.use_dynamic_shifting = False
23
  pipe.scheduler.config.time_shift = 10
24
- pipe.enable_model_cpu_offload()
25
  pipe = pipe.to(device)
26
 
27
  pipe.load_lora_weights(
@@ -41,7 +41,7 @@ pipe.load_lora_weights(
41
  )
42
  MAX_SEED = np.iinfo(np.int32).max
43
  MAX_IMAGE_SIZE = 4096
44
- USE_ZERO_GPU = False
45
 
46
 
47
  # @spaces.GPU #[uncomment to use ZeroGPU]
@@ -57,9 +57,9 @@ def infer(
57
  ):
58
  print("Using model:", model)
59
  if model == "2k":
60
- pipe.set_adapter("2k")
61
  elif model == "4k":
62
- pipe.set_adapter(f"4k_{flux_model}")
63
 
64
  if randomize_seed:
65
  seed = random.randint(0, MAX_SEED)
@@ -106,7 +106,7 @@ css = """
106
  """
107
 
108
  with gr.Blocks(css=css) as demo:
109
- gr.Markdown("# UREA")
110
  with gr.Row(elem_id="maincontainer"):
111
  with gr.Column(elem_id="col1"):
112
  gr.Markdown("### Prompt:")
 
21
  pipe.transformer = transformer
22
  pipe.scheduler.config.use_dynamic_shifting = False
23
  pipe.scheduler.config.time_shift = 10
24
+ # pipe.enable_model_cpu_offload()
25
  pipe = pipe.to(device)
26
 
27
  pipe.load_lora_weights(
 
41
  )
42
  MAX_SEED = np.iinfo(np.int32).max
43
  MAX_IMAGE_SIZE = 4096
44
+ USE_ZERO_GPU = True
45
 
46
 
47
  # @spaces.GPU #[uncomment to use ZeroGPU]
 
57
  ):
58
  print("Using model:", model)
59
  if model == "2k":
60
+ pipe.set_adapters("2k")
61
  elif model == "4k":
62
+ pipe.set_adapters(f"4k_{flux_model}")
63
 
64
  if randomize_seed:
65
  seed = random.randint(0, MAX_SEED)
 
106
  """
107
 
108
  with gr.Blocks(css=css) as demo:
109
+ gr.Markdown("# URAE: ")
110
  with gr.Row(elem_id="maincontainer"):
111
  with gr.Column(elem_id="col1"):
112
  gr.Markdown("### Prompt:")