TheAwakenOne committed
Commit 885080e · 1 Parent(s): fa075ae
Files changed (1)
  1. app.py +7 -56
app.py CHANGED
@@ -13,8 +13,6 @@ import copy
 import random
 import time
 
-selected_lora_index = None
-
 # Load LoRAs from JSON file
 with open('loras.json', 'r') as f:
     loras = json.load(f)
@@ -59,8 +57,6 @@ class calculateDuration:
         print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
 
 def update_selection(evt: gr.SelectData, width, height):
-    global selected_lora_index
-    selected_lora_index = evt.index
     selected_lora = loras[evt.index]
     new_placeholder = f"{selected_lora['trigger_word']} {prompt.value}"
     lora_repo = selected_lora["repo"]
@@ -78,31 +74,12 @@ def update_selection(evt: gr.SelectData, width, height):
     return (
         gr.update(value=new_placeholder),
         updated_text,
+        evt.index,
         width,
         height,
         gr.update(interactive=True) # Enable the Generate button
     )
 
-def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
-    global selected_lora_index
-    if selected_lora_index is None:
-        raise gr.Error("You must select a LoRA before proceeding.")
-
-    selected_lora = loras[selected_lora_index]
-    lora_path = selected_lora["repo"]
-    trigger_word = selected_lora["trigger_word"]
-
-    if trigger_word:
-        if "trigger_position" in selected_lora:
-            if selected_lora["trigger_position"] == "prepend":
-                prompt_mash = f"{trigger_word} {prompt}"
-            else:
-                prompt_mash = f"{prompt} {trigger_word}"
-        else:
-            prompt_mash = f"{trigger_word} {prompt}"
-    else:
-        prompt_mash = prompt
-
 @spaces.GPU(duration=70)
 def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress):
     pipe.to("cuda")
@@ -122,31 +99,11 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scal
     ):
         yield img
 
-@spaces.GPU(duration=70)
-def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, lora_scale, seed):
-    generator = torch.Generator(device="cuda").manual_seed(seed)
-    pipe_i2i.to("cuda")
-    image_input = load_image(image_input_path)
-    final_image = pipe_i2i(
-        prompt=prompt_mash,
-        image=image_input,
-        strength=image_strength,
-        num_inference_steps=steps,
-        guidance_scale=cfg_scale,
-        width=width,
-        height=height,
-        generator=generator,
-        joint_attention_kwargs={"scale": lora_scale},
-        output_type="pil",
-    ).images[0]
-    return final_image
-
-def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
-    global selected_lora_index
-    if selected_lora_index is None:
+def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
+    if selected_index is None:
         raise gr.Error("You must select a LoRA before proceeding.")
 
-    selected_lora = loras[selected_lora_index]
+    selected_lora = loras[selected_index]
     lora_path = selected_lora["repo"]
     trigger_word = selected_lora["trigger_word"]
 
@@ -176,11 +133,7 @@ def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
-    if image_input is not None:
-        final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, lora_scale, seed)
-        yield final_image, seed, gr.update(visible=False)
-    else:
-        image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress)
+    image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress)
     # Consume the generator to get the final image
     final_image = None
     step_counter = 0
@@ -192,8 +145,6 @@ def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora
 
     yield final_image, seed, gr.update(visible=False)
 
-# ...
-
 # Gradio interface
 with gr.Blocks() as demo:
     gr.Markdown("# Awaken Ones' Lora Previews")
@@ -245,9 +196,9 @@ with gr.Blocks() as demo:
     progress_bar = gr.Markdown(visible=False)
 
     # Event handlers
-    gallery.select(update_selection, [width, height], [prompt, selected_lora, width, height, generate])
+    gallery.select(update_selection, [width, height], [prompt, selected_lora, gr.State(), width, height, generate])
    randomize_seed.change(lambda x: gr.update(visible=not x), randomize_seed, seed_input)
-    generate_event = generate.click(run_lora, inputs=[prompt, cfg_scale, steps, randomize_seed, seed_input, width, height, lora_scale], outputs=[result, seed_output, progress_bar])
+    generate_event = generate.click(run_lora, inputs=[prompt, cfg_scale, steps, gr.State(), randomize_seed, seed_input, width, height, lora_scale], outputs=[result, seed_output, progress_bar])
     cancel.click(lambda: None, None, None, cancels=[generate_event])
 
     demo.queue().launch()
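
For reference, a minimal, self-contained sketch of the pattern this commit moves toward: the gallery selection index is returned from the select handler into a shared gr.State component and read back as an input of the click handler, instead of being kept in a module-level global. This is not the app's own code; the component names, the placeholder loras list, and the stub run_lora are illustrative only, and a single gr.State instance is assumed to be shared between the two event wirings.

import gradio as gr
import numpy as np

# Stand-in for loras.json; the real app loads this from disk.
loras = [
    {"repo": "user/lora-a", "trigger_word": "styleA"},
    {"repo": "user/lora-b", "trigger_word": "styleB"},
]
# Placeholder thumbnails so the Gallery has something to show.
thumbs = [np.full((64, 64, 3), c, dtype=np.uint8) for c in (64, 192)]

def update_selection(evt: gr.SelectData):
    # Return the clicked index so Gradio writes it into the State output.
    lora = loras[evt.index]
    return f"Selected {lora['repo']} (trigger: {lora['trigger_word']})", evt.index

def run_lora(prompt, selected_index):
    # The index arrives through the shared State component, not a global.
    if selected_index is None:
        raise gr.Error("You must select a LoRA before proceeding.")
    trigger_word = loras[selected_index]["trigger_word"]
    return f"{trigger_word} {prompt}"  # prompt mash only; image generation omitted

with gr.Blocks() as demo:
    selected_state = gr.State(None)  # one shared State instance
    gallery = gr.Gallery(value=thumbs, label="LoRAs", columns=2)
    info = gr.Markdown()
    prompt = gr.Textbox(label="Prompt")
    result = gr.Textbox(label="Prompt with trigger word")
    generate = gr.Button("Generate")

    # The same selected_state object is an output of .select() and an input of .click().
    gallery.select(update_selection, None, [info, selected_state])
    generate.click(run_lora, inputs=[prompt, selected_state], outputs=[result])

demo.queue().launch()

Keeping the index in gr.State also makes the selection per-session, which matters on a shared Space where a module-level global would be visible to every concurrent user.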