Keltezaa commited on
Commit
46e5485
·
verified ·
1 Parent(s): 01a105f

Upload quad lora app.py

Browse files
Files changed (1) hide show
  1. quad lora app.py +795 -0
quad lora app.py ADDED
@@ -0,0 +1,795 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import gradio as gr
3
+ import json
4
+ import logging
5
+ import torch
6
+ from PIL import Image
7
+ import spaces
8
+ from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL
9
+ from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
10
+ from huggingface_hub import hf_hub_download, HfFileSystem, ModelCard, snapshot_download
11
+ from transformers import AutoModelForCausalLM, CLIPTokenizer, CLIPProcessor, CLIPModel, LongformerTokenizer, LongformerModel
12
+ import copy
13
+ import random
14
+ import time
15
+ import requests
16
+ import pandas as pd
17
+
18
# Disable tokenizer parallelism (avoids fork-related tokenizer warnings/deadlocks)
os.environ["TOKENIZERS_PARALLELISM"] = "false"

# Initialize the CLIP tokenizer and model
# (the processor is used by process_input to truncate prompts to CLIP's 77-token limit)
clip_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch16")
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch16")

# Initialize the Longformer tokenizer and model
# NOTE(review): these Longformer objects are never referenced elsewhere in this
# file — confirm they are needed before paying the download/memory cost.
longformer_tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
longformer_model = LongformerModel.from_pretrained("allenai/longformer-base-4096")

# Load prompts for randomization (flat array of every cell in prompts.csv, no header)
df = pd.read_csv('prompts.csv', header=None)
prompt_values = df.values.flatten()

# Load LoRAs from JSON file
# (list of dicts; downstream code reads 'title', 'repo', 'image' and optionally
# 'weights', 'trigger_word', 'trigger_position')
with open('loras.json', 'r') as f:
    loras = json.load(f)

# Initialize the base model
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
base_model = "black-forest-labs/FLUX.1-dev"

# taef1: tiny VAE used for fast intermediate previews; good_vae: the full-quality
# VAE passed to the live-preview helper (presumably for the final decode — see
# live_preview_helpers).
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)

# Largest value accepted as an image seed (32-bit range).
MAX_SEED = 2**32 - 1

# Bind the imported live-preview generator function onto the pipeline instance
# so it can be called like a method (receives `pipe` as its first argument).
pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
50
+
51
def process_input(input_text):
    """Tokenize *input_text* with the CLIP processor, truncating to CLIP's 77-token limit.

    Returns the processor's batch encoding (PyTorch tensors).
    """
    return clip_processor(
        text=input_text,
        return_tensors="pt",
        padding=True,
        truncation=True,
        max_length=77,
    )
55
+
56
# Example usage
# NOTE(review): this runs the CLIP processor at import time purely as a demo;
# `inputs` is not used elsewhere in the file — consider removing.
input_text = "Your long prompt goes here..."
inputs = process_input(input_text)
59
+
60
class calculateDuration:
    """Context manager that prints wall-clock time spent inside the `with` block.

    Attributes set during use: start_time, end_time, elapsed_time (seconds).
    """

    def __init__(self, activity_name=""):
        self.activity_name = activity_name

    def __enter__(self):
        self.start_time = time.time()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.end_time = time.time()
        self.elapsed_time = self.end_time - self.start_time
        # Include the activity name in the message only when one was given.
        if self.activity_name:
            print(f"Elapsed time for {self.activity_name}: {self.elapsed_time:.6f} seconds")
        else:
            print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
75
+
76
def download_file(url, directory=None):
    """Download *url* into *directory* (current working directory by default).

    Returns the local file path. Raises requests.HTTPError on a bad status
    code and requests.Timeout if the server stalls.
    """
    if directory is None:
        directory = os.getcwd()  # Use current working directory if not specified

    # Get the filename from the URL
    filename = url.split('/')[-1]

    # Full path for the downloaded file
    filepath = os.path.join(directory, filename)

    # BUG FIX: the original had no timeout (a stalled server hangs the app
    # forever) and buffered the whole file in memory; stream it in chunks.
    response = requests.get(url, stream=True, timeout=60)
    response.raise_for_status()  # Raise an exception for bad status codes

    # Write the content to the file, 1 MiB at a time.
    with open(filepath, 'wb') as file:
        for chunk in response.iter_content(chunk_size=1 << 20):
            file.write(chunk)

    return filepath
95
+
96
def get_trigger_word(base_model, lora_models):
    """Return an HTML snippet (green <span>) with the trigger word for the selected LoRAs.

    *lora_models* is treated as a mapping of slot names ("lora1".."lora4") to LoRA
    metadata dicts — TODO confirm against callers. The last populated slot wins,
    mirroring the original if-chain ordering. *base_model* is accepted for
    interface compatibility but not used. Falls back to "default_trigger".
    """
    trigger_word = "<span style='color:green;'>default_trigger</span>"  # default, green
    # BUG FIX: the original referenced undefined module globals lora1..lora4,
    # raising NameError whenever a slot key was present; look the LoRA dicts up
    # in lora_models instead.
    for slot in ("lora1", "lora2", "lora3", "lora4"):
        if slot in lora_models:
            word = lora_models[slot].get('trigger_word', '')
            trigger_word = f"<span style='color:green;'>{word}</span>"
    return trigger_word
108
+
109
def update_selection(evt: gr.SelectData, selected_indices, loras_state, width, height):
    """Gallery .select() handler: toggle the clicked LoRA in/out of the (max 4)
    selection and rebuild every per-slot info string and preview image.

    Returns a 17-tuple in the order the Gradio wiring expects:
    (prompt update, info 1-4, selected_indices, scale 1-4, width, height,
     image 1-4, trailing gr.update()).
    """
    selected_index = evt.index
    selected_indices = selected_indices or []

    # Toggle semantics: clicking a selected item removes it; otherwise add it
    # unless all four slots are already occupied.
    if selected_index in selected_indices:
        selected_indices.remove(selected_index)
    else:
        if len(selected_indices) < 4:
            selected_indices.append(selected_index)
        else:
            gr.Warning("You can select up to 4 LoRAs, remove one to select a new one.")
            # BUG FIX: this early return produced 15 values while the normal
            # return (below) produces 17, so Gradio errored on the warning
            # path — pad to the full 17 outputs.
            return (gr.update(), gr.update(), gr.update(), gr.update(), gr.update(),
                    selected_indices, gr.update(), gr.update(), gr.update(), gr.update(),
                    width, height,
                    gr.update(), gr.update(), gr.update(), gr.update(), gr.update())

    # Slot defaults; occupied slots are filled in below.
    selected_info_1 = "Select a LoRA 1"
    selected_info_2 = "Select a LoRA 2"
    selected_info_3 = "Select a LoRA 3"
    selected_info_4 = "Select a LoRA 4"
    lora_scale_1 = lora_scale_2 = lora_scale_3 = lora_scale_4 = 0.5
    lora_image_1 = lora_image_2 = lora_image_3 = lora_image_4 = None

    if len(selected_indices) >= 1:
        lora1 = loras_state[selected_indices[0]]
        trigger_word = lora1.get('trigger_word', '')
        selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}](https://huggingface.co/{lora1['repo']}) ✨ {trigger_word}"
        lora_image_1 = lora1['image']
    if len(selected_indices) >= 2:
        lora2 = loras_state[selected_indices[1]]
        trigger_word = lora2.get('trigger_word', '')
        selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}](https://huggingface.co/{lora2['repo']}) ✨ {trigger_word}"
        lora_image_2 = lora2['image']
    if len(selected_indices) >= 3:
        lora3 = loras_state[selected_indices[2]]
        trigger_word = lora3.get('trigger_word', '')
        selected_info_3 = f"### LoRA 3 Selected: [{lora3['title']}](https://huggingface.co/{lora3['repo']}) ✨ {trigger_word}"
        lora_image_3 = lora3['image']
    if len(selected_indices) >= 4:
        lora4 = loras_state[selected_indices[3]]
        trigger_word = lora4.get('trigger_word', '')
        selected_info_4 = f"### LoRA 4 Selected: [{lora4['title']}](https://huggingface.co/{lora4['repo']}) ✨ {trigger_word}"
        lora_image_4 = lora4['image']

    # Prompt placeholder reflects the most recently selected LoRA.
    if selected_indices:
        last_selected_lora = loras_state[selected_indices[-1]]
        new_placeholder = f"Type a prompt for {last_selected_lora['title']}"
    else:
        new_placeholder = "Type a prompt after selecting a LoRA"

    return (gr.update(placeholder=new_placeholder),
            selected_info_1, selected_info_2, selected_info_3, selected_info_4, selected_indices,
            lora_scale_1, lora_scale_2, lora_scale_3, lora_scale_4,
            width, height,
            lora_image_1, lora_image_2, lora_image_3, lora_image_4,
            gr.update()
            )
171
+
172
def randomize_loras(selected_indices, loras_state):
    """Select two distinct LoRAs at random plus a random prompt from prompts.csv.

    Returns the same 14 outputs as the remove_lora_* handlers plus the random
    prompt: (info 1-4, selected_indices, scale 1-4, image 1-4, random_prompt).
    Raises gr.Error when fewer than two LoRAs are available.
    """
    if len(loras_state) < 2:
        raise gr.Error("Not enough LoRAs to randomize.")
    selected_indices = random.sample(range(len(loras_state)), 2)
    lora1 = loras_state[selected_indices[0]]
    lora2 = loras_state[selected_indices[1]]
    # BUG FIX: the original referenced an undefined `trigger_word` and undefined
    # slot-3/4 variables (selected_info_3/4, lora_scale_3/4, lora_image_3/4),
    # raising NameError on every call; use each LoRA's own trigger word and
    # explicit defaults for the empty slots.
    selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}](https://huggingface.co/{lora1['repo']}) ✨ {lora1.get('trigger_word', '')}"
    selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}](https://huggingface.co/{lora2['repo']}) ✨ {lora2.get('trigger_word', '')}"
    selected_info_3 = "Select a LoRA 3"
    selected_info_4 = "Select a LoRA 4"
    lora_scale_1 = lora_scale_2 = lora_scale_3 = lora_scale_4 = 0.5
    lora_image_1 = lora1['image']
    lora_image_2 = lora2['image']
    lora_image_3 = None
    lora_image_4 = None
    random_prompt = random.choice(prompt_values)
    return (selected_info_1, selected_info_2, selected_info_3, selected_info_4,
            selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, lora_scale_4,
            lora_image_1, lora_image_2, lora_image_3, lora_image_4, random_prompt)
186
+
187
def remove_lora_1(selected_indices, loras_state):
    """Clear slot 1: drop the first selected LoRA (if any), then rebuild every
    slot's info markdown and image. Scales always reset to 0.5."""
    if len(selected_indices) >= 1:
        selected_indices.pop(0)
    slot_infos = ["Select a LoRA 1", "Select a LoRA 2", "Select a LoRA 3", "Select a LoRA 4"]
    slot_images = [None, None, None, None]
    # Re-derive each occupied slot from the remaining selection order.
    for slot, lora_index in enumerate(selected_indices[:4]):
        lora = loras_state[lora_index]
        trigger_word = lora.get('trigger_word', '')
        slot_infos[slot] = f"### LoRA {slot + 1} Selected: [{lora['title']}]({lora['repo']}) ✨ {trigger_word}"
        slot_images[slot] = lora['image']
    return (slot_infos[0], slot_infos[1], slot_infos[2], slot_infos[3], selected_indices,
            0.5, 0.5, 0.5, 0.5,
            slot_images[0], slot_images[1], slot_images[2], slot_images[3], gr.update())
223
+
224
def remove_lora_2(selected_indices, loras_state):
    """Clear slot 2: drop the second selected LoRA (if any), then rebuild every
    slot's info markdown and image. Scales always reset to 0.5."""
    if len(selected_indices) >= 2:
        selected_indices.pop(1)
    slot_infos = ["Select a LoRA 1", "Select a LoRA 2", "Select a LoRA 3", "Select a LoRA 4"]
    slot_images = [None, None, None, None]
    # Re-derive each occupied slot from the remaining selection order.
    for slot, lora_index in enumerate(selected_indices[:4]):
        lora = loras_state[lora_index]
        trigger_word = lora.get('trigger_word', '')
        slot_infos[slot] = f"### LoRA {slot + 1} Selected: [{lora['title']}]({lora['repo']}) ✨ {trigger_word}"
        slot_images[slot] = lora['image']
    return (slot_infos[0], slot_infos[1], slot_infos[2], slot_infos[3], selected_indices,
            0.5, 0.5, 0.5, 0.5,
            slot_images[0], slot_images[1], slot_images[2], slot_images[3], gr.update())
260
+
261
def remove_lora_3(selected_indices, loras_state):
    """Clear slot 3: drop the third selected LoRA (if any), then rebuild every
    slot's info markdown and image. Scales always reset to 0.5."""
    if len(selected_indices) >= 3:
        selected_indices.pop(2)
    slot_infos = ["Select a LoRA 1", "Select a LoRA 2", "Select a LoRA 3", "Select a LoRA 4"]
    slot_images = [None, None, None, None]
    # Re-derive each occupied slot from the remaining selection order.
    for slot, lora_index in enumerate(selected_indices[:4]):
        lora = loras_state[lora_index]
        trigger_word = lora.get('trigger_word', '')
        slot_infos[slot] = f"### LoRA {slot + 1} Selected: [{lora['title']}]({lora['repo']}) ✨ {trigger_word}"
        slot_images[slot] = lora['image']
    return (slot_infos[0], slot_infos[1], slot_infos[2], slot_infos[3], selected_indices,
            0.5, 0.5, 0.5, 0.5,
            slot_images[0], slot_images[1], slot_images[2], slot_images[3], gr.update())
297
+
298
def remove_lora_4(selected_indices, loras_state):
    """Clear slot 4: drop the fourth selected LoRA (if any), then rebuild every
    slot's info markdown and image. Scales always reset to 0.5."""
    if len(selected_indices) >= 4:
        selected_indices.pop(3)
    slot_infos = ["Select a LoRA 1", "Select a LoRA 2", "Select a LoRA 3", "Select a LoRA 4"]
    slot_images = [None, None, None, None]
    # Re-derive each occupied slot from the remaining selection order.
    for slot, lora_index in enumerate(selected_indices[:4]):
        lora = loras_state[lora_index]
        trigger_word = lora.get('trigger_word', '')
        slot_infos[slot] = f"### LoRA {slot + 1} Selected: [{lora['title']}]({lora['repo']}) ✨ {trigger_word}"
        slot_images[slot] = lora['image']
    return (slot_infos[0], slot_infos[1], slot_infos[2], slot_infos[3], selected_indices,
            0.5, 0.5, 0.5, 0.5,
            slot_images[0], slot_images[1], slot_images[2], slot_images[3], gr.update())
334
+
335
def add_custom_lora(custom_lora, selected_indices, current_loras, gallery):
    """Resolve *custom_lora* (HF repo path, HF URL, or direct .safetensors link),
    append it to current_loras if new, select it if a slot is free, and rebuild
    the gallery plus all slot outputs.

    Always returns the same 16 outputs: (current_loras, gallery update,
    info 1-4, selected_indices, scale 1-4, image 1-4, trailing gr.update()).
    """
    if not custom_lora:
        # Nothing entered: leave every UI output unchanged.
        return (current_loras, gr.update(), gr.update(), gr.update(), gr.update(), gr.update(),
                selected_indices, gr.update(), gr.update(), gr.update(), gr.update(),
                gr.update(), gr.update(), gr.update(), gr.update(), gr.update())
    try:
        title, repo, path, trigger_word, image = check_custom_model(custom_lora)
        print(f"Loaded custom LoRA: {repo}")
        existing_item_index = next((index for (index, item) in enumerate(current_loras) if item['repo'] == repo), None)
        if existing_item_index is None:
            # Direct weight links are downloaded locally first.
            if repo.endswith(".safetensors") and repo.startswith("http"):
                repo = download_file(repo)
            new_item = {
                "image": image if image else "/home/user/app/custom.png",
                "title": title,
                "repo": repo,
                "weights": path,
                "trigger_word": trigger_word
            }
            print(f"New LoRA: {new_item}")
            existing_item_index = len(current_loras)
            current_loras.append(new_item)

        # Update gallery
        gallery_items = [(item["image"], item["title"]) for item in current_loras]
        # Select the (new or pre-existing) item if there's a free slot.
        if len(selected_indices) < 4:
            selected_indices.append(existing_item_index)
        else:
            gr.Warning("You can select up to 4 LoRAs, remove one to select a new one.")

        # Rebuild slot infos/images.
        # BUG FIX: the original indexed the undefined name `loras_state` (the
        # module-level gr.State object, not this handler's list), which crashed;
        # use current_loras, which holds the LoRA dicts.
        infos = ["Select a LoRA 1", "Select a LoRA 2", "Select a LoRA 3", "Select a LoRA 4"]
        images = [None, None, None, None]
        for slot, lora_index in enumerate(selected_indices[:4]):
            lora = current_loras[lora_index]
            tw = lora.get('trigger_word', '')
            infos[slot] = f"### LoRA {slot + 1} Selected: [{lora['title']}]({lora['repo']}) ✨ {tw}"
            images[slot] = lora['image']
        print("Finished adding custom LoRA")
        return (current_loras, gr.update(value=gallery_items),
                infos[0], infos[1], infos[2], infos[3], selected_indices,
                0.5, 0.5, 0.5, 0.5,
                images[0], images[1], images[2], images[3], gr.update())
    except Exception as e:
        print(e)
        gr.Warning(str(e))
        # BUG FIX: the error path returned only 9 values while the success path
        # returns 16, so any failure also broke the Gradio outputs — pad it.
        return (current_loras, gr.update(), gr.update(), gr.update(), gr.update(), gr.update(),
                selected_indices, gr.update(), gr.update(), gr.update(), gr.update(),
                gr.update(), gr.update(), gr.update(), gr.update(), gr.update())
405
+
406
def remove_custom_lora(selected_indices, current_loras, gallery):
    """Remove the most recently added (custom) LoRA and, if it was selected,
    deselect it; then rebuild the gallery and all slot outputs.

    Returns 16 outputs: (current_loras, gallery update, info 1-4,
    selected_indices, scale 1-4, image 1-4, trailing gr.update()).
    """
    gallery_items = [(item["image"], item["title"]) for item in current_loras]
    infos = ["Select a LoRA 1", "Select a LoRA 2", "Select a LoRA 3", "Select a LoRA 4"]
    images = [None, None, None, None]
    if current_loras:
        # Drop the last entry; its index (== new length) is removed from the
        # selection if present.
        current_loras = current_loras[:-1]
        custom_lora_index = len(current_loras)
        if custom_lora_index in selected_indices:
            selected_indices.remove(custom_lora_index)
        # Update gallery
        gallery_items = [(item["image"], item["title"]) for item in current_loras]
        # BUG FIX: the original indexed the undefined name `loras_state` (the
        # module-level gr.State object, not a list); use current_loras instead.
        for slot, lora_index in enumerate(selected_indices[:4]):
            lora = current_loras[lora_index]
            tw = lora.get('trigger_word', '')
            infos[slot] = f"### LoRA {slot + 1} Selected: [{lora['title']}]({lora['repo']}) ✨ {tw}"
            images[slot] = lora['image']
    # BUG FIX: the log message previously said "adding".
    print("Finished removing custom LoRA")
    return (current_loras, gr.update(value=gallery_items),
            infos[0], infos[1], infos[2], infos[3], selected_indices,
            0.5, 0.5, 0.5, 0.5,
            images[0], images[1], images[2], images[3], gr.update()
            )
453
+
454
def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress):
    """Yield progressively-refined PIL images for *prompt_mash*.

    Streams intermediate previews from the bound live-preview pipeline helper;
    good_vae is passed through for the helper's final decode (presumably the
    full-quality VAE pass — see live_preview_helpers). Requires CUDA: the
    pipeline and the generator are placed on "cuda" unconditionally.
    `progress` is accepted but not used here (tqdm tracking happens upstream).
    """
    print("Generating image...")
    pipe.to("cuda")
    # Seeded CUDA generator makes the run reproducible for a given seed.
    generator = torch.Generator(device="cuda").manual_seed(seed)
    with calculateDuration("Generating image"):
        # Generate image
        for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
            prompt=prompt_mash,
            num_inference_steps=steps,
            guidance_scale=cfg_scale,
            width=width,
            height=height,
            generator=generator,
            joint_attention_kwargs={"scale": 1.0},
            output_type="pil",
            good_vae=good_vae,
        ):
            yield img
472
+
473
@spaces.GPU(duration=75)
def run_lora(prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state, progress=gr.Progress(track_tqdm=True)):
    """Main generate handler: builds the trigger-word prompt, (re)loads the
    selected LoRA adapters onto the pipeline, and streams preview images.

    Yields (image, seed, progress-bar update) tuples; the final yield carries
    the finished image with the progress bar hidden.
    Raises gr.Error when no LoRA is selected.
    """
    if not selected_indices:
        raise gr.Error("You must select at least one LoRA before proceeding.")

    selected_loras = [loras_state[idx] for idx in selected_indices]

    # Build the prompt with trigger words (prepended or appended per LoRA).
    prepends = []
    appends = []
    for lora in selected_loras:
        trigger_word = lora.get('trigger_word', '')
        if trigger_word:
            if lora.get("trigger_position") == "prepend":
                prepends.append(trigger_word)
            else:
                appends.append(trigger_word)
    prompt_mash = " ".join(prepends + [prompt] + appends)
    print("Prompt Mash: ", prompt_mash)

    # Unload previous LoRA weights.
    # BUG FIX: the original referenced `image_input` and `pipe_i2i`, neither of
    # which is defined anywhere in this app (no image-to-image pipeline exists),
    # so every run raised NameError. This text-to-image-only version uses the
    # single `pipe` throughout.
    with calculateDuration("Unloading LoRA"):
        pipe.unload_lora_weights()

    print(pipe.get_active_adapters())
    # Load LoRA weights with respective scales.
    lora_names = []
    lora_weights = []
    with calculateDuration("Loading LoRA weights"):
        for idx, lora in enumerate(selected_loras):
            lora_name = f"lora_{idx}"
            lora_names.append(lora_name)
            print(f"Lora Name: {lora_name}")
            # NOTE(review): only two scale sliders reach this handler, so slots
            # 3 and 4 reuse lora_scale_2 (as in the original) — confirm intent.
            lora_weights.append(lora_scale_1 if idx == 0 else lora_scale_2)
            lora_path = lora['repo']
            weight_name = lora.get("weights")
            print(f"Lora Path: {lora_path}")
            pipe.load_lora_weights(
                lora_path,
                weight_name=weight_name if weight_name else None,
                low_cpu_mem_usage=True,
                adapter_name=lora_name
            )
    print("Loaded LoRAs:", lora_names)
    print("Adapter weights:", lora_weights)
    pipe.set_adapters(lora_names, adapter_weights=lora_weights)
    print(pipe.get_active_adapters())

    # Set random seed for reproducibility.
    with calculateDuration("Randomizing seed"):
        if randomize_seed:
            seed = random.randint(0, MAX_SEED)
            print("Image Seed:", seed)

    # Generate and stream the images.
    image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
    final_image = None
    step_counter = 0
    progress_bar = ""
    for image in image_generator:
        step_counter += 1
        final_image = image
        progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
        yield image, seed, gr.update(value=progress_bar, visible=True)
    yield final_image, seed, gr.update(value=progress_bar, visible=False)

run_lora.zerogpu = True
549
+
550
def get_huggingface_safetensors(link):
    """Resolve a Hugging Face "user/repo" path into LoRA metadata.

    Returns (title, repo_link, safetensors_filename, trigger_word, image_url).
    Raises gr.Error for malformed links or repos without a .safetensors file,
    and a plain Exception when the repo's base model is not FLUX.1.
    """
    split_link = link.split("/")
    if len(split_link) != 2:
        raise gr.Error("Invalid Hugging Face repository link")

    model_card = ModelCard.load(link)
    base_model = model_card.data.get("base_model")
    print(f"Base model: {base_model}")
    if base_model not in ["black-forest-labs/FLUX.1-dev", "black-forest-labs/FLUX.1-schnell"]:
        raise Exception("Not a FLUX LoRA!")

    # BUG FIX: a model card carrying an explicit empty `widget: []` made the
    # original `[0]` index raise IndexError; fall back to one empty dict.
    widget = model_card.data.get("widget") or [{}]
    image_path = widget[0].get("output", {}).get("url", None)
    trigger_word = model_card.data.get("instance_prompt", "")
    image_url = f"https://huggingface.co/{link}/resolve/main/{image_path}" if image_path else None

    fs = HfFileSystem()
    safetensors_name = None
    try:
        list_of_files = fs.ls(link, detail=False)
        for file in list_of_files:
            if file.endswith(".safetensors"):
                safetensors_name = file.split("/")[-1]
            # First repo image doubles as the preview when the card has none.
            if not image_url and file.lower().endswith((".jpg", ".jpeg", ".png", ".webp")):
                image_elements = file.split("/")
                image_url = f"https://huggingface.co/{link}/resolve/main/{image_elements[-1]}"
    except Exception as e:
        print(e)
        raise gr.Error("Invalid Hugging Face repository with a *.safetensors LoRA")
    if not safetensors_name:
        raise gr.Error("No *.safetensors file found in the repository")
    return split_link[1], link, safetensors_name, trigger_word, image_url
579
+
580
def check_custom_model(link):
    """Resolve a user-supplied LoRA reference.

    Accepts a direct .safetensors URL/path, a huggingface.co URL, or a bare
    "user/repo" model path. Returns (title, repo, path, trigger_word, image_url).
    Raises Exception("Unsupported URL") for non-Hugging-Face https links.
    """
    if link.endswith(".safetensors"):
        # Direct link to the weights file itself: no repo metadata available.
        return os.path.basename(link), link, None, "", None
    if not link.startswith("https://"):
        # Assume it's a Hugging Face model path like "user/repo".
        return get_huggingface_safetensors(link)
    if "huggingface.co" not in link:
        raise Exception("Unsupported URL")
    # Full huggingface.co URL: strip the host and resolve the repo path.
    repo_path = link.split("huggingface.co/")[1]
    return get_huggingface_safetensors(repo_path)
598
+
599
+ #def update_history(new_image, history):
600
+ # """Updates the history gallery with the new image."""
601
+ # if history is None:
602
+ # history = []
603
+ # history.insert(0, new_image)
604
+ # return history
605
+
606
# Gradio CSS overrides: title/gallery layout tweaks, LoRA card styling, and the
# custom HTML progress bar driven by the --current/--total CSS variables that
# run_lora injects per step.
css = '''
#gen_btn{height: 100%}
#title{text-align: center}
#title h1{font-size: 3em; display:inline-flex; align-items:center}
#title img{width: 100px; margin-right: 0.25em}
#gallery .grid-wrap{height: 5vh}
#lora_list{background: var(--block-background-fill);padding: 0 1em .3em; font-size: 90%}
.custom_lora_card{margin-bottom: 1em}
.card_internal{display: flex;height: 100px;margin-top: .5em}
.card_internal img{margin-right: 1em}
.styler{--form-gap-width: 0px !important}
#progress{height:30px}
#progress .generating{display:none}
.progress-container {width: 100%;height: 30px;background-color: #f0f0f0;border-radius: 15px;overflow: hidden;margin-bottom: 20px}
.progress-bar {height: 100%;background-color: #4f46e5;width: calc(var(--current) / var(--total) * 100%);transition: width 0.5s ease-in-out}
#component-8, .button_total{height: 100%; align-self: stretch;}
#loaded_loras [data-testid="block-info"]{font-size:80%}
#custom_lora_structure{background: var(--block-background-fill)}
#custom_lora_btn{margin-top: auto;margin-bottom: 11px}
#random_btn{font-size: 300%}
#component-11{align-self: stretch;}
#trigger_word{font-size: 1.5em; text-align: center; margin-top: 20px;}
'''
629
+
630
# --- Gradio UI -------------------------------------------------------------
# Builds the full app layout (title, four LoRA slots, advanced settings,
# custom-LoRA loader, gallery, result pane) and wires the event handlers
# defined earlier in this file (update_selection, remove_lora_1..4,
# add_custom_lora, remove_custom_lora, run_lora).
with gr.Blocks(css=css, delete_cache=(240, 240)) as app:
    title = gr.HTML(
        """<h1><img src="https://huggingface.co/spaces/Keltezaa/Celebrity-flux-lora/resolve/main/solo-traveller_16875043.png" alt=" "> Celebrity-flux-lora</h1><br><span style="
margin-top: -25px !important;
display: block;
margin-left: 37px;
">Mix and match any FLUX[dev] LoRAs</span>""",
        elem_id="title",
    )
    # App-wide state: the LoRA catalogue and the indices of the currently
    # selected LoRAs (at most four).
    loras_state = gr.State(loras)
    selected_indices = gr.State([])
    trigger_word_display = gr.Markdown("", elem_id="trigger_word")

    with gr.Row():
        with gr.Column(scale=3):
            prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Type a prompt after selecting a LoRA")

    # First row of LoRA slots (1 and 2) plus the randomize button.
    with gr.Row(elem_id="loaded_loras"):
        with gr.Column(scale=8):
            with gr.Row():
                with gr.Column(scale=0, min_width=50):
                    lora_image_1 = gr.Image(label="LoRA 1 Image", interactive=False, width=50, show_label=False, show_share_button=False, show_download_button=False, show_fullscreen_button=False, height=50)
                with gr.Column(scale=3, min_width=100):
                    selected_info_1 = gr.Markdown("Select a LoRA 1")
                with gr.Column(scale=5, min_width=50):
                    lora_scale_1 = gr.Slider(label="LoRA 1 Scale", minimum=0, maximum=3, step=0.05, value=0.5)
            with gr.Row():
                remove_button_1 = gr.Button("Remove", size="sm")

        with gr.Column(scale=8):
            with gr.Row():
                with gr.Column(scale=0, min_width=50):
                    lora_image_2 = gr.Image(label="LoRA 2 Image", interactive=False, width=50, show_label=False, show_share_button=False, show_download_button=False, show_fullscreen_button=False, height=50)
                with gr.Column(scale=3, min_width=100):
                    selected_info_2 = gr.Markdown("Select a LoRA 2")
                with gr.Column(scale=5, min_width=50):
                    lora_scale_2 = gr.Slider(label="LoRA 2 Scale", minimum=0, maximum=3, step=0.05, value=0.5)
            with gr.Row():
                remove_button_2 = gr.Button("Remove", size="sm")

        with gr.Column(scale=1, min_width=50):
            randomize_button = gr.Button("🎲", variant="secondary", scale=1, elem_id="random_btn")

    # Second row of LoRA slots (3 and 4).
    with gr.Row(elem_id="loaded_loras"):
        with gr.Column(scale=8):
            with gr.Row():
                with gr.Column(scale=0, min_width=50):
                    lora_image_3 = gr.Image(label="LoRA 3 Image", interactive=False, width=50, show_label=False, show_share_button=False, show_download_button=False, show_fullscreen_button=False, height=50)
                with gr.Column(scale=3, min_width=100):
                    selected_info_3 = gr.Markdown("Select a LoRA 3")
                with gr.Column(scale=5, min_width=50):
                    lora_scale_3 = gr.Slider(label="LoRA 3 Scale", minimum=0, maximum=3, step=0.05, value=0.5)
            with gr.Row():
                remove_button_3 = gr.Button("Remove", size="sm")
        with gr.Column(scale=8):
            with gr.Row():
                with gr.Column(scale=0, min_width=50):
                    lora_image_4 = gr.Image(label="LoRA 4 Image", interactive=False, width=50, show_label=False, show_share_button=False, show_download_button=False, show_fullscreen_button=False, height=50)
                with gr.Column(scale=3, min_width=100):
                    selected_info_4 = gr.Markdown("Select a LoRA 4")
                # NOTE(review): min_width=150 differs from slots 1-3 (50) -- confirm intentional.
                with gr.Column(scale=5, min_width=150):
                    lora_scale_4 = gr.Slider(label="LoRA 4 Scale", minimum=0, maximum=3, step=0.05, value=0.5)
            with gr.Row():
                remove_button_4 = gr.Button("Remove", size="sm")

    with gr.Row():
        with gr.Accordion("Advanced Settings", open=False):
            # Image-to-image controls, currently disabled:
            # input_image = gr.Image(label="Input image", type="filepath", show_share_button=False)
            # image_strength = gr.Slider(label="Denoise Strength", info="Lower means more image influence", minimum=0.1, maximum=1.0, step=0.01, value=0.75)
            with gr.Column():
                with gr.Row():
                    cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=7.5)
                    steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=28)

                with gr.Row():
                    width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
                    height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)

                with gr.Row():
                    randomize_seed = gr.Checkbox(True, label="Randomize seed")
                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)

    with gr.Row():
        with gr.Column(scale=3):
            generate_button = gr.Button("Generate", variant="primary", elem_classes=["button_total"])

    with gr.Row():
        with gr.Column():
            with gr.Group():
                with gr.Row(elem_id="custom_lora_structure"):
                    custom_lora = gr.Textbox(label="Custom LoRA", info="LoRA Hugging Face path or *.safetensors public URL", placeholder="multimodalart/vintage-ads-flux", scale=3, min_width=150)
                    add_custom_lora_button = gr.Button("Add Custom LoRA", elem_id="custom_lora_btn", scale=2, min_width=150)
                remove_custom_lora_button = gr.Button("Remove Custom LoRA", visible=False)
            gr.Markdown("[Check the list of FLUX LoRAs](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)", elem_id="lora_list")
            gallery = gr.Gallery(
                [(item["image"], item["title"]) for item in loras],
                label="Or pick from the gallery",
                allow_preview=False,
                columns=5,
                elem_id="gallery",
                show_share_button=False,
                interactive=False
            )
        with gr.Column():
            progress_bar = gr.Markdown(elem_id="progress", visible=False)
            result = gr.Image(label="Generated Image", interactive=False, show_share_button=False)
            # History gallery, currently disabled:
            # with gr.Accordion("History", open=False):
            #     history_gallery = gr.Gallery(label="History", columns=6, object_fit="contain", interactive=False)

    # --- Event wiring ------------------------------------------------------
    # Shared output list: every slot-mutating handler refreshes all four
    # slots' info/scale/image widgets plus the selection state and the
    # trigger-word display, in this exact order.
    slot_outputs = [
        selected_info_1, selected_info_2, selected_info_3, selected_info_4,
        selected_indices,
        lora_scale_1, lora_scale_2, lora_scale_3, lora_scale_4,
        lora_image_1, lora_image_2, lora_image_3, lora_image_4,
        trigger_word_display,
    ]

    gallery.select(
        update_selection,
        inputs=[selected_indices, loras_state, width, height],
        # Like slot_outputs, but update_selection additionally returns the
        # prompt first and the recommended width/height between the scales
        # and the images -- order must match the handler's return tuple.
        outputs=[prompt,
                 selected_info_1, selected_info_2, selected_info_3, selected_info_4,
                 selected_indices,
                 lora_scale_1, lora_scale_2, lora_scale_3, lora_scale_4,
                 width, height,
                 lora_image_1, lora_image_2, lora_image_3, lora_image_4,
                 trigger_word_display],
    )

    # The four "Remove" buttons share inputs and outputs and differ only in
    # which remove_lora_N handler they invoke.
    for _remove_btn, _remove_fn in (
        (remove_button_1, remove_lora_1),
        (remove_button_2, remove_lora_2),
        (remove_button_3, remove_lora_3),
        (remove_button_4, remove_lora_4),
    ):
        _remove_btn.click(
            _remove_fn,
            inputs=[selected_indices, loras_state],
            outputs=slot_outputs,
        )

    add_custom_lora_button.click(
        add_custom_lora,
        inputs=[custom_lora, selected_indices, loras_state, gallery],
        outputs=[loras_state, gallery] + slot_outputs,
    )

    remove_custom_lora_button.click(
        remove_custom_lora,
        inputs=[selected_indices, loras_state, gallery],
        outputs=[loras_state, gallery] + slot_outputs,
    )

    # NOTE(review): randomize_button is created above but never wired to a
    # handler in this file chunk -- confirm whether a randomize callback was
    # dropped or is attached elsewhere.

    # Generation is triggered by the button or by submitting the prompt.
    gr.on(
        triggers=[generate_button.click, prompt.submit],
        fn=run_lora,
        inputs=[prompt, cfg_scale, steps, selected_indices,
                lora_scale_1, lora_scale_2, lora_scale_3, lora_scale_4,
                randomize_seed, seed, width, height, loras_state],
        outputs=[result, seed, progress_bar],
    )
    # History chaining, currently disabled:
    # .then(
    #     fn=lambda x, history: update_history(x, history),
    #     inputs=[result, history_gallery],
    #     outputs=history_gallery,
    # )
793
+
794
# Enable request queuing (needed for long-running / progress-reporting
# handlers on Spaces), then start the Gradio server.
app.queue()
app.launch()