Update app.py
app.py CHANGED
@@ -58,8 +58,8 @@ MAX_SEED = 2**32 - 1
 pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
 
 def process_input(input_text):
-    # Tokenize and truncate input
-    #inputs = clip_processor(text=input_text, return_tensors="pt", padding=True, truncation=True, max_length=
+    # Tokenize and truncate input
+    #inputs = clip_processor(text=input_text, return_tensors="pt", padding=True, truncation=True, max_length=77)
     #return inputs
     #Change clip_processor to longformer
     inputs = longformer_tokenizer(input_text, return_tensors="pt", padding=True, truncation=True, max_length=4096)
@@ -399,7 +399,7 @@ def run_lora(prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scal
     seed = random.randint(0, MAX_SEED)
 
     # Generate image
-    if image_input is not None:
+    #if image_input is not None:
        final_image = generate_image_to_image(prompt_mash, steps, cfg_scale, width, height, seed)
        yield final_image, seed, gr.update(visible=False)
    else:
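For reference, a minimal sketch of how the revised process_input could look on its own. It assumes longformer_tokenizer is a Hugging Face transformers tokenizer created elsewhere in app.py; the allenai/longformer-base-4096 checkpoint name and the final return statement are assumptions for illustration, since the diff only shows the tokenizer call.

# Sketch only, not the exact code in app.py.
from transformers import LongformerTokenizer

# Hypothetical checkpoint; the diff does not show which Longformer model is loaded.
longformer_tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")

def process_input(input_text):
    # Tokenize and truncate input to Longformer's 4096-token window
    # (the commented-out CLIP processor path capped prompts at 77 tokens).
    inputs = longformer_tokenizer(
        input_text,
        return_tensors="pt",
        padding=True,
        truncation=True,
        max_length=4096,
    )
    return inputs  # assumed return; the diff only shows the tokenizer call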