Spaces: Running on Zero
AlekseyCalvin committed · Commit 0153bb5 · 1 Parent(s): b203bc9
Update app.py
app.py CHANGED
@@ -33,12 +33,12 @@ if clipmodel == "long":
 
 torch.backends.cuda.matmul.allow_tf32 = True
 
-clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config).to(device)
-clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=maxtokens, return_tensors="pt", truncation=True)
+clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True).to(device)
+clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=maxtokens, ignore_mismatched_sizes=True, return_tensors="pt", truncation=True)
 config.text_config.max_position_embeddings = 248
 
 
-pipe = FluxPipeline.from_pretrained("AlekseyCalvin/HistoricColorSoonr_v2_FluxSchnell_Diffusers", torch_dtype=torch.bfloat16)
+pipe = FluxPipeline.from_pretrained("AlekseyCalvin/HistoricColorSoonr_v2_FluxSchnell_Diffusers", ignore_mismatched_sizes=True, torch_dtype=torch.bfloat16)
 pipe.to(device="cuda", dtype=torch.bfloat16)
 
 pipe.tokenizer = clip_processor.tokenizer
@@ -91,6 +91,14 @@ def update_selection(evt: gr.SelectData, width, height):
 )
 
 @spaces.GPU(duration=70)
+pipe.vae.enable_slicing()
+pipe.vae.enable_tiling()
+
+# Just to look at the tokens / confirm settings:
+tokens = clip_processor(
+    [prompt], padding="max_length", max_length=maxtokens, return_tensors="pt", truncation=True
+)
+
 def generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress):
     pipe.to("cuda")
     generator = torch.Generator(device="cuda").manual_seed(seed)
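Note on this hunk: as committed, the added statements sit between the @spaces.GPU(duration=70) decorator and the def generate_image(...) it decorates. Python only allows comments and blank lines there, so this should fail to parse, and tokens = clip_processor([prompt], ...) references prompt, which only exists as a parameter of generate_image. A sketch of a working arrangement, keeping the VAE memory savers at module level and moving the token check inside the function (all names taken from app.py; the print is illustrative):

# Module-level VAE memory savers: decode batches image-by-image and large
# images tile-by-tile to reduce peak VRAM during decoding.
pipe.vae.enable_slicing()
pipe.vae.enable_tiling()

@spaces.GPU(duration=70)
def generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress):
    pipe.to("cuda")
    generator = torch.Generator(device="cuda").manual_seed(seed)

    # Just to look at the tokens / confirm settings (prompt is in scope here):
    tokens = clip_processor(
        [prompt], padding="max_length", max_length=maxtokens,
        return_tensors="pt", truncation=True,
    )
    print(tokens["input_ids"].shape)  # expect (1, maxtokens)
    ...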