updates
- app.py +15 -7
- flux_app/backend.py +13 -5
app.py CHANGED
@@ -1,9 +1,17 @@
+#app.py
+import os
+from flux_app.frontend import Frontend
 from flux_app.backend import ModelManager
+
 if __name__ == "__main__":
+    # Get the Hugging Face token from an environment variable
+    hf_token = os.environ.get("HF_TOKEN")
+
+    if not hf_token:
+        raise ValueError("Hugging Face token (HF_TOKEN) not found in environment variables. Please set it.")
+    model_manager = ModelManager(hf_token=hf_token)
+
+    frontend = Frontend(model_manager)
+    app = frontend.create_ui()
+    app.queue()
+    app.launch()
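With this change app.py reads the token from the HF_TOKEN environment variable and raises at startup if it is missing, so the variable has to exist wherever the app runs. On a Space that is normally configured under Settings → Variables and secrets; the sketch below shows the same secret being set programmatically with huggingface_hub, using a placeholder repo id and token that are not part of this commit:

# Sketch only: add the HF_TOKEN secret to a Space from a script.
# "your-username/your-space" and "hf_xxx" are hypothetical placeholders.
from huggingface_hub import HfApi

api = HfApi()  # resolves your locally saved user token for authentication
api.add_space_secret(
    repo_id="your-username/your-space",
    key="HF_TOKEN",
    value="hf_xxx",  # a real read-scoped access token in practice
)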
flux_app/backend.py CHANGED
@@ -10,21 +10,28 @@ from flux_app.config import DTYPE, DEVICE, BASE_MODEL, TAEF1_MODEL, MAX_SEED #
 from flux_app.utilities import calculate_shift, retrieve_timesteps, load_image_from_path, calculateDuration # Absolute import
 from flux_app.lora_handling import flux_pipe_call_that_returns_an_iterable_of_images # Absolute import
 import time
+from huggingface_hub import login
 
 class ModelManager:
-    def __init__(self):
+    def __init__(self, hf_token=None):
         self.pipe = None
         self.pipe_i2i = None
         self.good_vae = None
         self.taef1 = None
+
+        if hf_token:
+            login(token=hf_token) # Log in with the provided token
+        #else: # Optional: You could add a fallback to interactive login
+        #    login()
+
         self.initialize_models()
 
 
     def initialize_models(self):
         """Initializes the diffusion pipelines and autoencoders."""
-        self.taef1 = AutoencoderTiny.from_pretrained(TAEF1_MODEL, torch_dtype=DTYPE).to(DEVICE)
-        self.good_vae = AutoencoderKL.from_pretrained(BASE_MODEL, subfolder="vae", torch_dtype=DTYPE).to(DEVICE)
-        self.pipe = DiffusionPipeline.from_pretrained(BASE_MODEL, torch_dtype=DTYPE, vae=self.taef1).to(DEVICE)
+        self.taef1 = AutoencoderTiny.from_pretrained(TAEF1_MODEL, torch_dtype=DTYPE, token=True).to(DEVICE)
+        self.good_vae = AutoencoderKL.from_pretrained(BASE_MODEL, subfolder="vae", torch_dtype=DTYPE, token=True).to(DEVICE)
+        self.pipe = DiffusionPipeline.from_pretrained(BASE_MODEL, torch_dtype=DTYPE, vae=self.taef1, token=True).to(DEVICE)
         self.pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
             BASE_MODEL,
             vae=self.good_vae,
@@ -34,6 +41,7 @@ class ModelManager:
             text_encoder_2=self.pipe.text_encoder_2,
             tokenizer_2=self.pipe.tokenizer_2,
             torch_dtype=DTYPE,
+            token=True
         )
         self.pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(self.pipe)
 
@@ -75,4 +83,4 @@ class ModelManager:
             joint_attention_kwargs={"scale": lora_scale},
             output_type="pil",
         ).images[0]
-        return final_image
+        return final_image
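In the backend, login(token=hf_token) stores the credential locally, and the token=True arguments let the from_pretrained calls reuse that stored token when downloading weights from gated repositories (which the FLUX base model referenced by BASE_MODEL presumably is). An equivalent alternative, shown here only as a sketch rather than what this commit does, is to pass the token string directly to each loader and skip the global login; DTYPE, DEVICE and TAEF1_MODEL are the same config values imported in the diff above:

# Sketch only: explicit per-call token instead of login() + token=True.
import os

from diffusers import AutoencoderTiny
from flux_app.config import DTYPE, DEVICE, TAEF1_MODEL

hf_token = os.environ.get("HF_TOKEN")  # same variable app.py reads
taef1 = AutoencoderTiny.from_pretrained(
    TAEF1_MODEL,
    torch_dtype=DTYPE,
    token=hf_token,  # explicit token string; no prior huggingface_hub.login() needed
).to(DEVICE)

Either way the token stays out of the repository and is supplied through the environment.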