Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -2,6 +2,9 @@ import os
 # Set environment variable before importing torch to avoid nested tensor issues
 os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
 
+# Import spaces FIRST before any torch imports
+import spaces
+
 import time
 import gradio as gr
 import torch
@@ -22,9 +25,6 @@ from torch import Tensor, nn
 from transformers import CLIPTextModel, CLIPTokenizer
 from transformers import T5EncoderModel, T5Tokenizer
 
-# Import spaces after other imports to minimize conflicts
-import spaces
-
 # ---------------- Encoders ----------------
 
 class HFEmbedder(nn.Module):
@@ -62,7 +62,6 @@ class HFEmbedder(nn.Module):
         return outputs[self.output_key]
 
 # Initialize models without GPU decorator first
-device = "cuda" if torch.cuda.is_available() else "cpu"
 t5 = None
 clip = None
 ae = None
@@ -73,6 +72,7 @@ def initialize_models():
     global t5, clip, ae, model, model_initialized
     if not model_initialized:
         print("Initializing models...")
+        device = "cuda" if torch.cuda.is_available() else "cpu"
         t5 = HFEmbedder("DeepFloyd/t5-v1_1-xxl", max_length=512, torch_dtype=torch.bfloat16).to(device)
         clip = HFEmbedder("openai/clip-vit-large-patch14", max_length=77, torch_dtype=torch.bfloat16).to(device)
         ae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=torch.bfloat16).to(device)