Update src/pipeline.py
src/pipeline.py (+4 -13)
```diff
@@ -41,9 +41,6 @@ from diffusers.utils.import_utils import is_torch_npu_available
 from diffusers.utils.torch_utils import maybe_allow_in_graph
 from diffusers.models.embeddings import CombinedTimestepGuidanceTextProjEmbeddings, CombinedTimestepTextProjEmbeddings, FluxPosEmbed
 from diffusers.models.modeling_outputs import Transformer2DModelOutput
-# from diffusers import FluxPipeline, FluxTransformer2DModel, GGUFQuantizationConfig
-from diffusers.loaders.single_file_utils import create_diffusers_t5_model_from_checkpoint
-from diffusers.loaders.single_file_model import FromOriginalModelMixin
 
 class BasicQuantization:
     def __init__(self, bits=1):
```
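The dropped imports mark the end of the old loading strategy: the commented-out line pointed at diffusers' GGUF entry points, and the two single-file utilities were only needed to rebuild a T5 encoder from a raw checkpoint. For context, here is a minimal sketch of the GGUF route those imports refer to, assuming diffusers' documented `from_single_file` + `GGUFQuantizationConfig` API; the checkpoint URL is illustrative, not one used by this repo:

```python
import torch
from diffusers import FluxTransformer2DModel, GGUFQuantizationConfig

# Sketch of the GGUF loading route the removed imports referenced.
# The checkpoint URL is an illustrative public GGUF file, not this repo's.
transformer = FluxTransformer2DModel.from_single_file(
    "https://huggingface.co/city96/FLUX.1-dev-gguf/blob/main/flux1-dev-Q8_0.gguf",
    quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16),
    torch_dtype=torch.bfloat16,
)
```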
```diff
@@ -595,16 +592,10 @@ def load_pipeline() -> Pipeline:
 
     dtype, device = torch.bfloat16, "cuda"
 
-
-
-
-
-    t5_path = os.path.join(HF_HUB_CACHE, "models--manbeast3b--t5-v1_1-xxl-encoder-q8/snapshots/59c6c9cb99dcea42067f32caac3ea0836ef4c548/t5-v1_1-xxl-encoder-Q8_0.gguf")
-    # config_path = os.path.join(HF_HUB_CACHE, "models--black-forest--labs/FLUX.1-schnell/snapshots/741f7c3ce8b383c54771c7003378a50191e9efe9/text_encoder_2/config.json")
-    config_path = os.path.join(HF_HUB_CACHE, "models--black-forest-labs--FLUX.1-schnell/snapshots/741f7c3ce8b383c54771c7003378a50191e9efe9/")
-    import pdb; pdb.set_trace()
-    ckpt_t5 = load_single_file_checkpoint(t5_path, local_files_only=True)
-    import pdb; pdb.set_trace()
+    text_encoder_2 = T5EncoderModel.from_pretrained(
+        "silentdriver/aadb864af9", revision="060dabc7fa271c26dfa3fd43c16e7c5bf3ac7892", torch_dtype=torch.bfloat16
+    ).to(memory_format=torch.channels_last)
+
 
 
     vae = AutoencoderTiny.from_pretrained("silentdriver/7815792fb4", revision="bdb7d88ebe5a1c6b02a3c0c78651dd57a403fdf5", torch_dtype=dtype)
```
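The deleted block resolved a Q8_0 GGUF copy of the T5-v1.1-XXL encoder inside the local Hugging Face cache, rebuilt it through the single-file utilities, and left two `pdb.set_trace()` breakpoints behind; the replacement loads a prepackaged `T5EncoderModel` from the Hub in a single call. Had the GGUF route been kept, a shorter path would be transformers' own GGUF support, sketched below on the assumption that the installed transformers version covers the T5 architecture; repo id and filename mirror the cached path from the deleted code:

```python
import torch
from transformers import T5EncoderModel

# Hypothetical alternative to the deleted single-file loading: transformers
# can dequantize a GGUF checkpoint via the gguf_file argument (assuming the
# installed version supports the T5 architecture).
text_encoder_2 = T5EncoderModel.from_pretrained(
    "manbeast3b/t5-v1_1-xxl-encoder-q8",        # repo from the old cache path
    gguf_file="t5-v1_1-xxl-encoder-Q8_0.gguf",  # file from the old cache path
    torch_dtype=torch.bfloat16,
)
```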
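After the change, both heavyweight components come down as ordinary Hub downloads. The sketch below shows how such preloaded modules are typically handed to a Flux pipeline; the base repo and revision mirror the `config_path` from the deleted code, and `load_pipeline()` presumably does the equivalent further down:

```python
import torch
from diffusers import FluxPipeline

# Sketch only: FluxPipeline.from_pretrained accepts preloaded submodules
# as overrides, so the quantized encoder and tiny VAE slot straight in.
pipeline = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell",
    revision="741f7c3ce8b383c54771c7003378a50191e9efe9",
    text_encoder_2=text_encoder_2,  # prepackaged T5 loaded above
    vae=vae,                        # AutoencoderTiny replacement
    torch_dtype=torch.bfloat16,
).to("cuda")
```

One detail worth noting: `.to(memory_format=torch.channels_last)` only rewrites the layout of 4-D parameters, so on a linear-heavy T5 encoder it is effectively a no-op; the hint pays off mainly for conv-based modules such as the VAE.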