manbeast3b committed
Commit 704a7c9 · 0 Parent(s)

Initial commit
.gitattributes ADDED
@@ -0,0 +1,37 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ RobertML.png filter=lfs diff=lfs merge=lfs -text
+ backup.png filter=lfs diff=lfs merge=lfs -text
loss_params.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0ee6fa5873dbc8df9daeeb105e220266bcf6634c6806b69da38fdc0a5c12b81
+ size 3184
pyproject.toml ADDED
@@ -0,0 +1,45 @@
+ [build-system]
+ requires = ["setuptools >= 75.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "flux-schnell-edge-inference"
+ description = "An edge-maxxing model submission by RobertML for the 4090 Flux contest"
+ requires-python = ">=3.10,<3.13"
+ version = "8"
+ dependencies = [
+     "diffusers==0.31.0",
+     "transformers==4.46.2",
+     "accelerate==1.1.0",
+     "omegaconf==2.3.0",
+     "torch==2.5.1",
+     "protobuf==5.28.3",
+     "sentencepiece==0.2.0",
+     "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines",
+     "gitpython>=3.1.43",
+     "hf_transfer==0.1.8",
+     "torchao==0.6.1",
+     "setuptools"
+ ]
+
+ [[tool.edge-maxxing.models]]
+ repository = "silentdriver/4b68f38c0b"
+ revision = "36a3cf4a9f733fc5f31257099b56b304fb2eceab"
+ exclude = ["transformer"]
+
+ [[tool.edge-maxxing.models]]
+ repository = "silentdriver/7d92df966a"
+ revision = "add1b8d9a84c728c1209448c4a695759240bad3c"
+
+ [[tool.edge-maxxing.models]]
+ repository = "silentdriver/aadb864af9"
+ revision = "060dabc7fa271c26dfa3fd43c16e7c5bf3ac7892"
+
+ [[tool.edge-maxxing.models]]
+ repository = "silentdriver/7815792fb4"
+ revision = "bdb7d88ebe5a1c6b02a3c0c78651dd57a403fdf5"
+
+
+ [project.scripts]
+ start_inference = "main:main"
+
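The [[tool.edge-maxxing.models]] tables pin the Hugging Face repositories and revisions the submission pulls in, and [project.scripts] wires the start_inference command to main:main. A minimal sketch of reading those pins back out of the file (assumes Python 3.11+ for the standard-library tomllib; purely illustrative, not part of the commit):

import tomllib

# Load the pyproject.toml shown above and list the pinned model repositories.
with open("pyproject.toml", "rb") as f:
    config = tomllib.load(f)

for model in config["tool"]["edge-maxxing"]["models"]:
    print(model["repository"], model["revision"], model.get("exclude", []))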
src/__pycache__/main.cpython-310.pyc ADDED
Binary file (2.19 kB).
 
src/__pycache__/pipeline.cpython-310.pyc ADDED
Binary file (2.8 kB).
 
src/flux_schnell_edge_inference.egg-info/PKG-INFO ADDED
@@ -0,0 +1,15 @@
+ Metadata-Version: 2.1
+ Name: flux-schnell-edge-inference
+ Version: 7
+ Summary: An edge-maxxing model submission for the 4090 Flux contest
+ Requires-Python: <3.13,>=3.10
+ Requires-Dist: diffusers==0.31.0
+ Requires-Dist: transformers==4.46.2
+ Requires-Dist: accelerate==1.1.0
+ Requires-Dist: omegaconf==2.3.0
+ Requires-Dist: torch==2.5.1
+ Requires-Dist: protobuf==5.28.3
+ Requires-Dist: sentencepiece==0.2.0
+ Requires-Dist: edge-maxxing-pipelines@ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines
+ Requires-Dist: gitpython>=3.1.43
+ Requires-Dist: torchao>=0.6.1
src/flux_schnell_edge_inference.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,10 @@
+ README.md
+ pyproject.toml
+ src/main.py
+ src/pipeline.py
+ src/flux_schnell_edge_inference.egg-info/PKG-INFO
+ src/flux_schnell_edge_inference.egg-info/SOURCES.txt
+ src/flux_schnell_edge_inference.egg-info/dependency_links.txt
+ src/flux_schnell_edge_inference.egg-info/entry_points.txt
+ src/flux_schnell_edge_inference.egg-info/requires.txt
+ src/flux_schnell_edge_inference.egg-info/top_level.txt
src/flux_schnell_edge_inference.egg-info/dependency_links.txt ADDED
@@ -0,0 +1 @@
+
src/flux_schnell_edge_inference.egg-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+ [console_scripts]
+ start_inference = main:main
src/flux_schnell_edge_inference.egg-info/requires.txt ADDED
@@ -0,0 +1,10 @@
+ diffusers==0.31.0
+ transformers==4.46.2
+ accelerate==1.1.0
+ omegaconf==2.3.0
+ torch==2.5.1
+ protobuf==5.28.3
+ sentencepiece==0.2.0
+ edge-maxxing-pipelines@ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines
+ gitpython>=3.1.43
+ torchao>=0.6.1
src/flux_schnell_edge_inference.egg-info/top_level.txt ADDED
@@ -0,0 +1,2 @@
+ main
+ pipeline
src/ghanta.py ADDED
@@ -0,0 +1,74 @@
+ import torch
+ from typing import Tuple, Callable
+ def hacer_nada(x: torch.Tensor, modo: str = None):
+     return x
+ def brujeria_mps(entrada, dim, indice):
+     if entrada.shape[-1] == 1:
+         return torch.gather(entrada.unsqueeze(-1), dim - 1 if dim < 0 else dim, indice.unsqueeze(-1)).squeeze(-1)
+     else:
+         return torch.gather(entrada, dim, indice)
+ def emparejamiento_suave_aleatorio_2d(
+     metrica: torch.Tensor,
+     ancho: int,
+     alto: int,
+     paso_x: int,
+     paso_y: int,
+     radio: int,
+     sin_aleatoriedad: bool = False,
+     generador: torch.Generator = None
+ ) -> Tuple[Callable, Callable]:
+     lote, num_nodos, _ = metrica.shape
+     if radio <= 0:
+         return hacer_nada, hacer_nada
+     recopilar = brujeria_mps if metrica.device.type == "mps" else torch.gather
+     with torch.no_grad():
+         alto_paso_y, ancho_paso_x = alto // paso_y, ancho // paso_x
+         if sin_aleatoriedad:
+             indice_aleatorio = torch.zeros(alto_paso_y, ancho_paso_x, 1, device=metrica.device, dtype=torch.int64)
+         else:
+             indice_aleatorio = torch.randint(paso_y * paso_x, size=(alto_paso_y, ancho_paso_x, 1), device=generador.device, generator=generador).to(metrica.device)
+         vista_buffer_indice = torch.zeros(alto_paso_y, ancho_paso_x, paso_y * paso_x, device=metrica.device, dtype=torch.int64)
+         vista_buffer_indice.scatter_(dim=2, index=indice_aleatorio, src=-torch.ones_like(indice_aleatorio, dtype=indice_aleatorio.dtype))
+         vista_buffer_indice = vista_buffer_indice.view(alto_paso_y, ancho_paso_x, paso_y, paso_x).transpose(1, 2).reshape(alto_paso_y * paso_y, ancho_paso_x * paso_x)
+         if (alto_paso_y * paso_y) < alto or (ancho_paso_x * paso_x) < ancho:
+             buffer_indice = torch.zeros(alto, ancho, device=metrica.device, dtype=torch.int64)
+             buffer_indice[:(alto_paso_y * paso_y), :(ancho_paso_x * paso_x)] = vista_buffer_indice
+         else:
+             buffer_indice = vista_buffer_indice
+         indice_aleatorio = buffer_indice.reshape(1, -1, 1).argsort(dim=1)
+         del buffer_indice, vista_buffer_indice
+         num_destino = alto_paso_y * ancho_paso_x
+         indices_a = indice_aleatorio[:, num_destino:, :]
+         indices_b = indice_aleatorio[:, :num_destino, :]
+         def dividir(x):
+             canales = x.shape[-1]
+             origen = recopilar(x, dim=1, index=indices_a.expand(lote, num_nodos - num_destino, canales))
+             destino = recopilar(x, dim=1, index=indices_b.expand(lote, num_destino, canales))
+             return origen, destino
+         metrica = metrica / metrica.norm(dim=-1, keepdim=True)
+         a, b = dividir(metrica)
+         puntuaciones = a @ b.transpose(-1, -2)
+         radio = min(a.shape[1], radio)
+         nodo_max, nodo_indice = puntuaciones.max(dim=-1)
+         indice_borde = nodo_max.argsort(dim=-1, descending=True)[..., None]
+         indice_no_emparejado = indice_borde[..., radio:, :]
+         indice_origen = indice_borde[..., :radio, :]
+         indice_destino = recopilar(nodo_indice[..., None], dim=-2, index=indice_origen)
+     def fusionar(x: torch.Tensor, modo="mean") -> torch.Tensor:
+         origen, destino = dividir(x)
+         n, t1, c = origen.shape
+         no_emparejado = recopilar(origen, dim=-2, index=indice_no_emparejado.expand(n, t1 - radio, c))
+         origen = recopilar(origen, dim=-2, index=indice_origen.expand(n, radio, c))
+         destino = destino.scatter_reduce(-2, indice_destino.expand(n, radio, c), origen, reduce=modo)
+         return torch.cat([no_emparejado, destino], dim=1)
+     def desfusionar(x: torch.Tensor) -> torch.Tensor:
+         longitud_no_emparejado = indice_no_emparejado.shape[1]
+         no_emparejado, destino = x[..., :longitud_no_emparejado, :], x[..., longitud_no_emparejado:, :]
+         _, _, c = no_emparejado.shape
+         origen = recopilar(destino, dim=-2, index=indice_destino.expand(lote, radio, c))
+         salida = torch.zeros(lote, num_nodos, c, device=x.device, dtype=x.dtype)
+         salida.scatter_(dim=-2, index=indices_b.expand(lote, num_destino, c), src=destino)
+         salida.scatter_(dim=-2, index=recopilar(indices_a.expand(lote, indices_a.shape[1], 1), dim=1, index=indice_no_emparejado).expand(lote, longitud_no_emparejado, c), src=no_emparejado)
+         salida.scatter_(dim=-2, index=recopilar(indices_a.expand(lote, indices_a.shape[1], 1), dim=1, index=indice_origen).expand(lote, radio, c), src=origen)
+         return salida
+     return fusionar, desfusionar
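ghanta.py appears to implement ToMe-style random 2D bipartite soft matching: tokens on an alto x ancho grid are split into source and destination sets, the radio most similar source tokens are merged into their destinations by fusionar, and desfusionar scatters them back to the original layout. A minimal usage sketch (assumes it is run from src/ so ghanta is importable; the shapes are illustrative only):

import torch
from ghanta import emparejamiento_suave_aleatorio_2d

# (batch, tokens, channels) features for a 64x64 token grid.
tokens = torch.randn(1, 64 * 64, 128)
fusionar, desfusionar = emparejamiento_suave_aleatorio_2d(
    tokens, 64, 64, paso_x=2, paso_y=2, radio=512, sin_aleatoriedad=True
)
merged = fusionar(tokens)        # 512 source tokens merged into their destinations
restored = desfusionar(merged)   # scattered back to the original 4096-token layout
print(merged.shape, restored.shape)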
src/main.py ADDED
@@ -0,0 +1,55 @@
+ import atexit
+ from io import BytesIO
+ from multiprocessing.connection import Listener
+ from os import chmod, remove
+ from os.path import abspath, exists
+ from pathlib import Path
+ from git import Repo
+ import torch
+
+ from PIL.JpegImagePlugin import JpegImageFile
+ from pipelines.models import TextToImageRequest
+ from pipeline import load_pipeline, infer
+ SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
+
+
+ def at_exit():
+     torch.cuda.empty_cache()
+
+
+ def main():
+     atexit.register(at_exit)
+
+     print("Loading pipeline")
+     pipeline = load_pipeline()
+
+     print(f"Pipeline loaded! Creating socket at '{SOCKET}'")
+
+     if exists(SOCKET):
+         remove(SOCKET)
+
+     with Listener(SOCKET) as listener:
+         chmod(SOCKET, 0o777)
+
+         print("Awaiting connections")
+         with listener.accept() as connection:
+             print("Connected")
+             generator = torch.Generator("cuda")
+             while True:
+                 try:
+                     request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
+                 except EOFError:
+                     print("Inference socket exiting")
+
+                     return
+                 image = infer(request, pipeline, generator.manual_seed(request.seed))
+                 data = BytesIO()
+                 image.save(data, format=JpegImageFile.format)
+
+                 packet = data.getvalue()
+
+                 connection.send_bytes(packet)
+
+
+ if __name__ == '__main__':
+     main()
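main() serves requests over the inferences.sock connection: it receives a JSON-encoded TextToImageRequest, runs infer, and replies with raw JPEG bytes. A minimal client-side sketch of that protocol (assumes the server above is running from the repository root, and that the request model accepts prompt, seed, width, and height, the fields infer actually reads):

import json
from multiprocessing.connection import Client

SOCKET = "inferences.sock"  # path created by main() above

request = {"prompt": "a watercolor fox", "seed": 42, "width": 1024, "height": 1024}
with Client(SOCKET) as connection:
    connection.send_bytes(json.dumps(request).encode("utf-8"))
    jpeg_bytes = connection.recv_bytes()   # JPEG produced by the pipeline

with open("output.jpg", "wb") as f:
    f.write(jpeg_bytes)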
src/pipeline.py ADDED
@@ -0,0 +1,636 @@
+ from diffusers import FluxPipeline, AutoencoderKL, AutoencoderTiny
+ from diffusers.image_processor import VaeImageProcessor
+ from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
+ from huggingface_hub.constants import HF_HUB_CACHE
+ from transformers import T5EncoderModel, T5TokenizerFast, CLIPTokenizer, CLIPTextModel
+ import torch
+ import torch._dynamo
+ import gc
+ from PIL import Image as img
+ from PIL.Image import Image
+ from pipelines.models import TextToImageRequest
+ from torch import Generator
+ import time
+ from diffusers import DiffusionPipeline
+ from torchao.quantization import quantize_, int8_weight_only, fpx_weight_only
+ import os
+ os.environ['PYTORCH_CUDA_ALLOC_CONF'] = "expandable_segments:True"
+
+ import torch
+ import math
+ from typing import Type, Dict, Any, Tuple, Callable, Optional, Union
+ import ghanta
+ import numpy as np
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
+ from diffusers.loaders import FromOriginalModelMixin, PeftAdapterMixin
+ from diffusers.models.attention import FeedForward
+ from diffusers.models.attention_processor import (
+     Attention,
+     AttentionProcessor,
+     FluxAttnProcessor2_0,
+     FusedFluxAttnProcessor2_0,
+ )
+ from diffusers.models.modeling_utils import ModelMixin
+ from diffusers.models.normalization import AdaLayerNormContinuous, AdaLayerNormZero, AdaLayerNormZeroSingle
+ from diffusers.utils import USE_PEFT_BACKEND, is_torch_version, logging, scale_lora_layers, unscale_lora_layers
+ from diffusers.utils.import_utils import is_torch_npu_available
+ from diffusers.utils.torch_utils import maybe_allow_in_graph
+ from diffusers.models.embeddings import CombinedTimestepGuidanceTextProjEmbeddings, CombinedTimestepTextProjEmbeddings, FluxPosEmbed
+ from diffusers.models.modeling_outputs import Transformer2DModelOutput
+ logger = logging.get_logger(__name__)  # used by the warnings emitted in forward()
+
+ class BasicQuantization:
+     def __init__(self, bits=1):
+         self.bits = bits
+         self.qmin = -(2**(bits-1))
+         self.qmax = 2**(bits-1) - 1
+
+     def quantize_tensor(self, tensor):
+         scale = (tensor.max() - tensor.min()) / (self.qmax - self.qmin)
+         zero_point = self.qmin - torch.round(tensor.min() / scale)
+         qtensor = torch.round(tensor / scale + zero_point)
+         qtensor = torch.clamp(qtensor, self.qmin, self.qmax)
+         return (qtensor - zero_point) * scale, scale, zero_point
+
+ class ModelQuantization:
+     def __init__(self, model, bits=7):
+         self.model = model
+         self.quant = BasicQuantization(bits)
+
+     def quantize_model(self):
+         for name, module in self.model.named_modules():
+             if isinstance(module, torch.nn.Linear):
+                 if hasattr(module, 'weightML'):
+                     quantized_weight, _, _ = self.quant.quantize_tensor(module.weight)
+                     module.weight = torch.nn.Parameter(quantized_weight)
+                 if hasattr(module, 'bias') and module.bias is not None:
+                     quantized_bias, _, _ = self.quant.quantize_tensor(module.bias)
+                     module.bias = torch.nn.Parameter(quantized_bias)
+
+
+ def inicializar_generador(dispositivo: torch.device, respaldo: torch.Generator = None):
+     if dispositivo.type == "cpu":
+         return torch.Generator(device="cpu").set_state(torch.get_rng_state())
+     elif dispositivo.type == "cuda":
+         return torch.Generator(device=dispositivo).set_state(torch.cuda.get_rng_state())
+     else:
+         if respaldo is None:
+             return inicializar_generador(torch.device("cpu"))
+         else:
+             return respaldo
+
+ def calcular_fusion(x: torch.Tensor, info_tome: Dict[str, Any]) -> Tuple[Callable, ...]:
+     alto_original, ancho_original = info_tome["size"]
+     tokens_originales = alto_original * ancho_original
+     submuestreo = int(math.ceil(math.sqrt(tokens_originales // x.shape[1])))
+     argumentos = info_tome["args"]
+     if submuestreo <= argumentos["down"]:
+         ancho = int(math.ceil(ancho_original / submuestreo))
+         alto = int(math.ceil(alto_original / submuestreo))
+         radio = int(x.shape[1] * argumentos["ratio"])
+
+         if argumentos["generator"] is None:
+             argumentos["generator"] = inicializar_generador(x.device)
+         elif argumentos["generator"].device != x.device:
+             argumentos["generator"] = inicializar_generador(x.device, respaldo=argumentos["generator"])
+
+         usar_aleatoriedad = argumentos["rando"]
+         fusion, desfusion = ghanta.emparejamiento_suave_aleatorio_2d(
+             x, ancho, alto, argumentos["sx"], argumentos["sy"], radio,
+             sin_aleatoriedad=not usar_aleatoriedad, generador=argumentos["generator"]
+         )
+     else:
+         fusion, desfusion = (ghanta.hacer_nada, ghanta.hacer_nada)
+     fusion_a, desfusion_a = (fusion, desfusion) if argumentos["m1"] else (ghanta.hacer_nada, ghanta.hacer_nada)
+     fusion_c, desfusion_c = (fusion, desfusion) if argumentos["m2"] else (ghanta.hacer_nada, ghanta.hacer_nada)
+     fusion_m, desfusion_m = (fusion, desfusion) if argumentos["m3"] else (ghanta.hacer_nada, ghanta.hacer_nada)
+     return fusion_a, fusion_c, fusion_m, desfusion_a, desfusion_c, desfusion_m
+
+ @torch.compile
+ @maybe_allow_in_graph
+ class FluxSingleTransformerBlock(nn.Module):
+
+     def __init__(self, dim, num_attention_heads, attention_head_dim, mlp_ratio=4.0):
+         super().__init__()
+         self.mlp_hidden_dim = int(dim * mlp_ratio)
+
+         self.norm = AdaLayerNormZeroSingle(dim)
+         self.proj_mlp = nn.Linear(dim, self.mlp_hidden_dim)
+         self.act_mlp = nn.GELU(approximate="tanh")
+         self.proj_out = nn.Linear(dim + self.mlp_hidden_dim, dim)
+
+         processor = FluxAttnProcessor2_0()
+         self.attn = Attention(
+             query_dim=dim,
+             cross_attention_dim=None,
+             dim_head=attention_head_dim,
+             heads=num_attention_heads,
+             out_dim=dim,
+             bias=True,
+             processor=processor,
+             qk_norm="rms_norm",
+             eps=1e-6,
+             pre_only=True,
+         )
+
+     def forward(
+         self,
+         hidden_states: torch.FloatTensor,
+         temb: torch.FloatTensor,
+         image_rotary_emb=None,
+         joint_attention_kwargs=None,
+         tinfo: Dict[str, Any] = None,
+     ):
+         if tinfo is not None:
+             m_a, m_c, mom, u_a, u_c, u_m = calcular_fusion(hidden_states, tinfo)
+         else:
+             m_a, m_c, mom, u_a, u_c, u_m = (ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada)
+
+         residual = hidden_states
+         norm_hidden_states, gate = self.norm(hidden_states, emb=temb)
+         norm_hidden_states = m_a(norm_hidden_states)
+         mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states))
+         joint_attention_kwargs = joint_attention_kwargs or {}
+         attn_output = self.attn(
+             hidden_states=norm_hidden_states,
+             image_rotary_emb=image_rotary_emb,
+             **joint_attention_kwargs,
+         )
+
+         hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2)
+         gate = gate.unsqueeze(1)
+         hidden_states = gate * self.proj_out(hidden_states)
+         hidden_states = u_a(residual + hidden_states)
+
+         return hidden_states
+
+ @torch.compile
+ @maybe_allow_in_graph
+ class FluxTransformerBlock(nn.Module):
+
+     def __init__(self, dim, num_attention_heads, attention_head_dim, qk_norm="rms_norm", eps=1e-6):
+         super().__init__()
+
+         self.norm1 = AdaLayerNormZero(dim)
+
+         self.norm1_context = AdaLayerNormZero(dim)
+
+         if hasattr(F, "scaled_dot_product_attention"):
+             processor = FluxAttnProcessor2_0()
+         else:
+             raise ValueError(
+                 "The current PyTorch version does not support the `scaled_dot_product_attention` function."
+             )
+         self.attn = Attention(
+             query_dim=dim,
+             cross_attention_dim=None,
+             added_kv_proj_dim=dim,
+             dim_head=attention_head_dim,
+             heads=num_attention_heads,
+             out_dim=dim,
+             context_pre_only=False,
+             bias=True,
+             processor=processor,
+             qk_norm=qk_norm,
+             eps=eps,
+         )
+
+         self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
+         self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")
+
+         self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
+         self.ff_context = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")
+         self._chunk_size = None
+         self._chunk_dim = 0
+
+     def forward(
+         self,
+         hidden_states: torch.FloatTensor,
+         encoder_hidden_states: torch.FloatTensor,
+         temb: torch.FloatTensor,
+         image_rotary_emb=None,
+         joint_attention_kwargs=None,
+         tinfo: Dict[str, Any] = None,  # Add tinfo parameter
+     ):
+         if tinfo is not None:
+             m_a, m_c, mom, u_a, u_c, u_m = calcular_fusion(hidden_states, tinfo)
+         else:
+             m_a, m_c, mom, u_a, u_c, u_m = (ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada)
+
+         norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb)
+
+         norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context(
+             encoder_hidden_states, emb=temb
+         )
+         joint_attention_kwargs = joint_attention_kwargs or {}
+         norm_hidden_states = m_a(norm_hidden_states)
+         norm_encoder_hidden_states = m_c(norm_encoder_hidden_states)
+
+         attn_output, context_attn_output = self.attn(
+             hidden_states=norm_hidden_states,
+             encoder_hidden_states=norm_encoder_hidden_states,
+             image_rotary_emb=image_rotary_emb,
+             **joint_attention_kwargs,
+         )
+
+         attn_output = gate_msa.unsqueeze(1) * attn_output
+         hidden_states = u_a(attn_output) + hidden_states
+
+         norm_hidden_states = self.norm2(hidden_states)
+         norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
+
+         norm_hidden_states = mom(norm_hidden_states)
+
+         ff_output = self.ff(norm_hidden_states)
+         ff_output = gate_mlp.unsqueeze(1) * ff_output
+
+         hidden_states = u_m(ff_output) + hidden_states
+         context_attn_output = c_gate_msa.unsqueeze(1) * context_attn_output
+         encoder_hidden_states = u_c(context_attn_output) + encoder_hidden_states
+
+         norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states)
+         norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None]
+
+         context_ff_output = self.ff_context(norm_encoder_hidden_states)
+         encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output
+
+         return encoder_hidden_states, hidden_states
+
+ class FluxTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin):
+
+     _supports_gradient_checkpointing = True
+     _no_split_modules = ["FluxTransformerBlock", "FluxSingleTransformerBlock"]
+
+     @register_to_config
+     def __init__(
+         self,
+         patch_size: int = 1,
+         in_channels: int = 64,
+         out_channels: Optional[int] = None,
+         num_layers: int = 19,
+         num_single_layers: int = 38,
+         attention_head_dim: int = 128,
+         num_attention_heads: int = 24,
+         joint_attention_dim: int = 4096,
+         pooled_projection_dim: int = 768,
+         guidance_embeds: bool = False,
+         axes_dims_rope: Tuple[int] = (16, 56, 56),
+         generator: Optional[torch.Generator] = None,
+     ):
+         super().__init__()
+         self.out_channels = out_channels or in_channels
+         self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim
+
+         self.pos_embed = FluxPosEmbed(theta=10000, axes_dim=axes_dims_rope)
+
+         text_time_guidance_cls = (
+             CombinedTimestepGuidanceTextProjEmbeddings if guidance_embeds else CombinedTimestepTextProjEmbeddings
+         )
+         self.time_text_embed = text_time_guidance_cls(
+             embedding_dim=self.inner_dim, pooled_projection_dim=self.config.pooled_projection_dim
+         )
+
+         self.context_embedder = nn.Linear(self.config.joint_attention_dim, self.inner_dim)
+         self.x_embedder = nn.Linear(self.config.in_channels, self.inner_dim)
+
+         self.transformer_blocks = nn.ModuleList(
+             [
+                 FluxTransformerBlock(
+                     dim=self.inner_dim,
+                     num_attention_heads=self.config.num_attention_heads,
+                     attention_head_dim=self.config.attention_head_dim,
+                 )
+                 for i in range(self.config.num_layers)
+             ]
+         )
+
+         self.single_transformer_blocks = nn.ModuleList(
+             [
+                 FluxSingleTransformerBlock(
+                     dim=self.inner_dim,
+                     num_attention_heads=self.config.num_attention_heads,
+                     attention_head_dim=self.config.attention_head_dim,
+                 )
+                 for i in range(self.config.num_single_layers)
+             ]
+         )
+
+         self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6)
+         self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True)
+         ratio: float = 0.5
+         down: int = 1
+         sx: int = 2
+         sy: int = 2
+         rando: bool = False
+         m1: bool = False
+         m2: bool = True
+         m3: bool = False
+
+         self.tinfo = {
+             "size": None,
+             "args": {
+                 "ratio": ratio,
+                 "down": down,
+                 "sx": sx,
+                 "sy": sy,
+                 "rando": rando,
+                 "m1": m1,
+                 "m2": m2,
+                 "m3": m3,
+                 "generator": generator
+             }
+         }
+
+         self.gradient_checkpointing = False
+
+     @property
+     def attn_processors(self) -> Dict[str, AttentionProcessor]:
+         r"""
+         Returns:
+             `dict` of attention processors: A dictionary containing all attention processors used in the model with
+             indexed by its weight name.
+         """
+         processors = {}
+
+         def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
+             if hasattr(module, "get_processor"):
+                 processors[f"{name}.processor"] = module.get_processor()
+
+             for sub_name, child in module.named_children():
+                 fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
+
+             return processors
+
+         for name, module in self.named_children():
+             fn_recursive_add_processors(name, module, processors)
+
+         return processors
+
+     def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
+         count = len(self.attn_processors.keys())
+
+         if isinstance(processor, dict) and len(processor) != count:
+             raise ValueError(
+                 f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
+                 f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
+             )
+
+         def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
+             if hasattr(module, "set_processor"):
+                 if not isinstance(processor, dict):
+                     module.set_processor(processor)
+                 else:
+                     module.set_processor(processor.pop(f"{name}.processor"))
+
+             for sub_name, child in module.named_children():
+                 fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
+
+         for name, module in self.named_children():
+             fn_recursive_attn_processor(name, module, processor)
+
+     # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedFluxAttnProcessor2_0
+     def fuse_qkv_projections(self):
+         self.original_attn_processors = None
+
+         for _, attn_processor in self.attn_processors.items():
+             if "Added" in str(attn_processor.__class__.__name__):
+                 raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
+
+         self.original_attn_processors = self.attn_processors
+
+         for module in self.modules():
+             if isinstance(module, Attention):
+                 module.fuse_projections(fuse=True)
+
+         self.set_attn_processor(FusedFluxAttnProcessor2_0())
+
+     # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
+     def unfuse_qkv_projections(self):
+         if self.original_attn_processors is not None:
+             self.set_attn_processor(self.original_attn_processors)
+
+     def _set_gradient_checkpointing(self, module, value=False):
+         if hasattr(module, "gradient_checkpointing"):
+             module.gradient_checkpointing = value
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         encoder_hidden_states: torch.Tensor = None,
+         pooled_projections: torch.Tensor = None,
+         timestep: torch.LongTensor = None,
+         img_ids: torch.Tensor = None,
+         txt_ids: torch.Tensor = None,
+         guidance: torch.Tensor = None,
+         joint_attention_kwargs: Optional[Dict[str, Any]] = None,
+         controlnet_block_samples=None,
+         controlnet_single_block_samples=None,
+         return_dict: bool = True,
+         controlnet_blocks_repeat: bool = False,
+     ) -> Union[torch.FloatTensor, Transformer2DModelOutput]:
+         if joint_attention_kwargs is not None:
+             joint_attention_kwargs = joint_attention_kwargs.copy()
+             lora_scale = joint_attention_kwargs.pop("scale", 1.0)
+         else:
+             lora_scale = 1.0
+
+         if USE_PEFT_BACKEND:
+             # weight the lora layers by setting `lora_scale` for each PEFT layer
+             scale_lora_layers(self, lora_scale)
+         else:
+             if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None:
+                 logger.warning(
+                     "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
+                 )
+
+         hidden_states = self.x_embedder(hidden_states)
+         if len(hidden_states.shape) == 4:
+             self.tinfo["size"] = (hidden_states.shape[2], hidden_states.shape[3])
+
+         timestep = timestep.to(hidden_states.dtype) * 1000
+         if guidance is not None:
+             guidance = guidance.to(hidden_states.dtype) * 1000
+         else:
+             guidance = None
+
+         temb = (
+             self.time_text_embed(timestep, pooled_projections)
+             if guidance is None
+             else self.time_text_embed(timestep, guidance, pooled_projections)
+         )
+         encoder_hidden_states = self.context_embedder(encoder_hidden_states)
+
+         if txt_ids.ndim == 3:
+             logger.warning(
+                 "Passing `txt_ids` 3d torch.Tensor is deprecated."
+                 "Please remove the batch dimension and pass it as a 2d torch Tensor"
+             )
+             txt_ids = txt_ids[0]
+         if img_ids.ndim == 3:
+             logger.warning(
+                 "Passing `img_ids` 3d torch.Tensor is deprecated."
+                 "Please remove the batch dimension and pass it as a 2d torch Tensor"
+             )
+             img_ids = img_ids[0]
+
+         ids = torch.cat((txt_ids, img_ids), dim=0)
+         image_rotary_emb = self.pos_embed(ids)
+
+         for index_block, block in enumerate(self.transformer_blocks):
+             if torch.is_grad_enabled() and self.gradient_checkpointing:
+
+                 def create_custom_forward(module, return_dict=None):
+                     def custom_forward(*inputs):
+                         if return_dict is not None:
+                             return module(*inputs, return_dict=return_dict)
+                         else:
+                             return module(*inputs)
+
+                     return custom_forward
+
+                 ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
+                 encoder_hidden_states, hidden_states = torch.utils.checkpoint.checkpoint(
+                     create_custom_forward(block),
+                     hidden_states,
+                     encoder_hidden_states,
+                     temb,
+                     image_rotary_emb,
+                     **ckpt_kwargs,
+                 )
+
+             else:
+                 encoder_hidden_states, hidden_states = block(
+                     hidden_states=hidden_states,
+                     encoder_hidden_states=encoder_hidden_states,
+                     temb=temb,
+                     image_rotary_emb=image_rotary_emb,
+                     joint_attention_kwargs=joint_attention_kwargs,
+                 )
+
+             if controlnet_block_samples is not None:
+                 interval_control = len(self.transformer_blocks) / len(controlnet_block_samples)
+                 interval_control = int(np.ceil(interval_control))
+                 if controlnet_blocks_repeat:
+                     hidden_states = (
+                         hidden_states + controlnet_block_samples[index_block % len(controlnet_block_samples)]
+                     )
+                 else:
+                     hidden_states = hidden_states + controlnet_block_samples[index_block // interval_control]
+
+         hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)
+
+         for index_block, block in enumerate(self.single_transformer_blocks):
+             if torch.is_grad_enabled() and self.gradient_checkpointing:
+
+                 def create_custom_forward(module, return_dict=None):
+                     def custom_forward(*inputs):
+                         if return_dict is not None:
+                             return module(*inputs, return_dict=return_dict)
+                         else:
+                             return module(*inputs)
+
+                     return custom_forward
+
+                 ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
+                 hidden_states = torch.utils.checkpoint.checkpoint(
+                     create_custom_forward(block),
+                     hidden_states,
+                     temb,
+                     image_rotary_emb,
+                     **ckpt_kwargs,
+                 )
+
+             else:
+                 hidden_states = block(
+                     hidden_states=hidden_states,
+                     temb=temb,
+                     image_rotary_emb=image_rotary_emb,
+                     joint_attention_kwargs=joint_attention_kwargs,
+                 )
+
+             if controlnet_single_block_samples is not None:
+                 interval_control = len(self.single_transformer_blocks) / len(controlnet_single_block_samples)
+                 interval_control = int(np.ceil(interval_control))
+                 hidden_states[:, encoder_hidden_states.shape[1] :, ...] = (
+                     hidden_states[:, encoder_hidden_states.shape[1] :, ...]
+                     + controlnet_single_block_samples[index_block // interval_control]
+                 )
+
+         hidden_states = hidden_states[:, encoder_hidden_states.shape[1] :, ...]
+
+         hidden_states = self.norm_out(hidden_states, temb)
+         output = self.proj_out(hidden_states)
+
+         if USE_PEFT_BACKEND:
+             unscale_lora_layers(self, lora_scale)
+
+         if not return_dict:
+             return (output,)
+
+         return Transformer2DModelOutput(sample=output)
+
+ Pipeline = None
+ torch.backends.cuda.matmul.allow_tf32 = True
+ torch.backends.cudnn.enabled = True
+ torch.backends.cudnn.benchmark = True
+
+ # ckpt_id = "black-forest-labs/FLUX.1-schnell"
+ # ckpt_revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
+ ckpt_id = "silentdriver/4b68f38c0b"
+ ckpt_revision = "36a3cf4a9f733fc5f31257099b56b304fb2eceab"
+ def empty_cache():
+     gc.collect()
+     torch.cuda.empty_cache()
+     torch.cuda.reset_max_memory_allocated()
+     torch.cuda.reset_peak_memory_stats()
+
+ def load_pipeline() -> Pipeline:
+     empty_cache()
+
+     dtype, device = torch.bfloat16, "cuda"
+
+     text_encoder_2 = T5EncoderModel.from_pretrained(
+         "silentdriver/aadb864af9", revision="060dabc7fa271c26dfa3fd43c16e7c5bf3ac7892", torch_dtype=torch.bfloat16
+     ).to(memory_format=torch.channels_last)
+
+
+
+     vae = AutoencoderTiny.from_pretrained("silentdriver/7815792fb4", revision="bdb7d88ebe5a1c6b02a3c0c78651dd57a403fdf5", torch_dtype=dtype)
+
+     path = os.path.join(HF_HUB_CACHE, "models--silentdriver--7d92df966a/snapshots/add1b8d9a84c728c1209448c4a695759240bad3c")
+     generator = torch.Generator(device=device)
+     model = FluxTransformer2DModel.from_pretrained(path, torch_dtype=dtype, use_safetensors=False, generator=generator).to(memory_format=torch.channels_last)
+     torch.backends.cudnn.benchmark = True
+     torch.backends.cudnn.deterministic = False
+     # model = torch.compile(model, mode="max-autotune-no-cudagraphs")
+     # model = torch.compile(model, backend="aot_eager")
+     vae = torch.compile(vae)
+     pipeline = DiffusionPipeline.from_pretrained(
+         ckpt_id,
+         vae=vae,
+         revision=ckpt_revision,
+         transformer=model,
+         text_encoder_2=text_encoder_2,
+         torch_dtype=dtype,
+     ).to(device)
+     pipeline.vae.requires_grad_(False)
+     pipeline.transformer.requires_grad_(False)
+     pipeline.text_encoder_2.requires_grad_(False)
+     pipeline.text_encoder.requires_grad_(False)
+
+     # pipeline.enable_sequential_cpu_offload(exclude=["transformer"])
+
+     for _ in range(3):
+         pipeline(prompt="blah blah waah waah oneshot oneshot gang gang", width=1024, height=1024, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256)
+
+     empty_cache()
+     return pipeline
+
+
+ @torch.no_grad()
+ def infer(request: TextToImageRequest, pipeline: Pipeline, generator: Generator) -> Image:
+     image = pipeline(request.prompt, generator=generator, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256, height=request.height, width=request.width, output_type="pil").images[0]
+     return image
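BasicQuantization above performs a plain affine quantize/dequantize round trip over a tensor's full range. A standalone sketch of the same arithmetic at the 7-bit default used by ModelQuantization (illustrative only; it does not touch the pipeline above):

import torch

bits = 7
qmin, qmax = -(2 ** (bits - 1)), 2 ** (bits - 1) - 1

w = torch.randn(4, 4)
scale = (w.max() - w.min()) / (qmax - qmin)
zero_point = qmin - torch.round(w.min() / scale)
w_q = torch.clamp(torch.round(w / scale + zero_point), qmin, qmax)
w_dq = (w_q - zero_point) * scale   # dequantized weights, close to the original
print((w - w_dq).abs().max())       # small round-trip error at 7 bits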
uv.lock ADDED
The diff for this file is too large to render. See raw diff