import os
import gc
import torch
from PIL import Image as img   # PIL module: img.open() is the fallback loader
from PIL.Image import Image    # PIL Image class, used in type annotations
from diffusers import (
FluxTransformer2DModel,
DiffusionPipeline,
AutoencoderTiny
)
from transformers import T5EncoderModel
from huggingface_hub.constants import HF_HUB_CACHE
from torchao.quantization import quantize_, int8_weight_only
from first_block_cache.diffusers_adapters import apply_cache_on_pipe
from pipelines.models import TextToImageRequest
from torch import Generator
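# Let the CUDA caching allocator grow segments on demand to reduce fragmentation.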
os.environ['PYTORCH_CUDA_ALLOC_CONF']="expandable_segments:True"
Pipeline = DiffusionPipeline  # concrete alias for the type hints below
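# Trade a little precision for speed: TF32 matmuls plus cuDNN autotuning.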
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
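# Base checkpoint, pinned to a specific revision for reproducible loads.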
ckpt_id = "black-forest-labs/FLUX.1-schnell"
ckpt_revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
def are_two_tensors_similar(t1, t2, *, threshold, parallelized=False):
    # Compare the mean absolute difference against the mean magnitude of t1,
    # giving a scale-invariant relative error. `parallelized` is accepted for
    # API compatibility but unused here.
    with torch.no_grad():  # no gradients are needed for a similarity check
        mean_diff = torch.abs(t1 - t2).mean()
        mean_t1 = torch.abs(t1).mean()
        # A small epsilon guards against division by zero.
        diff = mean_diff / (mean_t1 + 1e-8)
    return diff.item() < threshold
def empty_cache():
    # Release Python garbage and cached CUDA blocks, then reset the peak-memory
    # counters (reset_peak_memory_stats supersedes the deprecated
    # reset_max_memory_allocated).
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
def load_pipeline() -> Pipeline:
empty_cache()
dtype, device = torch.bfloat16, "cuda"
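    # Load a pre-converted bf16 copy of the T5-XXL text encoder, pinned by revision.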
text_encoder_2 = T5EncoderModel.from_pretrained(
"city96/t5-v1_1-xxl-encoder-bf16",
revision="1b9c856aadb864af93c1dcdc226c2774fa67bc86",
torch_dtype=torch.bfloat16
).to(memory_format=torch.channels_last)
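    # Pre-quantized int8 weight-only transformer, expected in the local HF cache.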
path = os.path.join(HF_HUB_CACHE, "models--RobertML--FLUX.1-schnell-int8wo/snapshots/307e0777d92df966a3c0f99f31a6ee8957a9857a")
model = FluxTransformer2DModel.from_pretrained(
path,
torch_dtype=dtype,
use_safetensors=False
).to(memory_format=torch.channels_last)
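    # Assemble the full FLUX.1-schnell pipeline around the swapped-in transformer
    # and text encoder, then move everything to the GPU.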
pipeline = DiffusionPipeline.from_pretrained(
ckpt_id,
revision=ckpt_revision,
transformer=model,
text_encoder_2=text_encoder_2,
torch_dtype=dtype,
).to(device)
    # quantize_(pipeline.vae, int8_weight_only())  # optional VAE quantization, left disabled
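    # First-block caching skips redundant transformer work across denoising steps.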
apply_cache_on_pipe(pipeline)
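    # Warm-up passes let cuDNN autotune kernels and prime the cache, so the
    # first real request does not pay one-time startup costs.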
for _ in range(3):
pipeline(
prompt="onomancy, aftergo, spirantic, Platyhelmia, modificator, drupaceous, jobbernowl, hereness",
width=1024,
height=1024,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256
)
return pipeline
@torch.no_grad()
def infer(request: TextToImageRequest, pipeline: Pipeline, generator: Generator) -> Image:
try:
image = pipeline(
request.prompt,
generator=generator,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
height=request.height,
width=request.width,
output_type="pil"
).images[0]
    except Exception:
        # Fall back to a bundled placeholder image if generation fails.
        image = img.open("./RobertML.png")
return image
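# Minimal local smoke test. This is a sketch: it assumes TextToImageRequest can
# be constructed with the prompt/width/height fields that infer() reads; adjust
# to the actual model definition in pipelines.models.
if __name__ == "__main__":
    pipe = load_pipeline()
    request = TextToImageRequest(prompt="a lighthouse at dusk", width=1024, height=1024)
    result = infer(request, pipe, Generator("cuda").manual_seed(0))
    result.save("sample.png")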