# edge-flux-optim-01 / src/pipeline.py
import gc
import time

import torch
from diffusers import DiffusionPipeline, FluxPipeline
from PIL import Image as img
from PIL.Image import Image
from torch import Generator
from torchao.quantization import quantize_, int8_weight_only

from pipelines.models import TextToImageRequest

# FLUX.1-schnell loads as a FluxPipeline, so use it as the pipeline type.
Pipeline = FluxPipeline

ckpt_id = "black-forest-labs/FLUX.1-schnell"

def empty_cache():
    """Free Python and CUDA memory and reset the peak-memory counters."""
    start = time.time()
    gc.collect()
    torch.cuda.empty_cache()
    # reset_max_memory_allocated() is a deprecated alias of
    # reset_peak_memory_stats(), so a single call covers both counters.
    torch.cuda.reset_peak_memory_stats()
    print(f"Flush took: {time.time() - start:.2f}s")

def load_pipeline() -> Pipeline:
    empty_cache()
    dtype = torch.bfloat16
    pipeline = DiffusionPipeline.from_pretrained(
        ckpt_id,
        torch_dtype=dtype,
    )
    # Stream submodules between CPU and GPU so the whole model never has to
    # sit in VRAM at once.
    pipeline.enable_sequential_cpu_offload()
    # Run two throwaway generations so CUDA kernels and caches are warm
    # before the first real request.
    for _ in range(2):
        empty_cache()
        pipeline(
            prompt="onomancy, aftergo, spirantic, Platyhelmia, modificator, drupaceous, jobbernowl, hereness",
            width=1024,
            height=1024,
            guidance_scale=0.0,
            num_inference_steps=4,
            max_sequence_length=256,
        )
    return pipeline
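

# The torchao imports above are otherwise unused; this is a minimal optional
# sketch (an assumption about intent, not part of the original flow) of how
# int8 weight-only quantization could shrink the transformer's weight memory
# before warmup. quantize_() mutates the module in place.
def quantize_transformer(pipeline: Pipeline) -> None:
    quantize_(pipeline.transformer, int8_weight_only())
    empty_cache()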


@torch.inference_mode()
def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
    empty_cache()
    try:
        generator = Generator("cuda").manual_seed(request.seed)
        image = pipeline(
            request.prompt,
            generator=generator,
            guidance_scale=0.0,
            num_inference_steps=4,
            max_sequence_length=256,
            height=request.height,
            width=request.width,
            output_type="pil",
        ).images[0]
    except Exception:
        # A bare except would also swallow KeyboardInterrupt/SystemExit; catch
        # Exception and fall back to the bundled placeholder image instead.
        image = img.open("./loy.png")
    return image
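

# Minimal local driver, assuming TextToImageRequest accepts the four fields
# read in infer() (prompt, seed, height, width); the field names and values
# here are illustrative, not taken from pipelines.models.
if __name__ == "__main__":
    pipe = load_pipeline()
    request = TextToImageRequest(
        prompt="a watercolor fox in the snow",  # hypothetical prompt
        seed=0,
        height=1024,
        width=1024,
    )
    infer(request, pipe).save("out.png")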