silencer107 committed on
Commit 42c0e61 · verified · 1 Parent(s): 0a9e6a7

Update src/pipeline.py

Files changed (1):
  1. src/pipeline.py +45 -9
src/pipeline.py CHANGED
@@ -1,26 +1,62 @@
- from transformers import T5EncoderModel, T5TokenizerFast, CLIPTokenizer, CLIPTextModel
- import gc
  from PIL.Image import Image
  from pipelines.models import TextToImageRequest
  from torch import Generator
- import torch
- from diffusers import FluxPipeline, AutoencoderTiny

  Pipeline = None


  def load_pipeline() -> Pipeline:
-     pipeline = FluxPipeline.from_pretrained(
-         "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16)
-     pipeline.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
-     pipeline.enable_sequential_cpu_offload()
      for _ in range(2):
          pipeline(prompt="insensible, timbale, pothery, electrovital, actinogram, taxis, intracerebellar, centrodesmus", width=1024, height=1024, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256)
-
      return pipeline


  def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
      generator = Generator("cuda").manual_seed(request.seed)
      image=pipeline(request.prompt,generator=generator, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256, height=request.height, width=request.width, output_type="pil").images[0]
      return(image)
 
+ import torch
  from PIL.Image import Image
+ from diffusers import FluxPipeline
  from pipelines.models import TextToImageRequest
  from torch import Generator
+ #from time import perf_counter
+ import os
+ from diffusers import FluxPipeline, AutoencoderKL
+ from diffusers.image_processor import VaeImageProcessor
+ from transformers import T5EncoderModel, T5TokenizerFast, CLIPTokenizer, CLIPTextModel
+ import diffusers
+ #from optimum.quanto import freeze, qfloat8, quantize
+ import gc
+ from diffusers import FluxTransformer2DModel, DiffusionPipeline
+ #from torchao.quantization import quantize_,int8_weight_only

+ os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
  Pipeline = None

+ ckpt_id = "black-forest-labs/FLUX.1-schnell"
+ def empty_cache():
+     gc.collect()
+     torch.cuda.empty_cache()
+     torch.cuda.reset_max_memory_allocated()
+     torch.cuda.reset_peak_memory_stats()

  def load_pipeline() -> Pipeline:
+     empty_cache()
+     dtype, device = torch.bfloat16, "cuda"

+     text_encoder_2 = T5EncoderModel.from_pretrained(
+         "city96/t5-v1_1-xxl-encoder-bf16", torch_dtype=torch.bfloat16
+     )
+     vae=AutoencoderKL.from_pretrained(ckpt_id, subfolder="vae", torch_dtype=dtype)
+     pipeline = DiffusionPipeline.from_pretrained(
+         ckpt_id,
+         vae=vae,
+         text_encoder_2 = text_encoder_2,
+         torch_dtype=dtype,
+     )
+     # torch.backends.cudnn.benchmark = True
+     # torch.backends.cuda.matmul.allow_tf32 = True
+     # torch.cuda.set_per_process_memory_fraction(0.99)
+     # pipeline.text_encoder.to(memory_format=torch.channels_last)
+     # pipeline.transformer.to(memory_format=torch.channels_last)
+     # pipeline.vae.to(memory_format=torch.channels_last)
+     # pipeline.vae.enable_tiling()
+     # pipeline.vae = torch.compile(pipeline.vae)
+     # pipeline._exclude_from_cpu_offload = ["vae"]
+     pipeline.enable_sequential_cpu_offload()
      for _ in range(2):
          pipeline(prompt="insensible, timbale, pothery, electrovital, actinogram, taxis, intracerebellar, centrodesmus", width=1024, height=1024, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256)
      return pipeline


+ @torch.inference_mode()
  def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
+     # torch.cuda.reset_peak_memory_stats()
+     # torch.backends.cuda.matmul.allow_tf32 = True
      generator = Generator("cuda").manual_seed(request.seed)
      image=pipeline(request.prompt,generator=generator, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256, height=request.height, width=request.width, output_type="pil").images[0]
      return(image)
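
For reference, a minimal smoke-test sketch of how the updated module would be driven (not part of this commit; it assumes src/pipeline.py is importable as pipeline and that TextToImageRequest accepts the prompt, seed, width, and height fields that infer() reads):

    # Hypothetical smoke test -- not part of the commit. Assumes src/pipeline.py is on the
    # import path as `pipeline` and that TextToImageRequest takes these keyword arguments.
    from pipelines.models import TextToImageRequest
    import pipeline as p

    pipe = p.load_pipeline()   # loads FLUX.1-schnell with the bf16 T5 encoder, then warms up twice
    req = TextToImageRequest(prompt="a lighthouse at dusk", seed=0, width=1024, height=1024)
    img = p.infer(req, pipe)   # 4-step, guidance-free sampling with a seeded CUDA generator
    img.save("out.png")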