silencer107 committed
Commit c193566 · verified · 1 parent: a6f2238

Update src/pipeline.py

Files changed (1):
  1. src/pipeline.py +19 -27
src/pipeline.py CHANGED
@@ -1,53 +1,45 @@
+from torch import Generator
 import torch
 from PIL.Image import Image
-from diffusers import FluxPipeline
 from pipelines.models import TextToImageRequest
 from torch import Generator
-#from time import perf_counter
+from diffusers import FluxTransformer2DModel, DiffusionPipeline
+import gc
 import os
-from diffusers import FluxPipeline, AutoencoderKL, AutoencoderTiny
-from diffusers.image_processor import VaeImageProcessor
 from transformers import T5EncoderModel, T5TokenizerFast, CLIPTokenizer, CLIPTextModel
-import diffusers
-#from optimum.quanto import freeze, qfloat8, quantize
-import gc
-from diffusers import FluxTransformer2DModel, DiffusionPipeline
-from torchao.quantization import quantize_,int8_weight_only
+import torch._dynamo
+from torchao.quantization import quantize_, int8_weight_only, fpx_weight_only
+os.environ['PYTORCH_CUDA_ALLOC_CONF']="expandable_segments:True"
 HOME = os.environ["HOME"]
-os.environ["TOKENIZERS_PARALLELISM"] = "True"
-os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
 Pipeline = None
+ckpt_id = "black-forest-labs/FLUX.1-schnell"
 
-ckpt_id = "slobers/tx"
 def empty_cache():
     gc.collect()
     torch.cuda.empty_cache()
     torch.cuda.reset_max_memory_allocated()
     torch.cuda.reset_peak_memory_stats()
-
+
 def load_pipeline() -> Pipeline:
     empty_cache()
     dtype, device = torch.bfloat16, "cuda"
-    vae = AutoencoderTiny.from_pretrained("aifeifei798/taef1", torch_dtype=dtype)
+    text_encoder = CLIPTextModel.from_pretrained(ckpt_id, subfolder="text_encoder", torch_dtype=torch.bfloat16)
+    quantize_(text_encoder, int8_weight_only())
+    vae = AutoencoderTiny.from_pretrained("RobertML/FLUX.1-schnell-vae_e3m2", torch_dtype=torch.bfloat16)
     quantize_(vae, int8_weight_only())
-    model = FluxTransformer2DModel.from_pretrained(f"{HOME}/.cache/huggingface/hub/models--slobers--transgender/snapshots/cb99836efa0ed55856970269c42fafdaa0e44c5d", torch_dtype=dtype, use_safetensors=False)
-    pipeline = DiffusionPipeline.from_pretrained(ckpt_id, vae=vae, transformer=model, torch_dtype=dtype,)
-    torch.backends.cudnn.benchmark = True
-    torch.backends.cuda.matmul.allow_tf32 = True
-    torch.cuda.set_per_process_memory_fraction(0.90)
-    pipeline.text_encoder.to(memory_format=torch.channels_last)
-    pipeline.transformer.to(memory_format=torch.channels_last)
-    pipeline.vae.to(memory_format=torch.channels_last)
-    pipeline.vae.enable_tiling()
+    text_encoder_2 = T5EncoderModel.from_pretrained("city96/t5-v1_1-xxl-encoder-bf16", torch_dtype=torch.bfloat16)
+    quantize_(text_encoder_2, int8_weight_only())
+    model = FluxTransformer2DModel.from_pretrained(f"{HOME}/.cache/huggingface/hub/models--slobers--transgender/snapshots/cb99836efa0ed55856970269c42fafdaa0e44c5d", torch_dtype=torch.bfloat16, use_safetensors=False)
+    pipeline = DiffusionPipeline.from_pretrained(ckpt_id, text_encoder=text_encoder, transformer=model, text_encoder_2=text_encoder_2, torch_dtype=torch.bfloat16)
     pipeline.to("cuda")
+
     for _ in range(2):
+        empty_cache()
         pipeline(prompt="insensible, timbale, pothery, electrovital, actinogram, taxis, intracerebellar, centrodesmus", width=1024, height=1024, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256)
-    empty_cache()
     return pipeline
 
-
-@torch.inference_mode()
 def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
+    empty_cache()
     generator = Generator("cuda").manual_seed(request.seed)
-    image=pipeline(request.prompt,generator=generator, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256, height=request.height, width=request.width, output_type="pil").images[0]
+    image=pipeline(request.prompt,generator=generator, guidance_scale=0.0, num_inference_steps=3, max_sequence_length=256, height=request.height, width=request.width, output_type="pil").images[0]
     return(image)
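
One wrinkle worth flagging in the new version: load_pipeline still builds the VAE with AutoencoderTiny.from_pretrained(...), but the rewritten imports only bring in FluxTransformer2DModel and DiffusionPipeline, so importing the module as committed would raise NameError: name 'AutoencoderTiny' is not defined. The quantized tiny VAE is also never handed to DiffusionPipeline.from_pretrained, which now receives only text_encoder, transformer, and text_encoder_2, so the pipeline would fall back to the checkpoint's full-size VAE. A minimal patch, assuming the tiny VAE is actually meant to be used:

from diffusers import FluxTransformer2DModel, DiffusionPipeline, AutoencoderTiny  # add the missing class

pipeline = DiffusionPipeline.from_pretrained(
    ckpt_id,
    vae=vae,                        # wire the quantized tiny VAE back in
    text_encoder=text_encoder,
    transformer=model,
    text_encoder_2=text_encoder_2,
    torch_dtype=torch.bfloat16,
)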
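
For readers who have not used torchao: quantize_ rewrites a module in place, replacing the weights of its nn.Linear layers with int8 tensor subclasses, after which the module is called exactly as before. A self-contained sketch of the pattern the commit applies to the text encoders and VAE (toy module, not part of this repo):

import torch
from torch import nn
from torchao.quantization import quantize_, int8_weight_only

toy = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 8)).to(torch.bfloat16)
quantize_(toy, int8_weight_only())  # in place: Linear weights become int8, activations stay bf16
out = toy(torch.randn(1, 64, dtype=torch.bfloat16))  # forward pass is unchanged

Note that fpx_weight_only is imported in the new file but never applied, presumably left over from experimentation.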
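
As for how the two entry points are exercised: whatever harness drives this module presumably calls load_pipeline once at startup and infer once per request. A hypothetical driver, assuming TextToImageRequest accepts the four fields that infer reads (the real constructor in pipelines.models may differ):

from pipelines.models import TextToImageRequest
from pipeline import load_pipeline, infer  # assuming src/ is on sys.path

pipeline = load_pipeline()  # includes the two 1024x1024 warm-up generations
request = TextToImageRequest(prompt="a watercolor fox in a snowy forest",
                             seed=42, width=1024, height=1024)
image = infer(request, pipeline)  # now 3 inference steps instead of 4
image.save("out.png")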