jokerbit committed
Commit 8c32573 · verified · 1 Parent(s): 0903770

Upload src/pipeline.py with huggingface_hub

Files changed (1):
  src/pipeline.py +18 -13
src/pipeline.py CHANGED
@@ -13,12 +13,14 @@ from transformers import T5EncoderModel, CLIPTextModel, logging
 
 
 Pipeline: TypeAlias = FluxPipeline
-torch.backends.cudnn.benchmark = True
-torch.backends.cudnn.benchmark = True
-torch._inductor.config.conv_1x1_as_mm = True
-torch._inductor.config.coordinate_descent_tuning = True
-torch._inductor.config.epilogue_fusion = False
-torch._inductor.config.coordinate_descent_check_all_directions = True
+
+#torch.backends.cudnn.benchmark = True
+#torch._inductor.config.conv_1x1_as_mm = True
+#torch._inductor.config.coordinate_descent_tuning = True
+#torch._inductor.config.epilogue_fusion = False
+# torch._inductor.config.coordinate_descent_check_all_directions = True
+
+
 os.environ['PYTORCH_CUDA_ALLOC_CONF']="expandable_segments:True"
 os.environ["TOKENIZERS_PARALLELISM"] = "True"
 CHECKPOINT = "jokerbit/flux.1-schnell-Robert-int8wo"
@@ -42,16 +44,18 @@ def load_pipeline() -> Pipeline:
         transformer=transformer,
         local_files_only=True,
         torch_dtype=torch.bfloat16,
-    )
+    ).to("cuda")
 
     pipeline.transformer.to(memory_format=torch.channels_last)
+    pipeline.transformer = torch.compile(pipeline.transformer, fullgraph=True)
     # pipeline.vae.to(memory_format=torch.channels_last)
     # quantize_(pipeline.vae, int8_weight_only())
-    pipeline.vae = torch.compile(pipeline.vae, fullgraph=True, mode="max-autotune")
-    pipeline.to("cuda")
-
-    for _ in range(2):
-        pipeline("cat", num_inference_steps=4)
+    # pipeline.vae = torch.compile(pipeline.vae, fullgraph=True, mode="max-autotune")
+    # pipeline.to("cuda")
+    PROMPT = 'semiconformity, peregrination, quip, twineless, emotionless, tawa, depickle'
+
+    for _ in range(4):
+        pipeline(PROMPT, max_sequence_length=256, height=1024, width=1024, num_inference_steps=4, guidance_scale=0.0)
 
     return pipeline
 
@@ -76,13 +80,14 @@ if __name__ == "__main__":
         height=None,
         width=None,
         seed=666)
+    generator = torch.Generator(device="cuda")
     start_time = perf_counter()
     pipe_ = load_pipeline()
    stop_time = perf_counter()
     print(f"Pipeline is loaded in {stop_time - start_time}s")
     for _ in range(4):
         start_time = perf_counter()
-        infer(request, pipe_)
+        infer(request, pipe_, generator=generator.manual_seed(request.seed))
         stop_time = perf_counter()
         print(f"Request in {stop_time - start_time}s")
 
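
Note: the first hunk also drops a duplicated torch.backends.cudnn.benchmark = True line and moves the remaining tuning knobs behind comments rather than deleting them. For reference, a commented sketch of what each of these PyTorch 2.x flags does; whether they help is model- and GPU-dependent, which is presumably why they were disabled here:

import torch

torch.backends.cudnn.benchmark = True                                 # autotune cuDNN conv algorithms per input shape
torch._inductor.config.conv_1x1_as_mm = True                          # lower 1x1 convolutions to matrix multiplies
torch._inductor.config.coordinate_descent_tuning = True               # extra coordinate-descent kernel autotuning
torch._inductor.config.epilogue_fusion = False                        # skip fusing pointwise epilogues into matmul templates
torch._inductor.config.coordinate_descent_check_all_directions = True # widen the coordinate-descent search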
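The core change in load_pipeline() is to compile the transformer (instead of the VAE) and run four warm-up generations at load time, so the first-call compilation cost is paid once during loading rather than by a real request. A minimal, self-contained sketch of that compile-then-warm-up pattern; the toy MyBlock module and its shapes are hypothetical, not from the commit:

import torch

class MyBlock(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = torch.nn.Linear(64, 64)

    def forward(self, x):
        return torch.relu(self.proj(x))

block = MyBlock().eval()
# fullgraph=True asks the compiler to capture the module as one graph and
# to error on graph breaks instead of silently falling back to eager mode.
block = torch.compile(block, fullgraph=True)

x = torch.randn(8, 64)
with torch.no_grad():
    block(x)  # first call triggers compilation (slow)
    block(x)  # later calls with the same shapes reuse the compiled graph

This is also why the warm-up call fixes max_sequence_length, height, and width: keeping the warm-up shapes identical to the serving shapes means the compiled graphs are reused at request time instead of being recompiled.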
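The last hunk threads a CUDA torch.Generator through infer(), reseeding it with the request's seed on every call; manual_seed() returns the generator itself, which is what makes the one-liner work. A sketch of the same per-request determinism in isolation, assuming pipeline is the FluxPipeline returned by load_pipeline(); the run_seeded helper is hypothetical:

import torch

def run_seeded(pipeline, prompt: str, seed: int):
    # Reusing one Generator and reseeding it per request gives reproducible
    # samples without allocating a fresh RNG object on every call.
    generator = torch.Generator(device="cuda").manual_seed(seed)
    result = pipeline(
        prompt,
        max_sequence_length=256,
        num_inference_steps=4,
        guidance_scale=0.0,
        generator=generator,
    )
    return result.images[0]

Two calls of run_seeded(pipeline, "cat", 666) should then produce the same image, while different seeds vary the sample.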