jokerbit committed on
Commit
03240f9
·
verified ·
1 Parent(s): 5e8c66a

2 fewer lines

Browse files
Files changed (1) hide show
  1. src/pipeline.py +1 -3
src/pipeline.py CHANGED
@@ -14,7 +14,6 @@ from transformers import T5EncoderModel, CLIPTextModel, logging
14
 
15
  Pipeline: TypeAlias = FluxPipeline
16
  torch.backends.cudnn.benchmark = True
17
- torch.backends.cudnn.benchmark = True
18
  torch._inductor.config.conv_1x1_as_mm = True
19
  torch._inductor.config.coordinate_descent_tuning = True
20
  torch._inductor.config.epilogue_fusion = False
@@ -49,9 +48,8 @@ def load_pipeline() -> Pipeline:
49
  torch_dtype=torch.bfloat16,
50
  )
51
 
52
- pipeline.transformer.to(memory_format=torch.channels_last)
53
  # quantize_(pipeline.vae, int8_weight_only())
54
- pipeline.vae.to(memory_format=torch.channels_last)
55
  pipeline.vae = torch.compile(pipeline.vae, mode="max-autotune", fullgraph=True)
56
  pipeline.to("cuda")
57
 
 
14
 
15
  Pipeline: TypeAlias = FluxPipeline
16
  torch.backends.cudnn.benchmark = True
 
17
  torch._inductor.config.conv_1x1_as_mm = True
18
  torch._inductor.config.coordinate_descent_tuning = True
19
  torch._inductor.config.epilogue_fusion = False
 
48
  torch_dtype=torch.bfloat16,
49
  )
50
 
51
+ pipeline.to(memory_format=torch.channels_last)
52
  # quantize_(pipeline.vae, int8_weight_only())
 
53
  pipeline.vae = torch.compile(pipeline.vae, mode="max-autotune", fullgraph=True)
54
  pipeline.to("cuda")
55