jokerbit committed
Commit a6270be · verified · 1 Parent(s): 8c32573

Upload src/pipeline.py with huggingface_hub

Files changed (1)
  1. src/pipeline.py +10 -9
src/pipeline.py CHANGED
@@ -10,19 +10,20 @@ from pipelines.models import TextToImageRequest
 from torch import Generator
 from torchao.quantization import quantize_, int8_weight_only
 from transformers import T5EncoderModel, CLIPTextModel, logging
-
+import torch._dynamo
+torch._dynamo.config.suppress_errors = True
 
 Pipeline: TypeAlias = FluxPipeline
 
-#torch.backends.cudnn.benchmark = True
-#torch._inductor.config.conv_1x1_as_mm = True
-#torch._inductor.config.coordinate_descent_tuning = True
-#torch._inductor.config.epilogue_fusion = False
-# torch._inductor.config.coordinate_descent_check_all_directions = True
+torch.backends.cudnn.benchmark = True
+torch._inductor.config.conv_1x1_as_mm = True
+torch._inductor.config.coordinate_descent_tuning = True
+torch._inductor.config.epilogue_fusion = False
+torch._inductor.config.coordinate_descent_check_all_directions = True
 
 
 os.environ['PYTORCH_CUDA_ALLOC_CONF']="expandable_segments:True"
-os.environ["TOKENIZERS_PARALLELISM"] = "True"
+# os.environ["TOKENIZERS_PARALLELISM"] = "True"
 CHECKPOINT = "jokerbit/flux.1-schnell-Robert-int8wo"
 REVISION = "5ef0012f11a863e5111ec56540302a023bc8587b"
 
@@ -47,7 +48,7 @@ def load_pipeline() -> Pipeline:
     ).to("cuda")
 
     pipeline.transformer.to(memory_format=torch.channels_last)
-    pipeline.transformer = torch.compile(pipeline.transformer, fullgraph=True)
+    pipeline.transformer = torch.compile(pipeline.transformer, mode="reduce-overhead")
     # pipeline.vae.to(memory_format=torch.channels_last)
     # quantize_(pipeline.vae, int8_weight_only())
     # pipeline.vae = torch.compile(pipeline.vae, fullgraph=True, mode="max-autotune")
@@ -55,7 +56,7 @@ def load_pipeline() -> Pipeline:
     PROMPT = 'semiconformity, peregrination, quip, twineless, emotionless, tawa, depickle'
 
     for _ in range(4):
-        pipeline(PROMPT, max_sequence_length=256, height=1024, width=1024, num_inference_steps=4, guidance_scale=0.0)
+        pipeline(prompt="onomancy, aftergo, spirantic, Platyhelmia, modificator, drupaceous, jobbernowl, hereness", width=1024, height=1024, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256)
 
     return pipeline
 
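What the change amounts to: the previously commented-out cuDNN and inductor tuning flags are switched on, Dynamo is told to fall back to eager execution on graph breaks instead of raising (suppress_errors), and the transformer is compiled with mode="reduce-overhead" rather than fullgraph=True. Below is a minimal, self-contained sketch of that recipe; ToyBlock is a hypothetical stand-in for the FLUX transformer, not part of this repository.

import torch
import torch._dynamo
import torch._inductor.config

# Fall back to eager on graph breaks instead of raising, as in the commit.
torch._dynamo.config.suppress_errors = True

# cuDNN autotuning plus the inductor codegen flags the commit enables.
torch.backends.cudnn.benchmark = True
torch._inductor.config.conv_1x1_as_mm = True
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.epilogue_fusion = False
torch._inductor.config.coordinate_descent_check_all_directions = True

class ToyBlock(torch.nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.proj = torch.nn.Linear(64, 64)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.nn.functional.gelu(self.proj(x))

device = "cuda" if torch.cuda.is_available() else "cpu"

# "reduce-overhead" cuts per-call launch overhead (CUDA graphs where
# available) and, unlike fullgraph=True, tolerates graph breaks.
block = torch.compile(ToyBlock().to(device), mode="reduce-overhead")

# Warm-up: the first calls pay the compile/autotune cost, which is why
# load_pipeline() runs the pipeline four times before returning it.
x = torch.randn(8, 64, device=device)
for _ in range(4):
    block(x)

Pairing reduce-overhead with suppress_errors is a defensive choice: if compilation of some subgraph fails, generation degrades to eager speed instead of crashing at load time.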
 
 
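The torchao imports (quantize_, int8_weight_only) are only exercised in the still-commented VAE lines; the transformer arrives already quantized via the jokerbit/flux.1-schnell-Robert-int8wo checkpoint. A hedged sketch of how such an int8 weight-only model can be produced with torchao, using a small Linear stack in place of FLUX:

import torch
from torchao.quantization import quantize_, int8_weight_only

# Hypothetical stand-in model in bfloat16, the dtype torchao's
# weight-only int8 path is usually applied to.
model = torch.nn.Sequential(
    torch.nn.Linear(128, 256),
    torch.nn.ReLU(),
    torch.nn.Linear(256, 128),
).to(torch.bfloat16)

# Replace each Linear weight with an int8 representation in place;
# activations remain floating point, hence "weight only".
quantize_(model, int8_weight_only())

x = torch.randn(1, 128, dtype=torch.bfloat16)
print(model(x).shape)  # torch.Size([1, 128])

How the quantized checkpoint itself was exported is not shown in this commit; the sketch only illustrates the quantization call.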