jokerbit committed
Commit cedf637 · verified · 1 Parent(s): c3810c7

Upload src/pipeline.py with huggingface_hub

Files changed (1):
  1. src/pipeline.py +4 -4
src/pipeline.py CHANGED
@@ -55,17 +55,17 @@ def load_pipeline() -> Pipeline:
 
     pipeline.to(memory_format=torch.channels_last)
     pipeline.transformer = torch.compile(pipeline.transformer, mode="max-autotune", fullgraph=False)
-    quantize_(pipeline.vae, int8_weight_only())
-    pipeline.vae = torch.compile(pipeline.vae, fullgraph=True, mode="max-autotune")
+    # quantize_(pipeline.vae, int8_weight_only())
+    # pipeline.vae = torch.compile(pipeline.vae, fullgraph=True, mode="max-autotune")
 
     PROMPT = 'semiconformity, peregrination, quip, twineless, emotionless, tawa, depickle'
-    with torch.inference_mode():
+    with torch.no_grad():
         for _ in range(4):
             pipeline(prompt="onomancy, aftergo, spirantic, Platyhelmia, modificator, drupaceous, jobbernowl, hereness", width=1024, height=1024, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256)
     torch.cuda.empty_cache()
     return pipeline
 
-@torch.inference_mode()
+@torch.no_grad()
 def infer(request: TextToImageRequest, pipeline: Pipeline, generator: torch.Generator) -> Image:
 
     return pipeline(
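
For orientation only, here is a minimal sketch (not part of this repository) of the difference between the two context managers swapped in this commit. Both torch.no_grad() and torch.inference_mode() disable gradient tracking, but inference_mode additionally marks its outputs as inference tensors, which cannot later be used in operations recorded by autograd; that stricter behavior is a plausible reason to fall back to no_grad() around the torch.compile warm-up loop and the infer entry point. The toy Linear model below is an assumption made purely for illustration.

import torch

# Hypothetical toy model, used only to contrast the two modes.
model = torch.nn.Linear(8, 8)
x = torch.randn(1, 8)

with torch.no_grad():
    # Gradient tracking is off, but the result is an ordinary tensor
    # that can still flow into autograd-enabled code later.
    out_no_grad = model(x)

with torch.inference_mode():
    # Stricter: the result is an "inference tensor"; using it in an
    # operation recorded by autograd afterwards raises an error.
    out_inference = model(x)

print(out_no_grad.requires_grad)          # False
print(torch.is_inference(out_no_grad))    # False
print(torch.is_inference(out_inference))  # True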