craftgamesnetwork committed
Commit 31908a7 · verified · 1 Parent(s): 4a11c01

Update app.py

Files changed (1): app.py +0 -4
app.py CHANGED
@@ -61,27 +61,23 @@ def generate(
     if not use_img2img:
         pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16)
         pipe.enable_model_cpu_offload()
-        pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
         pipe.unet.set_default_attn_processor()
 
         if use_vae:
             vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
             pipe = DiffusionPipeline.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
             pipe.enable_model_cpu_offload()
-            pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
             pipe.unet.set_default_attn_processor()
 
     if use_img2img:
         pipe = AutoPipelineForImage2Image.from_pretrained(model, torch_dtype=torch.float16)
         pipe.enable_model_cpu_offload()
-        pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
         pipe.unet.set_default_attn_processor()
 
         if use_vae:
             vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
             pipe = AutoPipelineForImage2Image.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
             pipe.enable_model_cpu_offload()
-            pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
             pipe.unet.set_default_attn_processor()
 
     response = requests.get(url)
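All four deleted lines wrapped the UNet in torch.compile(..., mode="reduce-overhead", fullgraph=True) immediately after enable_model_cpu_offload(). The commit message does not say why they were dropped; a plausible reading (an assumption, not stated in the commit) is that full-graph compilation of a UNet whose submodules are shuffled by CPU-offload hooks adds a long first-call warm-up and can recompile or fail on a Space. Below is a minimal sketch of the text-to-image branch as it stands after this commit, with a hypothetical opt-in flag if compilation is ever wanted again; compile_unet and the model/vae ids are placeholders, not taken from app.py.

import torch
from diffusers import AutoencoderKL, DiffusionPipeline

model = "stabilityai/stable-diffusion-xl-base-1.0"   # placeholder model id
vaecall = "madebyollin/sdxl-vae-fp16-fix"            # placeholder VAE id
use_vae = False
compile_unet = False  # hypothetical flag; after this commit the app never compiles

pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16)
if use_vae:
    vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
    pipe = DiffusionPipeline.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()      # keep VRAM low by paging modules to CPU on demand
if compile_unet:
    # This is what the removed lines did; kept opt-in here because compiling an
    # offloaded UNet costs a long warm-up and may retrigger compilation per input shape.
    pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
pipe.unet.set_default_attn_processor()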