craftgamesnetwork committed
Commit 4a11c01 · verified · 1 Parent(s): e27d4af

Update app.py

Files changed (1)
  1. app.py +14 -3
app.py CHANGED
@@ -59,18 +59,30 @@ def generate(
     if torch.cuda.is_available():
 
         if not use_img2img:
-            pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16)
+            pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16)
+            pipe.enable_model_cpu_offload()
+            pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+            pipe.unet.set_default_attn_processor()
 
             if use_vae:
                 vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
                 pipe = DiffusionPipeline.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
+                pipe.enable_model_cpu_offload()
+                pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+                pipe.unet.set_default_attn_processor()
 
         if use_img2img:
             pipe = AutoPipelineForImage2Image.from_pretrained(model, torch_dtype=torch.float16)
-
+            pipe.enable_model_cpu_offload()
+            pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+            pipe.unet.set_default_attn_processor()
+
             if use_vae:
                 vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
                 pipe = AutoPipelineForImage2Image.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
+                pipe.enable_model_cpu_offload()
+                pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+                pipe.unet.set_default_attn_processor()
 
             response = requests.get(url)
             init_image = Image.open(BytesIO(response.content)).convert("RGB")
@@ -82,7 +94,6 @@ def generate(
 
     else:
        pipe.to(device)
-       pipe.unet.set_default_attn_processor()
 
    generator = torch.Generator().manual_seed(seed)
 
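
The same three post-load calls (enable_model_cpu_offload, torch.compile on the UNet, set_default_attn_processor) are now repeated in all four loading branches. Below is a hypothetical refactor sketch, not part of the commit: the helper name load_pipeline and its signature are invented here, while the classes and settings follow the diff above.

import torch
from diffusers import AutoencoderKL, AutoPipelineForImage2Image, DiffusionPipeline

def load_pipeline(model, use_img2img=False, use_vae=False, vaecall=None):
    """Load a text-to-image or image-to-image pipeline in fp16 and apply the
    post-load steps added by this commit (CPU offload, compiled UNet, default
    attention processors)."""
    pipeline_cls = AutoPipelineForImage2Image if use_img2img else DiffusionPipeline

    kwargs = {"torch_dtype": torch.float16}
    if use_vae:
        # Optional replacement VAE, loaded in fp16 like the rest of the pipeline.
        kwargs["vae"] = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)

    pipe = pipeline_cls.from_pretrained(model, **kwargs)

    # Same order as in the diff: offload idle submodules to CPU, compile the
    # UNet, then reset its attention processors to the defaults.
    pipe.enable_model_cpu_offload()
    pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
    pipe.unet.set_default_attn_processor()
    return pipe

A side effect of folding the VAE into from_pretrained kwargs is that the base pipeline is loaded only once when a custom VAE is requested, whereas the branch-per-case version above loads it twice.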