Manjushri committed
Commit 9cfb47a · verified · 1 parent: 7826c41

Update app.py

Files changed (1)
  1. app.py +4 -4
app.py CHANGED
@@ -12,14 +12,14 @@ torch.cuda.max_memory_allocated(device=device)
 torch.cuda.empty_cache()
 pipe = StableCascadeCombinedPipeline.from_pretrained("stabilityai/stable-cascade", variant="bf16", torch_dtype=torch.bfloat16)
 pipe.enable_model_cpu_offload()
-#pipe.enable_xformers_memory_efficient_attention()
-#pipe = pipe.to(device)
+pipe.enable_xformers_memory_efficient_attention()
+pipe = pipe.to(device)
 torch.cuda.empty_cache()
 
 
 def genie (Prompt, negative_prompt, height, width, scale, steps, seed, upscale):
     generator = np.random.seed(0) if seed == 0 else torch.manual_seed(seed)
-    #torch.cuda.empty_cache()
+    torch.cuda.empty_cache()
     image=pipe(prompt=Prompt,
         negative_prompt="",
         num_inference_steps=20,
@@ -30,7 +30,7 @@ def genie (Prompt, negative_prompt, height, width, scale, steps, seed, upscale):
     #int_image = pipe(prompt=Prompt, negative_prompt=negative_prompt, num_inference_steps=steps, guidance_scale=scale, width=width, height=height, output_type="latent").images #
     #torch.cuda.empty_cache()
     #image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=.99).images[0]
-    #torch.cuda.empty_cache()
+    torch.cuda.empty_cache()
     return image
 
 gr.Interface(fn=genie, inputs=[gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
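For reference, a minimal standalone sketch of the pipeline setup this commit arrives at. The prompt and output filename below are illustrative placeholders, not part of the Space's code, and the diff's explicit pipe.to(device) is omitted here because enable_model_cpu_offload() already manages device placement in diffusers.

import torch
from diffusers import StableCascadeCombinedPipeline

# Load the combined prior+decoder pipeline in bfloat16, as in app.py
pipe = StableCascadeCombinedPipeline.from_pretrained(
    "stabilityai/stable-cascade", variant="bf16", torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()                    # stream submodules to the GPU on demand
pipe.enable_xformers_memory_efficient_attention()  # enabled by this commit
torch.cuda.empty_cache()

# Illustrative one-off generation (prompt and filename are assumptions)
image = pipe(prompt="an astronaut riding a horse",
             negative_prompt="",
             num_inference_steps=20).images[0]
image.save("output.png")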