Manjushri committed
Commit bde16a7 · verified · 1 Parent(s): f69302e

Update app.py

Files changed (1)
  1. app.py +4 -10
app.py CHANGED
@@ -13,24 +13,18 @@ pipe = StableCascadeCombinedPipeline.from_pretrained("stabilityai/stable-cascade
 pipe.enable_xformers_memory_efficient_attention()
 pipe = pipe.to(device)
 torch.cuda.empty_cache()
-upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
-upscaler.enable_xformers_memory_efficient_attention()
-upscaler = upscaler.to(device)
+
 
 def genie (Prompt, negative_prompt, height, width, scale, steps, seed, upscale):
     generator = np.random.seed(0) if seed == 0 else torch.manual_seed(seed)
     torch.cuda.empty_cache()
     image = pipe(prompt=Prompt, negative_prompt=negative_prompt, num_inference_steps=15, prior_num_inference_steps=steps, prior_guidance_scale=scale, width=width, height=height).images[0]
-    if upscale == 'Yes':
-        upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
-        return upscaled
-    else:
-        return image
+    return image
 
 gr.Interface(fn=genie, inputs=[gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
     gr.Textbox(label='What you Do Not want the AI to generate. 77 Token Limit'),
-    gr.Slider(512, 1280, 768, step=128, label='Height'),
-    gr.Slider(512, 1280, 768, step=128, label='Width'),
+    gr.Slider(512, 2048, 768, step=128, label='Height'),
+    gr.Slider(512, 2048, 768, step=128, label='Width'),
     gr.Slider(1, maximum=15, value=5, step=.25, label='Guidance Scale'),
     gr.Slider(5, maximum=100, value=50, step=5, label='Number of Iterations'),
     gr.Slider(minimum=0, step=1, maximum=9999999999999999, randomize=True, label='Seed: 0 is Random'),
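
For context, a minimal sketch of the generation path that remains after this change: loading the Stable Cascade combined pipeline and calling it with the same keyword arguments genie() passes, at the new 2048-pixel slider maximum. Only the pipe() call signature comes from the diff above; the from_pretrained arguments, device fallback, example prompt, slider values, and the save step are illustrative assumptions that follow the diffusers documentation rather than this Space's actual setup above line 13.

import torch
from diffusers import StableCascadeCombinedPipeline

# Assumed setup: app.py loads the pipeline before the hunk shown above;
# variant and dtype here follow the diffusers docs, not necessarily the Space.
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = StableCascadeCombinedPipeline.from_pretrained(
    "stabilityai/stable-cascade", variant="bf16", torch_dtype=torch.bfloat16
)
pipe = pipe.to(device)

# Same keyword arguments genie() uses after this commit, with the Height and
# Width sliders at their new 2048 maximum (prompt and values are hypothetical).
image = pipe(
    prompt="a watercolor painting of a lighthouse at dusk",
    negative_prompt="blurry, low quality",
    num_inference_steps=15,        # decoder steps, hard-coded in genie()
    prior_num_inference_steps=50,  # "Number of Iterations" slider
    prior_guidance_scale=5.0,      # "Guidance Scale" slider
    width=2048,
    height=2048,
).images[0]
image.save("stable_cascade_2048.png")

With the SD x2 latent upscaler dropped from the Space, larger outputs now come directly from the wider Height/Width range rather than from a second upscaling pass.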