Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -24,7 +24,7 @@ else:
|
|
24 |
def genie(prompt, negative_prompt, height, width, scale, steps, seed, prompt_2, negative_prompt_2):
    """Generate an image in two stages: SDXL base pipeline, then refiner.

    Parameters
    ----------
    prompt / prompt_2 : str
        Positive prompts for the primary and secondary text encoders.
    negative_prompt / negative_prompt_2 : str
        Negative prompts for the two encoders.
    height, width : int
        Output image dimensions in pixels.
    scale : float
        Classifier-free guidance scale.
    steps : int
        Number of inference steps for the base pass.
    seed : int
        RNG seed for reproducible generation.

    Returns
    -------
    The refined image (first image of the refiner's output batch).
    """
    # Seed a device-local generator so the same inputs reproduce the same image.
    generator = torch.Generator(device=device).manual_seed(seed)

    # Base pass: emit latents (output_type="latent") so the refiner can
    # consume them directly without an intermediate decode.
    int_image = pipe(
        prompt,
        prompt_2=prompt_2,
        negative_prompt_2=negative_prompt_2,
        negative_prompt=negative_prompt,
        height=height,
        width=width,
        num_inference_steps=steps,
        guidance_scale=scale,
        num_images_per_prompt=1,
        generator=generator,
        output_type="latent",
    ).images

    # Fix: also forward prompt_2/negative_prompt_2 to the refiner — the
    # original only passed them to the base pipeline, so the secondary
    # encoder prompts had no effect on the refinement stage.
    image = refiner(
        prompt=prompt,
        prompt_2=prompt_2,
        negative_prompt=negative_prompt,
        negative_prompt_2=negative_prompt_2,
        image=int_image,
    ).images[0]
    return image
|
29 |
|
30 |
gr.Interface(fn=genie, inputs=[gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
|
|
|
24 |
def genie(prompt, negative_prompt, height, width, scale, steps, seed, prompt_2, negative_prompt_2):
    """Two-stage SDXL generation: base pipeline to latents, then refiner.

    Both stages receive the primary and secondary (``*_2``) positive and
    negative prompts; ``seed`` makes the run reproducible. Returns the
    first image produced by the refiner.
    """
    # Deterministic generator bound to the target device.
    rng = torch.Generator(device=device).manual_seed(seed)

    # Keyword bundle for the base pass; latents are requested so the
    # refiner can take them as direct input.
    base_kwargs = dict(
        prompt_2=prompt_2,
        negative_prompt_2=negative_prompt_2,
        negative_prompt=negative_prompt,
        height=height,
        width=width,
        num_inference_steps=steps,
        guidance_scale=scale,
        num_images_per_prompt=1,
        generator=rng,
        output_type="latent",
    )
    latents = pipe(prompt, **base_kwargs).images

    # Refinement pass over the base latents, with the full prompt set.
    refined = refiner(
        prompt=prompt,
        prompt_2=prompt_2,
        negative_prompt=negative_prompt,
        negative_prompt_2=negative_prompt_2,
        image=latents,
    )
    return refined.images[0]
|
29 |
|
30 |
gr.Interface(fn=genie, inputs=[gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
|