Commit 0d464c8 (verified) · Parent: 3b6278a · Author: piyk

Update app.py
Files changed (1): app.py (+3 -3)
app.py CHANGED
@@ -80,7 +80,7 @@ with gr.Blocks(css=css) as demo:
     with gr.Row():
         prompt = gr.Textbox(label="Prompt", show_label=False, info="", placeholder="Describe the image you want")
         run_button = gr.Button("Generate", scale=0)
-    result = gr.Gallery(label="Generated AI Images", elem_id="gallery")
+    resultf = gr.Gallery(label="Generated AI Images", elem_id="gallery")
     with gr.Accordion("Advanced options", open=False):
         with gr.Row():
             num_inference_steps = gr.Slider(label="Number of Inference Steps", info="The number of denoising steps of the image. More denoising steps usually lead to a higher quality image at the cost of slower inference", minimum=1, maximum=50, value=25, step=1)
@@ -96,7 +96,7 @@ with gr.Blocks(css=css) as demo:
         examples=examples,
         fn=generate_image,
         inputs=[prompt, num_inference_steps, height, width, guidance_scale, seed, num_images_per_prompt],
-        outputs=[result],
+        outputs=[resultf],
         cache_examples=CACHE_EXAMPLES
     )

@@ -107,7 +107,7 @@ with gr.Blocks(css=css) as demo:
         ],
         fn=generate_image,
         inputs=[prompt, num_inference_steps, height, width, guidance_scale, seed, num_images_per_prompt],
-        outputs=[result],
+        outputs=[resultf],
     )

 demo.queue().launch(share=False)
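
For context, a minimal sketch of the Gradio wiring this rename touches (not the repository's full app.py): the Gallery component is an ordinary Python variable, so every outputs= list that references it, including both gr.Examples blocks in the diff and any run_button.click handler, must be updated to the new name together. The generate_image stub and the examples list below are simplified placeholders, and the slider set is trimmed to keep the sketch self-contained.

import gradio as gr

def generate_image(prompt, num_inference_steps):
    # Placeholder: the real app runs a diffusion pipeline and returns generated images.
    return []

examples = [["a watercolor fox", 25]]  # placeholder example prompts

with gr.Blocks() as demo:
    with gr.Row():
        prompt = gr.Textbox(label="Prompt", show_label=False, placeholder="Describe the image you want")
        run_button = gr.Button("Generate", scale=0)
    # Renamed component: everything that outputs images must point at this variable.
    resultf = gr.Gallery(label="Generated AI Images", elem_id="gallery")
    num_inference_steps = gr.Slider(label="Number of Inference Steps", minimum=1, maximum=50, value=25, step=1)

    # The examples widget and the button both render into the same gallery,
    # so both outputs= lists reference resultf.
    gr.Examples(
        examples=examples,
        fn=generate_image,
        inputs=[prompt, num_inference_steps],
        outputs=[resultf],
        cache_examples=False,  # the real app uses CACHE_EXAMPLES
    )
    run_button.click(
        fn=generate_image,
        inputs=[prompt, num_inference_steps],
        outputs=[resultf],
    )

if __name__ == "__main__":
    demo.queue().launch(share=False)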