yeq6x committed on
Commit
3f17ce4
·
1 Parent(s): bb35a51
Files changed (1) hide show
  1. app.py +22 -26
app.py CHANGED
@@ -107,29 +107,6 @@ def predict_image(cond_image, prompt, negative_prompt, controlnet_conditioning_s
107
 
108
  return image
109
 
110
- # block = gr.Blocks().queue()
111
-
112
- # with block:
113
- # with gr.Row():
114
- # with gr.Column():
115
- # input_image = gr.Image(sources=None, type="pil") # None for upload, ctrl+v and webcam
116
- # prompt = gr.Textbox(label="Prompt")
117
- # negative_prompt = gr.Textbox(label="Negative prompt", value="Logo,Watermark,Text,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers")
118
- # num_steps = gr.Slider(label="Number of steps", minimum=25, maximum=100, value=50, step=1)
119
- # controlnet_conditioning_scale = gr.Slider(label="ControlNet conditioning scale", minimum=0.1, maximum=2.0, value=1.0, step=0.05)
120
- # seed = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, randomize=True,)
121
- # run_button = gr.Button(value="Run")
122
-
123
- # with gr.Column():
124
- # with gr.Row():
125
- # pose_image_output = gr.Image(label="Pose Image", type="pil", interactive=False)
126
- # generated_image_output = gr.Image(label="Generated Image", type="pil", interactive=False)
127
-
128
- # ips = [input_image, prompt, negative_prompt, num_steps, controlnet_conditioning_scale, seed]
129
- # run_button.click(fn=process, inputs=ips, outputs=[pose_image_output, generated_image_output])
130
-
131
-
132
- # block.launch(debug = True)
133
 
134
  def convert_pil_to_opencv(pil_image):
135
  return np.array(pil_image)
@@ -243,8 +220,27 @@ def outpaint_image(image):
243
 
244
  return image
245
 
246
- # Gradioアプリケーション
247
- with gr.Blocks() as demo:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
248
  gr.Markdown("## Position Map Visualizer")
249
 
250
  with gr.Row():
@@ -271,4 +267,4 @@ with gr.Blocks() as demo:
271
  predict_map_btn.click(predict_image, inputs=[img1, prompt, negative_prompt, controlnet_conditioning_scale], outputs=img2)
272
  visualize_3d_btn.click(visualize_3d, inputs=[img2, img1], outputs=reconstruction_output)
273
 
274
- demo.launch()
 
107
 
108
  return image
109
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
110
 
111
  def convert_pil_to_opencv(pil_image):
112
  return np.array(pil_image)
 
220
 
221
  return image
222
 
223
+ block = gr.Blocks().queue()
224
+
225
+ with block:
226
+ with gr.Row():
227
+ with gr.Column():
228
+ input_image = gr.Image(sources=None, type="pil") # None for upload, ctrl+v and webcam
229
+ prompt = gr.Textbox(label="Prompt")
230
+ negative_prompt = gr.Textbox(label="Negative prompt", value="Logo,Watermark,Text,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers")
231
+ num_steps = gr.Slider(label="Number of steps", minimum=25, maximum=100, value=50, step=1)
232
+ controlnet_conditioning_scale = gr.Slider(label="ControlNet conditioning scale", minimum=0.1, maximum=2.0, value=1.0, step=0.05)
233
+ seed = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, randomize=True,)
234
+ run_button = gr.Button(value="Run")
235
+
236
+ with gr.Column():
237
+ with gr.Row():
238
+ pose_image_output = gr.Image(label="Pose Image", type="pil", interactive=False)
239
+ generated_image_output = gr.Image(label="Generated Image", type="pil", interactive=False)
240
+
241
+ ips = [input_image, prompt, negative_prompt, num_steps, controlnet_conditioning_scale, seed]
242
+ run_button.click(fn=process, inputs=ips, outputs=[pose_image_output, generated_image_output])
243
+
244
  gr.Markdown("## Position Map Visualizer")
245
 
246
  with gr.Row():
 
267
  predict_map_btn.click(predict_image, inputs=[img1, prompt, negative_prompt, controlnet_conditioning_scale], outputs=img2)
268
  visualize_3d_btn.click(visualize_3d, inputs=[img2, img1], outputs=reconstruction_output)
269
 
270
+ block.launch(debug = True)