prithivMLmods committed
Commit b3a3e40 · verified · 1 Parent(s): 2df2030

Update app.py

Files changed (1):
  1. app.py  +176 -407
app.py CHANGED
@@ -1,413 +1,182 @@
  import gradio as gr
- import spaces
+ from transformers.image_utils import load_image
+ from threading import Thread
+ import time
  import torch
- from diffusers import AutoencoderKL, TCDScheduler
- from diffusers.models.model_loading_utils import load_state_dict
- from huggingface_hub import hf_hub_download
-
- from controlnet_union import ControlNetModel_Union
- from pipeline_fill_sd_xl import StableDiffusionXLFillPipeline
-
- from PIL import Image, ImageDraw
+ import spaces
+ import cv2
  import numpy as np
-
- config_file = hf_hub_download(
-     "xinsir/controlnet-union-sdxl-1.0",
-     filename="config_promax.json",
- )
-
- config = ControlNetModel_Union.load_config(config_file)
- controlnet_model = ControlNetModel_Union.from_config(config)
- model_file = hf_hub_download(
-     "xinsir/controlnet-union-sdxl-1.0",
-     filename="diffusion_pytorch_model_promax.safetensors",
- )
-
- sstate_dict = load_state_dict(model_file)
- model, _, _, _, _ = ControlNetModel_Union._load_pretrained_model(
-     controlnet_model, sstate_dict, model_file, "xinsir/controlnet-union-sdxl-1.0"
+ from PIL import Image
+ from transformers import (
+     Qwen2VLForConditionalGeneration,
+     AutoProcessor,
+     TextIteratorStreamer,
  )
- model.to(device="cuda", dtype=torch.float16)
-
- vae = AutoencoderKL.from_pretrained(
-     "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
- ).to("cuda")
-
- # Initialize both pipelines and store them in a dictionary
- pipelines = {
-     "RealVisXL V5.0 Lightning": StableDiffusionXLFillPipeline.from_pretrained(
-         "SG161222/RealVisXL_V5.0_Lightning",
-         torch_dtype=torch.float16,
-         vae=vae,
-         controlnet=model,
-         variant="fp16",
-     ).to("cuda"),
-     "RealVisXL V4.0 Lightning": StableDiffusionXLFillPipeline.from_pretrained(
-         "SG161222/RealVisXL_V4.0_Lightning",
-         torch_dtype=torch.float16,
-         vae=vae,
-         controlnet=model,
-         variant="fp16",
-     ).to("cuda"),
- }
-
- for pipe in pipelines.values():
-     pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
-
-
- def prepare_image_and_mask(image, width, height, overlap_percentage, resize_option, custom_resize_percentage, alignment, overlap_left, overlap_right, overlap_top, overlap_bottom):
-     target_size = (width, height)
-
-     # Calculate the scaling factor to fit the image within the target size
-     scale_factor = min(target_size[0] / image.width, target_size[1] / image.height)
-     new_width = int(image.width * scale_factor)
-     new_height = int(image.height * scale_factor)
-
-     # Resize the source image to fit within target size
-     source = image.resize((new_width, new_height), Image.LANCZOS)
-
-     # Apply resize option using percentages
-     if resize_option == "Full":
-         resize_percentage = 100
-     elif resize_option == "50%":
-         resize_percentage = 50
-     elif resize_option == "33%":
-         resize_percentage = 33
-     elif resize_option == "25%":
-         resize_percentage = 25
-     else: # Custom
-         resize_percentage = custom_resize_percentage
-
-     # Calculate new dimensions based on percentage
-     resize_factor = resize_percentage / 100
-     new_width = int(source.width * resize_factor)
-     new_height = int(source.height * resize_factor)
-
-     # Ensure minimum size of 64 pixels
-     new_width = max(new_width, 64)
-     new_height = max(new_height, 64)
-
-     # Resize the image
-     source = source.resize((new_width, new_height), Image.LANCZOS)
-
-     # Calculate the overlap in pixels based on the percentage
-     overlap_x = int(new_width * (overlap_percentage / 100))
-     overlap_y = int(new_height * (overlap_percentage / 100))
-
-     # Ensure minimum overlap of 1 pixel
-     overlap_x = max(overlap_x, 1)
-     overlap_y = max(overlap_y, 1)
-
-     # Calculate margins based on alignment
-     if alignment == "Middle":
-         margin_x = (target_size[0] - new_width) // 2
-         margin_y = (target_size[1] - new_height) // 2
-     elif alignment == "Left":
-         margin_x = 0
-         margin_y = (target_size[1] - new_height) // 2
-     elif alignment == "Right":
-         margin_x = target_size[0] - new_width
-         margin_y = (target_size[1] - new_height) // 2
-     elif alignment == "Top":
-         margin_x = (target_size[0] - new_width) // 2
-         margin_y = 0
-     elif alignment == "Bottom":
-         margin_x = (target_size[0] - new_width) // 2
-         margin_y = target_size[1] - new_height
-
-     # Adjust margins to eliminate gaps
-     margin_x = max(0, min(margin_x, target_size[0] - new_width))
-     margin_y = max(0, min(margin_y, target_size[1] - new_height))
-
-     # Create a new background image and paste the resized source image
-     background = Image.new('RGB', target_size, (255, 255, 255))
-     background.paste(source, (margin_x, margin_y))
-
-     # Create the mask
-     mask = Image.new('L', target_size, 255)
-     mask_draw = ImageDraw.Draw(mask)
-
-     # Calculate overlap areas
-     white_gaps_patch = 2
-
-     left_overlap = margin_x + overlap_x if overlap_left else margin_x + white_gaps_patch
-     right_overlap = margin_x + new_width - overlap_x if overlap_right else margin_x + new_width - white_gaps_patch
-     top_overlap = margin_y + overlap_y if overlap_top else margin_y + white_gaps_patch
-     bottom_overlap = margin_y + new_height - overlap_y if overlap_bottom else margin_y + new_height - white_gaps_patch
-
-     if alignment == "Left":
-         left_overlap = margin_x + overlap_x if overlap_left else margin_x
-     elif alignment == "Right":
-         right_overlap = margin_x + new_width - overlap_x if overlap_right else margin_x + new_width
-     elif alignment == "Top":
-         top_overlap = margin_y + overlap_y if overlap_top else margin_y
-     elif alignment == "Bottom":
-         bottom_overlap = margin_y + new_height - overlap_y if overlap_bottom else margin_y + new_height
-
-     # Draw the mask
-     mask_draw.rectangle([
-         (left_overlap, top_overlap),
-         (right_overlap, bottom_overlap)
-     ], fill=0)
-
-     return background, mask
-
- @spaces.GPU(duration=28)
- def infer(image, width, height, overlap_percentage, num_inference_steps, resize_option, custom_resize_percentage, prompt_input, alignment, overlap_left, overlap_right, overlap_top, overlap_bottom, selected_model):
-     background, mask = prepare_image_and_mask(image, width, height, overlap_percentage, resize_option, custom_resize_percentage, alignment, overlap_left, overlap_right, overlap_top, overlap_bottom)
-
-     cnet_image = background.copy()
-     cnet_image.paste(0, (0, 0), mask)
-
-     final_prompt = f"{prompt_input} , high quality, 4k"
-
-     # Access the selected pipeline from the dictionary
-     pipe = pipelines[selected_model]
-
-     (
-         prompt_embeds,
-         negative_prompt_embeds,
-         pooled_prompt_embeds,
-         negative_pooled_prompt_embeds,
-     ) = pipe.encode_prompt(final_prompt, "cuda", True)
-
-     # Generate the image
-     for image in pipe(
-         prompt_embeds=prompt_embeds,
-         negative_prompt_embeds=negative_prompt_embeds,
-         pooled_prompt_embeds=pooled_prompt_embeds,
-         negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
-         image=cnet_image,
-         num_inference_steps=num_inference_steps
-     ):
-         pass # Wait for the generation to complete
-     generated_image = image # Get the last image
-
-     generated_image = generated_image.convert("RGBA")
-     cnet_image.paste(generated_image, (0, 0), mask)
-
-     return cnet_image
-
-
- def clear_result():
-     """Clears the result Image."""
-     return gr.update(value=None)
-
-
- def preload_presets(target_ratio, ui_width, ui_height):
-     """Updates the width and height sliders based on the selected aspect ratio."""
-     if target_ratio == "9:16":
-         changed_width = 720
-         changed_height = 1280
-         return changed_width, changed_height, gr.update()
-     elif target_ratio == "16:9":
-         changed_width = 1280
-         changed_height = 720
-         return changed_width, changed_height, gr.update()
-     elif target_ratio == "1:1":
-         changed_width = 1024
-         changed_height = 1024
-         return changed_width, changed_height, gr.update()
-     elif target_ratio == "Custom":
-         return ui_width, ui_height, gr.update(open=True)
-
-
- def select_the_right_preset(user_width, user_height):
-     if user_width == 720 and user_height == 1280:
-         return "9:16"
-     elif user_width == 1280 and user_height == 720:
-         return "16:9"
-     elif user_width == 1024 and user_height == 1024:
-         return "1:1"
+ from transformers import Qwen2_5_VLForConditionalGeneration
+ from pdf2image import convert_from_path
+
+ # Helper Functions
+ def progress_bar_html(label: str, primary_color: str = "#4B0082", secondary_color: str = "#9370DB") -> str:
+     """
+     Returns an HTML snippet for a thin animated progress bar with a label.
+     Colors can be customized; default colors are used for Qwen2VL/Aya-Vision.
+     """
+     return f'''
+     <div style="display: flex; align-items: center;">
+         <span style="margin-right: 10px; font-size: 14px;">{label}</span>
+         <div style="width: 110px; height: 5px; background-color: {secondary_color}; border-radius: 2px; overflow: hidden;">
+             <div style="width: 100%; height: 100%; background-color: {primary_color}; animation: loading 1.5s linear infinite;"></div>
+         </div>
+     </div>
+     <style>
+     @keyframes loading {{
+         0% {{ transform: translateX(-100%); }}
+         100% {{ transform: translateX(100%); }}
+     }}
+     </style>
+     '''
+
+ def downsample_video(video_path):
+     """
+     Downsamples a video file by extracting 10 evenly spaced frames.
+     Returns a list of tuples (PIL.Image, timestamp).
+     """
+     vidcap = cv2.VideoCapture(video_path)
+     total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
+     fps = vidcap.get(cv2.CAP_PROP_FPS)
+     frames = []
+     if total_frames <= 0 or fps <= 0:
+         vidcap.release()
+         return frames
+     frame_indices = np.linspace(0, total_frames - 1, 10, dtype=int)
+     for i in frame_indices:
+         vidcap.set(cv2.CAP_PROP_POS_FRAMES, i)
+         success, image = vidcap.read()
+         if success:
+             image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+             pil_image = Image.fromarray(image)
+             timestamp = round(i / fps, 2)
+             frames.append((pil_image, timestamp))
+     vidcap.release()
+     return frames
+
+ # Model and Processor Setup
+ QV_MODEL_ID = "prithivMLmods/Qwen2-VL-OCR-2B-Instruct"
+ qwen_processor = AutoProcessor.from_pretrained(QV_MODEL_ID, trust_remote_code=True)
+ qwen_model = Qwen2VLForConditionalGeneration.from_pretrained(
+     QV_MODEL_ID,
+     trust_remote_code=True,
+     torch_dtype=torch.float16
+ ).to("cuda").eval()
+
+ ROLMOCR_MODEL_ID = "reducto/RolmOCR"
+ rolmocr_processor = AutoProcessor.from_pretrained(ROLMOCR_MODEL_ID, trust_remote_code=True)
+ rolmocr_model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
+     ROLMOCR_MODEL_ID,
+     trust_remote_code=True,
+     torch_dtype=torch.bfloat16
+ ).to("cuda").eval()
+
+ # Main Inference Function
+ @spaces.GPU
+ def model_inference(message, history, use_rolmocr):
+     text = message["text"].strip()
+     files = message.get("files", [])
+
+     if not text and not files:
+         yield "Error: Please input a text query or provide files (images, videos, PDFs)."
+         return
+
+     # Process files: images, videos, PDFs
+     image_list = []
+     for idx, file in enumerate(files):
+         if file.lower().endswith(".pdf"):
+             try:
+                 pdf_images = convert_from_path(file)
+                 for page_num, img in enumerate(pdf_images, start=1):
+                     label = f"PDF {idx+1} Page {page_num}:"
+                     image_list.append((label, img))
+             except Exception as e:
+                 yield f"Error converting PDF: {str(e)}"
+                 return
+         elif file.lower().endswith((".mp4", ".avi", ".mov")):
+             frames = downsample_video(file)
+             if not frames:
+                 yield "Error: Could not extract frames from the video."
+                 return
+             for frame, timestamp in frames:
+                 label = f"Video {idx+1} Frame {timestamp}:"
+                 image_list.append((label, frame))
+         else:
+             try:
+                 img = load_image(file)
+                 label = f"Image {idx+1}:"
+                 image_list.append((label, img))
+             except Exception as e:
+                 yield f"Error loading image: {str(e)}"
+                 return
+
+     # Build content list
+     content = [{"type": "text", "text": text}]
+     for label, img in image_list:
+         content.append({"type": "text", "text": label})
+         content.append({"type": "image", "image": img})
+
+     messages = [{"role": "user", "content": content}]
+
+     # Select processor and model
+     if use_rolmocr:
+         processor = rolmocr_processor
+         model = rolmocr_model
+         model_name = "RolmOCR"
      else:
-         return "Custom"
-
-
- def toggle_custom_resize_slider(resize_option):
-     return gr.update(visible=(resize_option == "Custom"))
-
-
- def update_history(new_image, history):
-     """Updates the history gallery with the new image."""
-     if history is None:
-         history = []
-     history.insert(0, new_image)
-     return history
-
- # CSS and title (unchanged)
- css = """
- h1 {
-     text-align: center;
-     display: block;
- }
- """
-
- title = """<h1>Image Outpaint Expand 🪃</h1>"""
-
- with gr.Blocks(css=css) as demo:
-     with gr.Column():
-         gr.HTML(title)
-
-         with gr.Row():
-             with gr.Column():
-                 input_image = gr.Image(
-                     type="pil",
-                     label="Input Image"
-                 )
-
-                 with gr.Row():
-                     with gr.Column(scale=2):
-                         prompt_input = gr.Textbox(label="Prompt (Optional)")
-                     with gr.Column(scale=1):
-                         run_button = gr.Button("Re-Generate Image / Diffusers Outpaint Image Lightning / Lightning v4, v5", elem_classes="submit-btn")
-
-                 with gr.Row():
-                     model_selector = gr.Dropdown(
-                         label="Select Model",
-                         choices=list(pipelines.keys()),
-                         value="RealVisXL V5.0 Lightning",
-                     )
-
-                 with gr.Row():
-                     target_ratio = gr.Radio(
-                         label="Expected Ratio",
-                         choices=["9:16", "16:9", "1:1", "Custom"],
-                         value="9:16",
-                         scale=2
-                     )
-
-                     alignment_dropdown = gr.Dropdown(
-                         choices=["Middle", "Left", "Right", "Top", "Bottom"],
-                         value="Middle",
-                         label="Alignment"
-                     )
-
-                 with gr.Accordion(label="Advanced settings", open=False) as settings_panel:
-                     with gr.Column():
-                         with gr.Row():
-                             width_slider = gr.Slider(
-                                 label="Width",
-                                 minimum=720,
-                                 maximum=1536,
-                                 step=8,
-                                 value=720,
-                             )
-                             height_slider = gr.Slider(
-                                 label="Height",
-                                 minimum=720,
-                                 maximum=1536,
-                                 step=8,
-                                 value=1280,
-                             )
-
-                         num_inference_steps = gr.Slider(label="Steps", minimum=4, maximum=12, step=1, value=8)
-                         with gr.Group():
-                             overlap_percentage = gr.Slider(
-                                 label="Mask overlap (%)",
-                                 minimum=1,
-                                 maximum=50,
-                                 value=10,
-                                 step=1
-                             )
-                             with gr.Row():
-                                 overlap_top = gr.Checkbox(label="Overlap Top", value=True)
-                                 overlap_right = gr.Checkbox(label="Overlap Right", value=True)
-                             with gr.Row():
-                                 overlap_left = gr.Checkbox(label="Overlap Left", value=True)
-                                 overlap_bottom = gr.Checkbox(label="Overlap Bottom", value=True)
-                         with gr.Row():
-                             resize_option = gr.Radio(
-                                 label="Resize input image",
-                                 #choices=["Full", "50%", "33%", "25%", "Custom"],
-                                 choices=["Full", "50%", "33%", "25%", "Custom"],
-                                 value="Full"
-                             )
-                             custom_resize_percentage = gr.Slider(
-                                 label="Custom resize (%)",
-                                 minimum=1,
-                                 maximum=100,
-                                 step=1,
-                                 value=50,
-                                 visible=False
-                             )
-
-                 gr.Examples(
-                     examples=[
-                         ["./examples/3.jpg", 1024, 1024, "Top"],
-                         ["./examples/4.jpg", 1024, 1024, "Middle"],
-                         ["./examples/2.png", 720, 1280, "Left"],
-                         ["./examples/1.png", 1280, 720, "Bottom"],
-                         ["./examples/5.jpg", 1024, 1024, "Bottom"],
-                     ],
-                     inputs=[input_image, width_slider, height_slider, alignment_dropdown],
-                 )
-
-             with gr.Column():
-                 result = gr.Image(
-                     interactive=False,
-                     label="Generated Image",
-                     format="png",
-                 )
-                 history_gallery = gr.Gallery(label="History", columns=6, object_fit="contain", interactive=False)
-
-     target_ratio.change(
-         fn=preload_presets,
-         inputs=[target_ratio, width_slider, height_slider],
-         outputs=[width_slider, height_slider, settings_panel],
-         queue=False
-     )
-
-     width_slider.change(
-         fn=select_the_right_preset,
-         inputs=[width_slider, height_slider],
-         outputs=[target_ratio],
-         queue=False
-     )
-
-     height_slider.change(
-         fn=select_the_right_preset,
-         inputs=[width_slider, height_slider],
-         outputs=[target_ratio],
-         queue=False
-     )
-
-     resize_option.change(
-         fn=toggle_custom_resize_slider,
-         inputs=[resize_option],
-         outputs=[custom_resize_percentage],
-         queue=False
-     )
-
-     run_button.click(
-         fn=clear_result,
-         inputs=None,
-         outputs=result,
-     ).then(
-         fn=infer,
-         inputs=[input_image, width_slider, height_slider, overlap_percentage, num_inference_steps,
-                 resize_option, custom_resize_percentage, prompt_input, alignment_dropdown,
-                 overlap_left, overlap_right, overlap_top, overlap_bottom, model_selector],
-         outputs=result,
-     ).then(
-         fn=lambda x, history: update_history(x, history),
-         inputs=[result, history_gallery],
-         outputs=history_gallery,
-     )
-
-     prompt_input.submit(
-         fn=clear_result,
-         inputs=None,
-         outputs=result,
-     ).then(
-         fn=infer,
-         inputs=[input_image, width_slider, height_slider, overlap_percentage, num_inference_steps,
-                 resize_option, custom_resize_percentage, prompt_input, alignment_dropdown,
-                 overlap_left, overlap_right, overlap_top, overlap_bottom, model_selector],
-         outputs=result,
-     ).then(
-         fn=lambda x, history: update_history(x, history),
-         inputs=[result, history_gallery],
-         outputs=history_gallery,
-     )
+         processor = qwen_processor
+         model = qwen_model
+         model_name = "Qwen2VL OCR"
+
+     prompt_full = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+     all_images = [item["image"] for item in content if item["type"] == "image"]
+     inputs = processor(
+         text=[prompt_full],
+         images=all_images if all_images else None,
+         return_tensors="pt",
+         padding=True,
+     ).to("cuda")
+
+     streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
+     generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
+     thread = Thread(target=model.generate, kwargs=generation_kwargs)
+     thread.start()
+     buffer = ""
+     yield progress_bar_html(f"Processing with {model_name}")
+     for new_text in streamer:
+         buffer += new_text
+         buffer = buffer.replace("<|im_end|>", "")
+         time.sleep(0.01)
+         yield buffer
+
+ # Gradio Interface
+ examples = [
+     [{"text": "OCR the Text in the Image", "files": ["rolm/1.jpeg"]}],
+     [{"text": "Explain the Ad in Detail", "files": ["examples/videoplayback.mp4"]}],
+     [{"text": "OCR the Image", "files": ["rolm/3.jpeg"]}],
+     [{"text": "Extract as JSON table from the table", "files": ["examples/4.jpg"]}],
+ ]
+
+ demo = gr.ChatInterface(
+     fn=model_inference,
+     description="# **Multimodal OCR with Model Selection**",
+     examples=examples,
+     textbox=gr.MultimodalTextbox(
+         label="Query Input",
+         file_types=["image", "video", "pdf"],
+         file_count="multiple",
+         placeholder="Input your query and optionally upload image(s), video(s), or PDF(s). Select the model using the checkbox."
+     ),
+     stop_btn="Stop Generation",
+     multimodal=True,
+     cache_examples=False,
+     additional_inputs=[gr.Checkbox(label="Use RolmOCR", value=True)],
+ )

- demo.queue(max_size=20).launch(share=False, ssr_mode=False, show_error=True)
+ demo.launch(debug=True)
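
Note on the new control flow: model_inference is a generator, so gr.ChatInterface re-renders the assistant reply with each yielded string: first the progress-bar HTML, then the transcript as it grows. A minimal sketch of driving the generator directly, outside the UI (hypothetical: it assumes the models above are already loaded, and "sample.jpeg" is a placeholder path, not a file from this repo):

    # Hypothetical smoke test; not part of this commit.
    message = {"text": "OCR the Text in the Image", "files": ["sample.jpeg"]}
    last = ""
    for chunk in model_inference(message, history=[], use_rolmocr=True):
        last = chunk  # each yield replaces the previous partial reply
    print(last)  # final OCR transcript (the first yield was progress-bar HTML)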