acecalisto3 committed on
Commit e487958 · verified · 1 Parent(s): 8561cc8

Update app.py

Files changed (1):
  1. app.py +58 -313

app.py CHANGED
@@ -3,15 +3,17 @@ import subprocess
 import random
 from huggingface_hub import InferenceClient
 import gradio as gr
- from safe_search import safe_search # Make sure you have this function defined
+ from safe_search import safe_search
 from i_search import google
 from i_search import i_search as i_s
 from datetime import datetime
 import logging
 import json
- import nltk # Import nltk for the generate_text_chunked function
-
- nltk.download('punkt') # Download the punkt tokenizer if you haven't already
+ import nltk
+ from transformers import pipeline

+ # Ensure NLTK data is downloaded
+ nltk.download('punkt')

 now = datetime.now()
 date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")
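A note on the new import block: nltk.download('punkt') runs unconditionally at import time, which hits the network on every restart, and from transformers import pipeline appears unused in the visible hunks. A minimal guarded variant of the download (standard nltk API; the try/except pattern is a suggestion, not part of this commit):

    import nltk
    try:
        nltk.data.find("tokenizers/punkt")   # already present locally?
    except LookupError:
        nltk.download("punkt", quiet=True)   # fetch once, silently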
@@ -32,11 +34,9 @@ agents = [
     "AI_SYSTEM_PROMPT",
     "PYTHON_CODE_DEV"
 ]
- ############################################

 VERBOSE = True
 MAX_HISTORY = 5
- # MODEL = "gpt-3.5-turbo" # "gpt-4"

 PREFIX = """
 {date_time_str}
@@ -82,14 +82,14 @@ What do you want to know about the test results?
 thought:
 """

- def format_prompt(message, history, max_history_turns=2):
-     prompt = "<s>"
-     # Keep only the last 'max_history_turns' turns
-     for user_prompt, bot_response in history[-max_history_turns:]:
-         prompt += f"[INST] {user_prompt} [/INST]"
-         prompt += f" {bot_response}</s> "
-     prompt += f"[INST] {message} [/INST]"
-     return prompt
+ def format_prompt(message, history, max_history_turns=5):
+     prompt = "<s>"
+     # Keep only the last 'max_history_turns' turns
+     for user_prompt, bot_response in history[-max_history_turns:]:
+         prompt += f"[INST] {user_prompt} [/INST]"
+         prompt += f" {bot_response}</s> "
+     prompt += f"[INST] {message} [/INST]"
+     return prompt

 def run_gpt(
     prompt_template,
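For reference, format_prompt (unchanged apart from the default turn count going from 2 to 5) builds Mixtral-style [INST] turns. With a hypothetical one-turn history, the output looks like this:

    # illustrative only; the history contents are made up
    history = [("hi", "hello!")]
    format_prompt("what's new?", history)
    # -> "<s>[INST] hi [/INST] hello!</s> [INST] what's new? [/INST]"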
@@ -111,12 +111,31 @@ def run_gpt(

     resp = client.text_generation(content, max_new_tokens=max_tokens, stop_sequences=stop_tokens, temperature=0.7, top_p=0.8, repetition_penalty=1.5)
     if VERBOSE:
-         logging.info(LOG_RESPONSE.format(resp)) # Log the response
+         logging.info(LOG_RESPONSE.format(resp=resp)) # Log the response
     return resp

- def generate(prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.7, max_new_tokens=2048, top_p=0.8, repetition_penalty=1.5, model="mistralai/Mixtral-8x7B-Instruct-v0.1"):
-     # Use 'prompt' here instead of 'message'
-     formatted_prompt = format_prompt(prompt, history, max_history_turns=5) # Truncated history
+ def generate(
+     prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.7, max_new_tokens=2048, top_p=0.8, repetition_penalty=1.5, model="mistralai/Mixtral-8x7B-Instruct-v0.1"
+ ):
+     seed = random.randint(1,1111111111111111)
+
+     if agent_name == "WEB_DEV":
+         agent = "You are a helpful AI assistant. You are a web developer."
+     elif agent_name == "AI_SYSTEM_PROMPT":
+         agent = "You are a helpful AI assistant. You are an AI system."
+     elif agent_name == "PYTHON_CODE_DEV":
+         agent = "You are a helpful AI assistant. You are a Python code developer."
+     else:
+         agent = "You are a helpful AI assistant."
+
+     system_prompt = agent
+     temperature = float(temperature)
+     if temperature < 1e-2:
+         temperature = 1e-2
+     top_p = float(top_p)
+
+     formatted_prompt = f"{system_prompt} {prompt}"
+     formatted_prompt = format_prompt(formatted_prompt, history, max_history_turns=5) # Truncated history
     logging.info(f"Formatted Prompt: {formatted_prompt}")
     stream = client.text_generation(formatted_prompt, temperature=temperature, max_new_tokens=max_new_tokens, top_p=top_p, repetition_penalty=repetition_penalty, stream=True, details=True, return_full_text=False)
     resp = ""
@@ -124,10 +143,9 @@ def generate(prompt, history, agent_name=agents[0], sys_prompt="", temperature=0
         resp += response.token.text

     if VERBOSE:
-         logging.info(LOG_RESPONSE.format(resp)) # Log the response
+         logging.info(LOG_RESPONSE.format(resp=resp)) # Log the response
     return resp

-
 def compress_history(purpose, task, history, directory):
     resp = run_gpt(
         COMPRESS_HISTORY_PROMPT,
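The .format(resp) to .format(resp=resp) change only works if the LOG_RESPONSE template (not shown in this diff) uses a named placeholder. Assuming a definition along these lines:

    LOG_RESPONSE = "RESPONSE: {resp}"   # named field matches .format(resp=resp)
    # the old positional call would instead need "RESPONSE: {}" or "RESPONSE: {0}"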
@@ -143,7 +161,6 @@ def compress_history(purpose, task, history, directory):
 def call_search(purpose, task, history, directory, action_input):
     logging.info(f"CALLING SEARCH: {action_input}")
     try:
-
         if "http" in action_input:
             if "<" in action_input:
                 action_input = action_input.strip("<")
@@ -151,7 +168,6 @@ def call_search(purpose, task, history, directory, action_input):
                 action_input = action_input.strip(">")

         response = i_s(action_input)
-         #response = google(search_return)
         logging.info(f"Search Result: {response}")
         history += "observation: search result is: {}\n".format(response)
     else:
@@ -178,7 +194,6 @@ def call_main(purpose, task, history, directory, action_input):
             history += "{}\n".format(line)
             logging.info(f"Thought: {line}")
         elif line.startswith("action: "):
-
             action_name, action_input = parse_action(line)
             logging.info(f"Action: {action_name} - {action_input}")
             history += "{}\n".format(line)
@@ -190,13 +205,8 @@ def call_main(purpose, task, history, directory, action_input):
         else:
             history += "{}\n".format(line)
             logging.info(f"Other Output: {line}")
-             #history += "observation: the following command did not produce any useful output: '{}', I need to check the commands syntax, or use a different command\n".format(line)
-
-             #return action_name, action_input, history, task
-             #assert False, "unknown action: {}".format(line)
     return "MAIN", None, history, task

-
 def call_set_task(purpose, task, history, directory, action_input):
     logging.info(f"CALLING SET_TASK: {action_input}")
     task = run_gpt(
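For context, the line protocol call_main parses is "thought: ..." and "action: NAME=INPUT". A hypothetical action line is split like so:

    line = "action: SEARCH=gradio streaming docs"   # made-up example
    if line.startswith("action: "):
        name, arg = line[len("action: "):].split("=", 1)
        # -> ("SEARCH", "gradio streaming docs")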
@@ -220,7 +230,6 @@ NAME_TO_FUNC = {
     "UPDATE-TASK": call_set_task,
     "SEARCH": call_search,
     "COMPLETE": end_fn,
-
 }

 def run_action(purpose, task, history, directory, action_name, action_input):
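run_action's body is mostly outside this diff, but it presumably resolves handlers through this table; the call shape, sketched under that assumption:

    fn = NAME_TO_FUNC.get(action_name, call_main)   # hypothetical fallback to MAIN
    action_name, action_input, history, task = fn(purpose, task, history, directory, action_input)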
@@ -248,14 +257,11 @@ def run_action(purpose, task, history, directory, action_name, action_input):
         logging.error(f"Error in run_action: {e}")
         return "MAIN", None, history, task

- def run(purpose,history):
-
-     #print(purpose)
-     #print(hist)
-     task=None
-     directory="./"
+ def run(purpose, history):
+     task = None
+     directory = "./"
     if history:
-         history=str(history).strip("[]")
+         history = str(history).strip("[]")
     if not history:
         history = ""
@@ -278,61 +284,8 @@ def run(purpose,history):
         action_input,
     )
     yield (history)
-     #yield ("",[(purpose,history)])
     if task == "END":
         return (history)
-     #return ("", [(purpose,history)])
-
-
- ################################################
-
- def format_prompt(message, history, max_history_turns=5):
-     prompt = "<s>"
-     # Keep only the last 'max_history_turns' turns
-     for user_prompt, bot_response in history[-max_history_turns:]:
-         prompt += f"[INST] {user_prompt} [/INST]"
-         prompt += f" {bot_response}</s> "
-     prompt += f"[INST] {message} [/INST]"
-     return prompt
- agents =[
-     "WEB_DEV",
-     "AI_SYSTEM_PROMPT",
-     "PYTHON_CODE_DEV"
- ]
- def generate(
-     prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0, model="mistralai/Mixtral-8x7B-Instruct-v0.1"
- ):
-     seed = random.randint(1,1111111111111111)
-
-     # Correct the line:
-     if agent_name == "WEB_DEV":
-         agent = "You are a helpful AI assistant. You are a web developer."
-     if agent_name == "AI_SYSTEM_PROMPT":
-         agent = "You are a helpful AI assistant. You are an AI system."
-     if agent_name == "PYTHON_CODE_DEV":
-         agent = "You are a helpful AI assistant. You are a Python code developer."
-     system_prompt = agent
-     temperature = float(temperature)
-     if temperature < 1e-2:
-         temperature = 1e-2
-     top_p = float(top_p)
-
-     # Add the system prompt to the beginning of the prompt
-     formatted_prompt = f"{system_prompt} {prompt}"
-
-     # Use 'prompt' here instead of 'message'
-     formatted_prompt = format_prompt(formatted_prompt, history, max_history_turns=5) # Truncated history
-     logging.info(f"Formatted Prompt: {formatted_prompt}")
-     stream = client.text_generation(formatted_prompt, temperature=temperature, max_new_tokens=max_new_tokens, top_p=top_p, repetition_penalty=repetition_penalty, stream=True, details=True, return_full_text=False)
-     resp = ""
-     for response in stream:
-         resp += response.token.text
-
-     if VERBOSE:
-         logging.info(LOG_RESPONSE.format(resp)) # Log the response
-     return resp
-

 def generate_text_chunked(input_text, model, generation_parameters, max_tokens_to_generate):
     """Generates text in chunks to avoid token limit errors."""
@@ -349,234 +302,26 @@ def generate_text_chunked(input_text, model, generation_parameters, max_tokens_t
             generated_text.append(response[0]['generated_text'])
         else:
             # Handle cases where the sentence is too long
-             # You could split the sentence further or skip it
             print(f"Sentence too long: {sentence}")

     return ''.join(generated_text)

- formatted_prompt = format_prompt(prompt, history, max_history_turns=5) # Truncated history
- logging.info(f"Formatted Prompt: {formatted_prompt}")
- stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
- output = ""
-
- for response in stream:
-     output += response.token.text
-     yield output
- return output
-
- additional_inputs=[
-     gr.Dropdown(
-         label="Agents",
-         choices=[s for s in agents],
-         value=agents[0],
-         interactive=True,
-     ),
-     gr.Textbox(
-         label="System Prompt",
-         max_lines=1,
-         interactive=True,
-     ),
-     gr.Slider(
-         label="Temperature",
-         value=0.9,
-         minimum=0.0,
-         maximum=1.0,
-         step=0.05,
-         interactive=True,
-         info="Higher values produce more diverse outputs",
-     ),
-     gr.Slider(
-         label="Max new tokens",
-         value=1048*10,
-         minimum=0,
-         maximum=1048*10,
-         step=64,
-         interactive=True,
-         info="The maximum numbers of new tokens",
-     ),
-     gr.Slider(
-         label="Top-p (nucleus sampling)",
-         value=0.90,
-         minimum=0.0,
-         maximum=1,
-         step=0.05,
-         interactive=True,
-         info="Higher values sample more low-probability tokens",
-     ),
-     gr.Slider(
-         label="Repetition penalty",
-         value=1.2,
-         minimum=1.0,
-         maximum=2.0,
-         step=0.05,
-         interactive=True,
-         info="Penalize repeated tokens",
-     ),
- ]
-
- examples = [
-     ["Help me set up TypeScript configurations and integrate ts-loader in my existing React project.",
-      "Update Webpack Configurations",
-      "Install Dependencies",
-      "Configure Ts-Loader",
-      "TypeChecking Rules Setup",
-      "React Specific Settings",
-      "Compilation Options",
-      "Test Runner Configuration"],
-     ["Guide me through building a serverless microservice using AWS Lambda and API Gateway, connecting to DynamoDB for storage.",
-      "Set Up AWS Account",
-      "Create Lambda Function",
-      "APIGateway Integration",
-      "Define DynamoDB Table Scheme",
-      "Connect Service To DB",
-      "Add Authentication Layers",
-      "Monitor Metrics and Set Alarms"],
-     ["Migrate our current monolithic PHP application towards containerized services using Docker and Kubernetes for scalability.",
-      "Architectural Restructuring Plan",
-      "Containerisation Process With Docker",
-      "Service Orchestration With Kubernetes",
-      "Load Balancing Strategies",
-      "Persistent Storage Solutions",
-      "Network Policies Enforcement",
-      "Continuous Integration / Continuous Delivery"],
-     ["Provide guidance on integrating WebAssembly modules compiled from C++ source files into an ongoing web project.",
-      "Toolchain Selection (Emscripten vs. LLVM)",
-      "Setting Up Compiler Environment",
-      ".cpp Source Preparation",
-      "Module Building Approach",
-      "Memory Management Considerations",
-      "Performance Tradeoffs",
-      "Seamless Web Assembly Embedding"]
- ]
-
- def parse_action(line):
-     action_name, action_input = line.strip("action: ").split("=")
-     action_input = action_input.strip()
-     return action_name, action_input
-
- def get_file_tree(path):
-     """
-     Recursively explores a directory and returns a nested dictionary representing its file tree.
-     """
-     tree = {}
-     for item in os.listdir(path):
-         item_path = os.path.join(path, item)
-         if os.path.isdir(item_path):
-             tree[item] = get_file_tree(item_path)
-         else:
-             tree[item] = None
-     return tree
-
- def display_file_tree(tree, indent=0):
-     """
-     Prints a formatted representation of the file tree.
-     """
-     for name, subtree in tree.items():
-         print(f"{' ' * indent}{name}")
-         if subtree is not None:
-             display_file_tree(subtree, indent + 1)
-
- def project_explorer(path):
-     """
-     Displays the file tree of a given path in a Streamlit app.
-     """
-     tree = get_file_tree(path)
-     tree_str = json.dumps(tree, indent=4) # Convert the tree to a string for display
-     return tree_str
-
- def chat_app_logic(message, history, purpose, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, model):
-     # Your existing code here
-     try:
-         # Pass 'message' as 'prompt'
-         response = ''.join(generate(
-             model=model,
-             prompt=message, # Use 'prompt' here
-             history=history,
-             agent_name=agent_name,
-             sys_prompt=sys_prompt,
-             temperature=temperature,
-             max_new_tokens=max_new_tokens,
-             top_p=top_p,
-             repetition_penalty=repetition_penalty,
-         ))
-     except TypeError:
-         # ... (rest of the exception handling)
-         response_parts = []
-         for part in generate(
-             model=model,
-             prompt=message, # Use 'prompt' here
-             history=history,
-             agent_name=agent_name,
-             sys_prompt=sys_prompt,
-             temperature=temperature,
-             max_new_tokens=max_new_tokens,
-             top_p=top_p,
-             repetition_penalty=repetition_penalty,
-         ):
-             if isinstance(part, str):
-                 response_parts.append(part)
-             elif isinstance(part, dict) and 'content' in part:
-                 response_parts.append(part['content'])
-
-         response = ''.join(response_parts)
-         history.append((message, response))
-         return history
-
-     history.append((message, response))
-     return history
-
- def main():
-     with gr.Blocks() as demo:
-         gr.Markdown("## FragMixt")
-         gr.Markdown("### Agents w/ Agents")
-
-         # Chat Interface
-         chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel")
-         #chatbot.load(examples)
-
-         # Input Components
-         message = gr.Textbox(label="Enter your message", placeholder="Ask me anything!")
-         purpose = gr.Textbox(label="Purpose", placeholder="What is the purpose of this interaction?")
-         agent_name = gr.Dropdown(label="Agents", choices=[s for s in agents], value=agents[0], interactive=True)
-         sys_prompt = gr.Textbox(label="System Prompt", max_lines=1, interactive=True)
-         temperature = gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs")
-         max_new_tokens = gr.Slider(label="Max new tokens", value=1048*10, minimum=0, maximum=1048*10, step=64, interactive=True, info="The maximum numbers of new tokens")
-         top_p = gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens")
-         repetition_penalty = gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")
-         model_input = gr.Textbox(label="Model", value="mistralai/Mixtral-8x7B-Instruct-v0.1", visible=False)
-
-         # Button to submit the message
-         submit_button = gr.Button(value="Send")
-
-         # Project Explorer Tab
-         with gr.Tab("Project Explorer"):
-             project_path = gr.Textbox(label="Project Path", placeholder="/home/user/app/current_project")
-             explore_button = gr.Button(value="Explore")
-             project_output = gr.Textbox(label="File Tree", lines=20)
-
-         # Chat App Logic Tab
-         with gr.Tab("Chat App"):
-             history = gr.State([])
-             for example in examples:
-                 gr.Button(value=example[0]).click(lambda: chat_app_logic(example[0], history, purpose, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, model=model_input), outputs=chatbot)
-
-         # Connect components to the chat app logic
-         submit_button.click(chat_app_logic, inputs=[message, history, purpose, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, model_input], outputs=chatbot)
-         message.submit(chat_app_logic, inputs=[message, history, purpose, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, model_input], outputs=chatbot)
-
-         # Connect components to the project explorer
-         explore_button.click(project_explorer, inputs=project_path, outputs=project_output)
-
-         demo.launch(show_api=True)
+ # Gradio Interface
+ def gradio_interface(purpose, history):
+     history = json.loads(history) if history else []
+     result = run(purpose, history)
+     return next(result)
+
+ iface = gr.Interface(
+     fn=gradio_interface,
+     inputs=[
+         gr.inputs.Textbox(lines=2, placeholder="Enter the purpose here..."),
+         gr.inputs.Textbox(lines=10, placeholder="Enter the history here (JSON format)...")
+     ],
+     outputs="text",
+     title="AI Assistant",
+     description="An AI assistant that helps with various tasks."
+ )

 if __name__ == "__main__":
-     main()
+     iface.launch()
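One caveat on the added interface: gr.inputs is the legacy Gradio 2/3 namespace and was removed in Gradio 4.x, so on a current runtime the same components would be written as follows (equivalent modern API, otherwise unchanged from the commit):

    iface = gr.Interface(
        fn=gradio_interface,
        inputs=[
            gr.Textbox(lines=2, placeholder="Enter the purpose here..."),
            gr.Textbox(lines=10, placeholder="Enter the history here (JSON format)..."),
        ],
        outputs="text",
        title="AI Assistant",
        description="An AI assistant that helps with various tasks.",
    )

Also worth noting: gradio_interface returns next(result), i.e. only the first history snapshot that the run() generator yields.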
 