yaleh committed
Commit 706e55e · 1 Parent(s): 829d673

feat: Add `Expert` tab for model selection.

Files changed (1)
  1. app/gradio_meta_prompt.py +117 -17
app/gradio_meta_prompt.py CHANGED
@@ -247,6 +247,37 @@ def process_message_with_2_llms(user_message, expected_output, acceptance_criter
                            recursion_limit, max_output_age, llms)
 
 
+def process_message_with_expert_llms(user_message, expected_output, acceptance_criteria, initial_system_message,
+                                     recursion_limit: int, max_output_age: int,
+                                     initial_developer_model_name: str, developer_model_name: str,
+                                     executor_model_name: str, output_history_analyzer_model_name: str,
+                                     analyzer_model_name: str, suggester_model_name: str):
+    # Get the output state from MetaPromptGraph
+    initial_developer_model = LLMModelFactory().create(config.llms[initial_developer_model_name].type,
+                                                       **config.llms[initial_developer_model_name].model_dump(exclude={'type'}))
+    developer_model = LLMModelFactory().create(config.llms[developer_model_name].type,
+                                               **config.llms[developer_model_name].model_dump(exclude={'type'}))
+    executor_model = LLMModelFactory().create(config.llms[executor_model_name].type,
+                                              **config.llms[executor_model_name].model_dump(exclude={'type'}))
+    output_history_analyzer_model = LLMModelFactory().create(config.llms[output_history_analyzer_model_name].type,
+                                                             **config.llms[output_history_analyzer_model_name].model_dump(exclude={'type'}))
+    analyzer_model = LLMModelFactory().create(config.llms[analyzer_model_name].type,
+                                              **config.llms[analyzer_model_name].model_dump(exclude={'type'}))
+    suggester_model = LLMModelFactory().create(config.llms[suggester_model_name].type,
+                                               **config.llms[suggester_model_name].model_dump(exclude={'type'}))
+    llms = {
+        NODE_PROMPT_INITIAL_DEVELOPER: initial_developer_model,
+        NODE_PROMPT_DEVELOPER: developer_model,
+        NODE_PROMPT_EXECUTOR: executor_model,
+        NODE_OUTPUT_HISTORY_ANALYZER: output_history_analyzer_model,
+        NODE_PROMPT_ANALYZER: analyzer_model,
+        NODE_PROMPT_SUGGESTER: suggester_model
+    }
+
+    return process_message(user_message, expected_output, acceptance_criteria, initial_system_message,
+                           recursion_limit, max_output_age, llms)
+
+
 class FileConfig(BaseConfig):
     config_file: str = 'config.yml'  # default path
 
@@ -294,35 +325,79 @@ with gr.Blocks(title='Meta Prompt') as demo:
     with gr.Row():
         with gr.Tabs():
             with gr.Tab('Simple') as simple_llm_tab:
-                model_name_input = gr.Dropdown(
+                simple_model_name_input = gr.Dropdown(
                     label="Model Name",
                     choices=config.llms.keys(),
                     value=list(config.llms.keys())[0],
                 )
                 # Connect the inputs and outputs to the function
                 with gr.Row():
-                    submit_button = gr.Button(
+                    simple_submit_button = gr.Button(
                         value="Submit", variant="primary")
-                    clear_button = gr.ClearButton(
+                    simple_clear_button = gr.ClearButton(
                         [user_message_input, expected_output_input,
                          acceptance_criteria_input, initial_system_message_input],
                         value='Clear All')
             with gr.Tab('Advanced') as advanced_llm_tab:
-                optimizer_model_name_input = gr.Dropdown(
+                advanced_optimizer_model_name_input = gr.Dropdown(
                     label="Optimizer Model Name",
                     choices=config.llms.keys(),
                     value=list(config.llms.keys())[0],
                 )
-                executor_model_name_input = gr.Dropdown(
+                advanced_executor_model_name_input = gr.Dropdown(
                     label="Executor Model Name",
                     choices=config.llms.keys(),
                     value=list(config.llms.keys())[0],
                 )
                 # Connect the inputs and outputs to the function
                 with gr.Row():
-                    multiple_submit_button = gr.Button(
+                    advanced_submit_button = gr.Button(
                         value="Submit", variant="primary")
-                    multiple_clear_button = gr.ClearButton(
+                    advanced_clear_button = gr.ClearButton(
+                        components=[user_message_input, expected_output_input,
+                                    acceptance_criteria_input, initial_system_message_input],
+                        value='Clear All')
+            with gr.Tab('Expert') as expert_llm_tab:
+                expert_prompt_initial_developer_model_name_input = gr.Dropdown(
+                    label="Initial Developer Model Name",
+                    choices=config.llms.keys(),
+                    value=list(config.llms.keys())[0],
+                )
+
+                expert_prompt_developer_model_name_input = gr.Dropdown(
+                    label="Developer Model Name",
+                    choices=config.llms.keys(),
+                    value=list(config.llms.keys())[0],
+                )
+
+                expert_prompt_executor_model_name_input = gr.Dropdown(
+                    label="Executor Model Name",
+                    choices=config.llms.keys(),
+                    value=list(config.llms.keys())[0],
+                )
+
+                expert_output_history_analyzer_model_name_input = gr.Dropdown(
+                    label="History Analyzer Model Name",
+                    choices=config.llms.keys(),
+                    value=list(config.llms.keys())[0],
+                )
+
+                expert_prompt_analyzer_model_name_input = gr.Dropdown(
+                    label="Analyzer Model Name",
+                    choices=config.llms.keys(),
+                    value=list(config.llms.keys())[0],
+                )
+
+                expert_prompt_suggester_model_name_input = gr.Dropdown(
+                    label="Suggester Model Name",
+                    choices=config.llms.keys(),
+                    value=list(config.llms.keys())[0],
+                )
+                # Connect the inputs and outputs to the function
+                with gr.Row():
+                    expert_submit_button = gr.Button(
+                        value="Submit", variant="primary")
+                    expert_clear_button = gr.ClearButton(
                         components=[user_message_input, expected_output_input,
                                     acceptance_criteria_input, initial_system_message_input],
                         value='Clear All')
@@ -348,23 +423,24 @@ with gr.Blocks(title='Meta Prompt') as demo:
             acceptance_criteria_input,
             initial_system_message_input,
             recursion_limit_input,
-            model_name_input
+            simple_model_name_input
         ])
 
     # set up event handlers
     simple_llm_tab.select(on_model_tab_select)
     advanced_llm_tab.select(on_model_tab_select)
+    expert_llm_tab.select(on_model_tab_select)
 
     evaluate_initial_system_message_button.click(
         evaluate_system_message,
        inputs=[initial_system_message_input, user_message_input,
-                model_name_input, executor_model_name_input],
+                simple_model_name_input, advanced_executor_model_name_input],
        outputs=[output_output]
    )
    evaluate_system_message_button.click(
        evaluate_system_message,
        inputs=[system_message_output, user_message_input,
-                model_name_input, executor_model_name_input],
+                simple_model_name_input, advanced_executor_model_name_input],
        outputs=[output_output]
    )
    copy_to_initial_system_message_button.click(
@@ -373,12 +449,12 @@ with gr.Blocks(title='Meta Prompt') as demo:
        outputs=[initial_system_message_input]
    )
 
-    clear_button.add([system_message_output, output_output,
+    simple_clear_button.add([system_message_output, output_output,
                      analysis_output, logs_chatbot])
-    multiple_clear_button.add([system_message_output, output_output,
+    advanced_clear_button.add([system_message_output, output_output,
                      analysis_output, logs_chatbot])
 
-    submit_button.click(
+    simple_submit_button.click(
        process_message_with_single_llm,
        inputs=[
            user_message_input,
@@ -387,7 +463,7 @@ with gr.Blocks(title='Meta Prompt') as demo:
            initial_system_message_input,
            recursion_limit_input,
            max_output_age,
-            model_name_input
+            simple_model_name_input
        ],
        outputs=[
            system_message_output,
@@ -397,7 +473,7 @@ with gr.Blocks(title='Meta Prompt') as demo:
        ]
    )
 
-    multiple_submit_button.click(
+    advanced_submit_button.click(
        process_message_with_2_llms,
        inputs=[
            user_message_input,
@@ -406,8 +482,32 @@ with gr.Blocks(title='Meta Prompt') as demo:
            initial_system_message_input,
            recursion_limit_input,
            max_output_age,
-            optimizer_model_name_input,
-            executor_model_name_input
+            advanced_optimizer_model_name_input,
+            advanced_executor_model_name_input
+        ],
+        outputs=[
+            system_message_output,
+            output_output,
+            analysis_output,
+            logs_chatbot
+        ]
+    )
+
+    expert_submit_button.click(
+        process_message_with_expert_llms,
+        inputs=[
+            user_message_input,
+            expected_output_input,
+            acceptance_criteria_input,
+            initial_system_message_input,
+            recursion_limit_input,
+            max_output_age,
+            expert_prompt_initial_developer_model_name_input,
+            expert_prompt_developer_model_name_input,
+            expert_prompt_executor_model_name_input,
+            expert_output_history_analyzer_model_name_input,
+            expert_prompt_analyzer_model_name_input,
+            expert_prompt_suggester_model_name_input
        ],
        outputs=[
            system_message_output,
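
Editor's note: the six per-node factory calls added in `process_message_with_expert_llms` all follow the same pattern, so the node-to-model wiring can be read as a single mapping. The sketch below is not part of the commit; it only restates what the hunk shows about `LLMModelFactory`, `config.llms`, and the `NODE_*` constants, and the `build_expert_llms` helper name is hypothetical.

    # Sketch only: condenses the repeated factory calls from the diff above.
    # Assumes each config.llms[name] entry has a `type` field plus model kwargs,
    # as the diff's model_dump(exclude={'type'}) usage implies.
    def build_expert_llms(node_to_model_name: dict) -> dict:
        factory = LLMModelFactory()
        return {
            node: factory.create(config.llms[name].type,
                                 **config.llms[name].model_dump(exclude={'type'}))
            for node, name in node_to_model_name.items()
        }

    # Example wiring, mirroring the Expert tab inputs:
    # llms = build_expert_llms({
    #     NODE_PROMPT_INITIAL_DEVELOPER: initial_developer_model_name,
    #     NODE_PROMPT_DEVELOPER: developer_model_name,
    #     NODE_PROMPT_EXECUTOR: executor_model_name,
    #     NODE_OUTPUT_HISTORY_ANALYZER: output_history_analyzer_model_name,
    #     NODE_PROMPT_ANALYZER: analyzer_model_name,
    #     NODE_PROMPT_SUGGESTER: suggester_model_name,
    # })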