Updated layout.
Files changed:
- app/gradio_meta_prompt.py +258 -204
- app/gradio_meta_prompt_utils.py +82 -54
app/gradio_meta_prompt.py
CHANGED
@@ -31,136 +31,138 @@ with gr.Blocks(title='Meta Prompt') as demo:
         <p style="text-align:left">A tool for generating and analyzing natural language prompts using multiple language models.</p>
         <a href="https://github.com/yaleh/meta-prompt"><img src="https://img.shields.io/badge/GitHub-blue?logo=github" alt="GitHub"></a>""")
 
-    input_dataframe = gr.DataFrame(
-        label="Input Examples",
-        headers=["Input", "Output"],
-        datatype=["str", "str"],
-        column_widths=["50%", "50%"],
-        row_count=(1, "dynamic"),
-        col_count=(2, "fixed"),
-        interactive=False,
-        wrap=True
-    )
-
     with gr.Row():
-        selected_example_input = gr.Textbox(
-            label="Selected Example Input",
-            lines=2,
-            show_copy_button=True,
-            value="",
-        )
-        selected_example_output = gr.Textbox(
-            label="Selected Example Output",
-            lines=2,
-            show_copy_button=True,
-            value="",
-        )
-
-    selected_group_mode = gr.State(None)  # None, "update", "append"
-    selected_group_index = gr.State(None)  # None, int
-    selected_group_input = gr.State("")
-    selected_group_output = gr.State("")
-
-    selected_group_input.change(
-        fn=lambda x: x,
-        inputs=[selected_group_input],
-        outputs=[selected_example_input],
-    )
-    selected_group_output.change(
-        fn=lambda x: x,
-        inputs=[selected_group_output],
-        outputs=[selected_example_output],
-    )
-
-    @gr.render(
-        inputs=[
-            selected_group_mode,
-            selected_group_index,
-            selected_group_input,
-            selected_group_output,
-        ],
-        triggers=[selected_group_mode.change],
-    )
-    def selected_group(mode, index, input, output):
-        if mode is None:
-            return
-        with gr.Group():
-            if mode == "update":
-                with gr.Row():
-                    selected_row_index = gr.Number(
-                        label="Selected Row Index", value=index, precision=0, interactive=False
-                    )
-                    delete_row_button = gr.Button(
-                        "Delete Selected Row", variant="secondary"
-                    )
-                with gr.Row():
-                    update_row_button = gr.Button(
-                        "Update Selected Row", variant="secondary"
-                    )
-                    close_button = gr.Button("Close", variant="secondary")
-
-                delete_row_button.click(
-                    fn=delete_selected_dataframe_row,
-                    inputs=[selected_row_index, input_dataframe],
-                    outputs=[
-                        input_dataframe,
-                        selected_group_mode,
-                        selected_group_index,
-                        selected_group_input,
-                        selected_group_output,
-                    ],
-                )
-
-                update_row_button.click(
-                    fn=update_selected_dataframe_row,
-                    inputs=[
-                        selected_example_input,
-                        selected_example_output,
-                        selected_row_index,
-                        input_dataframe,
-                    ],
-                    outputs=[
-                        input_dataframe,
-                        selected_group_mode,
-                        selected_group_index,
-                        selected_group_input,
-                        selected_group_output,
-                    ],
-                )
-            elif mode == "append":
-                with gr.Row():
-                    append_example_button = gr.Button(
-                        "Append to Input Examples", variant="secondary"
-                    )
-                    close_button = gr.Button("Close", variant="secondary")
-
-                append_example_button.click(
-                    fn=append_example_to_input_dataframe,
-                    inputs=[
-                        selected_example_input,
-                        selected_example_output,
-                        input_dataframe,
-                    ],
-                    outputs=[
-                        input_dataframe,
-                        selected_group_mode,
-                        selected_group_index,
-                        selected_group_input,
-                        selected_group_output,
-                    ],
-                )
-
-            close_button.click(
-                fn=lambda: None,
-                inputs=[],
-                outputs=[selected_group_mode],
-            )
-
-    with gr.Accordion("Import/Export JSON", open=False):
-        json_file_object = gr.File(
-            label="Import/Export JSON", file_types=[".json"], type="filepath"
-        )
-        export_button = gr.Button("Export to JSON")
+        with gr.Column(scale=3):
+            input_dataframe = gr.DataFrame(
+                label="Input Examples",
+                headers=["Input", "Output"],
+                datatype=["str", "str"],
+                column_widths=["50%", "50%"],
+                row_count=(1, "dynamic"),
+                col_count=(2, "fixed"),
+                interactive=False,
+                wrap=True
+            )
+
+            selected_example_input = gr.Textbox(
+                label="Selected Example Input",
+                lines=2,
+                show_copy_button=True,
+                value="",
+            )
+            selected_example_output = gr.Textbox(
+                label="Selected Example Output",
+                lines=2,
+                show_copy_button=True,
+                value="",
+            )
+
+        with gr.Column(scale=1):
+            with gr.Accordion("Import/Export JSON", open=False):
+                json_file_object = gr.File(
+                    label="Import/Export JSON", file_types=[".json"], type="filepath"
+                )
+                export_button = gr.Button("Export to JSON")
+
+    selected_group_mode = gr.State(None)  # None, "update", "append"
+    selected_group_index = gr.State(None)  # None, int
+    selected_group_input = gr.State("")
+    selected_group_output = gr.State("")
+
+    selected_group_input.change(
+        fn=lambda x: x,
+        inputs=[selected_group_input],
+        outputs=[selected_example_input],
+    )
+    selected_group_output.change(
+        fn=lambda x: x,
+        inputs=[selected_group_output],
+        outputs=[selected_example_output],
+    )
+
+    @gr.render(
+        inputs=[
+            selected_group_mode,
+            selected_group_index,
+            selected_group_input,
+            selected_group_output,
+        ],
+        triggers=[selected_group_mode.change],
+    )
+    def selected_group(mode, index, input, output):
+        if mode is None:
+            return
+        with gr.Group():
+            if mode == "update":
+                with gr.Row():
+                    selected_row_index = gr.Number(
+                        label="Selected Row Index", value=index, precision=0, interactive=False
+                    )
+                    delete_row_button = gr.Button(
+                        "Delete Selected Row", variant="secondary"
+                    )
+                with gr.Row():
+                    update_row_button = gr.Button(
+                        "Update Selected Row", variant="secondary"
+                    )
+                    close_button = gr.Button("Close", variant="secondary")
+
+                delete_row_button.click(
+                    fn=delete_selected_dataframe_row,
+                    inputs=[selected_row_index, input_dataframe],
+                    outputs=[
+                        input_dataframe,
+                        selected_group_mode,
+                        selected_group_index,
+                        selected_group_input,
+                        selected_group_output,
+                    ],
+                )
+
+                update_row_button.click(
+                    fn=update_selected_dataframe_row,
+                    inputs=[
+                        selected_example_input,
+                        selected_example_output,
+                        selected_row_index,
+                        input_dataframe,
+                    ],
+                    outputs=[
+                        input_dataframe,
+                        selected_group_mode,
+                        selected_group_index,
+                        selected_group_input,
+                        selected_group_output,
+                    ],
+                )
+            elif mode == "append":
+                with gr.Row():
+                    append_example_button = gr.Button(
+                        "Append to Input Examples", variant="secondary"
+                    )
+                    close_button = gr.Button("Close", variant="secondary")
+
+                append_example_button.click(
+                    fn=append_example_to_input_dataframe,
+                    inputs=[
+                        selected_example_input,
+                        selected_example_output,
+                        input_dataframe,
+                    ],
+                    outputs=[
+                        input_dataframe,
+                        selected_group_mode,
+                        selected_group_index,
+                        selected_group_input,
+                        selected_group_output,
+                    ],
+                )
+
+            close_button.click(
+                fn=lambda: None,
+                inputs=[],
+                outputs=[selected_group_mode],
+            )
 
     with gr.Tabs() as tabs:
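The bulk of this hunk is a layout move: the example browser and its dynamic edit panel now live inside a two-column row. The edit panel itself is the `gr.State` + `@gr.render` pattern, where changing a state value re-renders a block of components. A minimal, runnable sketch of that pattern (component names here are illustrative, not from this repo):

```python
import gradio as gr

with gr.Blocks() as demo:
    mode = gr.State(None)  # None, "update", or "append"

    with gr.Row():
        open_update = gr.Button("Edit row")
        open_append = gr.Button("Append row")
    # Buttons only write the mode state; the panel below reacts to it.
    open_update.click(fn=lambda: "update", inputs=[], outputs=[mode])
    open_append.click(fn=lambda: "append", inputs=[], outputs=[mode])

    @gr.render(inputs=[mode], triggers=[mode.change])
    def panel(current_mode):
        if current_mode is None:
            return  # render nothing when no mode is selected
        with gr.Group():
            gr.Markdown(f"**{current_mode} mode**")
            close = gr.Button("Close")
            # Resetting the state to None re-renders an empty panel.
            close.click(fn=lambda: None, inputs=[], outputs=[mode])

if __name__ == "__main__":
    demo.launch()
```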
@@ -187,16 +189,10 @@ with gr.Blocks(title='Meta Prompt') as demo:
             )
 
             with gr.Accordion("Model Settings", open=False):
-                model_name = gr.Dropdown(
+                scope_model_name = gr.Dropdown(
                     label="Model Name",
-                    choices=[
-                        "llama3-70b-8192",
-                        "llama3-8b-8192",
-                        "llama-3.1-70b-versatile",
-                        "llama-3.1-8b-instant",
-                        "gemma2-9b-it",
-                    ],
-                    value="llama3-70b-8192",
+                    choices=config.llms.keys(),
+                    value=list(config.llms.keys())[0],
                 )
                 temperature = gr.Slider(
                     label="Temperature", value=1.0, minimum=0.0, maximum=1.0, step=0.1
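The Scope tab's model picker is now driven by the config rather than a hardcoded model list. A minimal sketch of the idea (a plain dict stands in for the confz-backed `config.llms`; wrapping the keys in `list()` is advisable, since Gradio expects a list of choices):

```python
import gradio as gr

# Stand-in for config.llms: a mapping from model name to its settings.
llms = {"llama3-70b-8192": {}, "llama3-8b-8192": {}, "gemma2-9b-it": {}}

with gr.Blocks() as demo:
    model_name = gr.Dropdown(
        label="Model Name",
        choices=list(llms.keys()),   # materialize the dict_keys view
        value=list(llms.keys())[0],  # default to the first configured model
    )
```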
@@ -285,7 +281,7 @@ with gr.Blocks(title='Meta Prompt') as demo:
         with gr.Tab("Prompt"):
 
             with gr.Row():
-                prompt_submit_button = gr.Button(value="Submit", variant="primary")
+                prompt_submit_button = gr.Button(value="Submit", variant="primary", interactive=False)
                 prompt_clear_button = gr.ClearButton(value='Clear All')
 
             with gr.Row():
@@ -473,7 +469,8 @@ with gr.Blocks(title='Meta Prompt') as demo:
         simple_model_name_input
     ])
 
-    model_states = {
+    prompt_model_tab_state = gr.State(value='Simple')
+    model_name_states = {
         # None | str
         "initial_developer": gr.State(value=simple_model_name_input.value),
         # None | str
@@ -489,9 +486,20 @@ with gr.Blocks(title='Meta Prompt') as demo:
         # None | str
         "suggester": gr.State(value=simple_model_name_input.value)
     }
+    model_temperature_states = {
+        "initial_developer": gr.State(value=config.default_llm_temperature),
+        "acceptance_criteria": gr.State(value=config.default_llm_temperature),
+        "developer": gr.State(value=config.default_llm_temperature),
+        "executor": gr.State(value=config.default_llm_temperature),
+        "history_analyzer": gr.State(value=config.default_llm_temperature),
+        "analyzer": gr.State(value=config.default_llm_temperature),
+        "suggester": gr.State(value=config.default_llm_temperature)
+    }
 
     config_state = gr.State(value=config)
 
+    prompt_inputs_ready_state = gr.State(value=False)
+
     # set up event handlers for the scope tab
 
     json_file_object.change(
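The new `model_name_states` and `model_temperature_states` dictionaries keep one hidden `gr.State` per pipeline role, so each later event handler can receive a role's model name and temperature as plain inputs. A reduced sketch of the idea (role names shortened, and `default_temperature` standing in for `config.default_llm_temperature`):

```python
import gradio as gr

default_temperature = 1.0  # stand-in for config.default_llm_temperature

with gr.Blocks() as demo:
    roles = ["developer", "executor", "analyzer"]
    # One pair of hidden states per role; dicts keep the wiring readable.
    name_states = {r: gr.State(value="llama3-70b-8192") for r in roles}
    temperature_states = {r: gr.State(value=default_temperature) for r in roles}

    show = gr.Button("Show executor settings")
    out = gr.Textbox(label="Executor settings")
    # Any handler can consume a single role's pair of states as inputs.
    show.click(
        fn=lambda name, temp: f"{name} @ temperature {temp}",
        inputs=[name_states["executor"], temperature_states["executor"]],
        outputs=[out],
    )
```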
@@ -509,8 +517,9 @@ with gr.Blocks(title='Meta Prompt') as demo:
     submit_button.click(
         fn=process_json_data,
         inputs=[
+            config_state,
             input_dataframe,
-            model_name,
+            scope_model_name,
             generating_batch_size,
             temperature,
         ],
@@ -527,17 +536,23 @@ with gr.Blocks(title='Meta Prompt') as demo:
 
     generate_description_button.click(
         fn=generate_description,
-        inputs=[input_dataframe, model_name, temperature],
+        inputs=[
+            config_state,
+            input_dataframe,
+            scope_model_name,
+            temperature
+        ],
         outputs=[description_output, suggestions_output],
     )
 
     generate_examples_directly_button.click(
         fn=generate_examples_from_description,
         inputs=[
+            config_state,
             description_output,
             input_dataframe,
             generating_batch_size,
-            model_name,
+            scope_model_name,
             temperature,
         ],
         outputs=[examples_directly_output_dataframe],
@@ -545,17 +560,18 @@ with gr.Blocks(title='Meta Prompt') as demo:
 
     analyze_input_button.click(
         fn=analyze_input_data,
-        inputs=[description_output, model_name, temperature],
+        inputs=[config_state, description_output, scope_model_name, temperature],
         outputs=[input_analysis_output],
     )
 
     generate_briefs_button.click(
         fn=generate_example_briefs,
         inputs=[
+            config_state,
             description_output,
             input_analysis_output,
             generating_batch_size,
-            model_name,
+            scope_model_name,
             temperature,
         ],
         outputs=[example_briefs_output],
@@ -564,11 +580,12 @@ with gr.Blocks(title='Meta Prompt') as demo:
     generate_examples_from_briefs_button.click(
         fn=generate_examples_using_briefs,
         inputs=[
+            config_state,
             description_output,
             example_briefs_output,
             input_dataframe,
             generating_batch_size,
-            model_name,
+            scope_model_name,
             temperature,
         ],
         outputs=[examples_from_briefs_output_dataframe],
@@ -637,102 +654,130 @@ with gr.Blocks(title='Meta Prompt') as demo:
 
     generate_suggestions_button.click(
         fn=generate_suggestions,
-        inputs=[description_output, input_dataframe, model_name, temperature],
+        inputs=[config_state, description_output, input_dataframe, scope_model_name, temperature],
         outputs=[suggestions_output],
     )
 
     apply_suggestions_button.click(
         fn=apply_suggestions,
-        inputs=[description_output, suggestions_output,
-                input_dataframe, model_name, temperature],
+        inputs=[config_state, description_output, suggestions_output,
+                input_dataframe, scope_model_name, temperature],
        outputs=[description_output],
     )
 
     # set up event handlers for the prompt tab
+    for item in [selected_example_input, selected_example_output]:
+        item.change(
+            fn=lambda x, y: all(v is not None and v != '' for v in [x, y]),
+            inputs=[selected_example_input, selected_example_output],
+            outputs=[prompt_inputs_ready_state],
+        )
+
+    prompt_inputs_ready_state.change(
+        fn=lambda x: gr.update(interactive=x),
+        inputs=[prompt_inputs_ready_state],
+        outputs=[prompt_submit_button],
+    )
+
     simple_llm_tab.select(
         on_model_tab_select,
         [
-            simple_model_name_input,
-            advanced_optimizer_model_name_input,
-            advanced_executor_model_name_input,
-            expert_prompt_initial_developer_model_name_input,
-            expert_prompt_acceptance_criteria_model_name_input,
-            expert_prompt_developer_model_name_input,
-            expert_prompt_executor_model_name_input,
-            expert_output_history_analyzer_model_name_input,
-            expert_prompt_analyzer_model_name_input,
-            expert_prompt_suggester_model_name_input
         ],
         [
-            model_states["initial_developer"],
-            model_states["acceptance_criteria"],
-            model_states["developer"],
-            model_states["executor"],
-            model_states["history_analyzer"],
-            model_states["analyzer"],
-            model_states["suggester"]
+            prompt_model_tab_state
         ]
     )
     advanced_llm_tab.select(
         on_model_tab_select,
         [
-            simple_model_name_input,
-            advanced_optimizer_model_name_input,
-            advanced_executor_model_name_input,
-            expert_prompt_initial_developer_model_name_input,
-            expert_prompt_acceptance_criteria_model_name_input,
-            expert_prompt_developer_model_name_input,
-            expert_prompt_executor_model_name_input,
-            expert_output_history_analyzer_model_name_input,
-            expert_prompt_analyzer_model_name_input,
-            expert_prompt_suggester_model_name_input
         ],
         [
-            model_states["initial_developer"],
-            model_states["acceptance_criteria"],
-            model_states["developer"],
-            model_states["executor"],
-            model_states["history_analyzer"],
-            model_states["analyzer"],
-            model_states["suggester"]
+            prompt_model_tab_state
         ]
     )
     expert_llm_tab.select(
         on_model_tab_select,
         [
-            simple_model_name_input,
-            advanced_optimizer_model_name_input,
-            advanced_executor_model_name_input,
-            expert_prompt_initial_developer_model_name_input,
-            expert_prompt_acceptance_criteria_model_name_input,
-            expert_prompt_developer_model_name_input,
-            expert_prompt_executor_model_name_input,
-            expert_output_history_analyzer_model_name_input,
-            expert_prompt_analyzer_model_name_input,
-            expert_prompt_suggester_model_name_input
         ],
         [
-            model_states["initial_developer"],
-            model_states["acceptance_criteria"],
-            model_states["developer"],
-            model_states["executor"],
-            model_states["history_analyzer"],
-            model_states["analyzer"],
-            model_states["suggester"]
+            prompt_model_tab_state
         ]
     )
 
+    for item in [
+        prompt_model_tab_state,
+        simple_model_name_input,
+        advanced_optimizer_model_name_input,
+        advanced_executor_model_name_input,
+        expert_prompt_initial_developer_model_name_input,
+        expert_prompt_initial_developer_temperature_input,
+        expert_prompt_acceptance_criteria_model_name_input,
+        expert_prompt_acceptance_criteria_temperature_input,
+        expert_prompt_developer_model_name_input,
+        expert_prompt_developer_temperature_input,
+        expert_prompt_executor_model_name_input,
+        expert_prompt_executor_temperature_input,
+        expert_output_history_analyzer_model_name_input,
+        expert_output_history_analyzer_temperature_input,
+        expert_prompt_analyzer_model_name_input,
+        expert_prompt_analyzer_temperature_input,
+        expert_prompt_suggester_model_name_input,
+        expert_prompt_suggester_temperature_input,
+    ]:
+        item.change(
+            on_prompt_model_tab_state_change,
+            [
+                config_state,
+                prompt_model_tab_state,
+                simple_model_name_input,
+                advanced_optimizer_model_name_input,
+                advanced_executor_model_name_input,
+                expert_prompt_initial_developer_model_name_input,
+                expert_prompt_initial_developer_temperature_input,
+                expert_prompt_acceptance_criteria_model_name_input,
+                expert_prompt_acceptance_criteria_temperature_input,
+                expert_prompt_developer_model_name_input,
+                expert_prompt_developer_temperature_input,
+                expert_prompt_executor_model_name_input,
+                expert_prompt_executor_temperature_input,
+                expert_output_history_analyzer_model_name_input,
+                expert_output_history_analyzer_temperature_input,
+                expert_prompt_analyzer_model_name_input,
+                expert_prompt_analyzer_temperature_input,
+                expert_prompt_suggester_model_name_input,
+                expert_prompt_suggester_temperature_input
+            ],
+            [
+                model_name_states["initial_developer"],
+                model_temperature_states["initial_developer"],
+                model_name_states["acceptance_criteria"],
+                model_temperature_states["acceptance_criteria"],
+                model_name_states["developer"],
+                model_temperature_states["developer"],
+                model_name_states["executor"],
+                model_temperature_states["executor"],
+                model_name_states["history_analyzer"],
+                model_temperature_states["history_analyzer"],
+                model_name_states["analyzer"],
+                model_temperature_states["analyzer"],
+                model_name_states["suggester"],
+                model_temperature_states["suggester"]
+            ],
+        )
+
     generate_acceptance_criteria_button.click(
         generate_acceptance_criteria,
         inputs=[config_state, selected_example_input, selected_example_output,
-                model_states["acceptance_criteria"],
+                model_name_states["acceptance_criteria"],
+                model_temperature_states["acceptance_criteria"],
                 prompt_template_group],
         outputs=[acceptance_criteria_input, logs_chatbot]
     )
     generate_initial_system_message_button.click(
         generate_initial_system_message,
         inputs=[config_state, selected_example_input, selected_example_output,
-                model_states["initial_developer"],
+                model_name_states["initial_developer"],
+                model_temperature_states["initial_developer"],
                 prompt_template_group],
         outputs=[initial_system_message_input, logs_chatbot]
     )
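One pattern in this hunk is worth isolating: the Submit button starts disabled (see the earlier `interactive=False` change) and is re-enabled only once both example fields are non-empty, with a boolean `gr.State` mediating between the textboxes and the button. A self-contained sketch of that gating:

```python
import gradio as gr

with gr.Blocks() as demo:
    example_input = gr.Textbox(label="Example Input")
    example_output = gr.Textbox(label="Example Output")
    submit = gr.Button("Submit", variant="primary", interactive=False)
    ready = gr.State(False)

    # Recompute readiness whenever either textbox changes.
    for box in [example_input, example_output]:
        box.change(
            fn=lambda x, y: all(v is not None and v != "" for v in [x, y]),
            inputs=[example_input, example_output],
            outputs=[ready],
        )

    # Propagate the boolean state to the button's interactivity.
    ready.change(
        fn=lambda r: gr.update(interactive=r),
        inputs=[ready],
        outputs=[submit],
    )
```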
@@ -743,7 +788,8 @@ with gr.Blocks(title='Meta Prompt') as demo:
             config_state,
             initial_system_message_input,
             selected_example_input,
-            model_states["executor"],
+            model_name_states["executor"],
+            model_temperature_states["executor"]
         ],
         outputs=[output_output]
     )
@@ -753,7 +799,8 @@ with gr.Blocks(title='Meta Prompt') as demo:
             config_state,
             system_message_output,
             selected_example_input,
-            model_states["executor"],
+            model_name_states["executor"],
+            model_temperature_states["executor"]
         ],
         outputs=[output_output]
     )
@@ -778,13 +825,20 @@ with gr.Blocks(title='Meta Prompt') as demo:
             initial_system_message_input,
             recursion_limit_input,
             max_output_age,
-            model_states["initial_developer"],
-            model_states["acceptance_criteria"],
-            model_states["developer"],
-            model_states["executor"],
-            model_states["history_analyzer"],
-            model_states["analyzer"],
-            model_states["suggester"],
+            model_name_states["initial_developer"],
+            model_temperature_states["initial_developer"],
+            model_name_states["acceptance_criteria"],
+            model_temperature_states["acceptance_criteria"],
+            model_name_states["developer"],
+            model_temperature_states["developer"],
+            model_name_states["executor"],
+            model_temperature_states["executor"],
+            model_name_states["history_analyzer"],
+            model_temperature_states["history_analyzer"],
+            model_name_states["analyzer"],
+            model_temperature_states["analyzer"],
+            model_name_states["suggester"],
+            model_temperature_states["suggester"],
             prompt_template_group,
             aggressive_exploration
         ],
app/gradio_meta_prompt_utils.py
CHANGED
@@ -171,44 +171,72 @@ def chat_log_2_chatbot_list(chat_log: str) -> List[List[str]]:
             print(line)
     return chatbot_list
 
-def on_model_tab_select(event: gr.SelectData,
-        simple_model_name, advanced_optimizer_model_name, advanced_executor_model_name,
-        expert_prompt_initial_developer_model_name,
-        expert_prompt_acceptance_criteria_developer_model_name,
-        expert_prompt_developer_model_name,
-        expert_prompt_executor_model_name,
-        expert_prompt_history_analyzer_model_name,
-        expert_prompt_analyzer_model_name,
-        expert_prompt_suggester_model_name):
-    if event.value == 'Simple':
+def on_prompt_model_tab_state_change(config, model_tab_select_state,
+        simple_model_name, advanced_optimizer_model_name, advanced_executor_model_name,
+        expert_prompt_initial_developer_model_name,
+        expert_prompt_initial_developer_temperature,
+        expert_prompt_acceptance_criteria_developer_model_name,
+        expert_prompt_acceptance_criteria_temperature,
+        expert_prompt_developer_model_name,
+        expert_prompt_developer_temperature,
+        expert_prompt_executor_model_name,
+        expert_prompt_executor_temperature,
+        expert_prompt_history_analyzer_model_name,
+        expert_prompt_history_analyzer_temperature,
+        expert_prompt_analyzer_model_name,
+        expert_prompt_analyzer_temperature,
+        expert_prompt_suggester_model_name,
+        expert_prompt_suggester_temperature):
+    if model_tab_select_state == 'Simple':
         return simple_model_name, \
+            config.default_llm_temperature, \
             simple_model_name, \
+            config.default_llm_temperature, \
             simple_model_name, \
+            config.default_llm_temperature, \
             simple_model_name, \
+            config.default_llm_temperature, \
             simple_model_name, \
+            config.default_llm_temperature, \
             simple_model_name, \
-            simple_model_name
-    elif event.value == 'Advanced':
+            config.default_llm_temperature, \
+            simple_model_name, \
+            config.default_llm_temperature
+    elif model_tab_select_state == 'Advanced':
         return advanced_optimizer_model_name, \
+            config.default_llm_temperature, \
             advanced_optimizer_model_name, \
+            config.default_llm_temperature, \
             advanced_optimizer_model_name, \
+            config.default_llm_temperature, \
             advanced_executor_model_name, \
+            config.default_llm_temperature, \
             advanced_optimizer_model_name, \
+            config.default_llm_temperature, \
             advanced_optimizer_model_name, \
-            advanced_optimizer_model_name
-    elif event.value == 'Expert':
+            config.default_llm_temperature
+    elif model_tab_select_state == 'Expert':
         return expert_prompt_initial_developer_model_name, \
+            expert_prompt_initial_developer_temperature, \
             expert_prompt_acceptance_criteria_developer_model_name, \
+            expert_prompt_acceptance_criteria_temperature, \
             expert_prompt_developer_model_name, \
+            expert_prompt_developer_temperature, \
             expert_prompt_executor_model_name, \
+            expert_prompt_executor_temperature, \
             expert_prompt_history_analyzer_model_name, \
+            expert_prompt_history_analyzer_temperature, \
             expert_prompt_analyzer_model_name, \
-            expert_prompt_suggester_model_name
+            expert_prompt_analyzer_temperature, \
+            expert_prompt_suggester_model_name, \
+            expert_prompt_suggester_temperature
     else:
-        raise ValueError(f"Invalid model tab selected: {event.value}")
-
-def evaluate_system_message(config, system_message, user_message, executor_model_name):
+        raise ValueError(f"Invalid model tab selected: {model_tab_select_state}")
+
+def on_model_tab_select(event: gr.SelectData):
+    return event.value
+
+def evaluate_system_message(config, system_message, user_message, executor_model_name, executor_temperature):
     """
     Evaluate a system message by using it to generate a response from an
     executor model based on the current active tab and provided user message.
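The refactor shrinks `on_model_tab_select` to a one-liner: Gradio passes a `gr.SelectData` event whose `value` is the selected tab's label, and the handler simply stores that label in state; the heavy fan-out to per-role states moves to `on_prompt_model_tab_state_change`. A standalone sketch of the tab-selection mechanism (tab names illustrative):

```python
import gradio as gr

def on_tab_select(event: gr.SelectData) -> str:
    # For a gr.Tab, event.value is the tab's label, e.g. "Simple".
    return event.value

with gr.Blocks() as demo:
    active_tab = gr.State("Simple")
    with gr.Tabs():
        with gr.Tab("Simple") as simple_tab:
            gr.Markdown("Simple settings")
        with gr.Tab("Expert") as expert_tab:
            gr.Markdown("Expert settings")
    # Every tab routes through the same tiny handler into one state.
    for tab in [simple_tab, expert_tab]:
        tab.select(on_tab_select, [], [active_tab])
```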
@@ -236,7 +264,7 @@ def evaluate_system_message(config, system_message, user_message, executor_model
         Exception: For any other unexpected errors that occur during the
             execution of this function.
     """
-    llm = initialize_llm(config, executor_model_name)
+    llm = initialize_llm(config, executor_model_name, {'temperature': executor_temperature})
     template = ChatPromptTemplate.from_messages([
         ("system", "{system_message}"),
         ("human", "{user_message}")
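`initialize_llm` is the repo's own helper and its body is not part of this diff; judging from the call sites, it takes the app config, a model-name key into `config.llms`, and an optional dict of per-call overrides such as `temperature` and `max_retries`. A plausible sketch under those assumptions only (the `ChatOpenAI` provider and the kwargs mapping are guesses, not confirmed by the diff):

```python
from typing import Optional
from langchain_openai import ChatOpenAI  # assumed provider; the app may route elsewhere

def initialize_llm(config, model_name: str, overrides: Optional[dict] = None):
    """Sketch: build a chat model from config, letting call-site overrides win."""
    settings = dict(config.llms[model_name])  # assumes config.llms maps name -> kwargs
    if overrides:
        settings.update(overrides)  # e.g. {'temperature': 0.2, 'max_retries': 3}
    return ChatOpenAI(model=model_name, **settings)
```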
@@ -251,7 +279,7 @@ def evaluate_system_message(config, system_message, user_message, executor_model
         raise gr.Error(f"Error: {e}")
 
 
-def generate_acceptance_criteria(config, user_message, expected_output, acceptance_criteria_model_name, prompt_template_group):
+def generate_acceptance_criteria(config, user_message, expected_output, acceptance_criteria_model_name, acceptance_criteria_temperature, prompt_template_group):
     """
     Generate acceptance criteria based on the user message and expected output.
 
@@ -280,7 +308,7 @@ def generate_acceptance_criteria(config, user_message, expected_output, acceptan
     )
     logger.addHandler(log_handler)
 
-    llm = initialize_llm(config, acceptance_criteria_model_name)
+    llm = initialize_llm(config, acceptance_criteria_model_name, {'temperature': acceptance_criteria_temperature})
     if prompt_template_group is None:
         prompt_template_group = 'default'
     prompt_templates = prompt_templates_confz2langchain(
@@ -309,6 +337,7 @@ def generate_initial_system_message(
     user_message: str,
     expected_output: str,
     initial_developer_model_name: str,
+    initial_developer_temperature: float,
     prompt_template_group: Optional[str] = None
 ) -> tuple:
     """
@@ -335,7 +364,7 @@ def generate_initial_system_message(
     )
     logger.addHandler(log_handler)
 
-    llm = initialize_llm(config, initial_developer_model_name)
+    llm = initialize_llm(config, initial_developer_model_name, {'temperature': initial_developer_temperature})
 
     if prompt_template_group is None:
         prompt_template_group = 'default'
@@ -371,9 +400,13 @@ def process_message_with_models(
     config,
     user_message: str, expected_output: str, acceptance_criteria: str,
     initial_system_message: str, recursion_limit: int, max_output_age: int,
-    initial_developer_model_name: str,
-    acceptance_criteria_model_name: str, developer_model_name: str, executor_model_name: str,
-    history_analyzer_model_name: str, analyzer_model_name: str, suggester_model_name: str,
+    initial_developer_model_name: str, initial_developer_temperature: float,
+    acceptance_criteria_model_name: str, acceptance_criteria_temperature: float,
+    developer_model_name: str, developer_temperature: float,
+    executor_model_name: str, executor_temperature: float,
+    history_analyzer_model_name: str, history_analyzer_temperature: float,
+    analyzer_model_name: str, analyzer_temperature: float,
+    suggester_model_name: str, suggester_temperature: float,
     prompt_template_group: Optional[str] = None,
     aggressive_exploration: bool = False
 ) -> tuple:
@@ -423,13 +456,13 @@ def process_message_with_models(
         prompt_template_group = 'default'
     prompt_templates = prompt_templates_confz2langchain(config.prompt_templates[prompt_template_group])
     llms = {
-        NODE_PROMPT_INITIAL_DEVELOPER: initialize_llm(config, initial_developer_model_name),
-        NODE_ACCEPTANCE_CRITERIA_DEVELOPER: initialize_llm(config, acceptance_criteria_model_name),
-        NODE_PROMPT_DEVELOPER: initialize_llm(config, developer_model_name),
-        NODE_PROMPT_EXECUTOR: initialize_llm(config, executor_model_name),
-        NODE_OUTPUT_HISTORY_ANALYZER: initialize_llm(config, history_analyzer_model_name),
-        NODE_PROMPT_ANALYZER: initialize_llm(config, analyzer_model_name),
-        NODE_PROMPT_SUGGESTER: initialize_llm(config, suggester_model_name)
+        NODE_PROMPT_INITIAL_DEVELOPER: initialize_llm(config, initial_developer_model_name, {'temperature': initial_developer_temperature}),
+        NODE_ACCEPTANCE_CRITERIA_DEVELOPER: initialize_llm(config, acceptance_criteria_model_name, {'temperature': acceptance_criteria_temperature}),
+        NODE_PROMPT_DEVELOPER: initialize_llm(config, developer_model_name, {'temperature': developer_temperature}),
+        NODE_PROMPT_EXECUTOR: initialize_llm(config, executor_model_name, {'temperature': executor_temperature}),
+        NODE_OUTPUT_HISTORY_ANALYZER: initialize_llm(config, history_analyzer_model_name, {'temperature': history_analyzer_temperature}),
+        NODE_PROMPT_ANALYZER: initialize_llm(config, analyzer_model_name, {'temperature': analyzer_temperature}),
+        NODE_PROMPT_SUGGESTER: initialize_llm(config, suggester_model_name, {'temperature': suggester_temperature})
     }
     meta_prompt_graph = MetaPromptGraph(llms=llms, prompts=prompt_templates,
                                         aggressive_exploration=aggressive_exploration,
@@ -513,15 +546,14 @@ def convert_examples_to_json(examples):
     return pd_examples.to_json(orient="records")
 
 def process_json_data(
+    config,
     examples, model_name, generating_batch_size, temperature
 ):
     try:
         # Convert the gradio dataframe into a JSON array
         input_json = convert_examples_to_json(examples)
 
-        model = ChatOpenAI(
-            model=model_name, temperature=temperature, max_retries=3
-        )
+        model = initialize_llm(config, model_name, {'temperature': temperature, 'max_retries': 3})
         generator = TaskDescriptionGenerator(model)
         result = generator.process(input_json, generating_batch_size)
 
@@ -553,11 +585,11 @@ def process_json_data(
     except Exception as e:
         raise gr.Error(f"An error occurred: {str(e)}")
 
-def generate_description(examples, model_name, temperature):
+def generate_description(config, examples, model_name, temperature):
     try:
         input_json = convert_examples_to_json(examples)
 
-        model = ChatOpenAI(model=model_name, temperature=temperature, max_retries=3)
+        model = initialize_llm(config, model_name, {'temperature': temperature, 'max_retries': 3})
         generator = TaskDescriptionGenerator(model)
         result = generator.generate_description(input_json)
         description = result["description"]
@@ -566,9 +598,9 @@ def generate_description(examples, model_name, temperature):
     except Exception as e:
         raise gr.Error(f"An error occurred: {str(e)}")
 
-def analyze_input_data(description, model_name, temperature):
+def analyze_input_data(config, description, model_name, temperature):
     try:
-        model = ChatOpenAI(model=model_name, temperature=temperature, max_retries=3)
+        model = initialize_llm(config, model_name, {'temperature': temperature, 'max_retries': 3})
         generator = TaskDescriptionGenerator(model)
         input_analysis = generator.analyze_input(description)
         return input_analysis
@@ -576,12 +608,10 @@ def analyze_input_data(description, model_name, temperature):
         raise gr.Error(f"An error occurred: {str(e)}")
 
 def generate_example_briefs(
-    description, input_analysis, generating_batch_size, model_name, temperature
+    config, description, input_analysis, generating_batch_size, model_name, temperature
 ):
     try:
-        model = ChatOpenAI(
-            model=model_name, temperature=temperature, max_retries=3
-        )
+        model = initialize_llm(config, model_name, {'temperature': temperature, 'max_retries': 3})
         generator = TaskDescriptionGenerator(model)
         briefs = generator.generate_briefs(
             description, input_analysis, generating_batch_size
@@ -592,13 +622,11 @@ def generate_example_briefs(
 
 
 def generate_examples_using_briefs(
-    description, new_example_briefs, examples, generating_batch_size, model_name, temperature
+    config, description, new_example_briefs, examples, generating_batch_size, model_name, temperature
 ):
     try:
         input_json = convert_examples_to_json(examples)
-        model = ChatOpenAI(
-            model=model_name, temperature=temperature, max_retries=3
-        )
+        model = initialize_llm(config, model_name, {'temperature': temperature, 'max_retries': 3})
         generator = TaskDescriptionGenerator(model)
         result = generator.generate_examples_from_briefs(
             description, new_example_briefs, input_json, generating_batch_size
@@ -613,11 +641,11 @@ def generate_examples_using_briefs(
 
 
 def generate_examples_from_description(
-    description, raw_example, generating_batch_size, model_name, temperature
+    config, description, raw_example, generating_batch_size, model_name, temperature
 ):
     try:
         input_json = convert_examples_to_json(raw_example)
-        model = ChatOpenAI(model=model_name, temperature=temperature, max_retries=3)
+        model = initialize_llm(config, model_name, {'temperature': temperature, 'max_retries': 3})
         generator = TaskDescriptionGenerator(model)
         result = generator.generate_examples_directly(
             description, input_json, generating_batch_size
@@ -717,20 +745,20 @@ def input_dataframe_change(
         selected_group_output,
     )
 
-def generate_suggestions(description, examples, model_name, temperature):
+def generate_suggestions(config, description, examples, model_name, temperature):
     try:
         input_json = convert_examples_to_json(examples)
-        model = ChatOpenAI(model=model_name, temperature=temperature, max_retries=3)
+        model = initialize_llm(config, model_name, {'temperature': temperature, 'max_retries': 3})
         generator = TaskDescriptionGenerator(model)
         result = generator.generate_suggestions(input_json, description)
         return gr.update(choices=result["suggestions"])
     except Exception as e:
         raise gr.Error(f"An error occurred: {str(e)}")
 
-def apply_suggestions(description, suggestions, examples, model_name, temperature):
+def apply_suggestions(config, description, suggestions, examples, model_name, temperature):
     try:
         input_json = convert_examples_to_json(examples)
-        model = ChatOpenAI(model=model_name, temperature=temperature, max_retries=3)
+        model = initialize_llm(config, model_name, {'temperature': temperature, 'max_retries': 3})
         generator = TaskDescriptionGenerator(model)
         result = generator.update_description(input_json, description, suggestions)
         return result["description"]
|