yaleh commited on
Commit
cac00c9
·
1 Parent(s): c8b5135

Merged Gradio Meta Prompt works.

Browse files
app/gradio_meta_prompt.py CHANGED
@@ -30,235 +30,625 @@ with gr.Blocks(title='Meta Prompt') as demo:
30
  gr.Markdown(f"""<h1 style='text-align: left; margin-bottom: 1rem'>Meta Prompt</h1>
31
  <p style="text-align:left">A tool for generating and analyzing natural language prompts using multiple language models.</p>
32
  <a href="https://github.com/yaleh/meta-prompt"><img src="https://img.shields.io/badge/GitHub-blue?logo=github" alt="GitHub"></a>""")
 
 
 
 
 
 
 
 
 
 
 
 
33
  with gr.Row():
34
- with gr.Column():
35
- user_message_input = gr.Textbox(
36
- label="User Message",
37
- show_copy_button=True
38
- )
39
- expected_output_input = gr.Textbox(
40
- label="Expected Output",
41
- show_copy_button=True
42
- )
43
- with gr.Accordion("Initial System Message & Acceptance Criteria", open=False):
 
 
44
 
45
- with gr.Group():
46
- initial_system_message_input = gr.Textbox(
47
- label="Initial System Message",
48
- show_copy_button=True,
49
- value=""
50
- )
51
- with gr.Row():
52
- evaluate_initial_system_message_button = gr.Button(
53
- value="Evaluate",
54
- variant="secondary"
55
- )
56
- generate_initial_system_message_button = gr.Button(
57
- value="Generate",
58
- variant="secondary"
59
- )
60
 
61
- with gr.Group():
62
- acceptance_criteria_input = gr.Textbox(
63
- label="Acceptance Criteria (Compared with Expected Output [EO])",
64
- show_copy_button=True
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
  )
66
- generate_acceptance_criteria_button = gr.Button(
67
- value="Generate",
68
- variant="secondary"
69
  )
 
70
 
71
- recursion_limit_input = gr.Number(
72
- label="Recursion Limit",
73
- value=config.recursion_limit,
74
- precision=0,
75
- minimum=1,
76
- maximum=config.recursion_limit_max,
77
- step=1
78
- )
79
- max_output_age = gr.Number(
80
- label="Max Output Age",
81
- value=config.max_output_age,
82
- precision=0,
83
- minimum=1,
84
- maximum=config.max_output_age_max,
85
- step=1
86
  )
87
- prompt_template_group = gr.Dropdown(
88
- label="Prompt Template Group",
89
- choices=list(config.prompt_templates.keys()),
90
- value=list(config.prompt_templates.keys())[0]
 
 
 
 
 
 
 
 
 
 
 
 
91
  )
92
- aggressive_exploration = gr.Checkbox(
93
- label="Aggressive Exploration",
94
- value=config.aggressive_exploration
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
95
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
96
  with gr.Row():
97
- with gr.Tabs() as llm_tabs:
98
- with gr.Tab('Simple') as simple_llm_tab:
99
- simple_model_name_input = gr.Dropdown(
100
- label="Model Name",
101
- choices=config.llms.keys(),
102
- value=list(config.llms.keys())[0],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
103
  )
104
- # Connect the inputs and outputs to the function
105
- with gr.Row():
106
- simple_submit_button = gr.Button(
107
- value="Submit", variant="primary")
108
- simple_clear_button = gr.ClearButton(
109
- [user_message_input, expected_output_input,
110
- acceptance_criteria_input, initial_system_message_input],
111
- value='Clear All')
112
- with gr.Tab('Advanced') as advanced_llm_tab:
113
- advanced_optimizer_model_name_input = gr.Dropdown(
114
- label="Optimizer Model Name",
115
- choices=config.llms.keys(),
116
- value=list(config.llms.keys())[0],
 
 
 
117
  )
118
- advanced_executor_model_name_input = gr.Dropdown(
119
- label="Executor Model Name",
120
- choices=config.llms.keys(),
121
- value=list(config.llms.keys())[0],
 
 
 
 
 
122
  )
123
- # Connect the inputs and outputs to the function
124
- with gr.Row():
125
- advanced_submit_button = gr.Button(
126
- value="Submit", variant="primary")
127
- advanced_clear_button = gr.ClearButton(
128
- components=[user_message_input, expected_output_input,
129
- acceptance_criteria_input, initial_system_message_input],
130
- value='Clear All')
131
- with gr.Tab('Expert') as expert_llm_tab:
132
- with gr.Row():
133
- expert_prompt_initial_developer_model_name_input = gr.Dropdown(
134
- label="Initial Developer Model Name",
135
- choices=config.llms.keys(),
136
- value=list(config.llms.keys())[0],
137
- )
138
- expert_prompt_initial_developer_temperature_input = gr.Number(
139
- label="Initial Developer Temperature", value=0.1,
140
- precision=1, minimum=0, maximum=1, step=0.1,
141
- interactive=True)
142
 
143
- with gr.Row():
144
- expert_prompt_acceptance_criteria_model_name_input = gr.Dropdown(
145
- label="Acceptance Criteria Model Name",
146
- choices=config.llms.keys(),
147
- value=list(config.llms.keys())[0],
148
- )
149
- expert_prompt_acceptance_criteria_temperature_input = gr.Number(
150
- label="Acceptance Criteria Temperature", value=0.1,
151
- precision=1, minimum=0, maximum=1, step=0.1,
152
- interactive=True)
 
 
 
 
 
153
 
154
- with gr.Row():
155
- expert_prompt_developer_model_name_input = gr.Dropdown(
156
- label="Developer Model Name",
157
- choices=config.llms.keys(),
158
- value=list(config.llms.keys())[0],
159
- )
160
- expert_prompt_developer_temperature_input = gr.Number(
161
- label="Developer Temperature", value=0.1,
162
- precision=1, minimum=0, maximum=1, step=0.1,
163
- interactive=True)
 
 
 
 
164
 
165
- with gr.Row():
166
- expert_prompt_executor_model_name_input = gr.Dropdown(
167
- label="Executor Model Name",
168
- choices=config.llms.keys(),
169
- value=list(config.llms.keys())[0],
170
- )
171
- expert_prompt_executor_temperature_input = gr.Number(
172
- label="Executor Temperature", value=0.1,
173
- precision=1, minimum=0, maximum=1, step=0.1,
174
- interactive=True)
 
175
 
176
- with gr.Row():
177
- expert_output_history_analyzer_model_name_input = gr.Dropdown(
178
- label="History Analyzer Model Name",
179
- choices=config.llms.keys(),
180
- value=list(config.llms.keys())[0],
181
- )
182
- expert_output_history_analyzer_temperature_input = gr.Number(
183
- label="History Analyzer Temperature", value=0.1,
184
- precision=1, minimum=0, maximum=1, step=0.1,
185
- interactive=True)
186
 
187
- with gr.Row():
188
- expert_prompt_analyzer_model_name_input = gr.Dropdown(
189
- label="Analyzer Model Name",
190
- choices=config.llms.keys(),
191
- value=list(config.llms.keys())[0],
 
 
 
 
 
 
 
 
192
  )
193
- expert_prompt_analyzer_temperature_input = gr.Number(
194
- label="Analyzer Temperature", value=0.1,
195
- precision=1, minimum=0, maximum=1, step=0.1,
196
- interactive=True)
 
 
 
 
 
197
 
198
- with gr.Row():
199
- expert_prompt_suggester_model_name_input = gr.Dropdown(
200
- label="Suggester Model Name",
201
- choices=config.llms.keys(),
202
- value=list(config.llms.keys())[0],
 
 
 
203
  )
204
- expert_prompt_suggester_temperature_input = gr.Number(
205
- label="Suggester Temperature", value=0.1,
206
- precision=1, minimum=0, maximum=1, step=0.1,
207
- interactive=True)
208
 
209
- # Connect the inputs and outputs to the function
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
210
  with gr.Row():
211
- expert_submit_button = gr.Button(
212
- value="Submit", variant="primary")
213
- expert_clear_button = gr.ClearButton(
214
- components=[user_message_input, expected_output_input,
215
- acceptance_criteria_input, initial_system_message_input],
216
- value='Clear All')
217
- with gr.Column():
218
- with gr.Group():
219
- system_message_output = gr.Textbox(
220
- label="System Message", show_copy_button=True)
221
- with gr.Row():
222
- evaluate_system_message_button = gr.Button(
223
- value="Evaluate", variant="secondary")
224
- copy_to_initial_system_message_button = gr.Button(
225
- value="Copy to Initial System Message", variant="secondary")
226
- output_output = gr.Textbox(label="Output", show_copy_button=True)
227
- analysis_output = gr.Textbox(
228
- label="Analysis", show_copy_button=True)
229
- flag_button = gr.Button(
230
- value="Flag", variant="secondary", visible=config.allow_flagging)
231
- with gr.Accordion("Details", open=False, visible=config.verbose):
232
- logs_chatbot = gr.Chatbot(
233
- label='Messages', show_copy_button=True, layout='bubble',
234
- bubble_full_width=False, render_markdown=False
235
- )
236
- clear_logs_button = gr.ClearButton(
237
- [logs_chatbot], value='Clear Logs')
238
 
239
- # Load examples
240
- examples = gr.Examples(config.examples_path, inputs=[
241
- user_message_input,
242
- expected_output_input,
243
- acceptance_criteria_input,
244
- initial_system_message_input,
245
- recursion_limit_input,
246
- simple_model_name_input
247
- ])
248
-
249
- model_states = {
250
- "initial_developer": gr.State(value=simple_model_name_input.value), # None | str
251
- "acceptance_criteria": gr.State(value=simple_model_name_input.value), # None | str
252
- "developer": gr.State(value=simple_model_name_input.value), # None | str
253
- "executor": gr.State(value=simple_model_name_input.value), # None | str
254
- "history_analyzer": gr.State(value=simple_model_name_input.value), # None | str
255
- "analyzer": gr.State(value=simple_model_name_input.value), # None | str
256
- "suggester": gr.State(value=simple_model_name_input.value) # None | str
257
- }
258
-
259
- config_state = gr.State(value=config)
260
-
261
- # set up event handlers
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
262
  simple_llm_tab.select(
263
  on_model_tab_select,
264
  [
@@ -334,14 +724,14 @@ with gr.Blocks(title='Meta Prompt') as demo:
334
 
335
  generate_acceptance_criteria_button.click(
336
  generate_acceptance_criteria,
337
- inputs=[config_state, user_message_input, expected_output_input,
338
  model_states["acceptance_criteria"],
339
  prompt_template_group],
340
  outputs=[acceptance_criteria_input, logs_chatbot]
341
  )
342
  generate_initial_system_message_button.click(
343
  generate_initial_system_message,
344
- inputs=[config_state, user_message_input, expected_output_input,
345
  model_states["initial_developer"],
346
  prompt_template_group],
347
  outputs=[initial_system_message_input, logs_chatbot]
@@ -352,7 +742,7 @@ with gr.Blocks(title='Meta Prompt') as demo:
352
  inputs=[
353
  config_state,
354
  initial_system_message_input,
355
- user_message_input,
356
  model_states["executor"]
357
  ],
358
  outputs=[output_output]
@@ -362,7 +752,7 @@ with gr.Blocks(title='Meta Prompt') as demo:
362
  inputs=[
363
  config_state,
364
  system_message_output,
365
- user_message_input,
366
  model_states["executor"]
367
  ],
368
  outputs=[output_output]
@@ -373,75 +763,17 @@ with gr.Blocks(title='Meta Prompt') as demo:
373
  outputs=[initial_system_message_input]
374
  )
375
 
376
- simple_clear_button.add([system_message_output, output_output,
377
- analysis_output, logs_chatbot])
378
- advanced_clear_button.add([system_message_output, output_output,
379
- analysis_output, logs_chatbot])
380
-
381
- simple_submit_button.click(
382
- process_message_with_models,
383
- inputs=[
384
- config_state,
385
- user_message_input,
386
- expected_output_input,
387
- acceptance_criteria_input,
388
- initial_system_message_input,
389
- recursion_limit_input,
390
- max_output_age,
391
- model_states["initial_developer"],
392
- model_states["acceptance_criteria"],
393
- model_states["developer"],
394
- model_states["executor"],
395
- model_states["history_analyzer"],
396
- model_states["analyzer"],
397
- model_states["suggester"],
398
- prompt_template_group,
399
- aggressive_exploration
400
- ],
401
- outputs=[
402
- system_message_output,
403
- output_output,
404
- analysis_output,
405
- acceptance_criteria_input,
406
- logs_chatbot
407
- ]
408
- )
409
-
410
- advanced_submit_button.click(
411
- process_message_with_models,
412
- inputs=[
413
- config_state,
414
- user_message_input,
415
- expected_output_input,
416
- acceptance_criteria_input,
417
- initial_system_message_input,
418
- recursion_limit_input,
419
- max_output_age,
420
- model_states["initial_developer"],
421
- model_states["acceptance_criteria"],
422
- model_states["developer"],
423
- model_states["executor"],
424
- model_states["history_analyzer"],
425
- model_states["analyzer"],
426
- model_states["suggester"],
427
- prompt_template_group,
428
- aggressive_exploration
429
- ],
430
- outputs=[
431
- system_message_output,
432
- output_output,
433
- analysis_output,
434
- acceptance_criteria_input,
435
- logs_chatbot
436
- ]
437
- )
438
 
439
- expert_submit_button.click(
440
  process_message_with_models,
441
  inputs=[
442
  config_state,
443
- user_message_input,
444
- expected_output_input,
445
  acceptance_criteria_input,
446
  initial_system_message_input,
447
  recursion_limit_input,
@@ -466,8 +798,8 @@ with gr.Blocks(title='Meta Prompt') as demo:
466
  )
467
 
468
  flagging_inputs = [
469
- user_message_input,
470
- expected_output_input,
471
  acceptance_criteria_input,
472
  initial_system_message_input
473
  ]
 
30
  gr.Markdown(f"""<h1 style='text-align: left; margin-bottom: 1rem'>Meta Prompt</h1>
31
  <p style="text-align:left">A tool for generating and analyzing natural language prompts using multiple language models.</p>
32
  <a href="https://github.com/yaleh/meta-prompt"><img src="https://img.shields.io/badge/GitHub-blue?logo=github" alt="GitHub"></a>""")
33
+
34
+ input_dataframe = gr.DataFrame(
35
+ label="Input Examples",
36
+ headers=["Input", "Output"],
37
+ datatype=["str", "str"],
38
+ column_widths=["50%", "50%"],
39
+ row_count=(1, "dynamic"),
40
+ col_count=(2, "fixed"),
41
+ interactive=False,
42
+ wrap=True
43
+ )
44
+
45
  with gr.Row():
46
+ selected_example_input = gr.Textbox(
47
+ label="Selected Example Input",
48
+ lines=2,
49
+ show_copy_button=True,
50
+ value="",
51
+ )
52
+ selected_example_output = gr.Textbox(
53
+ label="Selected Example Output",
54
+ lines=2,
55
+ show_copy_button=True,
56
+ value="",
57
+ )
58
 
59
+ selected_group_mode = gr.State(None) # None, "update", "append"
60
+ selected_group_index = gr.State(None) # None, int
61
+ selected_group_input = gr.State("")
62
+ selected_group_output = gr.State("")
 
 
 
 
 
 
 
 
 
 
 
63
 
64
+ selected_group_input.change(
65
+ fn=lambda x: x,
66
+ inputs=[selected_group_input],
67
+ outputs=[selected_example_input],
68
+ )
69
+ selected_group_output.change(
70
+ fn=lambda x: x,
71
+ inputs=[selected_group_output],
72
+ outputs=[selected_example_output],
73
+ )
74
+
75
+ @gr.render(
76
+ inputs=[
77
+ selected_group_mode,
78
+ selected_group_index,
79
+ selected_group_input,
80
+ selected_group_output,
81
+ ],
82
+ triggers=[selected_group_mode.change],
83
+ )
84
+ def selected_group(mode, index, input, output):
85
+ if mode is None:
86
+ return
87
+ with gr.Group():
88
+ if mode == "update":
89
+ with gr.Row():
90
+ selected_row_index = gr.Number(
91
+ label="Selected Row Index", value=index, precision=0, interactive=False
92
+ )
93
+ delete_row_button = gr.Button(
94
+ "Delete Selected Row", variant="secondary"
95
  )
96
+ with gr.Row():
97
+ update_row_button = gr.Button(
98
+ "Update Selected Row", variant="secondary"
99
  )
100
+ close_button = gr.Button("Close", variant="secondary")
101
 
102
+ delete_row_button.click(
103
+ fn=delete_selected_dataframe_row,
104
+ inputs=[selected_row_index, input_dataframe],
105
+ outputs=[
106
+ input_dataframe,
107
+ selected_group_mode,
108
+ selected_group_index,
109
+ selected_group_input,
110
+ selected_group_output,
111
+ ],
 
 
 
 
 
112
  )
113
+
114
+ update_row_button.click(
115
+ fn=update_selected_dataframe_row,
116
+ inputs=[
117
+ selected_example_input,
118
+ selected_example_output,
119
+ selected_row_index,
120
+ input_dataframe,
121
+ ],
122
+ outputs=[
123
+ input_dataframe,
124
+ selected_group_mode,
125
+ selected_group_index,
126
+ selected_group_input,
127
+ selected_group_output,
128
+ ],
129
  )
130
+ elif mode == "append":
131
+ with gr.Row():
132
+ append_example_button = gr.Button(
133
+ "Append to Input Examples", variant="secondary"
134
+ )
135
+ close_button = gr.Button("Close", variant="secondary")
136
+
137
+ append_example_button.click(
138
+ fn=append_example_to_input_dataframe,
139
+ inputs=[
140
+ selected_example_input,
141
+ selected_example_output,
142
+ input_dataframe,
143
+ ],
144
+ outputs=[
145
+ input_dataframe,
146
+ selected_group_mode,
147
+ selected_group_index,
148
+ selected_group_input,
149
+ selected_group_output,
150
+ ],
151
  )
152
+
153
+ close_button.click(
154
+ fn=lambda: None,
155
+ inputs=[],
156
+ outputs=[selected_group_mode],
157
+ )
158
+
159
+ with gr.Accordion("Import/Export JSON", open=False):
160
+ json_file_object = gr.File(
161
+ label="Import/Export JSON", file_types=[".json"], type="filepath"
162
+ )
163
+ export_button = gr.Button("Export to JSON")
164
+
165
+ with gr.Tabs() as tabs:
166
+
167
+ with gr.Tab("Scope"):
168
+
169
  with gr.Row():
170
+ submit_button = gr.Button("Generate", variant="primary")
171
+ scope_clear_button = gr.ClearButton(
172
+ [
173
+ input_dataframe
174
+ ],
175
+ value="Clear All"
176
+ )
177
+
178
+ examples_output_dataframe = gr.DataFrame(
179
+ # label="Examples",
180
+ headers=["Input", "Output"],
181
+ interactive=False,
182
+ datatype=["str", "str"],
183
+ column_widths=["50%", "50%"],
184
+ row_count=(1, "dynamic"),
185
+ col_count=(2, "fixed"),
186
+ wrap=True
187
+ )
188
+
189
+ with gr.Accordion("Model Settings", open=False):
190
+ model_name = gr.Dropdown(
191
+ label="Model Name",
192
+ choices=[
193
+ "llama3-70b-8192",
194
+ "llama3-8b-8192",
195
+ "llama-3.1-70b-versatile",
196
+ "llama-3.1-8b-instant",
197
+ "gemma2-9b-it",
198
+ ],
199
+ value="llama3-70b-8192",
200
+ )
201
+ temperature = gr.Slider(
202
+ label="Temperature", value=1.0, minimum=0.0, maximum=1.0, step=0.1
203
+ )
204
+ generating_batch_size = gr.Slider(
205
+ label="Generating Batch Size", value=3, minimum=1, maximum=10, step=1
206
+ )
207
+
208
+ with gr.Accordion("Analysis", open=False):
209
+ with gr.Row():
210
+ with gr.Column():
211
+ generate_description_button = gr.Button(
212
+ "Generate Description", variant="secondary"
213
  )
214
+ description_output = gr.Textbox(
215
+ label="Description", lines=5, show_copy_button=True
216
+ )
217
+ with gr.Column():
218
+ # Suggestions components
219
+ generate_suggestions_button = gr.Button(
220
+ "Generate Suggestions", variant="secondary")
221
+ suggestions_output = gr.Dropdown(
222
+ label="Suggestions", choices=[], multiselect=True, allow_custom_value=True)
223
+ apply_suggestions_button = gr.Button(
224
+ "Apply Suggestions", variant="secondary")
225
+
226
+ with gr.Row():
227
+ with gr.Column():
228
+ analyze_input_button = gr.Button(
229
+ "Analyze Input", variant="secondary"
230
  )
231
+ input_analysis_output = gr.Textbox(
232
+ label="Input Analysis", lines=5, show_copy_button=True
233
+ )
234
+ with gr.Column():
235
+ generate_briefs_button = gr.Button(
236
+ "Generate Briefs", variant="secondary"
237
+ )
238
+ example_briefs_output = gr.Textbox(
239
+ label="Example Briefs", lines=5, show_copy_button=True
240
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
241
 
242
+ with gr.Row():
243
+ with gr.Column():
244
+ generate_examples_directly_button = gr.Button(
245
+ "Generate Examples Directly", variant="secondary"
246
+ )
247
+ examples_directly_output_dataframe = gr.DataFrame(
248
+ label="Examples Directly",
249
+ headers=["Input", "Output"],
250
+ interactive=False,
251
+ datatype=["str", "str"],
252
+ column_widths=["50%", "50%"],
253
+ row_count=(1, "dynamic"),
254
+ col_count=(2, "fixed"),
255
+ wrap=True
256
+ )
257
 
258
+ with gr.Column():
259
+ generate_examples_from_briefs_button = gr.Button(
260
+ "Generate Examples from Briefs", variant="secondary"
261
+ )
262
+ examples_from_briefs_output_dataframe = gr.DataFrame(
263
+ label="Examples from Briefs",
264
+ headers=["Input", "Output"],
265
+ interactive=False,
266
+ datatype=["str", "str"],
267
+ column_widths=["50%", "50%"],
268
+ row_count=(1, "dynamic"),
269
+ col_count=(2, "fixed"),
270
+ wrap=True
271
+ )
272
 
273
+ scope_clear_button.add(
274
+ [
275
+ description_output,
276
+ suggestions_output,
277
+ examples_directly_output_dataframe,
278
+ input_analysis_output,
279
+ example_briefs_output,
280
+ examples_from_briefs_output_dataframe,
281
+ examples_output_dataframe
282
+ ]
283
+ )
284
 
285
+ with gr.Tab("Prompt"):
 
 
 
 
 
 
 
 
 
286
 
287
+ with gr.Row():
288
+ prompt_submit_button = gr.Button(value="Submit", variant="primary")
289
+ prompt_clear_button = gr.ClearButton(value='Clear All')
290
+
291
+ with gr.Row():
292
+ with gr.Column():
293
+ with gr.Accordion("Initial System Message & Acceptance Criteria", open=False):
294
+
295
+ with gr.Group():
296
+ initial_system_message_input = gr.Textbox(
297
+ label="Initial System Message",
298
+ show_copy_button=True,
299
+ value=""
300
  )
301
+ with gr.Row():
302
+ evaluate_initial_system_message_button = gr.Button(
303
+ value="Evaluate",
304
+ variant="secondary"
305
+ )
306
+ generate_initial_system_message_button = gr.Button(
307
+ value="Generate",
308
+ variant="secondary"
309
+ )
310
 
311
+ with gr.Group():
312
+ acceptance_criteria_input = gr.Textbox(
313
+ label="Acceptance Criteria (Compared with Expected Output [EO])",
314
+ show_copy_button=True
315
+ )
316
+ generate_acceptance_criteria_button = gr.Button(
317
+ value="Generate",
318
+ variant="secondary"
319
  )
 
 
 
 
320
 
321
+ recursion_limit_input = gr.Number(
322
+ label="Recursion Limit",
323
+ value=config.recursion_limit,
324
+ precision=0,
325
+ minimum=1,
326
+ maximum=config.recursion_limit_max,
327
+ step=1
328
+ )
329
+ max_output_age = gr.Number(
330
+ label="Max Output Age",
331
+ value=config.max_output_age,
332
+ precision=0,
333
+ minimum=1,
334
+ maximum=config.max_output_age_max,
335
+ step=1
336
+ )
337
+ prompt_template_group = gr.Dropdown(
338
+ label="Prompt Template Group",
339
+ choices=list(config.prompt_templates.keys()),
340
+ value=list(config.prompt_templates.keys())[0]
341
+ )
342
+ aggressive_exploration = gr.Checkbox(
343
+ label="Aggressive Exploration",
344
+ value=config.aggressive_exploration
345
+ )
346
+ with gr.Row():
347
+ with gr.Tabs() as llm_tabs:
348
+ with gr.Tab('Simple') as simple_llm_tab:
349
+ simple_model_name_input = gr.Dropdown(
350
+ label="Model Name",
351
+ choices=config.llms.keys(),
352
+ value=list(config.llms.keys())[0],
353
+ )
354
+ with gr.Tab('Advanced') as advanced_llm_tab:
355
+ advanced_optimizer_model_name_input = gr.Dropdown(
356
+ label="Optimizer Model Name",
357
+ choices=config.llms.keys(),
358
+ value=list(config.llms.keys())[0],
359
+ )
360
+ advanced_executor_model_name_input = gr.Dropdown(
361
+ label="Executor Model Name",
362
+ choices=config.llms.keys(),
363
+ value=list(config.llms.keys())[0],
364
+ )
365
+ with gr.Tab('Expert') as expert_llm_tab:
366
+ with gr.Row():
367
+ expert_prompt_initial_developer_model_name_input = gr.Dropdown(
368
+ label="Initial Developer Model Name",
369
+ choices=config.llms.keys(),
370
+ value=list(config.llms.keys())[0],
371
+ )
372
+ expert_prompt_initial_developer_temperature_input = gr.Number(
373
+ label="Initial Developer Temperature", value=0.1,
374
+ precision=1, minimum=0, maximum=1, step=0.1,
375
+ interactive=True)
376
+
377
+ with gr.Row():
378
+ expert_prompt_acceptance_criteria_model_name_input = gr.Dropdown(
379
+ label="Acceptance Criteria Model Name",
380
+ choices=config.llms.keys(),
381
+ value=list(config.llms.keys())[0],
382
+ )
383
+ expert_prompt_acceptance_criteria_temperature_input = gr.Number(
384
+ label="Acceptance Criteria Temperature", value=0.1,
385
+ precision=1, minimum=0, maximum=1, step=0.1,
386
+ interactive=True)
387
+
388
+ with gr.Row():
389
+ expert_prompt_developer_model_name_input = gr.Dropdown(
390
+ label="Developer Model Name",
391
+ choices=config.llms.keys(),
392
+ value=list(config.llms.keys())[0],
393
+ )
394
+ expert_prompt_developer_temperature_input = gr.Number(
395
+ label="Developer Temperature", value=0.1,
396
+ precision=1, minimum=0, maximum=1, step=0.1,
397
+ interactive=True)
398
+
399
+ with gr.Row():
400
+ expert_prompt_executor_model_name_input = gr.Dropdown(
401
+ label="Executor Model Name",
402
+ choices=config.llms.keys(),
403
+ value=list(config.llms.keys())[0],
404
+ )
405
+ expert_prompt_executor_temperature_input = gr.Number(
406
+ label="Executor Temperature", value=0.1,
407
+ precision=1, minimum=0, maximum=1, step=0.1,
408
+ interactive=True)
409
+
410
+ with gr.Row():
411
+ expert_output_history_analyzer_model_name_input = gr.Dropdown(
412
+ label="History Analyzer Model Name",
413
+ choices=config.llms.keys(),
414
+ value=list(config.llms.keys())[0],
415
+ )
416
+ expert_output_history_analyzer_temperature_input = gr.Number(
417
+ label="History Analyzer Temperature", value=0.1,
418
+ precision=1, minimum=0, maximum=1, step=0.1,
419
+ interactive=True)
420
+
421
+ with gr.Row():
422
+ expert_prompt_analyzer_model_name_input = gr.Dropdown(
423
+ label="Analyzer Model Name",
424
+ choices=config.llms.keys(),
425
+ value=list(config.llms.keys())[0],
426
+ )
427
+ expert_prompt_analyzer_temperature_input = gr.Number(
428
+ label="Analyzer Temperature", value=0.1,
429
+ precision=1, minimum=0, maximum=1, step=0.1,
430
+ interactive=True)
431
+
432
+ with gr.Row():
433
+ expert_prompt_suggester_model_name_input = gr.Dropdown(
434
+ label="Suggester Model Name",
435
+ choices=config.llms.keys(),
436
+ value=list(config.llms.keys())[0],
437
+ )
438
+ expert_prompt_suggester_temperature_input = gr.Number(
439
+ label="Suggester Temperature", value=0.1,
440
+ precision=1, minimum=0, maximum=1, step=0.1,
441
+ interactive=True)
442
+
443
+ with gr.Column():
444
+ with gr.Group():
445
+ system_message_output = gr.Textbox(
446
+ label="System Message", show_copy_button=True)
447
  with gr.Row():
448
+ evaluate_system_message_button = gr.Button(
449
+ value="Evaluate", variant="secondary")
450
+ copy_to_initial_system_message_button = gr.Button(
451
+ value="Copy to Initial System Message", variant="secondary")
452
+ output_output = gr.Textbox(
453
+ label="Output", show_copy_button=True)
454
+ analysis_output = gr.Textbox(
455
+ label="Analysis", show_copy_button=True)
456
+ flag_button = gr.Button(
457
+ value="Flag", variant="secondary", visible=config.allow_flagging)
458
+ with gr.Accordion("Details", open=False, visible=config.verbose):
459
+ logs_chatbot = gr.Chatbot(
460
+ label='Messages', show_copy_button=True, layout='bubble',
461
+ bubble_full_width=False, render_markdown=False
462
+ )
463
+ clear_logs_button = gr.ClearButton(
464
+ [logs_chatbot], value='Clear Logs')
 
 
 
 
 
 
 
 
 
 
465
 
466
+ # Load examples
467
+ examples = gr.Examples(config.examples_path, inputs=[
468
+ selected_example_input,
469
+ selected_example_output,
470
+ acceptance_criteria_input,
471
+ initial_system_message_input,
472
+ recursion_limit_input,
473
+ simple_model_name_input
474
+ ])
475
+
476
+ model_states = {
477
+ # None | str
478
+ "initial_developer": gr.State(value=simple_model_name_input.value),
479
+ # None | str
480
+ "acceptance_criteria": gr.State(value=simple_model_name_input.value),
481
+ # None | str
482
+ "developer": gr.State(value=simple_model_name_input.value),
483
+ # None | str
484
+ "executor": gr.State(value=simple_model_name_input.value),
485
+ # None | str
486
+ "history_analyzer": gr.State(value=simple_model_name_input.value),
487
+ # None | str
488
+ "analyzer": gr.State(value=simple_model_name_input.value),
489
+ # None | str
490
+ "suggester": gr.State(value=simple_model_name_input.value)
491
+ }
492
+
493
+ config_state = gr.State(value=config)
494
+
495
+ # set up event handlers for the scope tab
496
+
497
+ json_file_object.change(
498
+ fn=import_json_data,
499
+ inputs=[json_file_object, input_dataframe],
500
+ outputs=[input_dataframe],
501
+ )
502
+
503
+ export_button.click(
504
+ fn=export_json_data,
505
+ inputs=[input_dataframe],
506
+ outputs=[json_file_object],
507
+ )
508
+
509
+ submit_button.click(
510
+ fn=process_json_data,
511
+ inputs=[
512
+ input_dataframe,
513
+ model_name,
514
+ generating_batch_size,
515
+ temperature,
516
+ ],
517
+ outputs=[
518
+ description_output,
519
+ suggestions_output,
520
+ examples_directly_output_dataframe,
521
+ input_analysis_output,
522
+ example_briefs_output,
523
+ examples_from_briefs_output_dataframe,
524
+ examples_output_dataframe,
525
+ ],
526
+ )
527
+
528
+ generate_description_button.click(
529
+ fn=generate_description,
530
+ inputs=[input_dataframe, model_name, temperature],
531
+ outputs=[description_output, suggestions_output],
532
+ )
533
+
534
+ generate_examples_directly_button.click(
535
+ fn=generate_examples_from_description,
536
+ inputs=[
537
+ description_output,
538
+ input_dataframe,
539
+ generating_batch_size,
540
+ model_name,
541
+ temperature,
542
+ ],
543
+ outputs=[examples_directly_output_dataframe],
544
+ )
545
+
546
+ analyze_input_button.click(
547
+ fn=analyze_input_data,
548
+ inputs=[description_output, model_name, temperature],
549
+ outputs=[input_analysis_output],
550
+ )
551
+
552
+ generate_briefs_button.click(
553
+ fn=generate_example_briefs,
554
+ inputs=[
555
+ description_output,
556
+ input_analysis_output,
557
+ generating_batch_size,
558
+ model_name,
559
+ temperature,
560
+ ],
561
+ outputs=[example_briefs_output],
562
+ )
563
+
564
+ generate_examples_from_briefs_button.click(
565
+ fn=generate_examples_using_briefs,
566
+ inputs=[
567
+ description_output,
568
+ example_briefs_output,
569
+ input_dataframe,
570
+ generating_batch_size,
571
+ model_name,
572
+ temperature,
573
+ ],
574
+ outputs=[examples_from_briefs_output_dataframe],
575
+ )
576
+
577
+ input_dataframe.select(
578
+ fn=format_selected_input_example_dataframe,
579
+ inputs=[input_dataframe],
580
+ outputs=[
581
+ selected_group_mode,
582
+ selected_group_index,
583
+ selected_group_input,
584
+ selected_group_output,
585
+ ],
586
+ )
587
+
588
+ examples_directly_output_dataframe.select(
589
+ fn=format_selected_example,
590
+ inputs=[examples_directly_output_dataframe],
591
+ outputs=[
592
+ selected_group_mode,
593
+ selected_group_index,
594
+ selected_group_input,
595
+ selected_group_output,
596
+ ],
597
+ )
598
+
599
+ examples_from_briefs_output_dataframe.select(
600
+ fn=format_selected_example,
601
+ inputs=[examples_from_briefs_output_dataframe],
602
+ outputs=[
603
+ selected_group_mode,
604
+ selected_group_index,
605
+ selected_group_input,
606
+ selected_group_output,
607
+ ],
608
+ )
609
+
610
+ examples_output_dataframe.select(
611
+ fn=format_selected_example,
612
+ inputs=[examples_output_dataframe],
613
+ outputs=[
614
+ selected_group_mode,
615
+ selected_group_index,
616
+ selected_group_input,
617
+ selected_group_output,
618
+ ],
619
+ )
620
+
621
+ input_dataframe.change(
622
+ fn=input_dataframe_change,
623
+ inputs=[
624
+ input_dataframe,
625
+ selected_group_mode,
626
+ selected_group_index,
627
+ selected_group_input,
628
+ selected_group_output,
629
+ ],
630
+ outputs=[
631
+ selected_group_mode,
632
+ selected_group_index,
633
+ selected_group_input,
634
+ selected_group_output,
635
+ ],
636
+ )
637
+
638
+ generate_suggestions_button.click(
639
+ fn=generate_suggestions,
640
+ inputs=[description_output, input_dataframe, model_name, temperature],
641
+ outputs=[suggestions_output],
642
+ )
643
+
644
+ apply_suggestions_button.click(
645
+ fn=apply_suggestions,
646
+ inputs=[description_output, suggestions_output,
647
+ input_dataframe, model_name, temperature],
648
+ outputs=[description_output],
649
+ )
650
+
651
+ # set up event handlers for the prompt tab
652
  simple_llm_tab.select(
653
  on_model_tab_select,
654
  [
 
724
 
725
  generate_acceptance_criteria_button.click(
726
  generate_acceptance_criteria,
727
+ inputs=[config_state, selected_example_input, selected_example_output,
728
  model_states["acceptance_criteria"],
729
  prompt_template_group],
730
  outputs=[acceptance_criteria_input, logs_chatbot]
731
  )
732
  generate_initial_system_message_button.click(
733
  generate_initial_system_message,
734
+ inputs=[config_state, selected_example_input, selected_example_output,
735
  model_states["initial_developer"],
736
  prompt_template_group],
737
  outputs=[initial_system_message_input, logs_chatbot]
 
742
  inputs=[
743
  config_state,
744
  initial_system_message_input,
745
+ selected_example_input,
746
  model_states["executor"]
747
  ],
748
  outputs=[output_output]
 
752
  inputs=[
753
  config_state,
754
  system_message_output,
755
+ selected_example_input,
756
  model_states["executor"]
757
  ],
758
  outputs=[output_output]
 
763
  outputs=[initial_system_message_input]
764
  )
765
 
766
+ prompt_clear_button.add([selected_example_input, selected_example_output,
767
+ acceptance_criteria_input, initial_system_message_input,
768
+ system_message_output, output_output,
769
+ analysis_output, logs_chatbot])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
770
 
771
+ prompt_submit_button.click(
772
  process_message_with_models,
773
  inputs=[
774
  config_state,
775
+ selected_example_input,
776
+ selected_example_output,
777
  acceptance_criteria_input,
778
  initial_system_message_input,
779
  recursion_limit_input,
 
798
  )
799
 
800
  flagging_inputs = [
801
+ selected_example_input,
802
+ selected_example_output,
803
  acceptance_criteria_input,
804
  initial_system_message_input
805
  ]
app/gradio_meta_prompt_utils.py CHANGED
@@ -5,6 +5,9 @@ import logging
5
  from pathlib import Path
6
  import csv
7
  import io
 
 
 
8
 
9
  import gradio as gr
10
  from gradio import CSVLogger, utils
@@ -18,7 +21,7 @@ from pythonjsonlogger import jsonlogger
18
 
19
  from app.config import MetaPromptConfig, RoleMessage
20
  from meta_prompt import *
21
-
22
 
23
  def prompt_templates_confz2langchain(
24
  prompt_templates: Dict[str, Dict[str, List[RoleMessage]]]
@@ -502,3 +505,234 @@ def initialize_llm(config: MetaPromptConfig, model_name: str, model_config: Opti
502
 
503
  class FileConfig(BaseConfig):
504
  config_file: str = 'config.yml' # default path
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  from pathlib import Path
6
  import csv
7
  import io
8
+ import tempfile
9
+
10
+ import pandas as pd
11
 
12
  import gradio as gr
13
  from gradio import CSVLogger, utils
 
21
 
22
  from app.config import MetaPromptConfig, RoleMessage
23
  from meta_prompt import *
24
+ from meta_prompt.sample_generator import TaskDescriptionGenerator
25
 
26
  def prompt_templates_confz2langchain(
27
  prompt_templates: Dict[str, Dict[str, List[RoleMessage]]]
 
505
 
506
  class FileConfig(BaseConfig):
507
  config_file: str = 'config.yml' # default path
508
+
509
+
510
def convert_examples_to_json(examples):
    """Serialize the examples table to a JSON records string.

    Column names are lower-cased so downstream consumers see
    ``input``/``output`` keys regardless of UI display casing.
    """
    frame = pd.DataFrame(examples)
    frame.columns = frame.columns.str.lower()
    return frame.to_json(orient="records")
514
+
515
def process_json_data(
    examples, model_name, generating_batch_size, temperature
):
    """Run the full sample-generation pipeline over the examples table.

    Converts the gradio dataframe to a JSON records string, runs
    ``TaskDescriptionGenerator.process``, and unpacks the result into the
    tuple of values the UI components expect. Any failure is surfaced to
    the UI as a ``gr.Error``.
    """
    def to_pairs(records):
        # Flatten {"input": ..., "output": ...} dicts into [input, output] rows.
        return [[rec["input"], rec["output"]] for rec in records]

    try:
        input_json = convert_examples_to_json(examples)
        model = ChatOpenAI(
            model=model_name, temperature=temperature, max_retries=3
        )
        result = TaskDescriptionGenerator(model).process(
            input_json, generating_batch_size
        )

        from_briefs = result["examples_from_briefs"]
        return (
            result["description"],
            gr.update(choices=result.get("suggestions", []), value=[]),
            to_pairs(result["examples_directly"]["examples"]),
            from_briefs["input_analysis"],
            from_briefs["new_example_briefs"],
            to_pairs(from_briefs["examples"]),
            to_pairs(result["additional_examples"]),
        )
    except Exception as e:
        raise gr.Error(f"An error occurred: {str(e)}")
555
+
556
def generate_description(examples, model_name, temperature):
    """Generate a task description (plus suggestions) from the examples table."""
    try:
        input_json = convert_examples_to_json(examples)
        generator = TaskDescriptionGenerator(
            ChatOpenAI(model=model_name, temperature=temperature, max_retries=3)
        )
        result = generator.generate_description(input_json)
        # Reset the suggestions checkbox group to the fresh choices, none selected.
        return result["description"], gr.update(
            choices=result["suggestions"], value=[]
        )
    except Exception as e:
        raise gr.Error(f"An error occurred: {str(e)}")
568
+
569
def analyze_input_data(description, model_name, temperature):
    """Return the generator's analysis of the input space implied by *description*."""
    try:
        llm = ChatOpenAI(model=model_name, temperature=temperature, max_retries=3)
        return TaskDescriptionGenerator(llm).analyze_input(description)
    except Exception as e:
        raise gr.Error(f"An error occurred: {str(e)}")
577
+
578
def generate_example_briefs(
    description, input_analysis, generating_batch_size, model_name, temperature
):
    """Generate a batch of new example briefs from the description and input analysis."""
    try:
        llm = ChatOpenAI(
            model=model_name, temperature=temperature, max_retries=3
        )
        return TaskDescriptionGenerator(llm).generate_briefs(
            description, input_analysis, generating_batch_size
        )
    except Exception as e:
        raise gr.Error(f"An error occurred: {str(e)}")
592
+
593
+
594
def generate_examples_using_briefs(
    description, new_example_briefs, examples, generating_batch_size, model_name, temperature
):
    """Expand example briefs into concrete [input, output] rows for the UI table."""
    try:
        input_json = convert_examples_to_json(examples)
        llm = ChatOpenAI(
            model=model_name, temperature=temperature, max_retries=3
        )
        result = TaskDescriptionGenerator(llm).generate_examples_from_briefs(
            description, new_example_briefs, input_json, generating_batch_size
        )
        return [[rec["input"], rec["output"]] for rec in result["examples"]]
    except Exception as e:
        raise gr.Error(f"An error occurred: {str(e)}")
613
+
614
+
615
def generate_examples_from_description(
    description, raw_example, generating_batch_size, model_name, temperature
):
    """Generate [input, output] example rows directly from the task description."""
    try:
        input_json = convert_examples_to_json(raw_example)
        llm = ChatOpenAI(model=model_name, temperature=temperature, max_retries=3)
        result = TaskDescriptionGenerator(llm).generate_examples_directly(
            description, input_json, generating_batch_size
        )
        return [[rec["input"], rec["output"]] for rec in result["examples"]]
    except Exception as e:
        raise gr.Error(f"An error occurred: {str(e)}")
631
+
632
def format_selected_input_example_dataframe(evt: gr.SelectData, examples):
    """Map a click on the input dataframe to (mode, 1-based index, input, output).

    Returns all ``None`` when the clicked row is out of range.
    """
    row = evt.index[0]
    if row < len(examples):
        record = examples.iloc[row]
        # "update" mode tells downstream handlers this edits an existing row.
        return "update", row + 1, record.iloc[0], record.iloc[1]
    return None, None, None, None
637
+
638
def format_selected_example(evt: gr.SelectData, examples):
    """Map a click on a generated-examples table to an "append" selection tuple.

    Returns all ``None`` when the clicked row is out of range.
    """
    row = evt.index[0]
    if row < len(examples):
        record = examples.iloc[row]
        # "append" mode: selected row is to be added to the input dataframe.
        return "append", None, record.iloc[0], record.iloc[1]
    return None, None, None, None
648
+
649
def import_json_data(file, input_dataframe):
    """Load a JSON records file into a dataframe with Title-cased column names.

    Returns *input_dataframe* unchanged when no file was provided.
    """
    if file is None:
        return input_dataframe
    frame = pd.read_json(file.name)
    # e.g. "input" -> "Input" to match the UI table's display columns.
    frame.columns = frame.columns.str.title()
    return frame
656
+
657
def export_json_data(dataframe):
    """Serialize *dataframe* to a temporary JSON file and return its path.

    Column names are lower-cased in the output. Returns ``None`` for a
    missing or empty dataframe.
    """
    if dataframe is None or dataframe.empty:
        return None

    records = dataframe.copy()
    records.columns = records.columns.str.lower()
    payload = records.to_json(orient="records", indent=2)

    # delete=False so gradio can serve the file after this handler returns.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".json") as handle:
        handle.write(payload.encode("utf-8"))
        return handle.name
672
+
673
+
674
def append_example_to_input_dataframe(
    new_example_input, new_example_output, input_dataframe
):
    """Append one (input, output) row to the examples table.

    Returns the grown dataframe plus four ``None`` values that clear the
    row-selection widgets.
    """
    try:
        addition = pd.DataFrame(
            [[new_example_input, new_example_output]], columns=["Input", "Output"]
        )
        combined = pd.concat([input_dataframe, addition], ignore_index=True)
        return combined, None, None, None, None
    except KeyError:
        raise gr.Error("Invalid input or output")
685
+
686
+
687
def delete_selected_dataframe_row(row_index, input_dataframe):
    """Delete the selected row (1-based *row_index*) from the examples table.

    The original had the identical 5-tuple return duplicated in both the
    taken and fall-through branches; this collapses them into one exit.
    When *row_index* is ``None`` or not positive the dataframe is returned
    unchanged. The trailing four ``None`` values clear the selection widgets.
    """
    if row_index is not None and row_index > 0:
        # row_index is 1-based (0 means "no selection"); drop() uses the
        # default RangeIndex label, then reindex so labels stay contiguous.
        input_dataframe = input_dataframe.drop(index=row_index - 1).reset_index(
            drop=True
        )
    return input_dataframe, None, None, None, None
694
+
695
+
696
def update_selected_dataframe_row(
    selected_example_input, selected_example_output, selected_row_index, input_dataframe
):
    """Overwrite the selected row (1-based index) with the edited values.

    Returns the dataframe plus four ``None`` values that clear the
    row-selection widgets; a ``None``/non-positive index is a no-op.
    """
    if selected_row_index is not None and selected_row_index > 0:
        # selected_row_index is 1-based; iloc is 0-based.
        input_dataframe.iloc[selected_row_index - 1] = [
            selected_example_input,
            selected_example_output,
        ]
    return input_dataframe, None, None, None, None
706
+
707
+
708
def input_dataframe_change(
    input_dataframe, selected_group_mode, selected_group_index, selected_group_input, selected_group_output
):
    """Pass the selection state through, or clear it for tiny tables.

    When the examples table has at most one row the selection widgets are
    reset to ``None``; otherwise the incoming selection values are echoed
    back unchanged.
    """
    if len(input_dataframe) <= 1:
        return None, None, None, None
    return (
        selected_group_mode,
        selected_group_index,
        selected_group_input,
        selected_group_output,
    )
719
+
720
def generate_suggestions(description, examples, model_name, temperature):
    """Ask the generator for improvement suggestions for *description*."""
    try:
        input_json = convert_examples_to_json(examples)
        llm = ChatOpenAI(model=model_name, temperature=temperature, max_retries=3)
        result = TaskDescriptionGenerator(llm).generate_suggestions(
            input_json, description
        )
        return gr.update(choices=result["suggestions"])
    except Exception as e:
        raise gr.Error(f"An error occurred: {str(e)}")
729
+
730
def apply_suggestions(description, suggestions, examples, model_name, temperature):
    """Rewrite *description* with the selected *suggestions* applied."""
    try:
        input_json = convert_examples_to_json(examples)
        llm = ChatOpenAI(model=model_name, temperature=temperature, max_retries=3)
        result = TaskDescriptionGenerator(llm).update_description(
            input_json, description, suggestions
        )
        return result["description"]
    except Exception as e:
        raise gr.Error(f"An error occurred: {str(e)}")
app/gradio_sample_generator.py CHANGED
@@ -1,239 +1,6 @@
1
- import json
2
- import tempfile
3
  import gradio as gr
4
- import pandas as pd
5
- from langchain_openai import ChatOpenAI
6
- from meta_prompt.sample_generator import TaskDescriptionGenerator
7
-
8
- def convert_examples_to_json(examples):
9
- pd_examples = pd.DataFrame(examples)
10
- pd_examples.columns = pd_examples.columns.str.lower()
11
- return pd_examples.to_json(orient="records")
12
-
13
- def process_json_data(
14
- examples, model_name, generating_batch_size, temperature
15
- ):
16
- try:
17
- # Convert the gradio dataframe into a JSON array
18
- input_json = convert_examples_to_json(examples)
19
-
20
- model = ChatOpenAI(
21
- model=model_name, temperature=temperature, max_retries=3
22
- )
23
- generator = TaskDescriptionGenerator(model)
24
- result = generator.process(input_json, generating_batch_size)
25
-
26
- description = result["description"]
27
- examples_directly = [
28
- [example["input"], example["output"]]
29
- for example in result["examples_directly"]["examples"]
30
- ]
31
- input_analysis = result["examples_from_briefs"]["input_analysis"]
32
- new_example_briefs = result["examples_from_briefs"]["new_example_briefs"]
33
- examples_from_briefs = [
34
- [example["input"], example["output"]]
35
- for example in result["examples_from_briefs"]["examples"]
36
- ]
37
- examples = [
38
- [example["input"], example["output"]]
39
- for example in result["additional_examples"]
40
- ]
41
- suggestions = result.get("suggestions", [])
42
- return (
43
- description,
44
- gr.update(choices=suggestions, value=[]),
45
- examples_directly,
46
- input_analysis,
47
- new_example_briefs,
48
- examples_from_briefs,
49
- examples,
50
- )
51
- except Exception as e:
52
- raise gr.Error(f"An error occurred: {str(e)}")
53
-
54
- def generate_description(examples, model_name, temperature):
55
- try:
56
- input_json = convert_examples_to_json(examples)
57
-
58
- model = ChatOpenAI(model=model_name, temperature=temperature, max_retries=3)
59
- generator = TaskDescriptionGenerator(model)
60
- result = generator.generate_description(input_json)
61
- description = result["description"]
62
- suggestions = result["suggestions"]
63
- return description, gr.update(choices=suggestions, value=[])
64
- except Exception as e:
65
- raise gr.Error(f"An error occurred: {str(e)}")
66
-
67
- def analyze_input_data(description, model_name, temperature):
68
- try:
69
- model = ChatOpenAI(model=model_name, temperature=temperature, max_retries=3)
70
- generator = TaskDescriptionGenerator(model)
71
- input_analysis = generator.analyze_input(description)
72
- return input_analysis
73
- except Exception as e:
74
- raise gr.Error(f"An error occurred: {str(e)}")
75
-
76
- def generate_example_briefs(
77
- description, input_analysis, generating_batch_size, model_name, temperature
78
- ):
79
- try:
80
- model = ChatOpenAI(
81
- model=model_name, temperature=temperature, max_retries=3
82
- )
83
- generator = TaskDescriptionGenerator(model)
84
- briefs = generator.generate_briefs(
85
- description, input_analysis, generating_batch_size
86
- )
87
- return briefs
88
- except Exception as e:
89
- raise gr.Error(f"An error occurred: {str(e)}")
90
-
91
-
92
- def generate_examples_using_briefs(
93
- description, new_example_briefs, examples, generating_batch_size, model_name, temperature
94
- ):
95
- try:
96
- input_json = convert_examples_to_json(examples)
97
- model = ChatOpenAI(
98
- model=model_name, temperature=temperature, max_retries=3
99
- )
100
- generator = TaskDescriptionGenerator(model)
101
- result = generator.generate_examples_from_briefs(
102
- description, new_example_briefs, input_json, generating_batch_size
103
- )
104
- examples = [
105
- [example["input"], example["output"]]
106
- for example in result["examples"]
107
- ]
108
- return examples
109
- except Exception as e:
110
- raise gr.Error(f"An error occurred: {str(e)}")
111
-
112
-
113
- def generate_examples_from_description(
114
- description, raw_example, generating_batch_size, model_name, temperature
115
- ):
116
- try:
117
- input_json = convert_examples_to_json(raw_example)
118
- model = ChatOpenAI(model=model_name, temperature=temperature, max_retries=3)
119
- generator = TaskDescriptionGenerator(model)
120
- result = generator.generate_examples_directly(
121
- description, input_json, generating_batch_size
122
- )
123
- examples = [
124
- [example["input"], example["output"]] for example in result["examples"]
125
- ]
126
- return examples
127
- except Exception as e:
128
- raise gr.Error(f"An error occurred: {str(e)}")
129
-
130
- def format_selected_input_example_dataframe(evt: gr.SelectData, examples):
131
- if evt.index[0] < len(examples):
132
- selected_example = examples.iloc[evt.index[0]]
133
- return "update", evt.index[0]+1, selected_example.iloc[0], selected_example.iloc[1]
134
- return None, None, None, None
135
-
136
- def format_selected_example(evt: gr.SelectData, examples):
137
- if evt.index[0] < len(examples):
138
- selected_example = examples.iloc[evt.index[0]]
139
- return (
140
- "append",
141
- None,
142
- selected_example.iloc[0],
143
- selected_example.iloc[1],
144
- )
145
- return None, None, None, None
146
-
147
- def import_json_data(file, input_dataframe):
148
- if file is not None:
149
- df = pd.read_json(file.name)
150
- # Uppercase the first letter of each column name
151
- df.columns = df.columns.str.title()
152
- return df
153
- return input_dataframe
154
-
155
- def export_json_data(dataframe):
156
- if dataframe is not None and not dataframe.empty:
157
- # Copy the dataframe and lowercase the column names
158
- df_copy = dataframe.copy()
159
- df_copy.columns = df_copy.columns.str.lower()
160
-
161
- json_str = df_copy.to_json(orient="records", indent=2)
162
-
163
- # create a temporary file with the json string
164
- with tempfile.NamedTemporaryFile(delete=False, suffix=".json") as temp_file:
165
- temp_file.write(json_str.encode("utf-8"))
166
- temp_file_path = temp_file.name
167
-
168
- return temp_file_path
169
- return None
170
-
171
-
172
- def append_example_to_input_dataframe(
173
- new_example_input, new_example_output, input_dataframe
174
- ):
175
- try:
176
- new_row = pd.DataFrame(
177
- [[new_example_input, new_example_output]], columns=["Input", "Output"]
178
- )
179
- updated_df = pd.concat([input_dataframe, new_row], ignore_index=True)
180
- return updated_df, None, None, None, None
181
- except KeyError:
182
- raise gr.Error("Invalid input or output")
183
-
184
-
185
- def delete_selected_dataframe_row(row_index, input_dataframe):
186
- if row_index is not None and row_index > 0:
187
- input_dataframe = input_dataframe.drop(index=row_index - 1).reset_index(
188
- drop=True
189
- )
190
- return input_dataframe, None, None, None, None
191
- return input_dataframe, None, None, None, None
192
-
193
-
194
- def update_selected_dataframe_row(
195
- selected_example_input, selected_example_output, selected_row_index, input_dataframe
196
- ):
197
- if selected_row_index is not None and selected_row_index > 0:
198
- input_dataframe.iloc[selected_row_index - 1] = [
199
- selected_example_input,
200
- selected_example_output,
201
- ]
202
- return input_dataframe, None, None, None, None
203
- return input_dataframe, None, None, None, None
204
-
205
-
206
- def input_dataframe_change(
207
- input_dataframe, selected_group_mode, selected_group_index, selected_group_input, selected_group_output
208
- ):
209
- if len(input_dataframe) <= 1:
210
- return None, None, None, None
211
- return (
212
- selected_group_mode,
213
- selected_group_index,
214
- selected_group_input,
215
- selected_group_output,
216
- )
217
 
218
- def generate_suggestions(description, examples, model_name, temperature):
219
- try:
220
- input_json = convert_examples_to_json(examples)
221
- model = ChatOpenAI(model=model_name, temperature=temperature, max_retries=3)
222
- generator = TaskDescriptionGenerator(model)
223
- result = generator.generate_suggestions(input_json, description)
224
- return gr.update(choices=result["suggestions"])
225
- except Exception as e:
226
- raise gr.Error(f"An error occurred: {str(e)}")
227
-
228
- def apply_suggestions(description, suggestions, examples, model_name, temperature):
229
- try:
230
- input_json = convert_examples_to_json(examples)
231
- model = ChatOpenAI(model=model_name, temperature=temperature, max_retries=3)
232
- generator = TaskDescriptionGenerator(model)
233
- result = generator.update_description(input_json, description, suggestions)
234
- return result["description"]
235
- except Exception as e:
236
- raise gr.Error(f"An error occurred: {str(e)}")
237
 
238
  with gr.Blocks(title="Meta Prompt") as demo:
239
  gr.Markdown("# Scope")
 
 
 
1
  import gradio as gr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
+ from app.gradio_meta_prompt_utils import *
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
5
  with gr.Blocks(title="Meta Prompt") as demo:
6
  gr.Markdown("# Scope")