baconnier committed on
Commit 8100125
1 Parent(s): 5a1ecf5

Update app.py

Files changed (1)
  1. app.py +129 -129
app.py CHANGED
@@ -150,6 +150,134 @@ class PromptRefiner:
 
 except Exception as e:
 return f"Error: {str(e)}"
+
+ class GradioInterface:
+ def __init__(self, prompt_refiner: PromptRefiner,custom_css):
+ self.prompt_refiner = prompt_refiner
+ custom_css = custom_css
+ with gr.Blocks(css=custom_css, theme=gr.themes.Default()) as self.interface:
+ with gr.Column(elem_classes=["container", "title-container"]):
+ gr.Markdown("# PROMPT++")
+ gr.Markdown("### Automating Prompt Engineering by Refining your Prompts")
+ gr.Markdown("Learn how to generate an improved version of your prompts.")
+
+ with gr.Column(elem_classes=["container", "input-container"]):
+ prompt_text = gr.Textbox(
+ label="Type your prompt (or let it empty to see metaprompt)",
+ # elem_classes="no-background",
+ #elem_classes="container2",
+ lines=5
+ )
+ meta_prompt_choice = gr.Radio(
+ ["star","done","physics","morphosis", "verse", "phor","bolism","math","arpe"],
+ label="Choose Meta Prompt",
+ value="star",
+ elem_classes=["no-background", "radio-group"]
+ # elem_classes=[ "radio-group"]
+ )
+ refine_button = gr.Button("Refine Prompt")
+
+ # Option 1: Put Examples here (before Meta Prompt explanation)
+ with gr.Row(elem_classes=["container2"]):
+ with gr.Accordion("Examples", open=False):
+ gr.Examples(
+ examples=[
+ ["Write a story on the end of prompt engineering replaced by an Ai specialized in refining prompts.", "star"],
+ ["Tell me about that guy who invented the light bulb", "physics"],
+ ["Explain the universe.", "star"],
+ ["What's the population of New York City and how tall is the Empire State Building and who was the first mayor?", "morphosis"],
+ ["List American presidents.", "verse"],
+ ["Explain why the experiment failed.", "morphosis"],
+ ["Is nuclear energy good?", "verse"],
+ ["How does a computer work?", "phor"],
+ ["How to make money fast?", "done"],
+ ["how can you prove IT0's lemma in stochastic calculus ?", "arpe"],
+ ],
+ inputs=[prompt_text, meta_prompt_choice]
+ )
+
+ with gr.Accordion("Meta Prompt explanation", open=False):
+ gr.Markdown(explanation_markdown)
+
+
+
+ # Option 2: Or put Examples here (after the button)
+ # with gr.Accordion("Examples", open=False):
+ # gr.Examples(...)
+
+ with gr.Column(elem_classes=["container", "analysis-container"]):
+ gr.Markdown(' ')
+ gr.Markdown("### Initial prompt analysis")
+ analysis_evaluation = gr.Markdown()
+ gr.Markdown("### Refined Prompt")
+ refined_prompt = gr.Textbox(
+ label="Refined Prompt",
+ interactive=True,
+ show_label=True, # Must be True for copy button to show
+ show_copy_button=True, # Adds the copy button
+ # elem_classes="no-background"
+ )
+ gr.Markdown("### Explanation of Refinements")
+ explanation_of_refinements = gr.Markdown()
+
+
+ with gr.Column(elem_classes=["container", "model-container"]):
+ # gr.Markdown("## See MetaPrompt Impact")
+ with gr.Row():
+ apply_model = gr.Dropdown(models,
+ value="meta-llama/Llama-3.1-8B-Instruct",
+ label="Choose the Model",
+ container=False, # This removes the container around the dropdown
+ scale=1, # Controls the width relative to other components
+ min_width=300 # Sets minimum width in pixels
+ # elem_classes="no-background"
+ )
+ apply_button = gr.Button("Apply MetaPrompt")
+
+ # with gr.Column(elem_classes=["container", "results-container"]):
+ gr.Markdown("### Prompts on choosen model")
+ with gr.Tabs():
+ with gr.TabItem("Original Prompt Output"):
+ original_output = gr.Markdown()
+ with gr.TabItem("Refined Prompt Output"):
+ refined_output = gr.Markdown()
+ with gr.Accordion("Full Response JSON", open=False, visible=True):
+ full_response_json = gr.JSON()
+
+ refine_button.click(
+ fn=self.refine_prompt,
+ inputs=[prompt_text, meta_prompt_choice],
+ outputs=[analysis_evaluation, refined_prompt, explanation_of_refinements, full_response_json]
+ )
+
+ apply_button.click(
+ fn=self.apply_prompts,
+ inputs=[prompt_text, refined_prompt, apply_model],
+ outputs=[original_output, refined_output]
+ )
+
+ def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
+ input_data = PromptInput(text=prompt, meta_prompt_choice=meta_prompt_choice)
+ # Since result is a tuple with 4 elements based on the return value of prompt_refiner.refine_prompt
+ initial_prompt_evaluation, refined_prompt, explanation_refinements, full_response = self.prompt_refiner.refine_prompt(input_data)
+
+ analysis_evaluation = f"\n\n{initial_prompt_evaluation}"
+ return (
+ analysis_evaluation,
+ refined_prompt,
+ explanation_refinements,
+ full_response
+ )
+
+ def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str):
+ original_output = self.prompt_refiner.apply_prompt(original_prompt, model)
+ refined_output = self.prompt_refiner.apply_prompt(refined_prompt, model)
+ return original_output, refined_output
+
+ def launch(self, share=False):
+ self.interface.launch(share=share)
+
+
 custom_css = """
 .container {
 border: 2px solid #2196F3;
@@ -323,134 +451,6 @@ custom_css = """
 border-radius: 8px !important;
 }
 """
- class GradioInterface:
- def __init__(self, prompt_refiner: PromptRefiner):
- self.prompt_refiner = prompt_refiner
- custom_css = custom_css
- with gr.Blocks(css=custom_css, theme=gr.themes.Default()) as self.interface:
- with gr.Column(elem_classes=["container", "title-container"]):
- gr.Markdown("# PROMPT++")
- gr.Markdown("### Automating Prompt Engineering by Refining your Prompts")
- gr.Markdown("Learn how to generate an improved version of your prompts.")
-
- with gr.Column(elem_classes=["container", "input-container"]):
- prompt_text = gr.Textbox(
- label="Type your prompt (or let it empty to see metaprompt)",
- # elem_classes="no-background",
- #elem_classes="container2",
- lines=5
- )
- meta_prompt_choice = gr.Radio(
- ["star","done","physics","morphosis", "verse", "phor","bolism","math","arpe"],
- label="Choose Meta Prompt",
- value="star",
- elem_classes=["no-background", "radio-group"]
- # elem_classes=[ "radio-group"]
- )
- refine_button = gr.Button("Refine Prompt")
-
- # Option 1: Put Examples here (before Meta Prompt explanation)
- with gr.Row(elem_classes=["container2"]):
- with gr.Accordion("Examples", open=False):
- gr.Examples(
- examples=[
- ["Write a story on the end of prompt engineering replaced by an Ai specialized in refining prompts.", "star"],
- ["Tell me about that guy who invented the light bulb", "physics"],
- ["Explain the universe.", "star"],
- ["What's the population of New York City and how tall is the Empire State Building and who was the first mayor?", "morphosis"],
- ["List American presidents.", "verse"],
- ["Explain why the experiment failed.", "morphosis"],
- ["Is nuclear energy good?", "verse"],
- ["How does a computer work?", "phor"],
- ["How to make money fast?", "done"],
- ["how can you prove IT0's lemma in stochastic calculus ?", "arpe"],
- ],
- inputs=[prompt_text, meta_prompt_choice]
- )
-
- with gr.Accordion("Meta Prompt explanation", open=False):
- gr.Markdown(explanation_markdown)
-
-
-
- # Option 2: Or put Examples here (after the button)
- # with gr.Accordion("Examples", open=False):
- # gr.Examples(...)
-
- with gr.Column(elem_classes=["container", "analysis-container"]):
- gr.Markdown(' ')
- gr.Markdown("### Initial prompt analysis")
- analysis_evaluation = gr.Markdown()
- gr.Markdown("### Refined Prompt")
- refined_prompt = gr.Textbox(
- label="Refined Prompt",
- interactive=True,
- show_label=True, # Must be True for copy button to show
- show_copy_button=True, # Adds the copy button
- # elem_classes="no-background"
- )
- gr.Markdown("### Explanation of Refinements")
- explanation_of_refinements = gr.Markdown()
-
-
- with gr.Column(elem_classes=["container", "model-container"]):
- # gr.Markdown("## See MetaPrompt Impact")
- with gr.Row():
- apply_model = gr.Dropdown(models,
- value="meta-llama/Llama-3.1-8B-Instruct",
- label="Choose the Model",
- container=False, # This removes the container around the dropdown
- scale=1, # Controls the width relative to other components
- min_width=300 # Sets minimum width in pixels
- # elem_classes="no-background"
- )
- apply_button = gr.Button("Apply MetaPrompt")
-
- # with gr.Column(elem_classes=["container", "results-container"]):
- gr.Markdown("### Prompts on choosen model")
- with gr.Tabs():
- with gr.TabItem("Original Prompt Output"):
- original_output = gr.Markdown()
- with gr.TabItem("Refined Prompt Output"):
- refined_output = gr.Markdown()
- with gr.Accordion("Full Response JSON", open=False, visible=True):
- full_response_json = gr.JSON()
-
- refine_button.click(
- fn=self.refine_prompt,
- inputs=[prompt_text, meta_prompt_choice],
- outputs=[analysis_evaluation, refined_prompt, explanation_of_refinements, full_response_json]
- )
-
- apply_button.click(
- fn=self.apply_prompts,
- inputs=[prompt_text, refined_prompt, apply_model],
- outputs=[original_output, refined_output]
- )
-
- def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
- input_data = PromptInput(text=prompt, meta_prompt_choice=meta_prompt_choice)
- # Since result is a tuple with 4 elements based on the return value of prompt_refiner.refine_prompt
- initial_prompt_evaluation, refined_prompt, explanation_refinements, full_response = self.prompt_refiner.refine_prompt(input_data)
-
- analysis_evaluation = f"\n\n{initial_prompt_evaluation}"
- return (
- analysis_evaluation,
- refined_prompt,
- explanation_refinements,
- full_response
- )
-
- def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str):
- original_output = self.prompt_refiner.apply_prompt(original_prompt, model)
- refined_output = self.prompt_refiner.apply_prompt(refined_prompt, model)
- return original_output, refined_output
-
- def launch(self, share=False):
- self.interface.launch(share=share)
-
-
-
 
 metaprompt_explanations = {
 "star": "Use ECHO when you need a comprehensive, multi-stage approach for complex prompts. It's ideal for tasks requiring in-depth analysis, exploration of multiple alternatives, and synthesis of ideas. Choose this over others when you have time for a thorough refinement process and need to consider various aspects of the prompt.",
@@ -508,5 +508,5 @@ if __name__ == '__main__':
 autoregressive_metaprompt = os.getenv('autoregressive_metaprompt')
 
 prompt_refiner = PromptRefiner(api_token)
- gradio_interface = GradioInterface(prompt_refiner)
+ gradio_interface = GradioInterface(prompt_refiner,custom_css)
 gradio_interface.launch(share=True)
 
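Taken together, the hunks above move the GradioInterface class ahead of the custom_css definition and change its constructor to accept the CSS string, which is then handed to gr.Blocks(css=custom_css, ...). A minimal sketch of the resulting wiring at the bottom of app.py (api_token, custom_css and PromptRefiner are defined earlier in the same file):

    prompt_refiner = PromptRefiner(api_token)
    # The CSS is now passed in explicitly and forwarded to gr.Blocks(css=...)
    gradio_interface = GradioInterface(prompt_refiner, custom_css)
    gradio_interface.launch(share=True)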