baconnier committed on
Commit
a5b0627
1 Parent(s): 9653254

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -18
app.py CHANGED
@@ -167,23 +167,7 @@ class GradioInterface:
167
  )
168
  with gr.Accordion("Meta Prompt explanation", open=False):
169
  gr.Markdown(explanation_markdown)
170
- with gr.Column(elem_classes=["container", "examples-container"]):
171
- with gr.Accordion("Examples", open=False):
172
- gr.Examples(
173
- examples=[
174
- ["Write a story on the end of prompt engineering replaced by an Ai specialized in refining prompts.", "star"],
175
- ["Tell me about that guy who invented the light bulb", "physics"],
176
- ["Explain the universe.", "star"],
177
- ["What's the population of New York City and how tall is the Empire State Building and who was the first mayor?", "morphosis"],
178
- ["List American presidents.", "verse"],
179
- ["Explain why the experiment failed.", "morphosis"],
180
- ["Is nuclear energy good?", "verse"],
181
- ["How does a computer work?", "phor"],
182
- ["How to make money fast?", "done"],
183
- ["how can you prove IT0's lemma in stochastic calculus ?", "arpe"],
184
- ],
185
- inputs=[prompt_text, meta_prompt_choice]
186
- )
187
 
188
  with gr.Column(elem_classes=["container", "analysis-container"]):
189
  gr.Markdown("### Initial prompt analysis")
@@ -238,7 +222,23 @@ class GradioInterface:
238
  inputs=[prompt_text, refined_prompt, apply_model],
239
  outputs=[original_output, refined_output]
240
  )
241
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
242
  def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
243
  input_data = PromptInput(text=prompt, meta_prompt_choice=meta_prompt_choice)
244
  result = self.prompt_refiner.refine_prompt(input_data)
 
167
  )
168
  with gr.Accordion("Meta Prompt explanation", open=False):
169
  gr.Markdown(explanation_markdown)
170
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
171
 
172
  with gr.Column(elem_classes=["container", "analysis-container"]):
173
  gr.Markdown("### Initial prompt analysis")
 
222
  inputs=[prompt_text, refined_prompt, apply_model],
223
  outputs=[original_output, refined_output]
224
  )
225
+ with gr.Column(elem_classes=["container", "examples-container"]):
226
+ with gr.Accordion("Examples", open=False):
227
+ gr.Examples(
228
+ examples=[
229
+ ["Write a story on the end of prompt engineering replaced by an Ai specialized in refining prompts.", "star"],
230
+ ["Tell me about that guy who invented the light bulb", "physics"],
231
+ ["Explain the universe.", "star"],
232
+ ["What's the population of New York City and how tall is the Empire State Building and who was the first mayor?", "morphosis"],
233
+ ["List American presidents.", "verse"],
234
+ ["Explain why the experiment failed.", "morphosis"],
235
+ ["Is nuclear energy good?", "verse"],
236
+ ["How does a computer work?", "phor"],
237
+ ["How to make money fast?", "done"],
238
+ ["how can you prove IT0's lemma in stochastic calculus ?", "arpe"],
239
+ ],
240
+ inputs=[prompt_text, meta_prompt_choice]
241
+ )
242
  def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
243
  input_data = PromptInput(text=prompt, meta_prompt_choice=meta_prompt_choice)
244
  result = self.prompt_refiner.refine_prompt(input_data)