baconnier committed
Commit 2819e15
1 Parent(s): 0bb3006

Update app.py

Files changed (1)
  1. app.py +485 -440
app.py CHANGED
@@ -1,462 +1,507 @@
  import os
  import json
- from typing import Optional, Dict, List
- from pydantic import BaseModel, Field, ValidationError
  from huggingface_hub import InferenceClient
- from huggingface_hub.errors import HfHubHTTPError
  import gradio as gr

- # Define Pydantic models for input and output
  class PromptInput(BaseModel):
-     text: str = Field(..., description="The initial prompt text")
-     meta_prompt_choice: str = Field(..., description="Choice of meta prompt strategy")
-
- class LLMRequest(BaseModel):
-     model: str
-     messages: List[Dict[str, str]]
-     max_tokens: int = 2000
-     temperature: float = 0.8
-
- class LLMResponseChoice(BaseModel):
-     message: Dict[str, str]
-
- class LLMResponse(BaseModel):
-     choices: List[LLMResponseChoice]

  class RefinementOutput(BaseModel):
-     initial_prompt_evaluation: Optional[str] = None
-     refined_prompt: Optional[str] = None
-     explanation_of_refinements: Optional[str] = None
-     response_content: Optional[str] = None

  class PromptRefiner:
-     def __init__(self, api_token: str):
-         self.client = InferenceClient(token=api_token, timeout=300)
-         self.meta_prompts = {
-             "morphosis": original_meta_prompt,
-             "verse": new_meta_prompt,
-             "physics": metaprompt1,
-             "bolism": loic_metaprompt,
-             "done": metadone,
-             "star": echo_prompt_refiner,
-             "math": math_meta_prompt,
-             "arpe": autoregressive_metaprompt
-         }
-
-     def refine_prompt(self, prompt_input: PromptInput) -> tuple:
-         try:
-             selected_meta_prompt = self.meta_prompts.get(prompt_input.meta_prompt_choice, advanced_meta_prompt)
-             messages = [
-                 {"role": "system", "content": 'You are an expert at refining and extending prompts. Given a basic prompt, provide a more detailed version.'},
-                 {"role": "user", "content": selected_meta_prompt.replace("[Insert initial prompt here]", prompt_input.text)}
-             ]
-
-             request_data = LLMRequest(
-                 model=prompt_refiner_model,
-                 messages=messages
-             )
-
-             response = self.client.chat_completion(**request_data.dict())
-             response_data = LLMResponse(**response)
-
-             # Extract the content from the response
-             response_content = response_data.choices[0].message['content'].strip()
-
-             # Parse the response content using Pydantic
-             result = self._parse_response_content(response_content)
-
-             return (
-                 result.initial_prompt_evaluation,
-                 result.refined_prompt,
-                 result.explanation_of_refinements,
-                 result.dict()
-             )
-
-         except (HfHubHTTPError, ValidationError) as e:
-             return (
-                 f"Error: {str(e)}",
-                 "The selected model is currently experiencing high traffic.",
-                 "The selected model is currently experiencing high traffic.",
-                 {}
-             )
-         except Exception as e:
-             return (
-                 f"Error: {str(e)}",
-                 "",
-                 "An unexpected error occurred.",
-                 {}
-             )
-
-     def _parse_response_content(self, response_content: str) -> RefinementOutput:
-         try:
-             # Assume the response content is a valid JSON string
-             response_dict = json.loads(response_content)
-             return RefinementOutput(**response_dict)
-         except (json.JSONDecodeError, ValidationError) as e:
-             print(f"Error parsing response: {e}")
-             print(f"Raw content: {response_content}")
-             return RefinementOutput(
-                 initial_prompt_evaluation="Error parsing response",
-                 explanation_of_refinements=str(e),
-                 response_content=response_content
-             )
-
-     def apply_prompt(self, prompt: str, model: str) -> str:
-         try:
-             messages = [
-                 {"role": "system", "content": "You are a helpful assistant. Answer in stylized version with latex format or markdown if relevant. Separate your answer into logical sections using level 2 headers (##) for sections and bolding (**) for subsections. Incorporate a variety of lists, headers, and text to make the answer visually appealing"},
-                 {"role": "user", "content": prompt}
-             ]
-
-             request_data = LLMRequest(
-                 model=model,
-                 messages=messages
-             )
-
-             response = self.client.chat_completion(**request_data.dict())
-             response_data = LLMResponse(**response)
-             output = response_data.choices[0].message['content'].strip()
-             return output.replace('\n\n', '\n').strip()
-
-         except Exception as e:
-             return f"Error: {str(e)}"

  class GradioInterface:
-     def __init__(self, prompt_refiner: PromptRefiner):
-         self.prompt_refiner = prompt_refiner
-         custom_css = """
-         .container {
-             border: 2px solid #2196F3;
-             border-radius: 10px;
-             padding: 12px;
-             margin: 6px;
-             background: white;
-             position: relative;
-             width: 100% !important;
-             max-width: 1200px !important;
-             margin: 0 auto 20px auto !important;
-         }
-
-         .container::before {
-             position: absolute;
-             top: -10px;
-             left: 20px;
-             background: white;
-             padding: 0 10px;
-             color: #2196F3;
-             font-weight: bold;
-             font-size: 1.2em;
-         }
-
-         .title-container {
-             width: fit-content !important;
-             margin: 0 auto !important;
-             padding: 2px 40px !important;
-             border: 1px solid #0066cc !important;
-             border-radius: 10px !important;
-             background-color: rgba(0, 102, 204, 0.05) !important;
-         }
-
-         .title-container * {
-             text-align: center;
-             margin: 0 !important;
-             line-height: 1.2 !important;
-         }
-
-         .title-container h1 {
-             font-size: 28px !important;
-             margin-bottom: 1px !important;
-         }
-
-         .title-container h3 {
-             font-size: 18px !important;
-             margin-bottom: 1px !important;
-         }
-
-         .title-container p {
-             font-size: 14px !important;
-             margin-bottom: 1px !important;
-         }
-
-         .input-container::before {
-             content: 'PROMPT REFINEMENT';
-         }
-
-         .analysis-container::before {
-             content: 'ANALYSIS';
-         }
-
-         .model-container::before {
-             content: 'MODEL APPLICATION';
-         }
-
-         .examples-container::before {
-             content: 'EXAMPLES';
-         }
-
-         /* Resizable textbox */
-         .input-container textarea {
-             resize: vertical !important;
-             min-height: 100px !important;
-             max-height: 500px !important;
-             width: 100% !important;
-             border: 1px solid #ddd !important;
-             border-radius: 4px !important;
-             padding: 8px !important;
-             transition: all 0.3s ease !important;
-         }
-
-         .input-container textarea:focus {
-             border-color: #2196F3 !important;
-             box-shadow: 0 0 0 2px rgba(33, 150, 243, 0.1) !important;
-         }
-
-         /* Radio group styling */
-         .radio-group {
-             background-color: rgba(0, 102, 204, 0.05) !important;
-             padding: 10px !important;
-             border-radius: 8px !important;
-             border: 1px solid rgba(0, 102, 204, 0.1) !important;
-             display: flex !important;
-             justify-content: center !important;
-             flex-wrap: wrap !important;
-             gap: 8px !important;
-             width: 100% !important;
-         }
-
-         .gradio-radio {
-             display: flex !important;
-             justify-content: center !important;
-             flex-wrap: wrap !important;
-             gap: 8px !important;
-         }
-
-         .gradio-radio label {
-             display: flex !important;
-             align-items: center !important;
-             padding: 6px 12px !important;
-             border: 1px solid #ddd !important;
-             border-radius: 4px !important;
-             cursor: pointer !important;
-             background: white !important;
-             margin: 4px !important;
-         }
-
-         .gradio-radio input[type="radio"]:checked + label {
-             background: rgba(0, 102, 204, 0.1) !important;
-             border-color: #0066cc !important;
-             color: #0066cc !important;
-             font-weight: bold !important;
-         }
-
-         /* Button styling */
-         .gradio-button {
-             background-color: white !important;
-             color: #2196F3 !important;
-             border: 2px solid #2196F3 !important;
-             border-radius: 4px !important;
-             padding: 8px 16px !important;
-             margin: 10px 0 !important;
-             font-weight: bold !important;
-             transition: all 0.3s ease !important;
-         }
-
-         .gradio-button:hover {
-             background-color: #2196F3 !important;
-             color: white !important;
-             box-shadow: 0 2px 5px rgba(33, 150, 243, 0.3) !important;
-         }
-
-         /* Accordion styling */
-         .gradio-accordion {
-             margin: 10px 0 !important;
-             border: none !important;
-         }
-
-         /* Container alignment */
-         .gradio-container {
-             display: flex !important;
-             flex-direction: column !important;
-             align-items: center !important;
-             width: 100% !important;
-             max-width: 1200px !important;
-             margin: 0 auto !important;
-         }
-
-         /* Dropdown styling */
-         .gradio-dropdown {
-             width: 100% !important;
-             max-width: 300px !important;
-         }
-
-         /* JSON container */
-         .full-response-json {
-             margin-top: 20px !important;
-             padding: 10px !important;
-             background-color: rgba(0, 102, 204, 0.05) !important;
-             border-radius: 8px !important;
-         }
-         """
-
-         with gr.Blocks(css=custom_css, theme=gr.themes.Default()) as self.interface:
-             with gr.Column(elem_classes=["container", "title-container"]):
-                 gr.Markdown("# PROMPT++")
-                 gr.Markdown("### Automating Prompt Engineering by Refining your Prompts")
-                 gr.Markdown("Learn how to generate an improved version of your prompts.")
-
-             with gr.Column(elem_classes=["container", "input-container"]):
-                 prompt_text = gr.Textbox(
-                     label="Type your prompt (or leave it empty to see the metaprompt)",
-                     lines=5
-                 )
-                 meta_prompt_choice = gr.Radio(
-                     ["star", "done", "physics", "morphosis", "verse", "phor", "bolism", "math", "arpe"],
-                     label="Choose Meta Prompt",
-                     value="star",
-                     elem_classes=["no-background", "radio-group"]
-                 )
-                 refine_button = gr.Button("Refine Prompt")
-
-             with gr.Row(elem_classes=["container2"]):
-                 with gr.Accordion("Examples", open=False):
-                     gr.Examples(
-                         examples=[
-                             ["Write a story on the end of prompt engineering replaced by an Ai specialized in refining prompts.", "star"],
-                             ["Tell me about that guy who invented the light bulb", "physics"],
-                             ["Explain the universe.", "star"],
-                             ["What's the population of New York City and how tall is the Empire State Building and who was the first mayor?", "morphosis"],
-                             ["List American presidents.", "verse"],
-                             ["Explain why the experiment failed.", "morphosis"],
-                             ["Is nuclear energy good?", "verse"],
-                             ["How does a computer work?", "phor"],
-                             ["How to make money fast?", "done"],
-                             ["how can you prove IT0's lemma in stochastic calculus ?", "arpe"],
-                         ],
-                         inputs=[prompt_text, meta_prompt_choice]
-                     )
-
-                 with gr.Accordion("Meta Prompt explanation", open=False):
-                     gr.Markdown(explanation_markdown)
-
-             with gr.Column(elem_classes=["container", "analysis-container"]):
-                 gr.Markdown(' ')
-                 gr.Markdown("### Initial prompt analysis")
-                 analysis_evaluation = gr.Markdown()
-                 gr.Markdown("### Refined Prompt")
-                 refined_prompt = gr.Textbox(
-                     label="Refined Prompt",
-                     interactive=True,
-                     show_label=True,
-                     show_copy_button=True
-                 )
-                 gr.Markdown("### Explanation of Refinements")
-                 explanation_of_refinements = gr.Markdown()
-
-             with gr.Column(elem_classes=["container", "model-container"]):
-                 with gr.Row():
-                     apply_model = gr.Dropdown(models,
                          value="meta-llama/Llama-3.1-8B-Instruct",
                          label="Choose the Model",
-                         container=False,
-                         scale=1,
-                         min_width=300
-                     )
-                     apply_button = gr.Button("Apply MetaPrompt")
-
-                 gr.Markdown("### Prompts on chosen model")
-                 with gr.Tabs():
-                     with gr.TabItem("Original Prompt Output"):
-                         original_output = gr.Markdown()
-                     with gr.TabItem("Refined Prompt Output"):
-                         refined_output = gr.Markdown()
-                 with gr.Accordion("Full Response JSON", open=False, visible=True):
-                     full_response_json = gr.JSON()
-
-             refine_button.click(
-                 fn=self.refine_prompt,
-                 inputs=[prompt_text, meta_prompt_choice],
-                 outputs=[analysis_evaluation, refined_prompt, explanation_of_refinements, full_response_json]
-             )
-
-             apply_button.click(
-                 fn=self.apply_prompts,
-                 inputs=[prompt_text, refined_prompt, apply_model],
-                 outputs=[original_output, refined_output]
-             )
-
-     def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
-         try:
-             input_data = PromptInput(text=prompt, meta_prompt_choice=meta_prompt_choice)
-             initial_prompt_evaluation, refined_prompt, explanation_refinements, full_response = self.prompt_refiner.refine_prompt(input_data)
-
-             analysis_evaluation = f"\n\n{initial_prompt_evaluation}"
-             return (
-                 analysis_evaluation,
-                 refined_prompt,
-                 explanation_refinements,
-                 full_response
-             )
-         except ValidationError as e:
-             return (
-                 f"Validation Error: {str(e)}",
-                 "",
-                 "",
-                 {}
-             )
-
-     def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str):
-         original_output = self.prompt_refiner.apply_prompt(original_prompt, model)
-         refined_output = self.prompt_refiner.apply_prompt(refined_prompt, model)
-         return original_output, refined_output
-
-     def launch(self, share=False):
-         self.interface.launch(share=share)

  metaprompt_explanations = {
-     "star": "Use ECHO when you need a comprehensive, multi-stage approach for complex prompts. It's ideal for tasks requiring in-depth analysis, exploration of multiple alternatives, and synthesis of ideas. Choose this over others when you have time for a thorough refinement process and need to consider various aspects of the prompt.",
-     "done": "Opt for this when you want a structured approach with emphasis on role-playing and advanced techniques. It's particularly useful for tasks that benefit from diverse perspectives and complex reasoning. Prefer this over 'physics' when you need a more detailed, step-by-step refinement process.",
-     "physics": "Select this when you need a balance between structure and advanced techniques, with a focus on role-playing. It's similar to 'done' but may be more suitable for scientific or technical prompts. Choose this over 'done' for a slightly less complex approach.",
-     "morphosis": "Use this simplified approach for straightforward prompts or when time is limited. It focuses on essential improvements without complex techniques. Prefer this over other methods when you need quick, clear refinements without extensive analysis.",
-     "verse": "Choose this method when you need to analyze and improve a prompt's strengths and weaknesses, with a focus on information flow. It's particularly useful for enhancing the logical structure of prompts. Use this over 'morphosis' when you need more depth but less complexity than 'star'.",
-     "phor": "Employ this advanced approach when you need to combine multiple prompt engineering techniques. It's ideal for complex tasks requiring both clarity and sophisticated prompting methods. Select this over 'star' when you want a more flexible, technique-focused approach.",
-     "bolism": "Utilize this method when working with autoregressive language models and when the task requires careful reasoning before conclusions. It's best for prompts that need detailed output formatting. Choose this over others when the prompt's structure and reasoning order are crucial."
  }

- models = [
-     "meta-llama/Meta-Llama-3-70B-Instruct",
-     "meta-llama/Meta-Llama-3-8B-Instruct",
-     "meta-llama/Llama-3.1-70B-Instruct",
-     "meta-llama/Llama-3.1-8B-Instruct",
-     "meta-llama/Llama-3.2-3B-Instruct",
-     "meta-llama/Llama-3.2-1B-Instruct",
-     "meta-llama/Llama-2-13b-chat-hf",
-     "meta-llama/Llama-2-7b-chat-hf",
-     "HuggingFaceH4/zephyr-7b-beta",
-     "HuggingFaceH4/zephyr-7b-alpha",
-     "Qwen/Qwen2.5-72B-Instruct",
-     "Qwen/Qwen2.5-1.5B",
-     "google/gemma-1.1-2b-it"
- ]

  explanation_markdown = "".join([f"- **{key}**: {value}\n" for key, value in metaprompt_explanations.items()])

  if __name__ == '__main__':
-     api_token = os.getenv('HF_API_TOKEN')
-     if not api_token:
-         raise ValueError("HF_API_TOKEN not found in environment variables")
-
-     # Load environment variables for meta prompts
-     metadone = os.getenv('metadone')
-     prompt_refiner_model = os.getenv('prompt_refiner_model')
-     echo_prompt_refiner = os.getenv('echo_prompt_refiner')
-     metaprompt1 = os.getenv('metaprompt1')
-     loic_metaprompt = os.getenv('loic_metaprompt')
-     original_meta_prompt = os.getenv('original_meta_prompt')
-     new_meta_prompt = os.getenv('new_meta_prompt')
-     advanced_meta_prompt = os.getenv('advanced_meta_prompt')
-     math_meta_prompt = os.getenv('metamath')
-     autoregressive_metaprompt = os.getenv('autoregressive_metaprompt')
-
-     prompt_refiner = PromptRefiner(api_token)
-     gradio_interface = GradioInterface(prompt_refiner)
-     gradio_interface.launch(share=True)

  import os
  import json
+ import re
  from huggingface_hub import InferenceClient
  import gradio as gr
+ from pydantic import BaseModel, Field
+ from typing import Optional, Literal
+ from huggingface_hub.errors import HfHubHTTPError

  class PromptInput(BaseModel):
+     text: str = Field(..., description="The initial prompt text")
+     meta_prompt_choice: Literal["star", "done", "physics", "morphosis", "verse", "phor", "bolism", "math", "arpe"] = Field(..., description="Choice of meta prompt strategy")

  class RefinementOutput(BaseModel):
+     query_analysis: Optional[str] = None
+     initial_prompt_evaluation: Optional[str] = None
+     refined_prompt: Optional[str] = None
+     explanation_of_refinements: Optional[str] = None
+     raw_content: Optional[str] = None

  class PromptRefiner:
+     def __init__(self, api_token: str):
+         self.client = InferenceClient(token=api_token, timeout=300)
+         self.meta_prompts = {
+             "morphosis": original_meta_prompt,
+             "verse": new_meta_prompt,
+             "physics": metaprompt1,
+             "bolism": loic_metaprompt,
+             "done": metadone,
+             "star": echo_prompt_refiner,
+             "math": math_meta_prompt,
+             "arpe": autoregressive_metaprompt
+         }
+
+     def refine_prompt(self, prompt_input: PromptInput) -> tuple:
+         try:
+             # Select meta prompt using dictionary instead of if-elif chain
+             selected_meta_prompt = self.meta_prompts.get(
+                 prompt_input.meta_prompt_choice,
+                 advanced_meta_prompt
+             )
+
+             messages = [
+                 {
+                     "role": "system",
+                     "content": 'You are an expert at refining and extending prompts. Given a basic prompt, provide a more detailed version.'
+                 },
+                 {
+                     "role": "user",
+                     "content": selected_meta_prompt.replace("[Insert initial prompt here]", prompt_input.text)
+                 }
+             ]
+
+             response = self.client.chat_completion(
+                 model=prompt_refiner_model,
+                 messages=messages,
+                 max_tokens=2000,
+                 temperature=0.8
+             )
+
+             response_content = response.choices[0].message.content.strip()
+
+             # Parse the response
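+             # (the refiner model is expected to wrap its JSON payload in
+             # <json> tags; _parse_response extracts and decodes that block)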
+             result = self._parse_response(response_content)
+
+             return (
+                 result.get('initial_prompt_evaluation', ''),
+                 result.get('refined_prompt', ''),
+                 result.get('explanation_of_refinements', ''),
+                 result
+             )
+
+         except HfHubHTTPError as e:
+             return (
+                 "Error: Model timeout. Please try again later.",
+                 "The selected model is currently experiencing high traffic.",
+                 "The selected model is currently experiencing high traffic.",
+                 {}
+             )
+         except Exception as e:
+             return (
+                 f"Error: {str(e)}",
+                 "",
+                 "An unexpected error occurred.",
+                 {}
+             )
+
+     def _parse_response(self, response_content: str) -> dict:
+         try:
+             # Try to find JSON in response
+             json_match = re.search(r'<json>\s*(.*?)\s*</json>', response_content, re.DOTALL)
+             if json_match:
+                 json_str = json_match.group(1)
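+                 # The captured blob may span several lines and contain raw
+                 # quotes, so it is flattened, re-escaped, decoded once as a
+                 # JSON string literal, then decoded again into a dict.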
+                 json_str = re.sub(r'\n\s*', ' ', json_str)
+                 json_str = json_str.replace('"', '\\"')
+                 json_output = json.loads(f'"{json_str}"')
+
+                 if isinstance(json_output, str):
+                     json_output = json.loads(json_output)
+
+                 # Clean up JSON values: undo the escaping applied above
+                 output = {
+                     key: value.replace('\\"', '"') if isinstance(value, str) else value
+                     for key, value in json_output.items()
+                 }
+                 output['response_content'] = json_output
+                 return output
+
+             # Fallback to regex parsing if no JSON found
+             output = {}
+             for key in ["initial_prompt_evaluation", "refined_prompt", "explanation_of_refinements"]:
+                 pattern = rf'"{key}":\s*"(.*?)"(?:,|\}})'
+                 match = re.search(pattern, response_content, re.DOTALL)
+                 output[key] = match.group(1).replace('\\n', '\n').replace('\\"', '"') if match else ""
+             output['response_content'] = response_content
+             return output
+
+         except (json.JSONDecodeError, ValueError) as e:
+             print(f"Error parsing response: {e}")
+             print(f"Raw content: {response_content}")
+             return {
+                 "initial_prompt_evaluation": "Error parsing response",
+                 "refined_prompt": "",
+                 "explanation_of_refinements": str(e),
+                 "response_content": str(e)
+             }
+
+     def apply_prompt(self, prompt: str, model: str) -> str:
+         try:
+             messages = [
+                 {
+                     "role": "system",
+                     "content": "You are a helpful assistant. Answer in stylized version with latex format or markdown if relevant. Separate your answer into logical sections using level 2 headers (##) for sections and bolding (**) for subsections. Incorporate a variety of lists, headers, and text to make the answer visually appealing"
+                 },
+                 {
+                     "role": "user",
+                     "content": prompt
+                 }
+             ]
+
+             response = self.client.chat_completion(
+                 model=model,
+                 messages=messages,
+                 max_tokens=2000,
+                 temperature=0.8
+             )
+
+             output = response.choices[0].message.content.strip()
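+             # Collapse blank lines so the answer renders more compactly in the UI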
+             return output.replace('\n\n', '\n').strip()
+
+         except Exception as e:
+             return f"Error: {str(e)}"

  class GradioInterface:
+     def __init__(self, prompt_refiner: PromptRefiner):
+         self.prompt_refiner = prompt_refiner
+         custom_css = """
+         .container {
+             border: 2px solid #2196F3;
+             border-radius: 10px;
+             padding: 12px;
+             margin: 6px;
+             background: white;
+             position: relative;
+             width: 100% !important;
+             max-width: 1200px !important;
+             margin: 0 auto 20px auto !important;
+         }
+
+         .container::before {
+             position: absolute;
+             top: -10px;
+             left: 20px;
+             background: white;
+             padding: 0 10px;
+             color: #2196F3;
+             font-weight: bold;
+             font-size: 1.2em;
+         }
+
+         .title-container {
+             width: fit-content !important;
+             margin: 0 auto !important;
+             padding: 2px 40px !important;
+             border: 1px solid #0066cc !important;
+             border-radius: 10px !important;
+             background-color: rgba(0, 102, 204, 0.05) !important;
+         }
+
+         .title-container * {
+             text-align: center;
+             margin: 0 !important;
+             line-height: 1.2 !important;
+         }
+
+         .title-container h1 {
+             font-size: 28px !important;
+             margin-bottom: 1px !important;
+         }
+
+         .title-container h3 {
+             font-size: 18px !important;
+             margin-bottom: 1px !important;
+         }
+
+         .title-container p {
+             font-size: 14px !important;
+             margin-bottom: 1px !important;
+         }
+
+         .input-container::before {
+             content: 'PROMPT REFINEMENT';
+         }
+
+         .analysis-container::before {
+             content: 'ANALYSIS';
+         }
+
+         .model-container::before {
+             content: 'MODEL APPLICATION';
+         }
+
+         .examples-container::before {
+             content: 'EXAMPLES';
+         }
+
+         /* Resizable textbox */
+         .input-container textarea {
+             resize: vertical !important;
+             min-height: 100px !important;
+             max-height: 500px !important;
+             width: 100% !important;
+             border: 1px solid #ddd !important;
+             border-radius: 4px !important;
+             padding: 8px !important;
+             transition: all 0.3s ease !important;
+         }
+
+         .input-container textarea:focus {
+             border-color: #2196F3 !important;
+             box-shadow: 0 0 0 2px rgba(33, 150, 243, 0.1) !important;
+         }
+
+         /* Radio group styling */
+         .radio-group {
+             background-color: rgba(0, 102, 204, 0.05) !important;
+             padding: 10px !important;
+             border-radius: 8px !important;
+             border: 1px solid rgba(0, 102, 204, 0.1) !important;
+             display: flex !important;
+             justify-content: center !important;
+             flex-wrap: wrap !important;
+             gap: 8px !important;
+             width: 100% !important;
+         }
+
+         .gradio-radio {
+             display: flex !important;
+             justify-content: center !important;
+             flex-wrap: wrap !important;
+             gap: 8px !important;
+         }
+
+         .gradio-radio label {
+             display: flex !important;
+             align-items: center !important;
+             padding: 6px 12px !important;
+             border: 1px solid #ddd !important;
+             border-radius: 4px !important;
+             cursor: pointer !important;
+             background: white !important;
+             margin: 4px !important;
+         }
+
+         .gradio-radio input[type="radio"]:checked + label {
+             background: rgba(0, 102, 204, 0.1) !important;
+             border-color: #0066cc !important;
+             color: #0066cc !important;
+             font-weight: bold !important;
+         }
+
+         /* Button styling */
+         .gradio-button {
+             background-color: white !important;
+             color: #2196F3 !important;
+             border: 2px solid #2196F3 !important;
+             border-radius: 4px !important;
+             padding: 8px 16px !important;
+             margin: 10px 0 !important;
+             font-weight: bold !important;
+             transition: all 0.3s ease !important;
+         }
+
+         .gradio-button:hover {
+             background-color: #2196F3 !important;
+             color: white !important;
+             box-shadow: 0 2px 5px rgba(33, 150, 243, 0.3) !important;
+         }
+
+         /* Accordion styling */
+         .gradio-accordion {
+             margin: 10px 0 !important;
+             border: none !important;
+         }
+
+         /* Container alignment */
+         .gradio-container {
+             display: flex !important;
+             flex-direction: column !important;
+             align-items: center !important;
+             width: 100% !important;
+             max-width: 1200px !important;
+             margin: 0 auto !important;
+         }
+
+         /* Dropdown styling */
+         .gradio-dropdown {
+             width: 100% !important;
+             max-width: 300px !important;
+         }
+
+         /* JSON container */
+         .full-response-json {
+             margin-top: 20px !important;
+             padding: 10px !important;
+             background-color: rgba(0, 102, 204, 0.05) !important;
+             border-radius: 8px !important;
+         }
+         """
+
+         with gr.Blocks(css=custom_css, theme=gr.themes.Default()) as self.interface:
+             with gr.Column(elem_classes=["container", "title-container"]):
+                 gr.Markdown("# PROMPT++")
+                 gr.Markdown("### Automating Prompt Engineering by Refining your Prompts")
+                 gr.Markdown("Learn how to generate an improved version of your prompts.")
+
+             with gr.Column(elem_classes=["container", "input-container"]):
+                 prompt_text = gr.Textbox(
+                     label="Type your prompt (or leave it empty to see the metaprompt)",
+                     # elem_classes="no-background",
+                     # elem_classes="container2",
+                     lines=5
+                 )
+                 meta_prompt_choice = gr.Radio(
+                     ["star", "done", "physics", "morphosis", "verse", "phor", "bolism", "math", "arpe"],
+                     label="Choose Meta Prompt",
+                     value="star",
+                     elem_classes=["no-background", "radio-group"]
+                     # elem_classes=["radio-group"]
+                 )
+                 refine_button = gr.Button("Refine Prompt")
+
+             # Option 1: Put Examples here (before Meta Prompt explanation)
+             with gr.Row(elem_classes=["container2"]):
+                 with gr.Accordion("Examples", open=False):
+                     gr.Examples(
+                         examples=[
+                             ["Write a story on the end of prompt engineering replaced by an Ai specialized in refining prompts.", "star"],
+                             ["Tell me about that guy who invented the light bulb", "physics"],
+                             ["Explain the universe.", "star"],
+                             ["What's the population of New York City and how tall is the Empire State Building and who was the first mayor?", "morphosis"],
+                             ["List American presidents.", "verse"],
+                             ["Explain why the experiment failed.", "morphosis"],
+                             ["Is nuclear energy good?", "verse"],
+                             ["How does a computer work?", "phor"],
+                             ["How to make money fast?", "done"],
+                             ["how can you prove IT0's lemma in stochastic calculus ?", "arpe"],
+                         ],
+                         inputs=[prompt_text, meta_prompt_choice]
+                     )
+
+                 with gr.Accordion("Meta Prompt explanation", open=False):
+                     gr.Markdown(explanation_markdown)
+
+             # Option 2: Or put Examples here (after the button)
+             # with gr.Accordion("Examples", open=False):
+             #     gr.Examples(...)
+
+             with gr.Column(elem_classes=["container", "analysis-container"]):
+                 gr.Markdown(' ')
+                 gr.Markdown("### Initial prompt analysis")
+                 analysis_evaluation = gr.Markdown()
+                 gr.Markdown("### Refined Prompt")
+                 refined_prompt = gr.Textbox(
+                     label="Refined Prompt",
+                     interactive=True,
+                     show_label=True,  # Must be True for copy button to show
+                     show_copy_button=True,  # Adds the copy button
+                     # elem_classes="no-background"
+                 )
+                 gr.Markdown("### Explanation of Refinements")
+                 explanation_of_refinements = gr.Markdown()
+
+             with gr.Column(elem_classes=["container", "model-container"]):
+                 # gr.Markdown("## See MetaPrompt Impact")
+                 with gr.Row():
+                     apply_model = gr.Dropdown(models,
                          value="meta-llama/Llama-3.1-8B-Instruct",
                          label="Choose the Model",
+                         container=False,  # This removes the container around the dropdown
+                         scale=1,  # Controls the width relative to other components
+                         min_width=300  # Sets minimum width in pixels
+                         # elem_classes="no-background"
+                     )
+                     apply_button = gr.Button("Apply MetaPrompt")
+
+                 # with gr.Column(elem_classes=["container", "results-container"]):
+                 gr.Markdown("### Prompts on chosen model")
+                 with gr.Tabs():
+                     with gr.TabItem("Original Prompt Output"):
+                         original_output = gr.Markdown()
+                     with gr.TabItem("Refined Prompt Output"):
+                         refined_output = gr.Markdown()
+                 with gr.Accordion("Full Response JSON", open=False, visible=True):
+                     full_response_json = gr.JSON()
+
+             refine_button.click(
+                 fn=self.refine_prompt,
+                 inputs=[prompt_text, meta_prompt_choice],
+                 outputs=[analysis_evaluation, refined_prompt, explanation_of_refinements, full_response_json]
+             )
+
+             apply_button.click(
+                 fn=self.apply_prompts,
+                 inputs=[prompt_text, refined_prompt, apply_model],
+                 outputs=[original_output, refined_output]
+             )
+
+     def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
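+         # Pydantic validates the inputs here: a meta_prompt_choice outside the
+         # Literal set raises a ValidationError instead of silently falling through.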
+         input_data = PromptInput(text=prompt, meta_prompt_choice=meta_prompt_choice)
+         # prompt_refiner.refine_prompt returns a 4-tuple:
+         # (evaluation, refined prompt, explanation, full response)
+         initial_prompt_evaluation, refined_prompt, explanation_refinements, full_response = self.prompt_refiner.refine_prompt(input_data)
+
+         analysis_evaluation = f"\n\n{initial_prompt_evaluation}"
+         return (
+             analysis_evaluation,
+             refined_prompt,
+             explanation_refinements,
+             full_response
+         )
+
+     def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str):
+         original_output = self.prompt_refiner.apply_prompt(original_prompt, model)
+         refined_output = self.prompt_refiner.apply_prompt(refined_prompt, model)
+         return original_output, refined_output
+
+     def launch(self, share=False):
+         self.interface.launch(share=share)

  metaprompt_explanations = {
+     "star": "Use ECHO when you need a comprehensive, multi-stage approach for complex prompts. It's ideal for tasks requiring in-depth analysis, exploration of multiple alternatives, and synthesis of ideas. Choose this over others when you have time for a thorough refinement process and need to consider various aspects of the prompt.",
+     "done": "Opt for this when you want a structured approach with emphasis on role-playing and advanced techniques. It's particularly useful for tasks that benefit from diverse perspectives and complex reasoning. Prefer this over 'physics' when you need a more detailed, step-by-step refinement process.",
+     "physics": "Select this when you need a balance between structure and advanced techniques, with a focus on role-playing. It's similar to 'done' but may be more suitable for scientific or technical prompts. Choose this over 'done' for a slightly less complex approach.",
+     "morphosis": "Use this simplified approach for straightforward prompts or when time is limited. It focuses on essential improvements without complex techniques. Prefer this over other methods when you need quick, clear refinements without extensive analysis.",
+     "verse": "Choose this method when you need to analyze and improve a prompt's strengths and weaknesses, with a focus on information flow. It's particularly useful for enhancing the logical structure of prompts. Use this over 'morphosis' when you need more depth but less complexity than 'star'.",
+     "phor": "Employ this advanced approach when you need to combine multiple prompt engineering techniques. It's ideal for complex tasks requiring both clarity and sophisticated prompting methods. Select this over 'star' when you want a more flexible, technique-focused approach.",
+     "bolism": "Utilize this method when working with autoregressive language models and when the task requires careful reasoning before conclusions. It's best for prompts that need detailed output formatting. Choose this over others when the prompt's structure and reasoning order are crucial."
  }

+ models = [
+     # Meta-Llama models (all support system prompts)
+     "meta-llama/Meta-Llama-3-70B-Instruct",
+     "meta-llama/Meta-Llama-3-8B-Instruct",
+     "meta-llama/Llama-3.1-70B-Instruct",
+     "meta-llama/Llama-3.1-8B-Instruct",
+     "meta-llama/Llama-3.2-3B-Instruct",
+     "meta-llama/Llama-3.2-1B-Instruct",
+     "meta-llama/Llama-2-13b-chat-hf",
+     "meta-llama/Llama-2-7b-chat-hf",
+
+     # HuggingFaceH4 models (support system prompts)
+     "HuggingFaceH4/zephyr-7b-beta",
+     "HuggingFaceH4/zephyr-7b-alpha",
+
+     # Qwen models (support system prompts)
+     "Qwen/Qwen2.5-72B-Instruct",
+     "Qwen/Qwen2.5-1.5B",
+
+     # Google models (support system prompts)
+     "google/gemma-1.1-2b-it"
+ ]

  explanation_markdown = "".join([f"- **{key}**: {value}\n" for key, value in metaprompt_explanations.items()])

  if __name__ == '__main__':
+     meta_info = ""
+     api_token = os.getenv('HF_API_TOKEN')
+     if not api_token:
+         raise ValueError("HF_API_TOKEN not found in environment variables")
+
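+     # The meta prompt templates are loaded from environment variables (on a
+     # Hugging Face Space these would typically be set as secrets), keeping
+     # the prompt texts themselves out of the repository.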
+     metadone = os.getenv('metadone')
+     prompt_refiner_model = os.getenv('prompt_refiner_model')
+     echo_prompt_refiner = os.getenv('echo_prompt_refiner')
+     metaprompt1 = os.getenv('metaprompt1')
+     loic_metaprompt = os.getenv('loic_metaprompt')
+     openai_metaprompt = os.getenv('openai_metaprompt')
+     original_meta_prompt = os.getenv('original_meta_prompt')
+     new_meta_prompt = os.getenv('new_meta_prompt')
+     advanced_meta_prompt = os.getenv('advanced_meta_prompt')
+     math_meta_prompt = os.getenv('metamath')
+     autoregressive_metaprompt = os.getenv('autoregressive_metaprompt')
+
+     prompt_refiner = PromptRefiner(api_token)
+     gradio_interface = GradioInterface(prompt_refiner)
+     gradio_interface.launch(share=True)