yxmnjxzx committed on
Commit
1f0f2f0
·
verified ·
1 Parent(s): 23801de

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +107 -15
app.py CHANGED
@@ -9,9 +9,44 @@ from typing import Optional, Literal
9
  from custom_css import custom_css
10
  from variables import *
11
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  class PromptInput(BaseModel):
13
  text: str = Field(..., description="The initial prompt text")
14
- meta_prompt_choice: Literal["superstar","star","done","physics","morphosis", "verse", "phor","bolism","math", "math_meta"] = Field(..., description="Choice of meta prompt strategy")
15
 
16
  class RefinementOutput(BaseModel):
17
  query_analysis: Optional[str] = None
@@ -19,12 +54,52 @@ class RefinementOutput(BaseModel):
19
  refined_prompt: Optional[str] = None
20
  explanation_of_refinements: Optional[str] = None
21
  raw_content: Optional[str] = None
 
22
 
23
  class PromptRefiner:
24
  def __init__(self, api_token: str):
25
  self.client = Groq(api_key=api_key)
26
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  def refine_prompt(self, prompt_input: PromptInput) -> RefinementOutput:
 
 
 
 
 
 
 
 
 
 
28
  if prompt_input.meta_prompt_choice == "morphosis":
29
  selected_meta_prompt = original_meta_prompt
30
  elif prompt_input.meta_prompt_choice == "verse":
@@ -164,9 +239,9 @@ class GradioInterface:
164
  lines=5
165
  )
166
  meta_prompt_choice = gr.Radio(
167
- ["superstar", "star", "done", "physics", "morphosis", "verse", "phor","bolism","math","math_meta"],
168
  label="Choose Meta Prompt",
169
- value="star",
170
  elem_classes=["no-background", "radio-group"]
171
  # elem_classes=[ "radio-group"]
172
  )
@@ -238,13 +313,25 @@ class GradioInterface:
238
  refined_output = gr.Markdown()
239
  with gr.Accordion("Full Response JSON", open=False, visible=True):
240
  full_response_json = gr.JSON()
241
-
 
 
 
 
 
 
 
 
 
 
 
 
242
  refine_button.click(
243
  fn=self.refine_prompt,
244
  inputs=[prompt_text, meta_prompt_choice],
245
- outputs=[analysis_evaluation, refined_prompt, explanation_of_refinements, full_response_json]
246
  )
247
-
248
  apply_button.click(
249
  fn=self.apply_prompts,
250
  inputs=[prompt_text, refined_prompt, apply_model],
@@ -255,36 +342,41 @@ class GradioInterface:
255
  try:
256
  input_data = PromptInput(text=prompt, meta_prompt_choice=meta_prompt_choice)
257
  result = self.prompt_refiner.refine_prompt(input_data)
 
 
 
258
 
259
  # Ensure all values are strings or None
260
  analysis_evaluation = str(result.initial_prompt_evaluation) if result.initial_prompt_evaluation else ""
261
  refined_prompt = str(result.refined_prompt) if result.refined_prompt else ""
262
  explanation_refinements = str(result.explanation_of_refinements) if result.explanation_of_refinements else ""
263
 
264
- # Create a safe JSON-serializable dictionary
265
  full_response = {
266
- "initial_prompt_evaluation": analysis_evaluation,
267
- "refined_prompt": refined_prompt,
268
- "explanation_of_refinements": explanation_refinements,
269
- "raw_content": str(result.raw_content) if result.raw_content else ""
 
270
  }
271
 
272
  return (
273
  analysis_evaluation,
274
  refined_prompt,
275
  explanation_refinements,
276
- full_response
 
277
  )
278
  except Exception as e:
279
- # Return safe default values in case of any error
280
  error_response = {
281
  "error": str(e),
282
  "initial_prompt_evaluation": "",
283
  "refined_prompt": "",
284
  "explanation_of_refinements": "",
285
- "raw_content": ""
 
286
  }
287
- return "", "", "", error_response
288
 
289
  def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str):
290
  original_output = self.prompt_refiner.apply_prompt(original_prompt, model)
 
9
  from custom_css import custom_css
10
  from variables import *
11
 
12
+ logger = logging.getLogger(__name__)
13
+
14
+ # Add system prompt generation meta prompt
15
+ SYSTEM_META_PROMPT = = """
16
+ Given a task description or existing prompt, produce a detailed system prompt to guide a language model in completing the task effectively.
17
+ # Guidelines
18
+ - Understand the Task: Grasp the main objective, goals, requirements, constraints, and expected output.
19
+ - Minimal Changes: If an existing prompt is provided, improve it only if it's simple. For complex prompts, enhance clarity and add missing elements without altering the original structure.
20
+ - Reasoning Before Conclusions**: Encourage reasoning steps before any conclusions are reached. ATTENTION! If the user provides examples where the reasoning happens afterward, REVERSE the order! NEVER START EXAMPLES WITH CONCLUSIONS!
21
+ - Reasoning Order: Call out reasoning portions of the prompt and conclusion parts (specific fields by name). For each, determine the ORDER in which this is done, and whether it needs to be reversed.
22
+ - Conclusion, classifications, or results should ALWAYS appear last.
23
+ - Examples: Include high-quality examples if helpful, using placeholders [in brackets] for complex elements.
24
+ - What kinds of examples may need to be included, how many, and whether they are complex enough to benefit from placeholders.
25
+ - Clarity and Conciseness: Use clear, specific language. Avoid unnecessary instructions or bland statements.
26
+ - Formatting: Use markdown features for readability. DO NOT USE ``` CODE BLOCKS UNLESS SPECIFICALLY REQUESTED.
27
+ - Preserve User Content: If the input task or prompt includes extensive guidelines or examples, preserve them entirely, or as closely as possible. If they are vague, consider breaking down into sub-steps. Keep any details, guidelines, examples, variables, or placeholders provided by the user.
28
+ - Constants: DO include constants in the prompt, as they are not susceptible to prompt injection. Such as guides, rubrics, and examples.
29
+ - Output Format: Explicitly the most appropriate output format, in detail. This should include length and syntax (e.g. short sentence, paragraph, JSON, etc.)
30
+ - For tasks outputting well-defined or structured data (classification, JSON, etc.) bias toward outputting a JSON.
31
+ - JSON should never be wrapped in code blocks (```) unless explicitly requested.
32
+ The final prompt you output should adhere to the following structure below. Do not include any additional commentary, only output the completed system prompt. SPECIFICALLY, do not include any additional messages at the start or end of the prompt. (e.g. no "---")
33
+ [Concise instruction describing the task - this should be the first line in the prompt, no section header]
34
+ [Additional details as needed.]
35
+ [Optional sections with headings or bullet points for detailed steps.]
36
+ # Steps [optional]
37
+ [optional: a detailed breakdown of the steps necessary to accomplish the task]
38
+ # Output Format
39
+ [Specifically call out how the output should be formatted, be it response length, structure e.g. JSON, markdown, etc]
40
+ # Examples [optional]
41
+ [Optional: 1-3 well-defined examples with placeholders if necessary. Clearly mark where examples start and end, and what the input and output are. User placeholders as necessary.]
42
+ [If the examples are shorter than what a realistic example is expected to be, make a reference with () explaining how real examples should be longer / shorter / different. AND USE PLACEHOLDERS! ]
43
+ # Notes [optional]
44
+ [optional: edge cases, details, and an area to call or repeat out specific important considerations]
45
+ """.strip()
46
+
47
  class PromptInput(BaseModel):
48
  text: str = Field(..., description="The initial prompt text")
49
+ meta_prompt_choice: Literal["superstar","star","done","physics","morphosis", "verse", "phor","bolism","math", "math_meta", "system"] = Field(..., description="Choice of meta prompt strategy")
50
 
51
  class RefinementOutput(BaseModel):
52
  query_analysis: Optional[str] = None
 
54
  refined_prompt: Optional[str] = None
55
  explanation_of_refinements: Optional[str] = None
56
  raw_content: Optional[str] = None
57
+ system_prompt: Optional[str] = None # New field for system prompt
58
 
59
  class PromptRefiner:
60
  def __init__(self, api_token: str):
61
  self.client = Groq(api_key=api_key)
62
 
63
+ def generate_system_prompt(self, task_or_prompt: str, model: str = "llama-3.2-90b-text-preview") -> str:
64
+ """Generate a system prompt for the given task or prompt."""
65
+ messages = [
66
+ {
67
+ "role": "system",
68
+ "content": SYSTEM_META_PROMPT,
69
+ },
70
+ {
71
+ "role": "user",
72
+ "content": f"Task, Goal, or Current Prompt:\n{task_or_prompt}",
73
+ },
74
+ ]
75
+
76
+ response = self.client.chat.completions.create(
77
+ model=model,
78
+ messages=messages,
79
+ max_tokens=3000,
80
+ temperature=0.7,
81
+ stream=True
82
+ )
83
+
84
+ prompt = ''
85
+ for chunk in response:
86
+ if chunk.choices[0].delta.content is not None:
87
+ prompt += chunk.choices[0].delta.content
88
+
89
+ logger.info("Generated system prompt: %s", prompt)
90
+ return prompt
91
+
92
  def refine_prompt(self, prompt_input: PromptInput) -> RefinementOutput:
93
+ # Handle system prompt generation separately
94
+ if prompt_input.meta_prompt_choice == "system":
95
+ system_prompt = self.generate_system_prompt(prompt_input.text)
96
+ return RefinementOutput(
97
+ refined_prompt=system_prompt,
98
+ explanation_of_refinements="Generated system prompt based on the task/prompt.",
99
+ system_prompt=system_prompt
100
+ )
101
+
102
+ # Existing meta prompt selection logic
103
  if prompt_input.meta_prompt_choice == "morphosis":
104
  selected_meta_prompt = original_meta_prompt
105
  elif prompt_input.meta_prompt_choice == "verse":
 
239
  lines=5
240
  )
241
  meta_prompt_choice = gr.Radio(
242
+ ["superstar", "star", "done", "physics", "morphosis", "verse", "phor","bolism","math","math_meta", "system"],
243
  label="Choose Meta Prompt",
244
+ value="superstar",
245
  elem_classes=["no-background", "radio-group"]
246
  # elem_classes=[ "radio-group"]
247
  )
 
313
  refined_output = gr.Markdown()
314
  with gr.Accordion("Full Response JSON", open=False, visible=True):
315
  full_response_json = gr.JSON()
316
+
317
+ # Add new tab for system prompt output
318
+ with gr.Column(elem_classes=["container", "system-prompt-container"]):
319
+ with gr.Tabs():
320
+ with gr.TabItem("System Prompt"):
321
+ system_prompt_output = gr.Textbox(
322
+ label="Generated System Prompt",
323
+ interactive=True,
324
+ show_label=True,
325
+ show_copy_button=True
326
+ )
327
+
328
+ # Modified click handler to include system prompt output
329
  refine_button.click(
330
  fn=self.refine_prompt,
331
  inputs=[prompt_text, meta_prompt_choice],
332
+ outputs=[analysis_evaluation, refined_prompt, explanation_of_refinements, full_response_json, system_prompt_output]
333
  )
334
+
335
  apply_button.click(
336
  fn=self.apply_prompts,
337
  inputs=[prompt_text, refined_prompt, apply_model],
 
342
  try:
343
  input_data = PromptInput(text=prompt, meta_prompt_choice=meta_prompt_choice)
344
  result = self.prompt_refiner.refine_prompt(input_data)
345
+
346
+ # Include system prompt in output
347
+ system_prompt = str(result.system_prompt) if result.system_prompt else ""
348
 
349
  # Ensure all values are strings or None
350
  analysis_evaluation = str(result.initial_prompt_evaluation) if result.initial_prompt_evaluation else ""
351
  refined_prompt = str(result.refined_prompt) if result.refined_prompt else ""
352
  explanation_refinements = str(result.explanation_of_refinements) if result.explanation_of_refinements else ""
353
 
354
+ # Create response dictionary
355
  full_response = {
356
+ "initial_prompt_evaluation": str(result.initial_prompt_evaluation) if result.initial_prompt_evaluation else "",
357
+ "refined_prompt": str(result.refined_prompt) if result.refined_prompt else "",
358
+ "explanation_of_refinements": str(result.explanation_of_refinements) if result.explanation_of_refinements else "",
359
+ "raw_content": str(result.raw_content) if result.raw_content else "",
360
+ "system_prompt": system_prompt
361
  }
362
 
363
  return (
364
  analysis_evaluation,
365
  refined_prompt,
366
  explanation_refinements,
367
+ full_response,
368
+ system_prompt
369
  )
370
  except Exception as e:
 
371
  error_response = {
372
  "error": str(e),
373
  "initial_prompt_evaluation": "",
374
  "refined_prompt": "",
375
  "explanation_of_refinements": "",
376
+ "raw_content": "",
377
+ "system_prompt": ""
378
  }
379
+ return "", "", "", error_response, ""
380
 
381
  def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str):
382
  original_output = self.prompt_refiner.apply_prompt(original_prompt, model)