import os
import json
import re
import gradio as gr
from groq import Groq
from pydantic import BaseModel, Field
from typing import Optional, Literal
from custom_css import custom_css
from variables import *
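
# NOTE: `variables` is expected to provide at least the `models` list and
# `explanation_markdown` used by the Gradio UI below; the meta prompt
# templates themselves are read from environment variables at the bottom
# of this file.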


class PromptInput(BaseModel):
    text: str = Field(..., description="The initial prompt text")
    meta_prompt_choice: Literal[
        "superstar", "star", "done", "physics", "morphosis",
        "verse", "phor", "bolism", "math", "math_meta"
    ] = Field(..., description="Choice of meta prompt strategy")


class RefinementOutput(BaseModel):
    query_analysis: Optional[str] = None
    initial_prompt_evaluation: Optional[str] = None
    refined_prompt: Optional[str] = None
    explanation_of_refinements: Optional[str] = None
    raw_content: Optional[str] = None
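
# These optional fields mirror the keys the model is asked to return inside a
# <json>...</json> block; raw_content preserves the full unparsed response
# for debugging.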


class PromptRefiner:
    def __init__(self, api_token: str):
        self.client = Groq(api_key=api_token)

    def refine_prompt(self, prompt_input: PromptInput) -> RefinementOutput:
        # Map each strategy name to its meta prompt template; "phor" and any
        # unknown choice fall back to advanced_meta_prompt.
        meta_prompt_map = {
            "morphosis": original_meta_prompt,
            "verse": new_meta_prompt,
            "physics": metaprompt1,
            "bolism": loic_metaprompt,
            "done": metadone,
            "star": echo_prompt_refiner,
            "superstar": advanced_echo_prompt_refiner,
            "math": math_meta_prompt,
            "math_meta": math_meta,
        }
        selected_meta_prompt = meta_prompt_map.get(prompt_input.meta_prompt_choice, advanced_meta_prompt)
        messages = [
            {"role": "system", "content": "You are an expert at refining and extending prompts. Given a basic prompt, provide a more detailed and refined version."},
            {"role": "user", "content": selected_meta_prompt.replace("[Insert initial prompt here]", prompt_input.text)}
        ]
        response = self.client.chat.completions.create(
            model="llama-3.2-90b-text-preview",
            messages=messages,
            max_tokens=8192,
            temperature=0.7
        )
        response_content = response.choices[0].message.content.strip()
        try:
            # The meta prompts ask the model to wrap its answer in <json> tags,
            # e.g. <json>{"initial_prompt_evaluation": "...", "refined_prompt": "...",
            # "explanation_of_refinements": "..."}</json>
            json_match = re.search(r'<json>\s*(.*?)\s*</json>', response_content, re.DOTALL)
            if json_match:
                json_str = json_match.group(1)
                # Collapse raw newlines inside the payload, which would
                # otherwise make the JSON string values invalid
                json_str = re.sub(r'\n\s*', ' ', json_str)
                json_output = json.loads(json_str)
                return RefinementOutput(**json_output, raw_content=response_content)
            else:
                raise ValueError("No JSON found in the response")
        except (json.JSONDecodeError, ValueError) as e:
            print(f"Error parsing JSON: {e}")
            print(f"Raw content: {response_content}")
            # If JSON parsing fails, attempt to extract the fields manually
            output = {}
            for key in ["initial_prompt_evaluation", "refined_prompt", "explanation_of_refinements"]:
                pattern = rf'"{key}":\s*"(.*?)"(?:,|\}})'
                match = re.search(pattern, response_content, re.DOTALL)
                if match:
                    output[key] = match.group(1).replace('\\n', '\n').replace('\\"', '"')
                else:
                    output[key] = ""  # Empty string if the field is not found
            return RefinementOutput(**output, raw_content=response_content)
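
    # Minimal usage sketch (assumes GROQ_API_KEY and the meta prompt
    # environment variables defined at the bottom of this file are set):
    #   refiner = PromptRefiner(api_token=os.getenv('GROQ_API_KEY'))
    #   result = refiner.refine_prompt(PromptInput(text="Explain the universe.", meta_prompt_choice="star"))
    #   print(result.refined_prompt)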

    def apply_prompt(self, prompt: str, model: str) -> str:
        try:
            messages = [
                {
                    "role": "system",
                    "content": """You are a markdown formatting expert. Format your responses with proper spacing and structure following these rules:
1. Paragraph Spacing:
- Add TWO blank lines between major sections (##)
- Add ONE blank line between subsections (###)
- Add ONE blank line between paragraphs within sections
- Add ONE blank line before and after lists
- Add ONE blank line before and after code blocks
- Add ONE blank line before and after blockquotes
2. Section Formatting:
# Title
## Major Section
[blank line]
Content paragraph 1
[blank line]
Content paragraph 2
[blank line]"""
                },
                {
                    "role": "user",
                    "content": prompt
                }
            ]
            response = self.client.chat.completions.create(
                model=model,
                messages=messages,
                max_tokens=8000,  # Increased token limit
                temperature=0.8,
                stream=True  # Enable streaming in the API call
            )
            # Accumulate the streamed chunks into a single string
            full_response = ""
            for chunk in response:
                if chunk.choices[0].delta.content is not None:
                    full_response += chunk.choices[0].delta.content
            # Collapse double newlines and return the complete response
            return full_response.replace('\n\n', '\n').strip()
        except Exception as e:
            return f"Error: {str(e)}"
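
# apply_prompt streams the completion and accumulates the chunks, so callers
# receive the full formatted markdown string rather than a generator, e.g.:
#   output_md = refiner.apply_prompt("Explain the universe.", "llama-3.1-70b-versatile")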


class GradioInterface:
    def __init__(self, prompt_refiner: PromptRefiner, custom_css: str):
        self.prompt_refiner = prompt_refiner
        with gr.Blocks(css=custom_css, theme=gr.themes.Default()) as self.interface:
            with gr.Column(elem_classes=["container", "title-container"]):
                gr.Markdown("# PROMPT++")
                gr.Markdown("### Automating Prompt Engineering by Refining your Prompts")
                gr.Markdown("Learn how to generate an improved version of your prompts.")
            with gr.Column(elem_classes=["container", "input-container"]):
                prompt_text = gr.Textbox(
                    label="Type your prompt (or leave it empty to see the meta prompt)",
                    lines=5
                )
                meta_prompt_choice = gr.Radio(
                    ["superstar", "star", "done", "physics", "morphosis", "verse", "phor", "bolism", "math", "math_meta"],
                    label="Choose Meta Prompt",
                    value="star",
                    elem_classes=["no-background", "radio-group"]
                )
                refine_button = gr.Button("Refine Prompt")
            with gr.Row(elem_classes=["container2"]):
                with gr.Accordion("Examples", open=False):
                    gr.Examples(
                        examples=[
                            ["Write a story on the end of prompt engineering replaced by an AI specialized in refining prompts.", "superstar"],
                            ["Tell me about that guy who invented the light bulb", "physics"],
                            ["Explain the universe.", "star"],
                            ["What's the population of New York City and how tall is the Empire State Building and who was the first mayor?", "morphosis"],
                            ["List American presidents.", "verse"],
                            ["Explain why the experiment failed.", "morphosis"],
                            ["Is nuclear energy good?", "verse"],
                            ["How does a computer work?", "phor"],
                            ["How to make money fast?", "done"],
                            ["How can you prove Itô's lemma in stochastic calculus?", "math_meta"],
                        ],
                        inputs=[prompt_text, meta_prompt_choice]
                    )
                with gr.Accordion("Meta Prompt explanation", open=False):
                    gr.Markdown(explanation_markdown)
            with gr.Column(elem_classes=["container", "analysis-container"]):
                gr.Markdown(' ')
                gr.Markdown("### Initial prompt analysis")
                analysis_evaluation = gr.Markdown()
                gr.Markdown("### Refined Prompt")
                refined_prompt = gr.Textbox(
                    label="Refined Prompt",
                    interactive=True,
                    show_label=True,  # Must be True for the copy button to show
                    show_copy_button=True  # Adds the copy button
                )
                gr.Markdown("### Explanation of Refinements")
                explanation_of_refinements = gr.Markdown()
            with gr.Column(elem_classes=["container", "model-container"]):
                with gr.Row():
                    apply_model = gr.Dropdown(
                        models,
                        value="llama-3.1-70b-versatile",
                        label="Choose the Model",
                        container=False,  # Removes the container around the dropdown
                        scale=1,  # Width relative to other components in the row
                        min_width=300  # Minimum width in pixels
                    )
                    apply_button = gr.Button("Apply MetaPrompt")
            gr.Markdown("### Prompts on the chosen model")
            with gr.Tabs():
                with gr.TabItem("Original Prompt Output"):
                    original_output = gr.Markdown()
                with gr.TabItem("Refined Prompt Output"):
                    refined_output = gr.Markdown()
            with gr.Accordion("Full Response JSON", open=False, visible=True):
                full_response_json = gr.JSON()
            refine_button.click(
                fn=self.refine_prompt,
                inputs=[prompt_text, meta_prompt_choice],
                outputs=[analysis_evaluation, refined_prompt, explanation_of_refinements, full_response_json]
            )
            apply_button.click(
                fn=self.apply_prompts,
                inputs=[prompt_text, refined_prompt, apply_model],
                outputs=[original_output, refined_output]
            )

    def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
        try:
            input_data = PromptInput(text=prompt, meta_prompt_choice=meta_prompt_choice)
            result = self.prompt_refiner.refine_prompt(input_data)
            # Ensure all values are strings
            analysis_evaluation = str(result.initial_prompt_evaluation) if result.initial_prompt_evaluation else ""
            refined_prompt = str(result.refined_prompt) if result.refined_prompt else ""
            explanation_refinements = str(result.explanation_of_refinements) if result.explanation_of_refinements else ""
            # Build a JSON-serializable dictionary for the JSON viewer
            full_response = {
                "initial_prompt_evaluation": analysis_evaluation,
                "refined_prompt": refined_prompt,
                "explanation_of_refinements": explanation_refinements,
                "raw_content": str(result.raw_content) if result.raw_content else ""
            }
            return (
                analysis_evaluation,
                refined_prompt,
                explanation_refinements,
                full_response
            )
        except Exception as e:
            # Return safe default values in case of any error
            error_response = {
                "error": str(e),
                "initial_prompt_evaluation": "",
                "refined_prompt": "",
                "explanation_of_refinements": "",
                "raw_content": ""
            }
            return "", "", "", error_response

    def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str):
        original_output = self.prompt_refiner.apply_prompt(original_prompt, model)
        refined_output = self.prompt_refiner.apply_prompt(refined_prompt, model)
        return original_output, refined_output

    def launch(self, share=False):
        self.interface.launch(share=share)


# explanation_markdown = "".join([f"- **{key}**: {value}\n" for key, value in metaprompt_explanations.items()])

meta_info = ""
api_key = os.getenv('GROQ_API_KEY')
if not api_key:
    raise ValueError("GROQ_API_KEY not found in environment variables")

# Meta prompt templates
metadone = os.getenv('metadone')
echo_prompt_refiner = os.getenv('echo_prompt_refiner')
advanced_echo_prompt_refiner = os.getenv('advanced_echo_prompt_refiner')
metaprompt1 = os.getenv('metaprompt1')
loic_metaprompt = os.getenv('loic_metaprompt')
openai_metaprompt = os.getenv('openai_metaprompt')
original_meta_prompt = os.getenv('original_meta_prompt')
new_meta_prompt = os.getenv('new_meta_prompt')
advanced_meta_prompt = os.getenv('advanced_meta_prompt')
math_meta_prompt = os.getenv('math_meta_prompt')
math_meta = os.getenv('math_meta')
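# These templates are supplied via environment variables; on a Hugging Face
# Space they would typically be configured as repository secrets.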

# Main code to run the application
if __name__ == '__main__':
    prompt_refiner = PromptRefiner(api_key)
    gradio_interface = GradioInterface(prompt_refiner, custom_css)
    gradio_interface.launch(share=True)