import os
import json
import re
from huggingface_hub import InferenceClient
import gradio as gr
from pydantic import BaseModel, Field
from typing import Optional, Literal
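# Schemas: PromptInput validates the refinement request; RefinementOutput structures the parsed model response.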
class PromptInput(BaseModel):
text: str = Field(..., description="The initial prompt text")
    meta_prompt_choice: Literal["star", "done", "physics", "morphosis", "verse", "phor", "bolism"] = Field(..., description="Choice of meta prompt strategy")
class RefinementOutput(BaseModel):
query_analysis: Optional[str] = None
initial_prompt_evaluation: Optional[str] = None
refined_prompt: Optional[str] = None
explanation_of_refinements: Optional[str] = None
raw_content: Optional[str] = None
class PromptRefiner:
def __init__(self, api_token: str):
self.client = InferenceClient(token=api_token)
def refine_prompt(self, prompt_input: PromptInput) -> RefinementOutput:
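        # Map the UI choice to one of the meta prompt templates loaded from
        # environment variables at startup; unknown choices fall back to the
        # advanced meta prompt.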
        meta_prompts = {
            "morphosis": original_meta_prompt,
            "verse": new_meta_prompt,
            "physics": metaprompt1,
            "bolism": loic_metaprompt,
            "done": metadone,
            "star": echo_prompt_refiner,
        }
        selected_meta_prompt = meta_prompts.get(prompt_input.meta_prompt_choice, advanced_meta_prompt)
messages = [
{"role": "system", "content": 'You are an expert at refining and extending prompts. Given a basic prompt, provide a more detailed.'},
{"role": "user", "content": selected_meta_prompt.replace("[Insert initial prompt here]", prompt_input.text)}
]
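        # A low temperature keeps the refinement output stable and parseable.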
response = self.client.chat_completion(
model="meta-llama/Meta-Llama-3-70B-Instruct",
messages=messages,
max_tokens=4000,
temperature=0.3
)
response_content = response.choices[0].message.content.strip()
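        # The meta prompts instruct the model to wrap its structured answer in <json> tags.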
try:
json_match = re.search(r'<json>\s*(.*?)\s*</json>', response_content, re.DOTALL)
if json_match:
                json_str = json_match.group(1)
                # Collapse newlines, then parse. Some responses are double-encoded
                # (a JSON string that itself contains JSON), so parse a second time
                # if the first pass yields a string rather than a dict.
                json_str = re.sub(r'\n\s*', ' ', json_str)
                json_output = json.loads(json_str)
                if isinstance(json_output, str):
                    json_output = json.loads(json_output)
return RefinementOutput(**json_output, raw_content=response_content)
else:
raise ValueError("No JSON found in the response")
except (json.JSONDecodeError, ValueError) as e:
print(f"Error parsing JSON: {e}")
print(f"Raw content: {response_content}")
output = {}
for key in ["initial_prompt_evaluation", "refined_prompt", "explanation_of_refinements"]:
pattern = rf'"{key}":\s*"(.*?)"(?:,|\}})'
match = re.search(pattern, response_content, re.DOTALL)
if match:
output[key] = match.group(1).replace('\\n', '\n').replace('\\"', '"')
else:
output[key] = ""
return RefinementOutput(**output, raw_content=response_content)
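    # Send a prompt to the chosen model and return its answer for display.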
def apply_prompt(self, prompt: str, model: str) -> str:
try:
messages = [
{"role": "system", "content": "You are a helpful assistant. Answer in stylized version with latex format or markdown if relevant. Separate your answer into logical sections using level 2 headers (##) for sections and bolding (**) for subsections.Incorporate a variety of lists, headers, and text to make the answer visually appealing"},
{"role": "user", "content": prompt}
]
response = self.client.chat_completion(
model=model,
messages=messages,
max_tokens=4000,
temperature=0.8
)
output = response.choices[0].message.content.strip()
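            # Collapse double newlines so the Markdown pane stays compact.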
output = output.replace('\n\n', '\n').strip()
return output
except Exception as e:
return f"Error: {str(e)}"
class GradioInterface:
def __init__(self, prompt_refiner: PromptRefiner):
self.prompt_refiner = prompt_refiner
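        # Build the Blocks UI: refinement controls on top, side-by-side comparison below.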
with gr.Blocks() as self.interface:
gr.Markdown("# PROMPT++")
gr.Markdown("### Automating Prompt Engineering by Refining your Prompts")
gr.Markdown("Learn how to generate an improved version of your prompts. Enter a main idea for a prompt, choose a meta prompt, and the model will attempt to generate an improved version.")
with gr.Row():
prompt_text = gr.Textbox(label="Type the prompt here")
with gr.Row():
                meta_prompt_choice = gr.Radio(["star", "done", "physics", "morphosis", "verse", "phor", "bolism"], label="Choose Meta Prompt", value="morphosis")
refine_button = gr.Button("Refine Prompt")
with gr.Row():
gr.Markdown("### Initial prompt analysis")
with gr.Column():
analysis_evaluation = gr.Markdown(label="Analysis and Evaluation")
gr.Markdown("### Refined Prompt")
refined_prompt = gr.Textbox(label="Refined Prompt")
gr.Markdown("### Explanation of Refinements")
explanation_of_refinements = gr.Markdown(label="Explanation of Refinements")
with gr.Accordion("Full Response JSON", open=False):
full_response_json = gr.JSON()
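            # Outputs map one-to-one to the four components defined above.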
refine_button.click(
fn=self.refine_prompt,
inputs=[prompt_text, meta_prompt_choice],
outputs=[analysis_evaluation, refined_prompt, explanation_of_refinements, full_response_json]
)
with gr.Row():
apply_model = gr.Dropdown(
[
"meta-llama/Meta-Llama-3-70B-Instruct",
"meta-llama/Llama-3.1-8B-Instruct",
"meta-llama/Llama-2-7b-chat-hf",
"Qwen/Qwen2.5-1.5B-Instruct",
"NousResearch/Hermes-3-Llama-3.1-8B",
"Qwen/Qwen2.5-72B-Instruct",
"HuggingFaceH4/zephyr-7b-alpha",
"microsoft/Phi-3.5-mini-instruct",
"Qwen/Qwen2.5-0.5B-Instruct"
],
value="meta-llama/Meta-Llama-3-70B-Instruct",
label="Choose the Model to apply to the prompts"
)
apply_button = gr.Button("Apply Prompts")
with gr.Row():
with gr.Column():
gr.Markdown("### Original Prompt Output")
original_output = gr.Markdown(label="Original Prompt Output")
with gr.Column():
gr.Markdown("### Refined Prompt Output")
refined_output = gr.Markdown(label="Refined Prompt Output")
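            # Run the original and the refined prompt through the same model for comparison.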
apply_button.click(
fn=self.apply_prompts,
inputs=[prompt_text, refined_prompt, apply_model],
outputs=[original_output, refined_output]
)
gr.Examples(
examples=[
["Tell me about that guy who invented the light bulb", "physics"],
["Explain the universe.", "star"],
["What's the population of New York City and how tall is the Empire State Building and who was the first mayor?", "morphosis"],
["List American presidents.", "verse"],
["Write a story.", "bolism"],
["Explain why the experiment failed.", "morphosis"],
["Is nuclear energy good?", "verse"],
["How does a computer work?", "phor"],
["How to make money fast?", "done"],
["how can you prove IT0's lemma in stochastic calculus ?", "star"],
],
inputs=[prompt_text, meta_prompt_choice]
)
def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
input_data = PromptInput(text=prompt, meta_prompt_choice=meta_prompt_choice)
result = self.prompt_refiner.refine_prompt(input_data)
analysis_evaluation = f"\n\n{result.initial_prompt_evaluation}"
return (
analysis_evaluation,
result.refined_prompt,
result.explanation_of_refinements,
result.dict()
)
def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str):
original_output = self.prompt_refiner.apply_prompt(original_prompt, model)
refined_output = self.prompt_refiner.apply_prompt(refined_prompt, model)
return original_output, refined_output
def launch(self, share=False):
self.interface.launch(share=share)
# Main code to run the application
if __name__ == '__main__':
api_token = os.getenv('HF_API_TOKEN')
if not api_token:
raise ValueError("HF_API_TOKEN not found in environment variables")
metadone = os.getenv('metadone')
echo_prompt_refiner = os.getenv('echo_prompt_refiner')
metaprompt1 = os.getenv('metaprompt1')
loic_metaprompt = os.getenv('loic_metaprompt')
openai_metaprompt = os.getenv('openai_metaprompt')
original_meta_prompt = os.getenv('original_meta_prompt')
new_meta_prompt = os.getenv('new_meta_prompt')
advanced_meta_prompt = os.getenv('advanced_meta_prompt')
prompt_refiner = PromptRefiner(api_token)
gradio_interface = GradioInterface(prompt_refiner)
gradio_interface.launch(share=True)