# NOTE(review): removed non-source page-scrape artifacts that preceded this
# module (Spaces status banner, file-size line, git object hashes, and a
# line-number gutter). They were not part of the Python source.
import json
import os
# --- Prompt-template configuration ---------------------------------------
# Templates arrive via the PROMPT_TEMPLATES environment variable as a JSON
# object mapping template-key -> {"description": ..., "template": ...}.
# Default to '{}' so a missing variable degrades to an empty config.
templates_json = os.getenv('PROMPT_TEMPLATES', '{}')
try:
    prompt_data = json.loads(templates_json)
except json.JSONDecodeError:
    # Malformed JSON: fall back to an empty config instead of failing import.
    prompt_data = {}

# Human-readable description for each template key.
# A comprehension over an empty dict already yields {}, so the previous
# `... if prompt_data else {}` guard was redundant and has been dropped.
metaprompt_explanations = {
    key: data.get("description", "No description available")
    for key, data in prompt_data.items()
}

# Markdown bullet list of the explanations ("- **key**: description" lines).
# Generator expression: no need to materialize a list just to join it.
explanation_markdown = "".join(
    f"- **{key}**: {value}\n"
    for key, value in metaprompt_explanations.items()
)
# Models selectable in the UI, grouped by provider/family.
models = [
    # Meta Llama family
    "meta-llama/Meta-Llama-3-70B-Instruct",
    "meta-llama/Meta-Llama-3-8B-Instruct",
    "meta-llama/Llama-3.1-70B-Instruct",
    "meta-llama/Llama-3.1-8B-Instruct",
    "meta-llama/Llama-3.2-3B-Instruct",
    "meta-llama/Llama-3.2-1B-Instruct",
    "meta-llama/Llama-2-13b-chat-hf",
    "meta-llama/Llama-2-7b-chat-hf",
    # HuggingFaceH4 Zephyr
    "HuggingFaceH4/zephyr-7b-beta",
    "HuggingFaceH4/zephyr-7b-alpha",
    # Qwen
    "Qwen/Qwen2.5-72B-Instruct",
    "Qwen/Qwen2.5-1.5B",
    # Microsoft Phi
    "microsoft/Phi-3.5-mini-instruct",
]
# Demo examples shown in the UI: [user prompt, refinement-template key].
examples = [
    ["Write a story on the end of prompt engineering replaced by an Ai specialized in refining prompts.", "done"],
    ["Tell me about that guy who invented the light bulb", "physics"],
    ["Explain the universe.", "star"],
    ["What's the population of New York City and how tall is the Empire State Building and who was the first mayor?", "morphosis"],
    ["List American presidents.", "verse"],
    ["Explain why the experiment failed.", "morphosis"],
    ["Is nuclear energy good?", "verse"],
    ["How does a computer work?", "phor"],
    ["How to make money fast?", "done"],
    ["how can you prove IT0's lemma in stochastic calculus ?", "arpe"],
]
# The Hugging Face API token is mandatory: fail fast at import time when it
# is missing so misconfiguration is caught immediately, not on first request.
api_token = os.environ.get('HF_API_TOKEN')
if not api_token:
    raise ValueError("HF_API_TOKEN not found in environment variables")
# Raw template text for each template key (parallels metaprompt_explanations).
# A comprehension over an empty dict already yields {}, so the previous
# `... if prompt_data else {}` guard was redundant and has been dropped.
meta_prompts = {
    key: data.get("template", "No template available")
    for key, data in prompt_data.items()
}
# Model used by the prompt refiner itself; overridable via environment.
prompt_refiner_model = os.getenv('prompt_refiner_model', 'meta-llama/Llama-3.1-8B-Instruct')
# f-string replaces '+' concatenation; emitted text is unchanged.
print(f"prompt_refiner_model used :{prompt_refiner_model}")

# Optional meta-prompt bodies pulled from the environment; each is None when
# the corresponding variable is unset. (Removed a commented-out duplicate of
# the prompt_refiner_model assignment that had no default.)
echo_prompt_refiner = os.getenv('echo_prompt_refiner')
openai_metaprompt = os.getenv('openai_metaprompt')
advanced_meta_prompt = os.getenv('advanced_meta_prompt')