import json
import os

from together import Together


def rerank_best_answer(json_files, model="meta-llama/Llama-3-8b-chat-hf"):
    """Combine per-model answers from several JSON result files and ask an
    LLM judge to pick the best model and answer for each query.

    Reads the Together API key from the TOGETHER_AI environment variable.
    """
    together_ai_key = os.getenv("TOGETHER_AI")
    if not together_ai_key:
        raise ValueError(
            "TOGETHER_AI environment variable not found. "
            "Please set it before running the script."
        )

    client = Together(api_key=together_ai_key)

    # Group answers by query_id so each prompt carries every model's
    # response to the same question, keyed by the source file name.
    combined_prompts = {}
    for json_file in json_files:
        with open(json_file, 'r') as file:
            data = json.load(file)

        for item in data:
            query_id = item['query_id']
            if query_id not in combined_prompts:
                combined_prompts[query_id] = {
                    "question": item['input'],
                    "answers": {}
                }
            combined_prompts[query_id]["answers"][json_file] = item['response']
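
    # For reference, each input file is assumed to be a JSON list of records
    # shaped like this (field names inferred from the keys accessed above;
    # the sample values are hypothetical):
    #
    #   [{"query_id": "q1", "input": "What is ...?", "response": "..."}]
    #
    # producing combined entries such as:
    #
    #   combined_prompts["q1"] == {
    #       "question": "What is ...?",
    #       "answers": {"model_a.json": "...", "model_b.json": "..."},
    #   }
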
    responses = []

    for query_id, prompt in combined_prompts.items():
        prompt_text = f"""Input JSON:
{json.dumps(prompt, indent=4)}

For the above question, identify which model gave the best response based on accuracy. Ensure the chosen response is an answer and not a follow-up question. Provide the output in the format:
{{
    "best_model": "<model_name>",
    "best_answer": "<answer>"
}}
Just output this JSON and nothing else.
"""

        # Ask the judge model to pick the best response for this query.
        response = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt_text}],
        )
        response_content = response.choices[0].message.content

        # Two follow-up prompts pull the individual fields out of the judge's
        # JSON answer. response_content is already a string, so it is embedded
        # directly rather than re-encoded with json.dumps().
        prompt_text_extract_bestModel = f"""Input JSON:
{response_content}

Just output the best_model from the above JSON and nothing else.
"""
        prompt_text_extract_bestAnswer = f"""Input JSON:
{response_content}

Just output the best_answer from the above JSON and nothing else.
"""
        response_bestModel = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt_text_extract_bestModel}],
        )
        response_bestAnswer = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt_text_extract_bestAnswer}],
        )

        responses.append({
            "query_id": query_id,
            "question": prompt["question"],
            "best_model": response_bestModel.choices[0].message.content,
            "best_answer": response_bestAnswer.choices[0].message.content,
        })

        # Progress logging: show which model won each query.
        print(response_bestModel.choices[0].message.content)

    return responses


def rankerAgent(prompt, config_file='config.json', model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo"):
    """Rank a single combined prompt and return (best_model, best_answer).

    Unlike rerank_best_answer, this reads the Together API key from a JSON
    config file.
    """
    with open(config_file, 'r') as file:
        config = json.load(file)

    together_ai_key = config.get("TOGETHER_AI")
    if not together_ai_key:
        raise ValueError("TOGETHER_AI key not found in the config file.")

    client = Together(api_key=together_ai_key)

    prompt_text = f"""Input JSON:
{json.dumps(prompt, indent=4)}

For the above question, identify which model gave the best response based on accuracy. Ensure the chosen response is an answer and not a follow-up question. Provide the output in the format:
{{
    "best_model": "<model_name>",
    "best_answer": "<answer>"
}}
Just output this JSON and nothing else.
"""

    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt_text}],
    )
    response_content = response.choices[0].message.content

    # As above, extract each field from the judge's JSON answer with a
    # follow-up prompt; response_content is already a plain string.
    prompt_text_extract_bestModel = f"""Input JSON:
{response_content}

Just output the best_model from the above JSON and nothing else.
"""
    prompt_text_extract_bestAnswer = f"""Input JSON:
{response_content}

Just output the best_answer from the above JSON and nothing else.
"""
    response_bestModel = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt_text_extract_bestModel}],
    )
    response_bestAnswer = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt_text_extract_bestAnswer}],
    )

    return response_bestModel.choices[0].message.content, response_bestAnswer.choices[0].message.content
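

# Minimal usage sketch, assuming result files named "model_a.json" and
# "model_b.json" exist and follow the record shape described in
# rerank_best_answer; the file names here are hypothetical.
if __name__ == "__main__":
    results = rerank_best_answer(["model_a.json", "model_b.json"])
    with open("best_answers.json", "w") as out_file:
        json.dump(results, out_file, indent=4)

    # rankerAgent does the same for one prompt dict, reading the key from
    # config.json instead of the environment:
    #   best_model, best_answer = rankerAgent(
    #       {"question": "...", "answers": {"model_a.json": "..."}})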