Spaces:
Sleeping
Sleeping
File size: 4,507 Bytes
b467f2f 61ac281 95c946c b467f2f 1c1b54f b467f2f 1c1b54f b467f2f 1c1b54f b467f2f f8b9b57 1c1b54f f8b9b57 61ac281 f8b9b57 61ac281 f8b9b57 61ac281 f8b9b57 61ac281 1c1b54f b467f2f 1c1b54f b467f2f 1c1b54f b467f2f |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 |
import json
from sentence_transformers import SentenceTransformer, util
from groq import Groq
import datetime
import requests
from io import BytesIO
from PIL import Image, ImageDraw, ImageFont
import numpy as np
from dotenv import load_dotenv
import os
import pandas as pd
import csv
import os
# Load environment variables (expects GROQ_API_KEY in .env or the process env).
load_dotenv()

# Initialize Groq client used for LLM completions.
groq_client = Groq(api_key=os.getenv("GROQ_API_KEY"))

# Sentence-embedding model used to measure semantic similarity between the
# user's question and the known dataset questions.
similarity_model = SentenceTransformer('paraphrase-MiniLM-L6-v2')

# Load the Q/A dataset. Each item is expected to carry "input" (question)
# and "response" (answer) keys; missing keys fall back to "".
# encoding='utf-8' is explicit so decoding does not depend on the platform
# default locale (JSON files are UTF-8 by convention).
with open('dataset.json', 'r', encoding='utf-8') as f:
    dataset = json.load(f)

# Precompute question embeddings once at startup so each incoming query only
# needs to embed the user input, not the whole dataset.
dataset_questions = [item.get("input", "").lower().strip() for item in dataset]
dataset_answers = [item.get("response", "") for item in dataset]
dataset_embeddings = similarity_model.encode(dataset_questions, convert_to_tensor=True)
def query_groq_llm(prompt, model_name="llama3-70b-8192"):
    """Send a single-turn user prompt to the Groq chat API.

    Returns the stripped reply text, or an empty string on any failure so
    callers can detect the error and fall back to a canned answer.
    """
    request = {
        "messages": [{"role": "user", "content": prompt}],
        "model": model_name,
        "temperature": 0.7,
        "max_tokens": 500,
    }
    try:
        # Any failure here (network, auth, empty choices) is reported and
        # swallowed — the caller treats "" as "LLM unavailable".
        completion = groq_client.chat.completions.create(**request)
        reply = completion.choices[0].message.content
        return reply.strip()
    except Exception as e:
        print(f"Error querying Groq API: {e}")
        return ""
# Minimum cosine similarity for a dataset answer to count as a match.
_SIMILARITY_THRESHOLD = 0.65

# CSV file collecting queries the dataset could not answer, for curation.
_UNMATCHED_LOG = "unmatched_queries.csv"


def _log_unmatched_query(user_input):
    """Append *user_input* to the unmatched-queries CSV, creating the file
    with a header row on first use. Best-effort: failures are printed, never
    raised, so logging problems cannot break the chat flow."""
    if not os.path.exists(_UNMATCHED_LOG):
        print(f"[DEBUG] File {_UNMATCHED_LOG} does not exist. Creating file with header.")
        try:
            with open(_UNMATCHED_LOG, mode="w", newline="", encoding="utf-8") as file:
                csv.writer(file).writerow(["Unmatched Queries"])
            print("[DEBUG] Header written successfully.")
        except Exception as e:
            print(f"[ERROR] Failed to create file: {e}")
    try:
        with open(_UNMATCHED_LOG, mode="a", newline="", encoding="utf-8") as file:
            csv.writer(file).writerow([user_input])
        print(f"[DEBUG] Query logged: {user_input}")
    except Exception as e:
        print(f"[ERROR] Failed to write query to CSV: {e}")


def get_best_answer(user_input):
    """Answer a user question about University of Education Lahore.

    Strategy:
      1. Fee-related questions get a canned redirect to the official fee page.
      2. Otherwise the question is embedded and matched against the dataset.
         A good match (cosine >= threshold) seeds an "improve this answer"
         prompt; a poor match logs the query for dataset curation and uses an
         open-ended prompt instead.
      3. The LLM reply is post-processed to strip echoed prompt markers; if
         the LLM fails entirely, the raw dataset answer (or official contact
         details) is returned.
    """
    user_input_lower = user_input.lower().strip()

    # Fee-specific shortcut: fee structures change often, so always point at
    # the official page instead of risking a stale dataset/LLM answer.
    if any(keyword in user_input_lower for keyword in ["fee", "fees", "charges", "semester fee"]):
        return (
            "💰 For complete and up-to-date fee details for this program, we recommend visiting the official University of Education fee structure page.\n"
            "You'll find comprehensive information regarding tuition, admission charges, and other applicable fees there.\n"
            "🔗 https://ue.edu.pk/allfeestructure.php"
        )

    # Semantic similarity against the precomputed dataset embeddings.
    user_embedding = similarity_model.encode(user_input_lower, convert_to_tensor=True)
    similarities = util.pytorch_cos_sim(user_embedding, dataset_embeddings)[0]
    best_match_idx = similarities.argmax().item()
    best_score = similarities[best_match_idx].item()
    matched = best_score >= _SIMILARITY_THRESHOLD

    if not matched:
        print(f"[DEBUG] Similarity score too low: {best_score}. Logging query to: {_UNMATCHED_LOG}")
        _log_unmatched_query(user_input)

    # Prompt construction: refine a known answer, or answer from scratch.
    if matched:
        original_answer = dataset_answers[best_match_idx]
        prompt = f"""As an official assistant for University of Education Lahore, provide a clear response:
Question: {user_input}
Original Answer: {original_answer}
Improved Answer:"""
    else:
        prompt = f"""As an official assistant for University of Education Lahore, provide a helpful response:
Include relevant details about university policies.
If unsure, direct to official channels.
Question: {user_input}
Official Answer:"""

    llm_response = query_groq_llm(prompt)

    # If the LLM echoed the prompt scaffold, keep only the text after it.
    if llm_response:
        for marker in ("Improved Answer:", "Official Answer:"):
            if marker in llm_response:
                return llm_response.split(marker)[-1].strip()
        return llm_response

    # LLM failed: fall back to the raw dataset answer or official contacts.
    if matched:
        return dataset_answers[best_match_idx]
    return """For official information:
📞 +92-42-99262231-33
✉️ [email protected]
🌐 ue.edu.pk"""
|