File size: 3,954 Bytes
b467f2f
 
 
 
 
 
 
 
 
 
1c1b54f
b467f2f
 
 
 
 
 
 
 
 
 
1c1b54f
b467f2f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1c1b54f
b467f2f
 
 
 
 
 
 
1c1b54f
b467f2f
 
 
 
 
1c1b54f
 
 
 
 
 
 
 
 
 
 
 
 
b467f2f
 
 
 
 
 
 
 
 
 
 
 
 
1c1b54f
b467f2f
 
1c1b54f
b467f2f
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
import json
from sentence_transformers import SentenceTransformer, util
from groq import Groq
import datetime
import requests
from io import BytesIO
from PIL import Image, ImageDraw, ImageFont
import numpy as np
from dotenv import load_dotenv
import os
import pandas as pd  # <-- Required for Excel logging

# Load environment variables from a local .env file (expects GROQ_API_KEY).
load_dotenv()

# Initialize Groq client; api_key is None if GROQ_API_KEY is unset, in which
# case every LLM call will fail and query_groq_llm falls back to "".
groq_client = Groq(api_key=os.getenv("GROQ_API_KEY"))

# Sentence-embedding model used for question similarity matching.
similarity_model = SentenceTransformer('paraphrase-MiniLM-L6-v2')

# Load the Q/A dataset. Assumed to be a JSON list of objects with
# "input" (question) and "response" (answer) keys — TODO confirm schema.
with open('dataset.json', 'r') as f:
    dataset = json.load(f)

# Precompute embeddings once at import time so each incoming query only
# needs a single encode + cosine-similarity pass.
dataset_questions = [item.get("input", "").lower().strip() for item in dataset]
dataset_answers = [item.get("response", "") for item in dataset]
dataset_embeddings = similarity_model.encode(dataset_questions, convert_to_tensor=True)

def query_groq_llm(prompt, model_name="llama3-70b-8192"):
    """Send *prompt* to the Groq chat-completion API and return the reply.

    Args:
        prompt: User message forwarded verbatim as a single chat turn.
        model_name: Groq model identifier to query.

    Returns:
        The stripped text of the first completion choice, or an empty
        string if the API call fails for any reason (error is printed).
    """
    request_messages = [{"role": "user", "content": prompt}]
    try:
        # Single-turn chat completion; temperature/max_tokens kept fixed
        # so all callers get consistent response behavior.
        completion = groq_client.chat.completions.create(
            messages=request_messages,
            model=model_name,
            temperature=0.7,
            max_tokens=500,
        )
        return completion.choices[0].message.content.strip()
    except Exception as exc:
        # Best-effort: report and degrade to "" so callers can fall back.
        print(f"Error querying Groq API: {exc}")
        return ""

def get_best_answer(user_input):
    """Return the chatbot's best answer for *user_input*.

    Resolution order:
    1. Fee-related keywords -> canned redirect to the official fee page.
    2. Cosine-similarity match against the precomputed dataset embeddings;
       a match at/above the threshold is refined by the LLM.
    3. Otherwise the LLM is asked directly; the unmatched query is appended
       to unmatched_queries.xlsx so the dataset can be extended later.

    Args:
        user_input: Raw question text from the user.

    Returns:
        A response string (LLM output, dataset answer, or contact fallback).
    """
    SIMILARITY_THRESHOLD = 0.65  # single source of truth for "good match"
    user_input_lower = user_input.lower().strip()

    # 👉 Fee-specific shortcut: fee figures change often, so always point to
    # the official page instead of a possibly stale dataset answer.
    if any(keyword in user_input_lower for keyword in ["fee", "fees", "charges", "semester fee"]):
        return (
            "💰 For complete and up-to-date fee details for this program, we recommend visiting the official University of Education fee structure page.\n"
            "You’ll find comprehensive information regarding tuition, admission charges, and other applicable fees there.\n"
            "🔗 https://ue.edu.pk/allfeestructure.php"
        )

    # 🔍 Similarity matching against the precomputed dataset embeddings.
    user_embedding = similarity_model.encode(user_input_lower, convert_to_tensor=True)
    similarities = util.pytorch_cos_sim(user_embedding, dataset_embeddings)[0]
    best_match_idx = similarities.argmax().item()
    best_score = similarities[best_match_idx].item()

    # ✏️ If not matched well, log the query to Excel for dataset curation.
    if best_score < SIMILARITY_THRESHOLD:
        file_path = "unmatched_queries.xlsx"
        try:
            if os.path.exists(file_path):
                df = pd.read_excel(file_path)
            else:
                # BUGFIX: previously nothing was ever logged unless the file
                # already existed; start a fresh log on the first miss.
                df = pd.DataFrame(columns=["Unmatched Queries"])
            new_row = {"Unmatched Queries": user_input}
            df = pd.concat([df, pd.DataFrame([new_row])], ignore_index=True)
            df.to_excel(file_path, index=False)
        except Exception as e:
            # Best-effort bookkeeping: never let logging break the answer.
            print(f"Error updating unmatched_queries.xlsx: {e}")

    # 🧠 Prompt construction: refine a known answer, or answer from scratch.
    if best_score >= SIMILARITY_THRESHOLD:
        original_answer = dataset_answers[best_match_idx]
        prompt = f"""As an official assistant for University of Education Lahore, provide a clear response:
        Question: {user_input}
        Original Answer: {original_answer}
        Improved Answer:"""
    else:
        prompt = f"""As an official assistant for University of Education Lahore, provide a helpful response:
        Include relevant details about university policies.
        If unsure, direct to official channels.
        Question: {user_input}
        Official Answer:"""

    # 🧠 Query LLM (returns "" on failure).
    llm_response = query_groq_llm(prompt)

    # 🧾 Strip any echoed prompt marker from the LLM output.
    if llm_response:
        for marker in ["Improved Answer:", "Official Answer:"]:
            if marker in llm_response:
                response = llm_response.split(marker)[-1].strip()
                break
        else:
            response = llm_response
    else:
        # LLM unavailable: fall back to the raw dataset answer when we had a
        # good match, otherwise to the official contact details.
        response = dataset_answers[best_match_idx] if best_score >= SIMILARITY_THRESHOLD else """For official information:
        📞 +92-42-99262231-33
        ✉️ [email protected]
        🌐 ue.edu.pk"""

    return response