File size: 3,354 Bytes
0e95308
 
 
bf8e143
 
 
 
 
 
 
0e95308
bf8e143
0e95308
 
bf8e143
 
0e95308
bf8e143
3e83acd
 
a9f7e8d
bf8e143
 
0e95308
bf8e143
 
 
0e95308
 
bf8e143
0e95308
bf8e143
 
 
 
 
0e95308
 
 
 
bf8e143
0e95308
bf8e143
0e95308
 
04356f4
cb720fe
0280e01
a9f7e8d
 
0280e01
04356f4
bf8e143
04356f4
0280e01
 
a9f7e8d
7b27360
2978c6a
 
 
 
04356f4
2978c6a
a9f7e8d
 
 
 
2978c6a
a9f7e8d
 
 
 
 
04356f4
 
2978c6a
04356f4
a9f7e8d
04356f4
a9f7e8d
 
 
 
2978c6a
a9f7e8d
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
import json
from sentence_transformers import SentenceTransformer, util
from groq import Groq
import datetime
import requests
from io import BytesIO
from PIL import Image, ImageDraw, ImageFont
import numpy as np
from dotenv import load_dotenv
import os

# Load environment variables from a local .env file (expects GROQ_API_KEY).
load_dotenv()

# Initialize Groq client; api_key is None if GROQ_API_KEY is unset — the
# failure would then surface at first API call, not here.
groq_client = Groq(api_key=os.getenv("GROQ_API_KEY"))

# Load models and dataset.
# Sentence embedding model used for question similarity matching.
similarity_model = SentenceTransformer('paraphrase-MiniLM-L6-v2')

# Load dataset (automatically using the path) — expects a JSON list of
# objects with "input" (question) and "response" (answer) keys.
with open('dataset.json', 'r') as f:
    dataset = json.load(f)

# Precompute embeddings once at import time so each query only embeds the
# user's input. Questions are normalized (lowercase, stripped) to match the
# normalization applied in get_best_answer.
dataset_questions = [item.get("input", "").lower().strip() for item in dataset]
dataset_answers = [item.get("response", "") for item in dataset]
dataset_embeddings = similarity_model.encode(dataset_questions, convert_to_tensor=True)

def query_groq_llm(prompt, model_name="llama3-70b-8192"):
    """Send a single-turn user prompt to the Groq chat API.

    Args:
        prompt: The user message to send.
        model_name: Groq model identifier to query.

    Returns:
        The stripped completion text, or "" on any API/parse failure
        (best-effort: the error is printed and callers degrade gracefully).
    """
    messages = [{"role": "user", "content": prompt}]
    try:
        completion = groq_client.chat.completions.create(
            messages=messages,
            model=model_name,
            temperature=0.7,
            max_tokens=500
        )
        # Extract inside the try so malformed responses also fall back to "".
        text = completion.choices[0].message.content.strip()
    except Exception as exc:
        print(f"Error querying Groq API: {exc}")
        return ""
    return text

def get_best_answer(user_input):
    """Answer a user question via dataset similarity plus LLM rewriting.

    Fee-related questions short-circuit to the official fee-structure link.
    Otherwise the closest dataset question (cosine similarity) seeds a Groq
    prompt; if the score is >= 0.65 the matched answer is improved, else the
    LLM answers from scratch. If the LLM returns nothing, fall back to the
    raw dataset answer (when confident) or official contact details.
    """
    normalized = user_input.lower().strip()

    # 👉 Fee questions bypass the similarity/LLM pipeline entirely.
    fee_keywords = ("semester fee", "semester fees")
    if any(keyword in normalized for keyword in fee_keywords):
        return (
            "💰 For complete and up-to-date fee details for this program, we recommend visiting the official University of Education fee structure page.\n"
            "You’ll find comprehensive information regarding tuition, admission charges, and other applicable fees there.\n"
            "🔗 https://ue.edu.pk/allfeestructure.php"
        )

    # 🔍 Rank precomputed dataset embeddings against the user's question.
    query_embedding = similarity_model.encode(normalized, convert_to_tensor=True)
    scores = util.pytorch_cos_sim(query_embedding, dataset_embeddings)[0]
    top_idx = scores.argmax().item()
    top_score = scores[top_idx].item()
    confident = top_score >= 0.65

    if confident:
        prompt = f"""As an official assistant for University of Education Lahore, provide a clear response:
        Question: {user_input}
        Original Answer: {dataset_answers[top_idx]}
        Improved Answer:"""
    else:
        prompt = f"""As an official assistant for University of Education Lahore, provide a helpful response:
        Include relevant details about university policies.
        If unsure, direct to official channels.
        Question: {user_input}
        Official Answer:"""

    llm_response = query_groq_llm(prompt)

    if not llm_response:
        # LLM unavailable: reuse the matched dataset answer when confident,
        # otherwise point the user to official channels.
        if confident:
            return dataset_answers[top_idx]
        return """For official information:
        📞 +92-42-99262231-33
        ✉️ [email protected]
        🌐 ue.edu.pk"""

    # Strip any echoed prompt marker so only the answer text remains.
    for marker in ("Improved Answer:", "Official Answer:"):
        if marker in llm_response:
            return llm_response.split(marker)[-1].strip()
    return llm_response