midrees2806 committed
Commit bf8e143 · verified · 1 Parent(s): 04356f4

Update rag.py

Files changed (1)
  1. rag.py +48 -105
rag.py CHANGED
@@ -1,148 +1,91 @@
- import os
  import json
- import requests
- import pandas as pd
- from dotenv import load_dotenv
- from datetime import datetime
  from sentence_transformers import SentenceTransformer, util
  from groq import Groq
- from datasets import load_dataset, Dataset

- # ✅ Load environment variables from .env
  load_dotenv()

- # ✅ API Keys
- HF_TOKEN = os.getenv("HF_TOKEN")
- GROQ_API_KEY = os.getenv("GROQ_API_KEY")

- # ✅ Initialize Groq client
- groq_client = Groq(api_key=GROQ_API_KEY)
-
- # ✅ Hugging Face Dataset Repo
- HF_DATASET_REPO = "midrees2806/unmatched_queries"
-
- # ✅ Sentence Transformer model for semantic similarity
  similarity_model = SentenceTransformer('paraphrase-MiniLM-L6-v2')

- # ✅ Greeting keywords
- GREETINGS = [
-     "hi", "hello", "hey", "good morning", "good afternoon", "good evening",
-     "assalam o alaikum", "salam", "aoa", "hi there", "hey there", "greetings"
- ]
-
- # ✅ Load dataset
- try:
-     with open('dataset.json', 'r') as f:
-         dataset = json.load(f)
-     assert all('input' in d and 'response' in d for d in dataset), "Invalid dataset format"
- except Exception as e:
-     print(f"[ERROR] Loading dataset: {e}")
-     dataset = []

- # ✅ Prepare embeddings
- dataset_questions = [d["input"].lower().strip() for d in dataset]
- dataset_answers = [d["response"] for d in dataset]
  dataset_embeddings = similarity_model.encode(dataset_questions, convert_to_tensor=True)

- # ✅ Function: Save unmatched queries to Hugging Face Hub
- def manage_unmatched_queries(query: str):
-     try:
-         timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-         try:
-             ds = load_dataset(HF_DATASET_REPO, token=HF_TOKEN)
-             df = ds["train"].to_pandas()
-         except:
-             df = pd.DataFrame(columns=["Query", "Timestamp", "Processed"])
-         if query not in df["Query"].values:
-             new_entry = {"Query": query, "Timestamp": timestamp, "Processed": False}
-             df = pd.concat([df, pd.DataFrame([new_entry])], ignore_index=True)
-             updated_ds = Dataset.from_pandas(df)
-             updated_ds.push_to_hub(HF_DATASET_REPO, token=HF_TOKEN)
-     except Exception as e:
-         print(f"[ERROR] Logging unmatched query: {e}")
-
- # ✅ Function: Call Groq LLM
- def query_groq_llm(prompt: str, model_name="llama3-70b-8192") -> str:
      try:
-         completion = groq_client.chat.completions.create(
-             messages=[{"role": "user", "content": prompt}],
              model=model_name,
              temperature=0.7,
              max_tokens=500
          )
-         return completion.choices[0].message.content.strip()
      except Exception as e:
-         print(f"[ERROR] Groq LLM call failed: {e}")
          return ""

- # ✅ Main RAG logic
  def get_best_answer(user_input):
-     if not user_input.strip():
-         return "Please enter a valid question."
-
      user_input_lower = user_input.lower().strip()

-     if any(greet in user_input_lower for greet in GREETINGS):
-         greeting_response = query_groq_llm(
-             f"You are an official assistant for University of Education Lahore. "
-             f"Respond to this greeting in a friendly and professional manner: {user_input}"
-         )
-         return greeting_response if greeting_response else "Hello! How can I assist you today?"
-
-     if any(keyword in user_input_lower for keyword in ["fee structure", "fees structure", "semester fees", "semester fee"]):
          return (
              "💰 For complete and up-to-date fee details for this program, we recommend visiting the official University of Education fee structure page.\n"
-             "You'll find comprehensive information regarding tuition, admission charges, and other applicable fees there.\n"
              "🔗 https://ue.edu.pk/allfeestructure.php"
          )

      user_embedding = similarity_model.encode(user_input_lower, convert_to_tensor=True)
      similarities = util.pytorch_cos_sim(user_embedding, dataset_embeddings)[0]
      best_match_idx = similarities.argmax().item()
      best_score = similarities[best_match_idx].item()

-     if best_score < 0.65:
-         manage_unmatched_queries(user_input)
-
      if best_score >= 0.65:
          original_answer = dataset_answers[best_match_idx]
-         prompt = f"""Name is UOE AI Assistant! You are an official assistant for the University of Education Lahore.
-
- Rephrase the following official answer clearly and professionally.
- Use structured formatting (like headings, bullet points, or numbered lists) where appropriate.
- DO NOT add any new or extra information. ONLY rephrase and improve the clarity and formatting of the original answer.
-
- ### Question:
- {user_input}
-
- ### Original Answer:
- {original_answer}
-
- ### Rephrased Answer:
- """
      else:
-         prompt = f"""Name is UOE AI Assistant! As an official assistant for University of Education Lahore, provide a helpful response:
- Include relevant details about university policies.
- If unsure, direct to official channels.
-
- ### Question:
- {user_input}
-
- ### Official Answer:
- """

      llm_response = query_groq_llm(prompt)

      if llm_response:
-         for marker in ["Improved Answer:", "Official Answer:", "Rephrased Answer:"]:
              if marker in llm_response:
-                 return llm_response.split(marker)[-1].strip()
-         return llm_response
      else:
-         return dataset_answers[best_match_idx] if best_score >= 0.65 else (
-             "For official information:\n"
-             "📞 +92-42-99262231-33\n"
-             "✉️ info@ue.edu.pk\n"
-             "🌐 https://ue.edu.pk"
-         )
  import json
  from sentence_transformers import SentenceTransformer, util
  from groq import Groq
+ import datetime
+ import requests
+ from io import BytesIO
+ from PIL import Image, ImageDraw, ImageFont
+ import numpy as np
+ from dotenv import load_dotenv
+ import os

+ # Load environment variables
  load_dotenv()

+ # Initialize Groq client
+ groq_client = Groq(api_key=os.getenv("GROQ_API_KEY"))

+ # Load models and dataset
  similarity_model = SentenceTransformer('paraphrase-MiniLM-L6-v2')

+ # Load dataset (automatically using the path)
+ with open('dataset.json', 'r') as f:
+     dataset = json.load(f)

+ # Precompute embeddings
+ dataset_questions = [item.get("input", "").lower().strip() for item in dataset]
+ dataset_answers = [item.get("response", "") for item in dataset]
  dataset_embeddings = similarity_model.encode(dataset_questions, convert_to_tensor=True)

+ def query_groq_llm(prompt, model_name="llama3-70b-8192"):
      try:
+         chat_completion = groq_client.chat.completions.create(
+             messages=[{
+                 "role": "user",
+                 "content": prompt
+             }],
              model=model_name,
              temperature=0.7,
              max_tokens=500
          )
+         return chat_completion.choices[0].message.content.strip()
      except Exception as e:
+         print(f"Error querying Groq API: {e}")
          return ""

  def get_best_answer(user_input):
      user_input_lower = user_input.lower().strip()

+     # 👉 Check if question is about fee
+     if any(keyword in user_input_lower for keyword in ["fee", "fees", "charges", "semester fee"]):
          return (
              "💰 For complete and up-to-date fee details for this program, we recommend visiting the official University of Education fee structure page.\n"
+             "You’ll find comprehensive information regarding tuition, admission charges, and other applicable fees there.\n"
              "🔗 https://ue.edu.pk/allfeestructure.php"
          )

+     # 🔍 Continue with normal similarity-based logic
      user_embedding = similarity_model.encode(user_input_lower, convert_to_tensor=True)
      similarities = util.pytorch_cos_sim(user_embedding, dataset_embeddings)[0]
      best_match_idx = similarities.argmax().item()
      best_score = similarities[best_match_idx].item()

      if best_score >= 0.65:
          original_answer = dataset_answers[best_match_idx]
+         prompt = f"""As an official assistant for University of Education Lahore, provide a clear response:
+ Question: {user_input}
+ Original Answer: {original_answer}
+ Improved Answer:"""
      else:
+         prompt = f"""As an official assistant for University of Education Lahore, provide a helpful response:
+ Include relevant details about university policies.
+ If unsure, direct to official channels.
+ Question: {user_input}
+ Official Answer:"""

      llm_response = query_groq_llm(prompt)

      if llm_response:
+         for marker in ["Improved Answer:", "Official Answer:"]:
              if marker in llm_response:
+                 response = llm_response.split(marker)[-1].strip()
+                 break
+         else:
+             response = llm_response
      else:
+         response = dataset_answers[best_match_idx] if best_score >= 0.65 else """For official information:
+ 📞 +92-42-99262231-33
+ ✉️ info@ue.edu.pk
+ 🌐 ue.edu.pk"""

+     return response
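Both versions of rag.py read GROQ_API_KEY from a .env file via load_dotenv(), load question/answer pairs from dataset.json, and expose get_best_answer() as the single entry point. Below is a minimal local sketch of how the module might be exercised; the sample dataset contents and the example question are illustrative assumptions, not part of this commit.

# Minimal smoke test for rag.py (illustrative sketch; assumes a .env file with
# GROQ_API_KEY and that sentence-transformers, groq, python-dotenv etc. are installed).
import json

# dataset.json is expected to be a JSON list of objects with "input" and
# "response" keys, which is the format both versions of rag.py read.
sample = [
    {
        "input": "What is the admission process?",
        "response": "Admissions are announced on the official University of Education website."
    }
]
with open("dataset.json", "w", encoding="utf-8") as f:
    json.dump(sample, f, ensure_ascii=False, indent=2)

# Importing rag runs its module-level setup: load_dotenv(), the
# SentenceTransformer model, and the precomputed dataset embeddings.
from rag import get_best_answer

print(get_best_answer("How do I apply for admission?"))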