midrees2806 committed
Commit b467f2f · verified
1 Parent(s): f60c241

Upload 4 files

Files changed (4)
  1. app (1).py +70 -0
  2. dataset.json +0 -0
  3. rag.py +91 -0
  4. requirements (1).txt +7 -0
app (1).py ADDED
@@ -0,0 +1,70 @@
+ import gradio as gr
+ from rag import get_best_answer
+
+ # Custom CSS for the interface
+ css = """
+ #chatbot {
+     height: 350px;
+     overflow: auto;
+     border-radius: 10px;
+     border: 1px solid #e0e0e0;
+ }
+ .textbox {
+     border-radius: 20px !important;
+     padding: 12px 20px !important;
+ }
+ .btn-column {
+     display: flex;
+     flex-direction: column;
+     gap: 10px;
+ }
+ """
+
+ def create_interface():
+     with gr.Blocks(css=css, theme="soft") as demo:
+         gr.Markdown("""
+         <h1 style='text-align: center;'>University of Education Lahore Chatbot</h1>
+         <p style='text-align: center;'>Official AI Assistant for University Information</p>
+         """)
+
+         # Define the chat interface
+         chatbot = gr.Chatbot(elem_id="chatbot")
+         examples = [
+             "What are the admission requirements?",
+             "How can I contact the administration?",
+             "What programs are offered?"
+         ]
+
+         with gr.Row():
+             message = gr.Textbox(
+                 label="Type your question here",
+                 placeholder="Ask about admissions, programs, or university services...",
+                 elem_classes="textbox",
+                 scale=4
+             )
+             with gr.Column(scale=1, elem_classes="btn-column"):
+                 submit_button = gr.Button("↩️ Enter")
+                 reset_button = gr.Button("🗑️ Reset Chat")
+
+         # Set up both the Enter key and the button to trigger the response
+         def respond(message, chat_history):
+             bot_message = get_best_answer(message)
+             chat_history.append((message, bot_message))
+             return "", chat_history
+
+         message.submit(respond, [message, chatbot], [message, chatbot])
+         submit_button.click(respond, [message, chatbot], [message, chatbot])
+
+         # Reset button to clear the chat history
+         def reset_conversation():
+             return []
+
+         reset_button.click(reset_conversation, [], [chatbot])
+
+         gr.Examples(examples, inputs=message)
+
+     return demo
+
+ if __name__ == "__main__":
+     demo = create_interface()
+     demo.launch()
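The respond() handler above only appends (user, bot) tuples to the chat history before handing it back to Gradio, so the wiring can be smoke-tested without launching the web UI. A minimal sketch, assuming rag.py and dataset.json sit in the working directory and GROQ_API_KEY is set; the test itself is illustrative and not part of the upload:

# Smoke test for the respond() wiring without starting the Gradio server.
from rag import get_best_answer

def respond(message, chat_history):
    bot_message = get_best_answer(message)
    chat_history.append((message, bot_message))
    return "", chat_history

history = []
_, history = respond("What programs are offered?", history)
print(history[-1][1])  # inspect the bot's reply manually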
dataset.json ADDED
The diff for this file is too large to render. See raw diff
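The raw contents are not rendered here, but rag.py reads each record with item.get("input") and item.get("response"), so a compatible dataset.json is a JSON array of objects carrying those two keys. A hypothetical two-record example of that shape (the question and answer text below is illustrative, not taken from the uploaded file):

# Hypothetical records matching the shape rag.py expects from dataset.json;
# the real uploaded file is much larger and not shown in this diff.
import json

sample = [
    {"input": "What are the admission requirements?",
     "response": "Admission requirements are listed on the university's admissions pages."},
    {"input": "How can I contact the administration?",
     "response": "Contact details for the administration are available on ue.edu.pk."},
]

with open("dataset.json", "w") as f:
    json.dump(sample, f, indent=2)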
 
rag.py ADDED
@@ -0,0 +1,91 @@
+ import json
+ from sentence_transformers import SentenceTransformer, util
+ from groq import Groq
+ import datetime
+ import requests
+ from io import BytesIO
+ from PIL import Image, ImageDraw, ImageFont
+ import numpy as np
+ from dotenv import load_dotenv
+ import os
+
+ # Load environment variables
+ load_dotenv()
+
+ # Initialize Groq client
+ groq_client = Groq(api_key=os.getenv("GROQ_API_KEY"))
+
+ # Load the sentence-similarity model
+ similarity_model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
+
+ # Load dataset (automatically using the path)
+ with open('dataset.json', 'r') as f:
+     dataset = json.load(f)
+
+ # Precompute embeddings for every question in the dataset
+ dataset_questions = [item.get("input", "").lower().strip() for item in dataset]
+ dataset_answers = [item.get("response", "") for item in dataset]
+ dataset_embeddings = similarity_model.encode(dataset_questions, convert_to_tensor=True)
+
+ def query_groq_llm(prompt, model_name="llama3-70b-8192"):
+     try:
+         chat_completion = groq_client.chat.completions.create(
+             messages=[{
+                 "role": "user",
+                 "content": prompt
+             }],
+             model=model_name,
+             temperature=0.7,
+             max_tokens=500
+         )
+         return chat_completion.choices[0].message.content.strip()
+     except Exception as e:
+         print(f"Error querying Groq API: {e}")
+         return ""
+
+ def get_best_answer(user_input):
+     user_input_lower = user_input.lower().strip()
+
+     # 👉 Check if the question is about fees
+     if any(keyword in user_input_lower for keyword in ["fee", "fees", "charges", "semester fee"]):
+         return (
+             "💰 For complete and up-to-date fee details for this program, we recommend visiting the official University of Education fee structure page.\n"
+             "You'll find comprehensive information regarding tuition, admission charges, and other applicable fees there.\n"
+             "🔗 https://ue.edu.pk/allfeestructure.php"
+         )
+
+     # 🔍 Continue with the normal similarity-based logic
+     user_embedding = similarity_model.encode(user_input_lower, convert_to_tensor=True)
+     similarities = util.pytorch_cos_sim(user_embedding, dataset_embeddings)[0]
+     best_match_idx = similarities.argmax().item()
+     best_score = similarities[best_match_idx].item()
+
+     if best_score >= 0.65:
+         original_answer = dataset_answers[best_match_idx]
+         prompt = f"""As an official assistant for University of Education Lahore, provide a clear response:
+ Question: {user_input}
+ Original Answer: {original_answer}
+ Improved Answer:"""
+     else:
+         prompt = f"""As an official assistant for University of Education Lahore, provide a helpful response:
+ Include relevant details about university policies.
+ If unsure, direct to official channels.
+ Question: {user_input}
+ Official Answer:"""
+
+     llm_response = query_groq_llm(prompt)
+
+     if llm_response:
+         for marker in ["Improved Answer:", "Official Answer:"]:
+             if marker in llm_response:
+                 response = llm_response.split(marker)[-1].strip()
+                 break
+         else:
+             response = llm_response
+     else:
+         response = dataset_answers[best_match_idx] if best_score >= 0.65 else """For official information:
+ 📞 +92-42-99262231-33
+ ✉️ [email protected]
+ 🌐 ue.edu.pk"""
+
+     return response
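get_best_answer() keys everything off a single cosine-similarity lookup: the stored dataset answer is reused (through the "Improved Answer" prompt) only when the best match scores at least 0.65, and lower scores fall through to the generic "Official Answer" prompt. A toy sketch of just that retrieval step, using the same paraphrase-MiniLM-L6-v2 model; the example questions and query are illustrative:

# Isolated look at the retrieval step: embed known questions once, then score a query.
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('paraphrase-MiniLM-L6-v2')

known_questions = [
    "what are the admission requirements?",
    "how can i contact the administration?",
]
known_embeddings = model.encode(known_questions, convert_to_tensor=True)

query_embedding = model.encode("tell me about admission criteria", convert_to_tensor=True)
scores = util.pytorch_cos_sim(query_embedding, known_embeddings)[0]
best_idx = scores.argmax().item()

print(known_questions[best_idx], round(scores[best_idx].item(), 3))
# A score >= 0.65 would reuse the stored answer; anything lower uses the LLM-only prompt.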
requirements (1).txt ADDED
@@ -0,0 +1,7 @@
+ sentence-transformers
+ groq
+ gradio
+ pillow
+ requests
+ numpy
+ python-dotenv
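rag.py builds the Groq client at import time from GROQ_API_KEY, so the app will most likely fail on startup if the key is missing. A small pre-flight check, assuming the key lives in a local .env file; the check itself is illustrative and not part of the upload:

# Pre-flight check before running app (1).py: confirm the Groq API key is visible.
import os
from dotenv import load_dotenv

load_dotenv()  # picks up GROQ_API_KEY from a local .env file, if one exists
if not os.getenv("GROQ_API_KEY"):
    raise SystemExit("GROQ_API_KEY is not set; add it to .env or export it before launching.")
print("GROQ_API_KEY found; the chatbot can start.")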