midrees2806 committed
Commit
2978c6a
·
verified ·
1 Parent(s): 501be3c

Create app.py

Files changed (1)
  1. app.py +82 -0
app.py ADDED
@@ -0,0 +1,82 @@
+ import json
+ from sentence_transformers import SentenceTransformer, util
+ from groq import Groq
+ import datetime
+ import requests
+ from io import BytesIO
+ from PIL import Image, ImageDraw, ImageFont
+ import numpy as np
+ from dotenv import load_dotenv
+ import os
+
+ # Load environment variables
+ load_dotenv()
+
+ # Initialize Groq client
+ groq_client = Groq(api_key=os.getenv("GROQ_API_KEY"))
+
+ # Load models and dataset
+ similarity_model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
+
+ # Load dataset (automatically using the path)
+ with open('dataset.json', 'r') as f:
+     dataset = json.load(f)
+
+ # Precompute embeddings
+ dataset_questions = [item.get("input", "").lower().strip() for item in dataset]
+ dataset_answers = [item.get("response", "") for item in dataset]
+ dataset_embeddings = similarity_model.encode(dataset_questions, convert_to_tensor=True)
+
+ def query_groq_llm(prompt, model_name="llama3-70b-8192"):
+     try:
+         chat_completion = groq_client.chat.completions.create(
+             messages=[{
+                 "role": "user",
+                 "content": prompt
+             }],
+             model=model_name,
+             temperature=0.7,
+             max_tokens=500
+         )
+         return chat_completion.choices[0].message.content.strip()
+     except Exception as e:
+         print(f"Error querying Groq API: {e}")
+         return ""
+
+ def get_best_answer(user_input):
+     user_embedding = similarity_model.encode(user_input.lower().strip(), convert_to_tensor=True)
+     similarities = util.pytorch_cos_sim(user_embedding, dataset_embeddings)[0]
+     best_match_idx = similarities.argmax().item()
+     best_score = similarities[best_match_idx].item()
+
+     if best_score >= 0.65:
+         original_answer = dataset_answers[best_match_idx]
+         prompt = f"""As an official assistant for University of Education Lahore, provide a clear response:
+ Question: {user_input}
+ Original Answer: {original_answer}
+ Improved Answer:"""
+     else:
+         prompt = f"""As an official assistant for University of Education Lahore, provide a helpful response:
+ Include relevant details about university policies.
+ If unsure, direct to official channels.
+
+ Question: {user_input}
+
+ Official Answer:"""
+
+     llm_response = query_groq_llm(prompt)
+
+     if llm_response:
+         for marker in ["Improved Answer:", "Official Answer:"]:
+             if marker in llm_response:
+                 response = llm_response.split(marker)[-1].strip()
+                 break
+         else:
+             response = llm_response
+     else:
+         response = dataset_answers[best_match_idx] if best_score >= 0.65 else """For official information:
+ 📞 +92-42-99262231-33
+ ✉️ [email protected]
+ 🌐 ue.edu.pk"""
+
+     return response
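
This commit defines only the retrieval and generation helpers; nothing in it calls get_best_answer, and the dataset.json it reads is not part of the change. Judging from item.get("input") and item.get("response"), that file is presumably a JSON list of objects of the form {"input": "<question>", "response": "<answer>"}. A minimal, hypothetical usage sketch, assuming GROQ_API_KEY is set in the environment (or a .env file) and a dataset.json sits next to app.py:

from app import get_best_answer

# For a close dataset match (cosine similarity >= 0.65) this refines the stored
# answer with the Groq model; below the threshold it asks the model directly.
# If the Groq call fails, it falls back to the raw dataset answer or, failing
# that, to the hard-coded contact details.
print(get_best_answer("How do I apply for admission?"))

The question above is only illustrative; any user query works, since get_best_answer embeds the input with paraphrase-MiniLM-L6-v2 and compares it against the precomputed dataset embeddings.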