Update app.py
app.py CHANGED
@@ -1,203 +1,211 @@
import os
import gradio as gr
from groq import Groq
from sentence_transformers import SentenceTransformer
import faiss
import numpy as np
import PyPDF2
-import re
-from collections import Counter
from sklearn.metrics.pairwise import cosine_similarity
-import logging

-# Setup logging
-logging.basicConfig(filename='query_logs.log', level=logging.INFO, format='%(asctime)s:%(levelname)s:%(message)s')

-# Grog API key (Use environment variable or replace it with your actual API key)
-grog_api_key = "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"

-client = Groq(api_key=grog_api_key)

cache = {}

-if os.path.exists(book_path):
-    print(f"Book found at: {book_path}")
-else:
-    print("Book not found!")

-# Function to read the PDF file
def read_pdf(file_path):
    with open(file_path, 'rb') as file:
        reader = PyPDF2.PdfReader(file)

-def vectorize_text(text):
    try:
-        index = faiss.IndexFlatL2(embeddings.shape[1])  # L2 distance index
-        index.add(np.array(embeddings))  # Add embeddings to the index
-        print(f"Added {len(sentences)} sentences to the vector store.")
-        return index, sentences
    except Exception as e:
        return None, None

-vector_index, sentences = vectorize_text(book_text)

-if vector_index:
-    print("Vectorization complete.")
-else:
-    print("Vectorization failed.")

-# Function to generate embeddings for the query using the SentenceTransformer
-def generate_query_embedding(query, sentence_transformer_model):
    return sentence_transformer_model.encode([query])

-    if D[0][0] > threshold:
-        return False
-    return True

-def generate_diverse_responses(client, prompt, n=3):
    responses = []
    for i in range(n):
-        temperature = 0.
-        top_p = 0.9 - (i * 0.
        try:
            chat_completion = client.chat.completions.create(
-                messages=[{
-                    "role": "user",
-                    "content": prompt,
-                }],
                model="llama3-8b-8192",
                temperature=temperature,
                top_p=top_p
            )
-            responses.append(chat_completion.choices[0].message.content)
        except Exception as e:
            logging.error(f"Error generating response: {str(e)}")
-            responses.append("
    return responses

-# Function to aggregate responses based on similarity and voting mechanism
def aggregate_responses(responses):
-    # Use a simple voting mechanism to select the most common response
    response_counter = Counter(responses)
-    most_common_response = response_counter.most_common(1)[0]
-    # Return the most similar response to the first response
-    return responses[top_response_index]

-# Function to generate answers using the groq API with Llama model
-def generate_answer_with_grog(query, vector_index, sentences, sentence_transformer_model):
-    # Check cache for previous queries
    if query in cache:
        logging.info(f"Cache hit for query: {query}")
        return cache[query]

    try:
    except Exception as e:
-        logging.error(f"Error
-        return

-# Gradio
-def gradio_interface(query):
-    global vector_index, sentences

-    # Generate the answer using the groq API and Llama model with varied responses
-    answer = generate_answer_with_grog(query, vector_index, sentences, sentence_transformer_model)

-    # Log the query and answer for monitoring
-    logging.info(f"Query: {query}, Answer: {answer}")

-    return f"### Here's your response:\n\n{answer}"

# Create the Gradio interface
-)

# Launch the Gradio app
if __name__ == "__main__":

import os
import gradio as gr
+import logging
from groq import Groq
from sentence_transformers import SentenceTransformer
import faiss
import numpy as np
import PyPDF2
from sklearn.metrics.pairwise import cosine_similarity
+from collections import Counter

+# --------------------- Setup ---------------------

+logging.basicConfig(
+    filename='query_logs.log',
+    level=logging.INFO,
+    format='%(asctime)s:%(levelname)s:%(message)s'
+)

+GROQ_API_KEY = "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"
+client = Groq(api_key=GROQ_API_KEY)
+PDF_PATH = 'Generative_AI_Foundations_in_Python_Discover_key_techniques_and.pdf'
+sentence_transformer_model = SentenceTransformer('all-MiniLM-L6-v2')
cache = {}

+# --------------------- PDF Processing ---------------------

def read_pdf(file_path):
+    if not os.path.exists(file_path):
+        logging.error(f"PDF file not found at: {file_path}")
+        return []
+
+    sentences_with_pages = []
    with open(file_path, 'rb') as file:
        reader = PyPDF2.PdfReader(file)
+        for page_num, page in enumerate(reader.pages):
+            text = page.extract_text()
+            if text:
+                sentences = [sentence.strip() for sentence in text.split('\n') if sentence.strip()]
+                for sentence in sentences:
+                    sentences_with_pages.append({'sentence': sentence, 'page_number': page_num + 1})
+    return sentences_with_pages
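
+# Note: PyPDF2's extract_text() returns a page's text with layout newlines,
+# so each "sentence" above is really one line of PDF text, tagged with its
+# 1-based page number so answers can cite pages later.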

+def vectorize_text(sentences_with_pages):
    try:
+        sentences = [item['sentence'] for item in sentences_with_pages]
+        embeddings = sentence_transformer_model.encode(sentences, show_progress_bar=True)
+        index = faiss.IndexFlatL2(embeddings.shape[1])
+        index.add(np.array(embeddings))
+        logging.info(f"Added {len(sentences)} sentences to the vector store.")
+        return index, sentences_with_pages
    except Exception as e:
+        logging.error(f"Error during vectorization: {str(e)}")
        return None, None

+sentences_with_pages = read_pdf(PDF_PATH)
+vector_index, sentences_with_pages = vectorize_text(sentences_with_pages)
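
+# Note: IndexFlatL2 does exact (brute-force) nearest-neighbour search over
+# squared L2 distances; all-MiniLM-L6-v2 produces 384-dimensional vectors,
+# so embeddings.shape[1] is 384 here.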

+# --------------------- Query Handling ---------------------

+def generate_query_embedding(query):
    return sentence_transformer_model.encode([query])

+def is_query_relevant(distances, threshold=1.0):
+    return distances[0][0] <= threshold
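
+# Note: FAISS returns neighbours sorted by ascending distance, so
+# distances[0][0] is the best match for the query; smaller means more
+# similar, and the 1.0 threshold is a heuristic cutoff for "covered by the book".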

+def generate_diverse_responses(prompt, n=3):
    responses = []
    for i in range(n):
+        temperature = 0.7 + (i * 0.1)
+        top_p = 0.9 - (i * 0.1)
        try:
            chat_completion = client.chat.completions.create(
+                messages=[{"role": "user", "content": prompt}],
                model="llama3-8b-8192",
                temperature=temperature,
                top_p=top_p
            )
+            responses.append(chat_completion.choices[0].message.content.strip())
        except Exception as e:
            logging.error(f"Error generating response: {str(e)}")
+            responses.append("Error generating this response.")
    return responses
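
+# Note: successive calls sweep temperature up and top_p down (0.7/0.9,
+# 0.8/0.8, 0.9/0.7) so the n candidate completions differ enough for the
+# voting step below to be meaningful.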

def aggregate_responses(responses):
    response_counter = Counter(responses)
+    most_common_response, count = response_counter.most_common(1)[0]
+    if count > 1:
+        return most_common_response
+    else:
+        embeddings = sentence_transformer_model.encode(responses)
+        avg_embedding = np.mean(embeddings, axis=0)
+        similarities = cosine_similarity([avg_embedding], embeddings)[0]
+        return responses[np.argmax(similarities)]
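
+# Note: a lightweight self-consistency scheme: majority vote when any
+# response repeats verbatim, otherwise the response whose embedding lies
+# closest (by cosine similarity) to the centroid of all candidates.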

+def generate_answer(query):
    if query in cache:
        logging.info(f"Cache hit for query: {query}")
        return cache[query]

    try:
+        query_embedding = generate_query_embedding(query)
+        D, I = vector_index.search(np.array(query_embedding), k=5)
+
+        if is_query_relevant(D):
+            relevant_items = [sentences_with_pages[i] for i in I[0]]
+            combined_text = " ".join([item['sentence'] for item in relevant_items])
+            page_numbers = sorted(set([item['page_number'] for item in relevant_items]))
+            page_numbers_str = ', '.join(map(str, page_numbers))
+
+            # Construct primary prompt
+            prompt = f"""
+            Use the following context from "Generative AI Foundations" to answer the question. If additional explanation is needed, provide an example.
+
+            **Context (Pages {page_numbers_str}):**
+            {combined_text}
+
+            **User's question:**
+            {query}
+
+            **Remember to indicate the specific page numbers.**
+            """
+            primary_responses = generate_diverse_responses(prompt)
+            primary_answer = aggregate_responses(primary_responses)
+
+            # Construct additional prompt for explanations
+            explanation_prompt = f"""
+            The user has a question about a complex topic. Could you provide an explanation or example for better understanding?
+
+            **User's question:**
+            {query}
+
+            **Primary answer:**
+            {primary_answer}
+            """
+            explanation_responses = generate_diverse_responses(explanation_prompt)
+            explanation_answer = aggregate_responses(explanation_responses)
+
+            # Combine primary answer and explanation
+            full_response = f"{primary_answer}\n\n{explanation_answer}\n\n_From 'Generative AI Foundations,' pages {page_numbers_str}_"
+            cache[query] = full_response
+            logging.info(f"Generated response for query: {query}")
+            return full_response
+
+        else:
+            # General knowledge fallback
+            prompt = f"""
+            The user asked a question that is not covered in "Generative AI Foundations." Please provide a helpful answer using general knowledge.
+
+            **User's question:**
+            {query}
+            """
+            fallback_responses = generate_diverse_responses(prompt)
+            fallback_answer = aggregate_responses(fallback_responses)
+            cache[query] = fallback_answer
+            return fallback_answer

    except Exception as e:
+        logging.error(f"Error generating answer: {str(e)}")
+        return "Sorry, an error occurred while generating the answer."

+# --------------------- Gradio Interface ---------------------

+def gradio_interface(user_query, history):
+    response = generate_answer(user_query)
+    history = history or []
+    history.append({"role": "user", "content": user_query})
+    history.append({"role": "assistant", "content": response})
+    return history, history
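
+# Note: with type='messages', gr.Chatbot consumes a flat list of
+# {"role": ..., "content": ...} dicts (OpenAI-style) rather than the older
+# (user, bot) tuple pairs, which is why history is built in that shape here
+# and in the handlers below.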

# Create the Gradio interface
+with gr.Blocks(css=".gradio-container {background-color: #f0f0f0}") as iface:
+    gr.Markdown("""
+    # **Generative AI Foundations Assistant**
+    *Explore insights and get explanations with real-life examples from "Generative AI Foundations in Python".*
+    """)
+
+    chatbot = gr.Chatbot(height=500, type='messages')
+    state = gr.State([])
+
+    with gr.Row():
+        txt = gr.Textbox(
+            show_label=False,
+            placeholder="Type your message here and press Enter",
+            container=False
+        )
+        submit_btn = gr.Button("Send")
+
+    def submit_message(user_query, history):
+        history = history or []
+        history.append({"role": "user", "content": user_query})
+        return "", history
+
+    def bot_response(history):
+        user_query = history[-1]['content']
+        response = generate_answer(user_query)
+        history.append({"role": "assistant", "content": response})
+        return history
+
+    txt.submit(submit_message, [txt, state], [txt, state], queue=False).then(
+        bot_response, state, chatbot
+    )
+    submit_btn.click(submit_message, [txt, state], [txt, state], queue=False).then(
+        bot_response, state, chatbot
+    )
+
+    reset_btn = gr.Button("Reset Chat")
+    reset_btn.click(lambda: ([], []), outputs=[chatbot, state], queue=False)
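
+# Note: .then() chains the handlers per event: submit_message appends the
+# user turn and clears the textbox without queueing, then bot_response runs
+# the slower model call and pushes the updated message history into the chatbot.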

# Launch the Gradio app
if __name__ == "__main__":