import os
import gradio as gr
import logging
from groq import Groq
from sentence_transformers import SentenceTransformer
import faiss
import numpy as np
import PyPDF2
from sklearn.metrics.pairwise import cosine_similarity
from collections import Counter
# -------------------- Setup ---------------------
logging.basicConfig(
    filename='query_logs.log',
    level=logging.INFO,
    format='%(asctime)s:%(levelname)s:%(message)s'
)
# Read the Groq API key from the environment; never hard-code secrets in source.
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
client = Groq(api_key=GROQ_API_KEY)
PDF_PATH = 'Generative_AI_Foundations_in_Python_Discover_key_techniques_and.pdf'
sentence_transformer_model = SentenceTransformer('all-MiniLM-L6-v2')
# Simple in-memory cache mapping each query string to its generated answer.
cache = {}
# --------------------- Vectorization Function ---------------------
def vectorize_text(sentences_with_pages):
    """Vectorize sentences using SentenceTransformer and create a FAISS index."""
    try:
        sentences = [item['sentence'] for item in sentences_with_pages]
        embeddings = sentence_transformer_model.encode(sentences, show_progress_bar=True)
        index = faiss.IndexFlatL2(embeddings.shape[1])
        index.add(np.array(embeddings))
        logging.info(f"Added {len(sentences)} sentences to the vector store.")
        return index, sentences_with_pages
    except Exception as e:
        logging.error(f"Error during vectorization: {str(e)}")
        return None, None
# --------------------- PDF Processing ---------------------
def read_pdf(file_path):
    """Extract text from the PDF, keeping the page number for each extracted line."""
    if not os.path.exists(file_path):
        logging.error(f"PDF file not found at: {file_path}")
        return []
    sentences_with_pages = []
    with open(file_path, 'rb') as file:
        reader = PyPDF2.PdfReader(file)
        for page_num, page in enumerate(reader.pages):
            text = page.extract_text()
            if text:
                # Split on newlines; each non-empty line is treated as a "sentence".
                sentences = [sentence.strip() for sentence in text.split('\n') if sentence.strip()]
                for sentence in sentences:
                    sentences_with_pages.append({'sentence': sentence, 'page_number': page_num + 1})
    return sentences_with_pages
# Read and Vectorize PDF Content
sentences_with_pages = read_pdf(PDF_PATH)
vector_index, sentences_with_pages = vectorize_text(sentences_with_pages)
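# Optional guard: read_pdf returns [] and vectorize_text returns (None, None) on
# failure, so fail fast here rather than letting every query hit a None index.
if vector_index is None:
    raise RuntimeError(f"Failed to build a vector index from '{PDF_PATH}'.")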
# --------------------- Query Handling ---------------------
def generate_query_embedding(query):
    return sentence_transformer_model.encode([query])

def is_query_relevant(distances, threshold=1.0):
    # Smaller L2 distance means a closer match; accept if the best hit is within the threshold.
    return distances[0][0] <= threshold
def generate_diverse_responses(prompt, n=3):
    """Query the model n times with varied sampling settings to get diverse candidates."""
    responses = []
    for i in range(n):
        # Vary temperature and top_p slightly per call to encourage different outputs.
        temperature = 0.7 + (i * 0.1)
        top_p = 0.9 - (i * 0.1)
        try:
            chat_completion = client.chat.completions.create(
                messages=[{"role": "user", "content": prompt}],
                model="llama3-8b-8192",
                temperature=temperature,
                top_p=top_p
            )
            responses.append(chat_completion.choices[0].message.content.strip())
        except Exception as e:
            logging.error(f"Error generating response: {str(e)}")
            responses.append("Error generating this response.")
    return responses
def aggregate_responses(responses):
    """Return the majority response, or the one closest to the mean embedding if all differ."""
    response_counter = Counter(responses)
    most_common_response, count = response_counter.most_common(1)[0]
    if count > 1:
        return most_common_response
    # No exact duplicates: pick the response most similar to the centroid of all candidates.
    embeddings = sentence_transformer_model.encode(responses)
    avg_embedding = np.mean(embeddings, axis=0)
    similarities = cosine_similarity([avg_embedding], embeddings)[0]
    return responses[np.argmax(similarities)]
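# For example, aggregate_responses(["A", "A", "B"]) returns "A" by majority vote,
# while three distinct responses yield the one closest to their mean embedding.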
def generate_answer(query):
    """Answer a query with retrieval over the book, falling back to general knowledge."""
    if query in cache:
        logging.info(f"Cache hit for query: {query}")
        return cache[query]
    try:
        query_embedding = generate_query_embedding(query)
        # Retrieve the 5 nearest sentences; D holds distances, I holds indices.
        D, I = vector_index.search(np.array(query_embedding), k=5)
        if is_query_relevant(D):
            relevant_items = [sentences_with_pages[i] for i in I[0]]
            combined_text = " ".join([item['sentence'] for item in relevant_items])
            page_numbers = sorted(set(item['page_number'] for item in relevant_items))
            page_numbers_str = ', '.join(map(str, page_numbers))
            # Construct primary prompt
            prompt = f"""
Use the following context from "Generative AI Foundations" to answer the question. If additional explanation is needed, provide an example.

**Context (Pages {page_numbers_str}):**
{combined_text}

**User's question:**
{query}

**Remember to indicate the specific page numbers.**
"""
            primary_responses = generate_diverse_responses(prompt)
            primary_answer = aggregate_responses(primary_responses)
            # Construct additional prompt for explanations
            explanation_prompt = f"""
The user has a question about a complex topic. Could you provide an explanation or example for better understanding?

**User's question:**
{query}

**Primary answer:**
{primary_answer}
"""
            explanation_responses = generate_diverse_responses(explanation_prompt)
            explanation_answer = aggregate_responses(explanation_responses)
            # Combine primary answer and explanation
            full_response = f"{primary_answer}\n\n{explanation_answer}\n\n_From 'Generative AI Foundations,' pages {page_numbers_str}_"
            cache[query] = full_response
            logging.info(f"Generated response for query: {query}")
            return full_response
        else:
            # General knowledge fallback
            prompt = f"""
The user asked a question that is not covered in "Generative AI Foundations." Please provide a helpful answer using general knowledge.

**User's question:**
{query}
"""
            fallback_responses = generate_diverse_responses(prompt)
            fallback_answer = aggregate_responses(fallback_responses)
            cache[query] = fallback_answer
            return fallback_answer
    except Exception as e:
        logging.error(f"Error generating answer: {str(e)}")
        return "Sorry, an error occurred while generating the answer."
# --------------------- Gradio Interface ---------------------
def gradio_interface(user_query, history):
    """Single-step chat handler (not wired into the Blocks UI below; kept for reference)."""
    response = generate_answer(user_query)
    history = history or []
    history.append({"role": "user", "content": user_query})
    history.append({"role": "assistant", "content": response})
    return history, history
# Create the Gradio interface
with gr.Blocks(css=".gradio-container {background-color: #f0f0f0}") as iface:
    gr.Markdown("""
# **Generative AI Foundations Assistant**
*Explore insights and get explanations with real-life examples from "Generative AI Foundations in Python".*
""")
    chatbot = gr.Chatbot(height=500, type='messages')
    state = gr.State([])
    with gr.Row():
        txt = gr.Textbox(
            show_label=False,
            placeholder="Type your message here and press Enter",
            container=False
        )
        submit_btn = gr.Button("Send")

    def submit_message(user_query, history):
        # Append the user's turn immediately and clear the textbox.
        history = history or []
        history.append({"role": "user", "content": user_query})
        return "", history

    def bot_response(history):
        # Generate the assistant's reply for the most recent user turn.
        user_query = history[-1]['content']
        response = generate_answer(user_query)
        history.append({"role": "assistant", "content": response})
        return history

    txt.submit(submit_message, [txt, state], [txt, state], queue=False).then(
        bot_response, state, chatbot
    )
    submit_btn.click(submit_message, [txt, state], [txt, state], queue=False).then(
        bot_response, state, chatbot
    )

    reset_btn = gr.Button("Reset Chat")
    reset_btn.click(lambda: ([], []), outputs=[chatbot, state], queue=False)
# Launch the Gradio app
if __name__ == "__main__":
    iface.launch()
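    # Optionally, iface.launch(share=True) exposes a temporary public link
    # when running the app locally.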