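"""RAGBOT (app.py)

A Gradio chatbot for Dialectical Behaviour Therapy (DBT) support built on
retrieval-augmented generation (RAG): uploaded PDF exercise books are split
into pages, embedded with sentence-transformers, indexed with FAISS, and the
most relevant pages are injected into the prompt sent to the Zephyr chat model.
"""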
import gradio as gr
from huggingface_hub import InferenceClient
from typing import List, Tuple
import fitz # PyMuPDF
from sentence_transformers import SentenceTransformer
import numpy as np
import faiss
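
# Assumed dependencies: gradio, huggingface_hub, PyMuPDF (imported as fitz),
# sentence-transformers, numpy, and faiss (faiss-cpu or faiss-gpu).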
# Inference client for the hosted chat model (Zephyr 7B Beta by default).
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
# client = InferenceClient("meta-llama/Llama-2-7b-chat-hf")  # alternative model


# Holds the app's state: extracted page texts, their embeddings, and the FAISS index.
class MyApp:
    def __init__(self) -> None:
        self.documents = []
        self.embeddings = None
        self.index = None
        # Load the embedding model once; reloading it on every call is slow.
        self.model = SentenceTransformer('all-MiniLM-L6-v2')

    def load_pdfs(self, file_paths: List[str]) -> None:
        """Extracts text from multiple PDF files and stores it in the app's documents."""
        self.documents = []
        for file_path in file_paths:
            doc = fitz.open(file_path)
            for page_num in range(len(doc)):
                page = doc[page_num]
                text = page.get_text()
                self.documents.append({"page": page_num + 1, "content": text, "file": file_path})
            doc.close()
        print("PDFs processed successfully!")

    def build_vector_db(self) -> None:
        """Builds a FAISS vector index over the content of the loaded PDFs."""
        self.embeddings = self.model.encode(
            [doc["content"] for doc in self.documents], show_progress_bar=True
        )
        self.index = faiss.IndexFlatL2(self.embeddings.shape[1])
        self.index.add(np.array(self.embeddings))
        print("Vector database built successfully!")

    def search_documents(self, query: str, k: int = 3) -> List[str]:
        """Returns the k page texts most similar to the query."""
        if self.index is None:
            return ["No documents indexed yet. Please upload PDFs first."]
        query_embedding = self.model.encode([query], show_progress_bar=False)
        D, I = self.index.search(np.array(query_embedding), k)
        # FAISS pads with -1 when fewer than k vectors are indexed; skip those.
        results = [self.documents[i]["content"] for i in I[0] if i != -1]
        return results if results else ["No relevant documents found."]

app = MyApp()

def preprocess_response(response: str) -> str:
    """Preprocesses the response to make it more polished and empathetic."""
    response = response.strip()
    response = response.replace(" ,", ",").replace(" .", ".")
    # Collapse all runs of whitespace (including newlines) into single spaces.
    response = " ".join(response.split())
    if not any(word in response.lower() for word in ["sorry", "apologize", "empathy"]):
        response = "I'm here to help. " + response
    return response

def shorten_response(response: str) -> str:
    """Uses the Zephyr model to shorten and refine the response."""
    messages = [
        {"role": "system", "content": "Greet, shorten, and refine this response in a supportive and empathetic manner."},
        {"role": "user", "content": response},
    ]
    result = client.chat_completion(messages, max_tokens=512, temperature=0.5, top_p=0.9)
    return result.choices[0].message.content.strip()

def respond(message: str, history: List[Tuple[str, str]], system_message: str):
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # RAG: retrieve relevant documents when the query asks for exercises or specific information.
    if any(keyword in message.lower() for keyword in ["exercise", "technique", "information", "guide", "help", "how to"]):
        retrieved_docs = app.search_documents(message)
        context = "\n".join(retrieved_docs)
        if context.strip():
            messages.append({"role": "system", "content": "Relevant documents: " + context})

    response = client.chat_completion(messages, max_tokens=1024, temperature=0.7, top_p=0.9)
    response_content = "".join(
        choice.message.content for choice in response.choices if choice.message.content
    )
    polished_response = preprocess_response(response_content)
    shortened_response = shorten_response(polished_response)
    history.append((message, shortened_response))
    return history, ""

with gr.Blocks() as demo:
    gr.Markdown("# 🧘‍♀️ **Dialectical Behaviour Therapy**")
    gr.Markdown(
        "‼️ Disclaimer: This chatbot is based on a DBT exercise book that is publicly available. "
        "We are not medical practitioners, and you use this chatbot at your own risk."
    )
    chatbot = gr.Chatbot()
    with gr.Row():
        # type="filepath" is the current Gradio API; the deprecated type="file" returned file objects.
        pdf_input = gr.File(label="Upload PDFs (minimum 5)", file_count="multiple", type="filepath")
        txt_input = gr.Textbox(
            show_label=False,
            placeholder="Type your message here...",
            lines=1,
        )
        system_input = gr.Textbox(
            label="System Message",
            placeholder="Enter a system message here...",
            lines=2,
        )
        submit_btn = gr.Button("Submit", scale=1)
        refresh_btn = gr.Button("Refresh Chat", scale=1, variant="secondary")
    example_questions = [
        ["What are some ways to cope with stress using DBT?"],
        ["Can you guide me through a grounding exercise?"],
        ["How do I use DBT skills to handle intense emotions?"],
        ["What are some self-soothing techniques I can practice?"],
    ]
    gr.Examples(examples=example_questions, inputs=[txt_input])

    def load_and_build_pdfs(pdfs):
        """Indexes the uploaded PDFs; raises a user-visible error if too few are given."""
        if not pdfs or len(pdfs) < 5:
            raise gr.Error("Please upload at least 5 PDFs.")
        # With type="filepath" each item is a path string; fall back to .name for file objects.
        file_paths = [pdf if isinstance(pdf, str) else pdf.name for pdf in pdfs]
        app.load_pdfs(file_paths)
        app.build_vector_db()

    # Clicking Submit first (re)indexes the uploaded PDFs, then generates a response.
    submit_btn.click(fn=load_and_build_pdfs, inputs=[pdf_input], outputs=[])
    submit_btn.click(fn=respond, inputs=[txt_input, chatbot, system_input], outputs=[chatbot, txt_input])
    refresh_btn.click(lambda: [], None, chatbot)

if __name__ == "__main__":
    demo.launch()