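"""BeepKart FAQ chatbot.

A small Streamlit RAG app: PDF FAQs in `data/` are split into chunks,
embedded with a sentence-transformers model, indexed in FAISS, and
answered through a Mistral-7B-Instruct endpoint on the Hugging Face
Inference API.
"""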
import os
import streamlit as st
from langchain_huggingface import HuggingFaceEmbeddings, HuggingFaceEndpoint
from langchain.chains import RetrievalQA
from langchain_community.vectorstores import FAISS
from langchain_core.prompts import PromptTemplate
from langchain_community.document_loaders import PyPDFLoader, DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Use environment variable for Hugging Face token
HF_TOKEN = os.environ.get("HF_TOKEN")
HUGGINGFACE_REPO_ID = "mistralai/Mistral-7B-Instruct-v0.3"
DATA_PATH = "data/"
DB_FAISS_PATH = "vectorstore/db_faiss"
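
# Optional early check: warn up front if the token is missing, since the
# endpoint's own authentication errors are cryptic.
if not HF_TOKEN:
    st.warning("HF_TOKEN is not set; Hugging Face API calls will fail.")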

def load_pdf_files(data_path):
    """Load all PDF files from the given directory."""
    loader = DirectoryLoader(data_path, glob='*.pdf', loader_cls=PyPDFLoader)
    documents = loader.load()
    return documents

def create_chunks(extracted_data):
    """Split documents into overlapping chunks for retrieval."""
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    text_chunks = text_splitter.split_documents(extracted_data)
    return text_chunks

def get_embedding_model():
    """Get the embedding model"""
    embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    return embedding_model

def create_embeddings():
    """Create embeddings and save to FAISS database"""
    # Step 1: Load PDFs
    documents = load_pdf_files(data_path=DATA_PATH)
    st.info(f"Loaded {len(documents)} documents")
    
    # Step 2: Create chunks
    text_chunks = create_chunks(extracted_data=documents)
    st.info(f"Created {len(text_chunks)} text chunks")
    
    # Step 3: Get embedding model
    embedding_model = get_embedding_model()
    
    # Step 4: Create and save embeddings
    os.makedirs(os.path.dirname(DB_FAISS_PATH), exist_ok=True)
    db = FAISS.from_documents(text_chunks, embedding_model)
    db.save_local(DB_FAISS_PATH)
    st.success("Embeddings created and saved successfully!")
    return db

def set_custom_prompt(custom_prompt_template):
    """Set custom prompt template"""
    prompt = PromptTemplate(template=custom_prompt_template, input_variables=["context", "question"])
    return prompt

def load_llm(huggingface_repo_id):
    """Load the Hugging Face Inference API endpoint as the LLM."""
    # Pass the token and generation limit as top-level parameters; recent
    # langchain_huggingface versions reject auth keys inside model_kwargs,
    # and `max_new_tokens` is the supported way to cap output length.
    llm = HuggingFaceEndpoint(
        repo_id=huggingface_repo_id,
        task="text-generation",
        temperature=0.5,
        max_new_tokens=512,
        huggingfacehub_api_token=HF_TOKEN,
    )
    return llm

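# Cache the vector store across Streamlit reruns so the FAISS index is
# loaded (or built) only once per server process.
@st.cache_resource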
def get_vectorstore():
    """Get or create vector store"""
    if os.path.exists(DB_FAISS_PATH):
        st.info("Loading existing vector store...")
        embedding_model = get_embedding_model()
        try:
            db = FAISS.load_local(DB_FAISS_PATH, embedding_model, allow_dangerous_deserialization=True)
            return db
        except Exception as e:
            st.error(f"Error loading vector store: {e}")
            st.info("Creating new vector store...")
            return create_embeddings()
    else:
        st.info("Creating new vector store...")
        return create_embeddings()

def main():
    st.title("BeepKart FAQ Chatbot")
    st.markdown("Ask questions about buying or selling bikes on BeepKart!")
    
    # Initialize session state for messages
    if 'messages' not in st.session_state:
        st.session_state.messages = []
    
    # Display chat history
    for message in st.session_state.messages:
        st.chat_message(message['role']).markdown(message['content'])
    
    # Get user input
    prompt = st.chat_input("Ask a question about BeepKart...")
    
    # Custom prompt template
    CUSTOM_PROMPT_TEMPLATE = """
    Use the pieces of information provided in the context to answer the user's question.
    If you don't know the answer, just say that you don't know; don't try to make up an answer.

    Don't provide anything outside of the given context.

    Context: {context}
    Question: {question}

    Start the answer directly. No small talk, please.
    """
    
    if prompt:
        # Display user message
        st.chat_message('user').markdown(prompt)
        st.session_state.messages.append({'role': 'user', 'content': prompt})
        
        try:
            with st.spinner("Thinking..."):
                # Get vector store
                vectorstore = get_vectorstore()
                
                # Create QA chain
                qa_chain = RetrievalQA.from_chain_type(
                    llm=load_llm(huggingface_repo_id=HUGGINGFACE_REPO_ID),
                    chain_type="stuff",
                    retriever=vectorstore.as_retriever(search_kwargs={'k': 3}),
                    return_source_documents=True,
                    chain_type_kwargs={'prompt': set_custom_prompt(CUSTOM_PROMPT_TEMPLATE)}
                )
                
                # Get response
                response = qa_chain.invoke({'query': prompt})
                
                # Extract result and sources
                result = response["result"]
                source_documents = response["source_documents"]
                
                # Format source documents
                source_docs_text = "\n\n**Sources:**\n"
                for i, doc in enumerate(source_documents, 1):
                    source_docs_text += f"{i}. Page {doc.metadata.get('page', 'N/A')}: {doc.page_content[:100]}...\n\n"
                
                # Display result and sources
                result_to_show = f"{result}\n{source_docs_text}"
                
                st.chat_message('assistant').markdown(result_to_show)
                st.session_state.messages.append({'role': 'assistant', 'content': result_to_show})
                
        except Exception as e:
            error_message = f"Error: {str(e)}"
            st.error(error_message)
            st.error("Please check your HuggingFace token and model access permissions")
            st.session_state.messages.append({'role': 'assistant', 'content': error_message})

if __name__ == "__main__":
    main()
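
# To run locally (assuming this file is saved as app.py):
#   export HF_TOKEN=hf_...        # a Hugging Face access token
#   streamlit run app.py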