DrishtiSharma committed
Commit b766313 · verified
1 Parent(s): a72c887

Update app.py

Files changed (1)
  1. app.py +64 -125
app.py CHANGED
@@ -1,145 +1,84 @@
 import streamlit as st
-from dotenv import load_dotenv
-from PyPDF2 import PdfReader
-from langchain.text_splitter import CharacterTextSplitter
-from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings, HuggingFaceEmbeddings
-from langchain.vectorstores import FAISS
-from langchain.chat_models import ChatOpenAI
 from langchain.memory import ConversationBufferMemory
 from langchain.chains import ConversationalRetrievalChain
-from langchain.chat_models.gigachat import GigaChat
-from htmlTemplates import css, bot_template, user_template
-from langchain.llms import HuggingFaceHub, LlamaCpp
-from huggingface_hub import snapshot_download, hf_hub_download
-
-
-repo_name = "IlyaGusev/saiga_mistral_7b_gguf"
-model_name = "model-q4_K.gguf"
-
-#snapshot_download(repo_id=repo_name, local_dir=".", allow_patterns=model_name)
-
-from transformers import pipeline
-
-# Initialize the summarization pipeline
-summarizer = pipeline("summarization")
-
-
-def get_pdf_text(pdf_docs):
-    text = ""
-    for pdf in pdf_docs:
-        pdf_reader = PdfReader(pdf)
-        for page in pdf_reader.pages:
-            text += page.extract_text()
-
-    return text
-
-
-def get_text_chunks(text):
-    text_splitter = CharacterTextSplitter(separator="\n",
-                                          chunk_size=1000,  # 1000
-                                          chunk_overlap=200,  # 200
-                                          length_function=len
-                                          )
-    chunks = text_splitter.split_text(text)
-
-    return chunks
 
-#def get_vectorstore(text_chunks):
-    #embeddings = OpenAIEmbeddings()
-    #embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl")
-    #embeddings = HuggingFaceEmbeddings(model_name="intfloat/multilingual-e5-large")
-    #embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/paraphrase-multilingual-mpnet-base-v2")
-    #vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
-
-    #return vectorstore
-
-def get_vectorstore(text_chunks, embedding_model_name="intfloat/multilingual-e5-large"):
-    embeddings = HuggingFaceEmbeddings(model_name=embedding_model_name)
-    vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
     return vectorstore
 
-def get_conversation_chain(vectorstore, model_name):
-    llm = GigaChat(profanity=False,
-                   verify_ssl_certs=False
-                   )
-    memory = ConversationBufferMemory(memory_key='chat_history',
-                                      input_key='question',
-                                      output_key='answer',
-                                      return_messages=True)
-    conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm,
-                                                               retriever=vectorstore.as_retriever(),
-                                                               memory=memory,
-                                                               return_source_documents=True
-                                                               )
-    return conversation_chain
-
-
-def summarize_text(text):
-    summary = summarizer(text, max_length=130, min_length=30, do_sample=False)
-    return summary[0]['summary_text']
-
-
-def handle_userinput(user_question):
-    response = st.session_state.conversation({'question': user_question})
-
-    st.session_state.chat_history = response['chat_history']
-    st.session_state.retrieved_text = response['source_documents']
-
-    for i, (message, text) in enumerate(zip(st.session_state.chat_history, st.session_state.retrieved_text)):
-        if i % 2 == 0:  # User messages
-            st.write(user_template.replace("{{MSG}}", message.content), unsafe_allow_html=True)
-        else:  # Bot messages
-            st.write(bot_template.replace("{{MSG}}", message.content), unsafe_allow_html=True)
-            if summarize_option and text.page_content:  # Check if summarization is enabled
-                summarized_text = summarize_text(text.page_content)
-                st.write(bot_template.replace("{{MSG}}", summarized_text), unsafe_allow_html=True)
-            else:
-                st.write(bot_template.replace("{{MSG}}", text.page_content), unsafe_allow_html=True)
-
-
-st.set_page_config(page_title="Chat with multiple PDFs",
-                   page_icon=":books:")
-st.write(css, unsafe_allow_html=True)
-
-if "conversation" not in st.session_state:
-    st.session_state.conversation = None
-if "chat_history" not in st.session_state:
-    st.session_state.chat_history = None
-
-st.header("Chat with multiple PDFs :books:")
-user_question = st.text_input("Ask a question about your documents: ")
-
-if user_question:
-    handle_userinput(user_question)
-
-with st.sidebar:
-    st.subheader("Your documents")
-    embedding_model_name = st.selectbox("Select embedding model", ["intfloat/multilingual-e5-large", "sentence-transformers/paraphrase-multilingual-mpnet-base-v2"])
-    summarize_option = st.sidebar.checkbox("Enable Summarization", value=False)
-    pdf_docs = st.file_uploader(
-        "Upload your PDFs here and click on 'Process'", accept_multiple_files=True)
-    if st.button("Process"):
-        with st.spinner("Processing"):
-            # get pdf text
-            raw_text = get_pdf_text(pdf_docs)
-
-            # get the text chunks
-            text_chunks = get_text_chunks(raw_text)
-
-            # create vector store
-            vectorstore = get_vectorstore(text_chunks, embedding_model_name)
-
-            # create conversation chain
-            st.session_state.conversation = get_conversation_chain(vectorstore, model_name)
+import os
+import json
+
 import streamlit as st
+from langchain_huggingface import HuggingFaceEmbeddings
+from langchain_chroma import Chroma
+from langchain_groq import ChatGroq
 from langchain.memory import ConversationBufferMemory
 from langchain.chains import ConversationalRetrievalChain
 
+from vectorize_documents import embeddings
+
+working_dir = os.path.dirname(os.path.abspath(__file__))
+config_data = json.load(open(f"{working_dir}/config.json"))
+GROQ_API_KEY = config_data["GROQ_API_KEY"]
+os.environ["GROQ_API_KEY"] = GROQ_API_KEY
+
+
+def setup_vectorstore():
+    persist_directory = f"{working_dir}/vector_db_dir"
+    # reuse the embeddings from vectorize_documents so queries match the persisted index
+    vectorstore = Chroma(persist_directory=persist_directory,
+                         embedding_function=embeddings)
     return vectorstore
 
 
+def chat_chain(vectorstore):
+    llm = ChatGroq(model="llama-3.1-70b-versatile",
+                   temperature=0)
+    retriever = vectorstore.as_retriever()
+    memory = ConversationBufferMemory(
+        llm=llm,
+        output_key="answer",
+        memory_key="chat_history",
+        return_messages=True
+    )
+    chain = ConversationalRetrievalChain.from_llm(
+        llm=llm,
+        retriever=retriever,
+        chain_type="stuff",
+        memory=memory,
+        verbose=True,
+        return_source_documents=True
+    )
+
+    return chain
+
+
+st.set_page_config(
+    page_title="Multi Doc Chat",
+    page_icon="📚",
+    layout="centered"
+)
+
+st.title("📚 Multi Documents Chatbot")
+
+if "chat_history" not in st.session_state:
+    st.session_state.chat_history = []
+
+if "vectorstore" not in st.session_state:
+    st.session_state.vectorstore = setup_vectorstore()
+
+if "conversational_chain" not in st.session_state:
+    st.session_state.conversational_chain = chat_chain(st.session_state.vectorstore)
+
+for message in st.session_state.chat_history:
+    with st.chat_message(message["role"]):
+        st.markdown(message["content"])
+
+user_input = st.chat_input("Ask AI...")
+
+if user_input:
+    st.session_state.chat_history.append({"role": "user", "content": user_input})
+
+    with st.chat_message("user"):
+        st.markdown(user_input)
+
+    with st.chat_message("assistant"):
+        response = st.session_state.conversational_chain({"question": user_input})
+        assistant_response = response["answer"]
+        st.markdown(assistant_response)
+        st.session_state.chat_history.append({"role": "assistant", "content": assistant_response})
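For reference: the updated app.py reads its Groq credentials from a config.json placed next to the script (config_data["GROQ_API_KEY"]). The file itself is not part of this commit; a minimal example of its expected shape, with a placeholder value:

{
    "GROQ_API_KEY": "<your-groq-api-key>"
}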
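The new code also imports embeddings from vectorize_documents, a module not included in this diff. A minimal sketch of what such a module could look like, assuming it builds the persisted Chroma index under vector_db_dir; everything here except the embeddings name is an assumption:

# vectorize_documents.py -- hypothetical sketch; app.py only requires the `embeddings` object
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_chroma import Chroma
from langchain_community.document_loaders import DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter

# embedding model is assumed; it must match whatever was used to build vector_db_dir
embeddings = HuggingFaceEmbeddings()

def vectorize(data_dir="data", persist_directory="vector_db_dir"):
    # load raw documents, split them into chunks, and persist a Chroma index for app.py to query
    documents = DirectoryLoader(data_dir).load()
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    chunks = splitter.split_documents(documents)
    Chroma.from_documents(chunks, embeddings, persist_directory=persist_directory)

if __name__ == "__main__":
    vectorize()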