ashish-001 committed on
Commit
8a6526f
·
verified ·
1 Parent(s): ce18c57

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +86 -0
  2. requirements.txt +7 -0
app.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import google.generativeai as genai
3
+ from langchain_community.vectorstores import FAISS
4
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
5
+ from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
6
+ from langchain_core.prompts import PromptTemplate
7
+ from langchain.chains.question_answering import load_qa_chain
8
+ import fitz
9
+ from dotenv import load_dotenv
10
+ import os
11
+ load_dotenv()
12
+
13
+ genai.configure(api_key=os.getenv('GOOGLE_API_KEY'))
14
+ st.title('Document Q&A')
15
+ data_uploaded=False
16
+
17
def get_chain(model_name='gemini-1.5-pro-latest', temperature=0.1):
    """Build a 'stuff' question-answering chain backed by a Gemini chat model.

    Args:
        model_name: Gemini model identifier (default preserves original behavior).
        temperature: Sampling temperature; low value favors factual answers.

    Returns:
        A loaded QA chain that expects ``input_documents`` and ``question`` keys.
    """
    model = ChatGoogleGenerativeAI(model=model_name, temperature=temperature)
    # Instruct the model to refuse rather than hallucinate when the context
    # does not contain the answer.
    prompt_ = """
    Answer the questions as detailed as possible from the provided context, make sure to provide all the
    details, if the answer is not in the provided context just say, "answer is not available in context",
    don't provide the wrong answer\n.
    context: {context}
    Questions:{question}
    Answer:
    """
    prompt = PromptTemplate(template=prompt_, input_variables=["context", "question"])
    # NOTE(review): load_qa_chain is deprecated in newer langchain releases;
    # kept here because the pinned langchain==0.2.3 still supports it.
    chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
    return chain
30
+
31
+
32
def get_pdf_content(pdffile):
    """Return the concatenated plain text of every page in an uploaded PDF.

    *pdffile* is a binary file-like object (e.g. a Streamlit upload); its
    entire contents are read into memory and parsed by PyMuPDF.
    """
    with fitz.open(stream=pdffile.read(), filetype="pdf") as doc:
        return "".join(page.get_text() for page in doc)
38
+
39
+
40
def create_database(data):
    """Chunk *data*, embed the chunks, and persist a FAISS index.

    The index is written to the local ``faiss_index`` directory, overwriting
    any previous store.
    """
    embedder = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    chunks = splitter.split_text(data)
    # Build the vector store and save it in one pass.
    FAISS.from_texts(chunks, embedder).save_local("faiss_index")
46
+
47
+
48
def user_input(u_question):
    """Answer *u_question* using the previously persisted FAISS index.

    Loads the local ``faiss_index`` store, retrieves the most similar chunks,
    and runs them through the QA chain. Returns the model's answer text.
    """
    embedder = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    # allow_dangerous_deserialization is required to reload a pickled index;
    # safe here because the index was written by this same app.
    store = FAISS.load_local('faiss_index', embedder, allow_dangerous_deserialization=True)
    matches = store.similarity_search(u_question)
    qa_chain = get_chain()
    result = qa_chain({"input_documents": matches, "question": u_question}, return_only_outputs=True)
    return result["output_text"]
57
+
58
+
59
# Sidebar: PDF upload and one-shot vector-store creation.
with st.sidebar:
    uploaded_file = st.file_uploader("Upload pdf file", key="pdf_uploader")
    if st.button('Create vector store'):
        if uploaded_file is None:
            st.write("Please upload pdf file")
        else:
            create_database(get_pdf_content(uploaded_file))
            st.write("Vector store created")
68
+
69
# Initialize the chat transcript on first run of the session.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the stored conversation so it survives Streamlit reruns.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])
75
+
76
+
77
# Chat turn: record the user message, query the index, render the answer.
if prompt := st.chat_input("Ask questions"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        with st.spinner(text="Fetching details..."):
            # Robustness: if no vector store has been created yet (or the
            # index is unreadable), FAISS.load_local raises; surface a
            # friendly message at this UI boundary instead of a traceback.
            try:
                response = user_input(prompt)
            except Exception as err:  # top-level UI boundary; shown to user
                response = f"Could not answer the question (is the vector store created?): {err}"
        message_placeholder.markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ streamlit==1.36.0
2
+ langchain==0.2.3
3
+ langchain-google-genai==1.0.7
4
+ langchain-community==0.2.4
5
+ langchain-core==0.2.10
6
+ PyMuPDF==1.24.7
7
+ faiss-cpu==1.8.0