zahraanaji committed on
Commit 840b10b · verified · 1 Parent(s): e6e02d8

Update app.py

Files changed (1)
  1. app.py +38 -37
app.py CHANGED
@@ -27,41 +27,42 @@ Helpful answer:
 
 prompt = PromptTemplate(template=template, input_variables=["context", "question"])
 
-
-# Load and process the PDF
-loader = PyPDFLoader(pdf_file.name)
-pdf_data = loader.load()
-
-# Split the text into chunks
-text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
-docs = text_splitter.split_documents(pdf_data)
-
-# Create a Chroma vector store
-embeddings = HuggingFaceEmbeddings(model_name="embaas/sentence-transformers-multilingual-e5-base")
-db = Chroma.from_documents(docs, embeddings)
-
-# Initialize message history for conversation
-message_history = ChatMessageHistory()
-
-# Memory for conversational context
-memory = ConversationBufferMemory(
-    memory_key="chat_history",
-    output_key="answer",
-    chat_memory=message_history,
-    return_messages=True,
-)
-
-# Create a chain that uses the Chroma vector store
-chain = ConversationalRetrievalChain.from_llm(
-    llm=llm,
-    chain_type="stuff",
-    retriever=db.as_retriever(),
-    memory=memory,
-    return_source_documents=False,
-    combine_docs_chain_kwargs={'prompt': prompt}
-)
-
-# Process the question
-res = chain({"question": question})
-answer = res["answer"]
+def process_pdf_and_ask_question(pdf_file, question):
+    # Load and process the PDF
+    loader = PyPDFLoader(pdf_file.name)
+    pdf_data = loader.load()
+
+    # Split the text into chunks
+    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
+    docs = text_splitter.split_documents(pdf_data)
+
+    # Create a Chroma vector store
+    embeddings = HuggingFaceEmbeddings(model_name="embaas/sentence-transformers-multilingual-e5-base")
+    db = Chroma.from_documents(docs, embeddings)
+
+    # Initialize message history for conversation
+    message_history = ChatMessageHistory()
+
+    # Memory for conversational context
+    memory = ConversationBufferMemory(
+        memory_key="chat_history",
+        output_key="answer",
+        chat_memory=message_history,
+        return_messages=True,
+    )
+
+    # Create a chain that uses the Chroma vector store
+    chain = ConversationalRetrievalChain.from_llm(
+        llm=llm,
+        chain_type="stuff",
+        retriever=db.as_retriever(),
+        memory=memory,
+        return_source_documents=False,
+        combine_docs_chain_kwargs={'prompt': prompt}
+    )
+
+    # Process the question
+    res = chain({"question": question})
+    return res["answer"]
+
 
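
The effect of the commit is the wrapping itself: the loading, splitting, embedding, memory, and chain setup now live inside one function, and res["answer"] is returned instead of assigned, so each uploaded PDF and question gets a fresh vector store and conversation state and the caller receives the answer directly. For context, here is a minimal sketch of how that function might be exposed through a Gradio interface. The UI wiring below is an assumption and is not part of this diff: the gr.Interface call, the component labels, and the surrounding llm setup are illustrative only; only process_pdf_and_ask_question comes from the commit.

import gradio as gr

# Hypothetical UI wiring (not shown in this commit): expose the new
# process_pdf_and_ask_question(pdf_file, question) function through Gradio.
demo = gr.Interface(
    fn=process_pdf_and_ask_question,      # function added in this commit
    inputs=[
        gr.File(label="PDF file"),        # passed as pdf_file (accessed via pdf_file.name)
        gr.Textbox(label="Question"),     # passed as question
    ],
    outputs=gr.Textbox(label="Answer"),   # the answer string returned by the chain
    title="PDF question answering",
)

if __name__ == "__main__":
    demo.launch()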