NHZ committed on
Commit e7ac282 · verified · 1 Parent(s): c4191ef

Update app.py

Files changed (1):
  app.py  +24 -16
app.py CHANGED
@@ -1,11 +1,8 @@
 import os
 import streamlit as st
 import requests
-import PyPDF2  # <-- Add this import for PyPDF2
+import PyPDF2
 from groq import Groq
-from langchain.chains import AnalyzeDocumentChain
-from langchain.prompts import PromptTemplate
-from langchain.document_loaders import TextLoader
 from langchain.vectorstores import FAISS
 from langchain.embeddings import HuggingFaceEmbeddings
 from sentence_transformers import SentenceTransformer
@@ -24,7 +21,7 @@ def extract_text_from_pdf(pdf_url):
 
     # Read the PDF content
     with open("temp.pdf", "rb") as f:
-        reader = PyPDF2.PdfReader(f)  # Now PyPDF2 is defined
+        reader = PyPDF2.PdfReader(f)
         text = ""
         for page in reader.pages:
             text += page.extract_text()
@@ -94,17 +91,28 @@ def main():
     query = st.text_input("Enter your question here")
     if st.button("Query Document"):
         results = query_faiss(st.session_state['faiss_index'], query)
-        st.write("### Results from Document:")
-        for i, result in enumerate(results):
-            st.write(f"**Result {i+1}:** {result}")
-
-        # Use Groq API for additional insights
-        chat_completion = client.chat.completions.create(
-            messages=[{"role": "user", "content": query}],
-            model="llama-3.3-70b-versatile",
-        )
-        st.write("### Insights from Groq-powered Model:")
-        st.write(chat_completion.choices[0].message.content)
+        if not results:
+            st.warning("No relevant context found in the document.")
+        else:
+            st.write("### Results from Document:")
+            for i, result in enumerate(results):
+                st.write(f"**Result {i+1}:** {result}")
+
+            # Combine results to provide context
+            context = "\n".join(results)
+            st.write("### Insights based on Document Context:")
+            prompt = (
+                f"The following context is from the document:\n\n"
+                f"{context}\n\n"
+                f"Based on this context, answer the question:\n"
+                f"{query}"
+            )
+
+            chat_completion = client.chat.completions.create(
+                messages=[{"role": "user", "content": prompt}],
+                model="llama-3.3-70b-versatile",
+            )
+            st.write(chat_completion.choices[0].message.content)
 
 if __name__ == "__main__":
     main()
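
For context on the second hunk: the diff shows only the reading half of extract_text_from_pdf. Below is a minimal sketch of what the whole helper presumably looks like after this commit, assuming the function first downloads pdf_url to temp.pdf with requests; that download step does not appear in the diff and is an assumption.

import requests
import PyPDF2

def extract_text_from_pdf(pdf_url):
    # Assumed download step (not shown in this diff): fetch the PDF and
    # save it to the temp.pdf path that the reading code below opens.
    response = requests.get(pdf_url)
    with open("temp.pdf", "wb") as f:
        f.write(response.content)

    # Read the PDF content, as in the code after this commit.
    with open("temp.pdf", "rb") as f:
        reader = PyPDF2.PdfReader(f)
        text = ""
        for page in reader.pages:
            text += page.extract_text()
    return text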
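
The third hunk grounds the Groq completion in chunks returned by query_faiss(st.session_state['faiss_index'], query), a helper that is not part of this commit. A minimal sketch of what such retrieval code could look like, assuming the index is a LangChain FAISS vector store built with HuggingFaceEmbeddings (both are imported at the top of app.py); build_faiss_index, the fixed-size chunking, the embedding model name, and the k parameter are illustrative, not taken from the source.

from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings

def build_faiss_index(text, chunk_size=500):
    # Illustrative fixed-size chunking of the extracted PDF text.
    chunks = [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    return FAISS.from_texts(chunks, embeddings)

def query_faiss(faiss_index, query, k=3):
    # similarity_search returns LangChain Document objects; keep only the
    # text, which is what the updated main() joins into the Groq prompt.
    docs = faiss_index.similarity_search(query, k=k)
    return [doc.page_content for doc in docs]

With results returned as plain strings, the "\n".join(results) in the added lines of main() works as written.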