Upload 2 files
- app.py +100 -0
- requirements.txt +5 -0
app.py
ADDED
@@ -0,0 +1,100 @@
import streamlit as st
from llama_index.core import StorageContext, load_index_from_storage, VectorStoreIndex, SimpleDirectoryReader, ChatPromptTemplate
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
from dotenv import load_dotenv
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core import Settings
import os
import base64

# Load environment variables
load_dotenv()

# Configure the LlamaIndex settings
Settings.llm = HuggingFaceInferenceAPI(
    model_name="google/gemma-1.1-7b-it",
    tokenizer_name="google/gemma-1.1-7b-it",
    context_window=3900,
    token=os.getenv("HF_TOKEN"),
    max_new_tokens=1000,
    generate_kwargs={"temperature": 0.1},
)
Settings.embed_model = HuggingFaceEmbedding(
    model_name="BAAI/bge-small-en-v1.5"
)

# Define the directories for persistent storage and data
PERSIST_DIR = "./db"
DATA_DIR = "data"

# Ensure both directories exist
os.makedirs(DATA_DIR, exist_ok=True)
os.makedirs(PERSIST_DIR, exist_ok=True)

def displayPDF(file):
    # Embed the PDF in the page as a base64-encoded iframe
    with open(file, "rb") as f:
        base64_pdf = base64.b64encode(f.read()).decode('utf-8')
    pdf_display = f'<iframe src="data:application/pdf;base64,{base64_pdf}" width="100%" height="600" type="application/pdf"></iframe>'
    st.markdown(pdf_display, unsafe_allow_html=True)

def data_ingestion():
    # Build a vector index over everything in DATA_DIR and persist it to disk
    documents = SimpleDirectoryReader(DATA_DIR).load_data()
    index = VectorStoreIndex.from_documents(documents)
    index.storage_context.persist(persist_dir=PERSIST_DIR)

def handle_query(query):
    # Reload the persisted index and answer the query with a custom QA prompt
    storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
    index = load_index_from_storage(storage_context)
    chat_text_qa_msgs = [
        (
            "user",
            """You are a Q&A assistant named MAXIMUS, created by Suriya. Whenever users inquire about your creator or Suriya, respond with: "I was created by Suriya, an enthusiast in Artificial Intelligence. He is dedicated to solving complex problems and delivering innovative solutions. With a strong focus on machine learning, deep learning, Python, generative AI, NLP, and computer vision, Suriya is passionate about pushing the boundaries of AI to explore new possibilities." Your main goal is to greet users and provide answers to their questions as accurately as possible, based on the instructions and context you have been given. If a question does not match the provided context or is outside the scope of the document, kindly advise the user to ask questions within the context of the document.

            Context:

            {context_str}

            Question:

            {query_str}
            """
        )
    ]

    text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)
    query_engine = index.as_query_engine(text_qa_template=text_qa_template)
    answer = query_engine.query(query)

    if hasattr(answer, 'response'):
        return answer.response
    elif isinstance(answer, dict) and 'response' in answer:
        return answer['response']
    else:
        return "Sorry, I couldn't find an answer."

# Streamlit app initialization
st.title("Chat with your PDF 🦜📄")
st.markdown("Built by [Suriya❤️](https://github.com/theSuriya)")
st.markdown("Upload your PDF 👇")

if 'messages' not in st.session_state:
    st.session_state.messages = [{'role': 'assistant', "content": 'Hello! Upload a PDF and ask me anything about its content.'}]

uploaded_file = st.file_uploader("", type=["pdf"])
if uploaded_file is not None:
    filepath = os.path.join(DATA_DIR, "saved_pdf.pdf")
    with open(filepath, "wb") as f:
        f.write(uploaded_file.getbuffer())
    displayPDF(filepath)  # Display the uploaded PDF
    data_ingestion()  # Re-index every time a new file is uploaded

user_prompt = st.chat_input("Ask me anything about the content of the PDF:")
if user_prompt:
    st.session_state.messages.append({'role': 'user', "content": user_prompt})
    response = handle_query(user_prompt)
    st.session_state.messages.append({'role': 'assistant', "content": response})

for message in st.session_state.messages:
    with st.chat_message(message['role']):
        st.write(message['content'])
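
To run this Space locally: app.py reads the Hugging Face token with os.getenv("HF_TOKEN"), so the simplest setup is a .env file next to app.py. A minimal sketch, with a placeholder token value:

# .env (read by load_dotenv() at startup; keep it out of version control)
HF_TOKEN=hf_your_token_here

With that in place, pip install -r requirements.txt followed by streamlit run app.py serves the app locally.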
requirements.txt
ADDED
@@ -0,0 +1,5 @@
streamlit
python-dotenv
llama-index
llama-index-embeddings-huggingface
llama-index-llms-huggingface
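
These requirements are unpinned and assume the post-0.10 layout of llama-index, where the Hugging Face LLM and embedding integrations live in their own namespaced packages (hence the two llama-index-* entries). A quick smoke test that the environment resolved correctly, mirroring the imports app.py actually uses (the filename is illustrative):

# smoke_test.py: verify the namespaced llama-index packages installed cleanly
from llama_index.core import Settings, VectorStoreIndex, SimpleDirectoryReader
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
print("all app.py imports resolve")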