Update app.py
app.py CHANGED
@@ -10,20 +10,30 @@ import nest_asyncio
 from langchain.memory import ConversationBufferWindowMemory
 from langchain_community.chat_message_histories import StreamlitChatMessageHistory
 from dotenv import load_dotenv
-nest_asyncio.apply()
 
+nest_asyncio.apply()
 load_dotenv()
+
 st.set_page_config(layout='wide', page_title="InsightFusion Chat")
+
 memory_storage = StreamlitChatMessageHistory(key="chat_messages")
-memory = ConversationBufferWindowMemory(
+memory = ConversationBufferWindowMemory(
+    memory_key="chat_history",
+    human_prefix="User",
+    chat_memory=memory_storage,
+    k=3
+)
 
 image_bg = r"data/pexels-andreea-ch-371539-1166644.jpg"
 
 def add_bg_from_local(image_file):
     with open(image_file, "rb") as image_file:
         encoded_string = base64.b64encode(image_file.read())
-    st.markdown(f"""<style>.stApp {{
-
+    st.markdown(f"""<style>.stApp {{
+        background-image: url(data:image/{"png"};base64,{encoded_string.decode()});
+        background-size: cover
+    }}</style>""", unsafe_allow_html=True)
+
 add_bg_from_local(image_bg)
 
 st.markdown("""
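Note on the new memory block: ConversationBufferWindowMemory with k=3 feeds only the last three exchanges back into the prompt, and because chat_memory=memory_storage it reads and writes through the Streamlit-backed history. A minimal sketch of the round trip, using a plain in-memory ChatMessageHistory as a stand-in for the Streamlit one:

    from langchain.memory import ConversationBufferWindowMemory
    from langchain_community.chat_message_histories import ChatMessageHistory

    history = ChatMessageHistory()  # stand-in for StreamlitChatMessageHistory
    memory = ConversationBufferWindowMemory(
        memory_key="chat_history",
        human_prefix="User",
        chat_memory=history,
        k=3,  # window size: last 3 user/assistant exchanges
    )

    for i in range(5):
        memory.save_context({"input": f"question {i}"}, {"output": f"answer {i}"})

    # All 5 turns remain in `history`, but only the last 3 are returned here.
    print(memory.load_memory_variables({})["chat_history"])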
@@ -43,18 +53,43 @@ def get_answer(query, chain):
     return None
 
 uploaded_file = st.file_uploader("File upload", type="pdf")
+path = None
+
+# Handle uploaded file
 if uploaded_file is not None:
     temp_file_path = os.path.join("temp", uploaded_file.name)
     os.makedirs("temp", exist_ok=True)
     with open(temp_file_path, "wb") as f:
         f.write(uploaded_file.getbuffer())
-
     path = os.path.abspath(temp_file_path)
     st.write(f"File saved to: {path}")
     st.write("Document uploaded successfully!")
 
+# Option to use a predefined demo PDF from pdf_resource folder
+st.markdown("### Or use a demo file:")
+if st.button("Use Demo PDF"):
+    demo_file_path = os.path.join("pdf_resource", "sample.pdf")  # Replace with actual demo file name
+    if os.path.exists(demo_file_path):
+        path = os.path.abspath(demo_file_path)
+        st.write(f"Using demo file: {path}")
+        st.success("Demo file loaded successfully!")
+
+        with st.spinner("Processing demo file..."):
+            try:
+                client = create_vector_database(path)
+                image_vdb = extract_and_store_images(path)
+                chain = qa_bot(client)
+                st.session_state['chain'] = chain
+                st.session_state['image_vdb'] = image_vdb
+                st.success("Demo file processing complete.")
+            except Exception as e:
+                st.error(f"Error processing demo PDF: {e}")
+    else:
+        st.error("Demo file not found. Make sure 'pdf_resource/sample.pdf' exists.")
+
+# Process uploaded file on button click
 if st.button("Start Processing"):
-    if
+    if path is not None:
         with st.spinner("Processing"):
             try:
                 client = create_vector_database(path)
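Both processing paths stash their results in st.session_state, which is what lets them survive Streamlit's rerun-on-every-interaction model. The same guard could be factored out; a sketch reusing the app's own create_vector_database, extract_and_store_images, and qa_bot (ensure_pipeline itself is hypothetical, the diff inlines this logic in both branches):

    def ensure_pipeline(path):
        # Build once per session; later reruns reuse the cached objects.
        if 'chain' not in st.session_state or 'image_vdb' not in st.session_state:
            client = create_vector_database(path)
            st.session_state['image_vdb'] = extract_and_store_images(path)
            st.session_state['chain'] = qa_bot(client)
        return st.session_state['chain'], st.session_state['image_vdb']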
@@ -66,16 +101,18 @@
         except Exception as e:
             st.error(f"Error during processing: {e}")
     else:
-        st.error("Please upload a file before starting processing.")
+        st.error("Please upload a file or use the demo before starting processing.")
 
+# Custom input background
 st.markdown("""
     <style>
     .stChatInputContainer > div {
-
+        background-color: #000000;
     }
     </style>
-
+    """, unsafe_allow_html=True)
 
+# Chat logic
 if user_input := st.chat_input("User Input"):
     if 'chain' in st.session_state and 'image_vdb' in st.session_state:
         chain = st.session_state['chain']
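The chat-input styling in this hunk uses the same mechanism as add_bg_from_local above: CSS injected through st.markdown with unsafe_allow_html=True, with the image inlined as a base64 data: URL so no static file route is needed. A self-contained version of that pattern (path and MIME type are placeholders):

    import base64
    import streamlit as st

    def set_background(image_path, mime="png"):
        # Inline the image as a data: URL inside an injected <style> block.
        with open(image_path, "rb") as f:
            encoded = base64.b64encode(f.read()).decode()
        st.markdown(
            f"""<style>.stApp {{
                background-image: url(data:image/{mime};base64,{encoded});
                background-size: cover;
            }}</style>""",
            unsafe_allow_html=True,
        )

    set_background("data/background.png")  # placeholder path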
@@ -90,13 +127,11 @@ if user_input := st.chat_input("User Input"):
         with st.chat_message("assistant"):
             st.markdown(response)
 
-        # Save context in memory
         memory.save_context(
             {"input": user_input},
             {"output": response}
         )
 
-        # Append messages to session state for display
         st.session_state.messages.append({"role": "user", "content": user_input})
         st.session_state.messages.append({"role": "assistant", "content": response})
 
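One subtlety kept from the old version: because chat_memory=memory_storage, memory.save_context also writes each turn into the StreamlitChatMessageHistory, so the transcript is recorded twice (there and in st.session_state.messages) and the two display loops at the end of the file can each render it. A tiny illustration of the write-through, assuming the objects defined above:

    memory.save_context({"input": "hi"}, {"output": "hello"})
    print([m.type for m in memory_storage.messages])  # ['human', 'ai']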
@@ -109,13 +144,16 @@ if user_input := st.chat_input("User Input"):
     else:
         st.error("Please start processing before entering user input.")
 
+# Initialize message state
 if "messages" not in st.session_state:
     st.session_state.messages = []
 
+# Display message history
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
         st.write(message["content"])
 
+# Display chat memory history (LangChain)
 for i, msg in enumerate(memory_storage.messages):
     name = "user" if i % 2 == 0 else "assistant"
     st.chat_message(name).markdown(msg.content)
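A possible simplification for this last loop: LangChain messages carry their own role via msg.type ("human" or "ai"), so the even/odd index guess is not needed. A sketch:

    for msg in memory_storage.messages:
        # Derive the role from the message itself rather than
        # assuming strict user/assistant alternation.
        name = "user" if msg.type == "human" else "assistant"
        st.chat_message(name).markdown(msg.content)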