Update app.py
app.py CHANGED
@@ -28,7 +28,7 @@ qa_engine = QAEngine()
 os.makedirs("temp", exist_ok=True)
 
 # Sidebar for file upload
-st.sidebar.header("Upload a Document")
+st.sidebar.header("📄 Upload a Document")
 uploaded_file = st.sidebar.file_uploader("Choose a file", type=["pdf", "docx", "html", "pptx", "txt"])
 
 # Initialize chat memory
@@ -44,66 +44,45 @@ if uploaded_file and "document_uploaded" not in st.session_state:
     with open(file_path, "wb") as f:
         f.write(uploaded_file.read())
 
-    st.sidebar.success("File uploaded successfully!")
+    st.sidebar.success("✅ File uploaded successfully!")
 
-    with st.spinner("Processing document..."):
+    with st.spinner("🔄 Processing document..."):
         document_processor.process_document(file_path)
 
-    st.sidebar.success("Document processed successfully!")
+    st.sidebar.success("✅ Document processed successfully!")
     st.session_state["document_uploaded"] = True
 
 # Chat UI Header
-st.
-st.
+st.title("💬 AI Chat Assistant")
+st.divider()
 
-# Display chat history
+# Display chat history
 for message in memory_storage.messages:
     role = "user" if message.type == "human" else "assistant"
     avatar = user_avatar if role == "user" else ai_avatar  # Assign appropriate avatar
 
-    st.
-
-        <div style="display: flex; align-items: center; margin-bottom: 10px;">
-            <img src="data:image/jpeg;base64,{avatar}" width="40" height="40" style="border-radius: 50%; margin-right: 10px;">
-            <div style="background-color: #f1f1f1; padding: 10px; border-radius: 10px; max-width: 80%;">{message.content}</div>
-        </div>
-        """,
-        unsafe_allow_html=True
-    )
+    with st.chat_message(role, avatar=f"data:image/jpeg;base64,{avatar}"):
+        st.markdown(message.content)
 
 # User input at the bottom
-user_input = st.chat_input("
+user_input = st.chat_input("Type your message here...")
 
 if user_input:
     memory_storage.add_user_message(user_input)
 
-
-
-
-
-
-        </div>
-        """,
-        unsafe_allow_html=True
-    ):
-        pass
-
-    with st.spinner("Generating response..."):
+    # Display user message
+    with st.chat_message("user", avatar=f"data:image/jpeg;base64,{user_avatar}"):
+        st.markdown(user_input)
+
+    with st.spinner("🤔 Thinking..."):
         if st.session_state.get("document_uploaded", False):
             answer = qa_engine.query(user_input)
         else:
             answer = llm_processor.generate_answer("", user_input)
-            st.warning("No document uploaded.
+            st.warning("⚠️ No document uploaded. Response is from general AI knowledge.")
 
     memory_storage.add_ai_message(answer)
 
-
-
-
-            <img src="data:image/jpeg;base64,{ai_avatar}" width="40" height="40" style="border-radius: 50%; margin-right: 10px;">
-            <div style="background-color: #e6f7ff; padding: 10px; border-radius: 10px; max-width: 80%;">{answer.content}</div>
-        </div>
-        """,
-        unsafe_allow_html=True
-    ):
-        pass
+    # Display AI response
+    with st.chat_message("assistant", avatar=f"data:image/jpeg;base64,{ai_avatar}"):
+        st.markdown(answer.content)
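
For reference, the pattern this commit adopts (st.chat_message, st.chat_input, and st.spinner in place of hand-built HTML bubbles) can be exercised on its own. The sketch below is a minimal, self-contained approximation and not the app's actual code: the hypothetical EchoEngine stands in for QAEngine/llm_processor, a session-state list stands in for memory_storage, and plain emoji replace the base64 avatars.

# Minimal sketch of the chat flow introduced in this commit.
# Assumptions: EchoEngine, the "history" session-state list, and the emoji
# avatars are stand-ins for the app's QAEngine, memory_storage, and
# base64-encoded avatar images.
import streamlit as st

class EchoEngine:
    """Placeholder for the app's QAEngine: simply echoes the question."""
    def query(self, question: str) -> str:
        return f"You asked: {question}"

engine = EchoEngine()

# Keep chat history across Streamlit reruns.
if "history" not in st.session_state:
    st.session_state["history"] = []  # list of (role, text) tuples

st.title("Chat sketch")
st.divider()

# Replay stored messages with Streamlit's built-in chat bubbles.
for role, text in st.session_state["history"]:
    with st.chat_message(role, avatar="🧑" if role == "user" else "🤖"):
        st.markdown(text)

# chat_input renders a text box pinned to the bottom of the page.
user_input = st.chat_input("Type your message here...")

if user_input:
    st.session_state["history"].append(("user", user_input))
    with st.chat_message("user", avatar="🧑"):
        st.markdown(user_input)

    with st.spinner("Thinking..."):
        answer = engine.query(user_input)

    st.session_state["history"].append(("assistant", answer))
    with st.chat_message("assistant", avatar="🤖"):
        st.markdown(answer)

Run with `streamlit run sketch.py` (any filename works): the history is replayed on every rerun, and the new user and assistant messages are rendered once immediately after submission, which is the same flow the updated app.py follows.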