import streamlit as st
import os
import base64
from langchain.memory import ConversationBufferWindowMemory
from langchain_community.chat_message_histories import StreamlitChatMessageHistory
from utils.ingestion import DocumentProcessor
from utils.llm import LLMProcessor
from utils.qa import QAEngine

# Configure Streamlit page
st.set_page_config(page_title="AI-Powered Document QA", layout="wide")

# Function to encode image in Base64 for avatars
def encode_image(image_path):
    with open(image_path, "rb") as file:
        return base64.b64encode(file.read()).decode()

# Load avatar images
user_avatar = encode_image("./icons/user.jpg")  # Change path if needed
ai_avatar = encode_image("./icons/ai.jpg")

# Initialize document processing & AI components
document_processor = DocumentProcessor()
llm_processor = LLMProcessor()
qa_engine = QAEngine()
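
# NOTE: the utils classes are not shown in this file; based on how they are used
# below, they are assumed to expose roughly this interface:
#   DocumentProcessor.process_document(file_path)    -> ingest/index the uploaded file
#   QAEngine.query(question)                         -> answer from the indexed document
#   LLMProcessor.generate_answer(context, question)  -> answer without document context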

# Ensure temp directory exists
os.makedirs("temp", exist_ok=True)

# Sidebar for file upload
st.sidebar.header("πŸ“‚ Upload a Document")
uploaded_file = st.sidebar.file_uploader("Choose a file", type=["pdf", "docx", "html", "pptx", "txt"])

# Initialize chat memory
memory_storage = StreamlitChatMessageHistory(key="chat_messages")
memory = ConversationBufferWindowMemory(
    memory_key="chat_history", human_prefix="User", chat_memory=memory_storage, k=5
)
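# With k=5, only the last five user/AI exchanges are kept in the prompt window,
# bounding prompt size while preserving recent conversational context.
# Note: `memory` is constructed here but not passed to the QA/LLM utilities in
# this file; only `memory_storage` (the raw message history) is used below.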

# Document upload & processing
if uploaded_file and "document_uploaded" not in st.session_state:
    file_path = os.path.join("temp", uploaded_file.name)

    with open(file_path, "wb") as f:
        f.write(uploaded_file.read())

    st.sidebar.success("βœ… File uploaded successfully!")

    with st.spinner("πŸ”„ Processing document..."):
        document_processor.process_document(file_path)

    st.sidebar.success("βœ… Document processed successfully!")
    st.session_state["document_uploaded"] = True
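    # NOTE: the "document_uploaded" flag prevents re-processing on every Streamlit
    # rerun; uploading a different file in the same session will not trigger a new
    # ingestion pass unless this flag is cleared.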

# Chat UI Header
st.title("πŸ’¬ AI Chat Assistant")
st.divider()

# Display chat history
for idx, message in enumerate(memory_storage.messages):
    role = "user" if message.type == "human" else "assistant"
    avatar = user_avatar if role == "user" else ai_avatar  # Assign appropriate avatar

    with st.chat_message(role, avatar=f"data:image/jpeg;base64,{avatar}"):
        st.markdown(message.content)

        # For AI messages, also show the text in a code block and a "Copy" button.
        # The button stores the text in st.session_state["copied_text"]; it does
        # not write to the OS clipboard.
        if role == "assistant":
            copy_key = f"copy_btn_{idx}"  # Unique key per rendered copy button
            st.code(message.content, language="text")
            st.button(
                "πŸ“‹ Copy",
                key=copy_key,
                on_click=lambda text=message.content: st.session_state.update({"copied_text": text}),
            )

# User input at the bottom
user_input = st.chat_input("Type your message here...")

if user_input:
    memory_storage.add_user_message(user_input)

    # Display user message
    with st.chat_message("user", avatar=f"data:image/jpeg;base64,{user_avatar}"):
        st.markdown(user_input)

    with st.spinner("πŸ€– Thinking..."):
        if st.session_state.get("document_uploaded", False):
            answer = qa_engine.query(user_input)
        else:
            answer = llm_processor.generate_answer("", user_input)
            st.warning("⚠️ No document uploaded. Response is from general AI knowledge.")

    # Normalize the response: the QA/LLM utilities may return either a plain string
    # or a message object exposing a `.content` attribute.
    answer_text = answer.content if hasattr(answer, "content") else answer

    memory_storage.add_ai_message(answer_text)

    # Display the AI response with a copy button
    with st.chat_message("assistant", avatar=f"data:image/jpeg;base64,{ai_avatar}"):
        st.markdown(answer_text)

        # As in the history loop, the "Copy" button stores the text in session
        # state rather than the OS clipboard.
        st.code(answer_text, language="text")
        if st.button("πŸ“‹ Copy", key="copy_latest"):
            st.session_state["copied_text"] = answer_text
            st.success("βœ… Response text stored for copying.")
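
# ---------------------------------------------------------------------------
# Optional sketch (not wired into the app above): the "Copy" buttons in this app
# only stash text in st.session_state; they never touch the OS clipboard. If a
# real clipboard copy is wanted, one possible approach is a tiny HTML/JS snippet
# rendered through streamlit.components.v1 that calls
# navigator.clipboard.writeText (requires HTTPS or localhost). The helper name
# `render_copy_button` is illustrative and not part of the original utils package.
# ---------------------------------------------------------------------------
import json
import streamlit.components.v1 as components


def render_copy_button(text: str, label: str = "πŸ“‹ Copy") -> None:
    """Render a small HTML button that copies `text` to the browser clipboard."""
    payload = json.dumps(text)  # JSON-escape so the text is a valid JS string literal
    components.html(
        f"""
        <button id="copy-btn">{label}</button>
        <script>
          const text = {payload};
          document.getElementById("copy-btn").addEventListener(
            "click", () => navigator.clipboard.writeText(text)
          );
        </script>
        """,
        height=45,
    )

# Example usage inside a chat message block:
#     render_copy_button(answer_text)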