import streamlit as st
import os
import time
from services.llm import initialize_llm, initialize_embeddings
from services.vector_store import create_vector_store, retrive_vector_store, generate_prompt
from services.pdf_processing import load_and_split_pdf
from utils.helpers import extract_thoughts, response_generator
import subprocess
# try:
#     print("Checking and starting Ollama...")
#     subprocess.run(["bash", "install_ollama.sh"], check=True)
#     print("Ollama is running!")
# except subprocess.CalledProcessError as e:
#     print(f"Error: {e}")
# Custom CSS for chat styling
CHAT_CSS = """
<style>
.user-message {
    text-align: right;
    background-color: #3c8ce7;
    color: white;
    padding: 10px;
    border-radius: 10px;
    margin-bottom: 10px;
    display: inline-block;
    width: fit-content;
    max-width: 70%;
    margin-left: auto;
    box-shadow: 0px 4px 6px rgba(0, 0, 0, 0.1);
}
.assistant-message {
    text-align: left;
    background-color: #d16ba5;
    color: white;
    padding: 10px;
    border-radius: 10px;
    margin-bottom: 10px;
    display: inline-block;
    width: fit-content;
    max-width: 70%;
    margin-right: auto;
    box-shadow: 0px 4px 6px rgba(0, 0, 0, 0.1);
}
</style>
"""
# Streamlit UI Setup
st.set_page_config(page_title="DocChatAI", layout="wide")
st.title("DocChatAI | Chat Using Documents")
# Expandable Disclaimer Section
with st.expander("Disclaimer (Click to expand)"):
    st.markdown("""
    - This AI chatbot provides **informational responses only** and should not be used as **legal, medical, or financial advice**.
    - The accuracy of responses depends on the provided **context and training data**.
    - **Use at your own discretion** and always verify important information from reliable sources.
    """)
# Sidebar
st.sidebar.title("DocChatAI")
st.sidebar.subheader("Chat using PDF Document")
st.sidebar.write("---")
# Model Selection
selected_model = st.sidebar.radio("Choose Model", ["deepseek-r1:1.5b"])
st.sidebar.write("---")
# Hyperparameters
temperature = st.sidebar.slider("Temperature", 0.0, 1.0, 0.7, 0.1)
top_p = st.sidebar.slider("Top-p (Nucleus Sampling)", 0.0, 1.0, 0.9, 0.05)
max_tokens = st.sidebar.number_input("Max Tokens", 10, 2048, 1024, 10)
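# Temperature and top-p control how random the sampling is; max tokens caps the length of each reply.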
st.sidebar.write("---")
# File Upload
uploaded_file = st.sidebar.file_uploader("Upload a PDF", type=["pdf"])
st.sidebar.write("---")
# About Section
st.sidebar.write("**About Me**")
st.sidebar.write("**Name:** Deepak Yadav")
st.sidebar.write("**Bio:** Passionate about AI and Machine Learning.")
st.sidebar.markdown("[GitHub](https://github.com/deepak7376) | [LinkedIn](https://www.linkedin.com/in/dky7376/)")
st.sidebar.write("---")
# Initialize LLM
llm = initialize_llm(selected_model, temperature, top_p, max_tokens)
embeddings = initialize_embeddings()
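# The embeddings model is used further down to index the uploaded PDF into the vector store.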
# Document Handling
retriever = None
if uploaded_file:
    os.makedirs("docs", exist_ok=True)
    filepath = os.path.join("docs", uploaded_file.name)
    with open(filepath, "wb") as f:
        f.write(uploaded_file.read())

    # Load and process PDF
    splits = load_and_split_pdf(filepath)
    vectorstore = create_vector_store(splits, embeddings)
    retriever = vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 3})
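    # The retriever runs a plain similarity search and returns the 3 most relevant chunks per query.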
# Apply custom CSS
st.markdown(CHAT_CSS, unsafe_allow_html=True)
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []
# Display previous messages
for message in st.session_state.messages:
    if message['thinking_part']:
        with st.expander("Thought Process"):
            st.markdown(message['thinking_part'])
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Chat Input
if user_input := st.chat_input("Ask something..."):
    st.session_state.messages.append({"role": "user", "content": user_input, "thinking_part": False})
    with st.chat_message("user"):
        st.markdown(user_input)

    # Measure response time
    start_time = time.time()

    # Generate response
    context = retrive_vector_store(retriever, user_input) if retriever else "No context"
    query = generate_prompt(context=context, question=user_input)
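    # retrive_vector_store presumably returns the retrieved chunks as text and
    # generate_prompt folds them into the final prompt; with no document uploaded
    # the model only sees the "No context" placeholder.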
    # response = llm.invoke(query)
    response = llm.create_chat_completion(
        messages=[
            {
                "role": "user",
                "content": f"{query}"
            }
        ]
    )
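    # create_chat_completion appears to return an OpenAI-style completion dict
    # (choices -> message -> content), which is what gets unpacked below.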
    # Calculate response time
    response_time = round(time.time() - start_time, 2)

    # Extract thoughts and main answer
    thinking_part, main_answer = extract_thoughts(response['choices'][0]['message']['content'])
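    # extract_thoughts presumably splits the model's reasoning trace (e.g. the <think>
    # blocks emitted by deepseek-r1) from the final answer text.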
    # Display AI response
    with st.chat_message("assistant"):
        if thinking_part:
            with st.expander("Thought Process"):
                st.markdown(thinking_part)

        # Formatted response display
        formatted_response = f"""
{main_answer}

**Response Time:** {response_time} seconds
"""
        st.markdown(formatted_response, unsafe_allow_html=True)

    # Save to session history
    st.session_state.messages.append({"role": "assistant", "content": formatted_response, "thinking_part": thinking_part})