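"""Streamlit frontend for a chat-with-your-PDF app.

Uploaded PDFs are sent to a backend server, which chunks, embeds, and
indexes them; questions are then answered over the indexed content via
the backend's /api/inference endpoint.
"""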
import requests
import streamlit as st
from layouts.mainlayout import mainlayout
from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
from components.file_streaming import upload_data
from components.display import display_source_document
@mainlayout
def display():
    with st.expander("What happens when I upload a PDF? 🤔", expanded=True):
        st.info(
            """
            - The PDF is uploaded to the backend server. ☁️
            - The PDF is converted into small chunks for faster processing. 📄
            - The chunks are broken down into tokens. A token is a single word or a group of words. 🔖
            - The tokens are converted into embedding vectors. 📊
            - The embedding vectors are stored in a vector store. 🗄️
            """,
            icon="ℹ️",
        )
    st.divider()


display()
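
# For reference, a minimal sketch of the ingestion pipeline described in the
# expander above, assuming a LangChain-style backend. The real pipeline runs
# on the backend server; the splitter and embedding choices here are purely
# illustrative and this helper is never called by the app.
def _ingestion_sketch(pdf_text: str):
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    from langchain.embeddings import HuggingFaceEmbeddings
    from langchain.vectorstores import FAISS

    # Split the document into small, overlapping chunks for faster retrieval.
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    chunks = splitter.split_text(pdf_text)
    # Tokenization happens inside the embedding model: each chunk is mapped to
    # an embedding vector and stored in a vector store.
    return FAISS.from_texts(chunks, HuggingFaceEmbeddings())


# Base URL of the backend inference server (assumed to be running locally).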
BASE_URL = "http://127.0.0.1:8000"
uploaded_files = st.sidebar.file_uploader(
    label="Upload PDF files", type=["pdf"], accept_multiple_files=True
)
if not uploaded_files:
    st.info("Please upload PDF documents to continue.")
    st.stop()

upload_data(uploaded_files)
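# Seed the conversation with an initial assistant greeting on first load.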
if "messages" not in st.session_state.keys():
st.session_state.messages = [
{
"role": "assistant",
"content": "What's troubling you? Ask me a question right away!",
}
]
# Display the existing chat history
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])
def clear_chat_history():
    """Reset the conversation to the initial assistant greeting."""
    st.session_state.messages = [
        {
            "role": "assistant",
            "content": "What's troubling you? Ask me a question right away!",
        }
    ]


st.sidebar.button("Clear Chat History", on_click=clear_chat_history)
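
# Expected response from POST {BASE_URL}/api/inference, inferred from how the
# payload is consumed below (the backend contract is not shown in this file):
#   {"status": "success" | "error",
#    "result": {"answer": str, "source_documents": [...]}}
# While the model is still deploying, the backend instead returns the bare
# string "exception.ModelDeployingException()".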
def generate_mistral_response(question: str):
    """Query the backend for an answer and render its source documents."""
    try:
        response = requests.post(
            f"{BASE_URL}/api/inference",
            json={"promptMessage": question},
        ).json()
    except requests.exceptions.RequestException:
        st.error("Could not reach the backend server. Please try again later.")
        st.stop()

    # The backend returns a bare string while the model is still deploying.
    if response == "exception.ModelDeployingException()":
        st.error("Model is deploying in the backend servers. Please try again after some time.")
        st.stop()

    if response["status"] == "error":
        st.error("Please refresh the page and try uploading the file again.")
        st.stop()

    answer = response["result"]["answer"]

    # Reuse the same response for the sources rather than issuing a second,
    # potentially different, inference call.
    with st.expander("Source documents 🧐", expanded=True):
        display_source_document(response["result"]["source_documents"])

    return answer
# User-provided prompt
if prompt := st.chat_input(
    disabled=st.session_state.messages[-1]["role"] != "assistant",
    placeholder="Hello, please ask me a question! 🤔",
):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)
# Generate a new response if the last message is not from the assistant
if st.session_state.messages[-1]["role"] != "assistant":
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            response = generate_mistral_response(prompt)
            placeholder = st.empty()
            full_response = ""
            # Simulate streaming by rendering the answer character by character
            for item in response:
                full_response += item
                placeholder.markdown(full_response)
            placeholder.markdown(full_response)
    message = {"role": "assistant", "content": full_response}
    st.session_state.messages.append(message)