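"""Streamlit front end for StudyBot.

Upload PDF documents in the sidebar, then chat with an assistant that answers
questions about them through the hosted StudyBot backend API. Run locally with
the Streamlit CLI (`streamlit run` on this file).
"""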
import requests
import streamlit as st

from components.display import display_source_document
from components.file_streaming import upload_data
from layouts.mainlayout import mainlayout
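
# `components` and `layouts` are local packages that ship with this Space;
# `mainlayout` is assumed to be a decorator that applies the app's shared page layout.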


@mainlayout
def display():
    # Explain the server-side ingestion pipeline before the user uploads anything.
    with st.expander("What happens when I upload a PDF?", expanded=True):
        st.info(
            """
            - The PDF is uploaded to the backend server.
            - The PDF is converted into small chunks for faster processing.
            - The chunks are broken down into tokens. A token is a single word or a group of words.
            - The tokens are converted into embedding vectors.
            - The embedding vectors are stored in a vector store.
            """,
            icon="ℹ️",
        )
    st.divider()


display()
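
# The steps listed in the expander above run server-side. A rough sketch of
# what such an ingestion pipeline typically looks like (illustrative only;
# these helper names are hypothetical and the real code lives in the backend):
#
#     chunks = split_into_chunks(pdf_text)      # small pieces for fast processing
#     vectors = embedding_model.embed(chunks)   # tokens -> embedding vectors
#     vector_store.add(chunks, vectors)         # persisted for later retrieval
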
BASE_URL = "https://hemanthsai7-studybotapi.hf.space"
uploaded_files = st.sidebar.file_uploader(label="Upload PDF files", type=["pdf"])
if not uploaded_files:
    st.info("Please upload PDF documents to continue.")
    st.stop()

# Stream the uploaded files to the backend for ingestion.
upload_data(uploaded_files, BASE_URL)
if "messages" not in st.session_state.keys():
st.session_state.messages = [
{
"role": "assistant",
"content": "What's troubling you? Ask me a question right away!",
}
]

# Display or clear chat messages
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])


def clear_chat_history():
    st.session_state.messages = [
        {
            "role": "assistant",
            "content": "What's troubling you? Ask me a question right away!",
        }
    ]


st.sidebar.button("Clear Chat History", on_click=clear_chat_history)
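

# Expected backend response shape, inferred from the handling below (the exact
# contract is defined by the backend API, not by this file):
#     {"status": ..., "result": {"answer": ..., "source_documents": [...]}}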
def generate_mistral_response(question: str) -> str:
    """Send the latest user question to the inference API and return the answer."""
    # Fall back to the most recent user message in the history; `question`
    # can be None on a rerun where no new prompt was submitted.
    for dict_message in st.session_state.messages:
        if dict_message["role"] == "user":
            question = dict_message["content"]

    try:
        response = requests.post(
            f"{BASE_URL}/api/inference",
            json={"promptMessage": question},
        ).json()
    except requests.exceptions.RequestException:
        st.error("Could not reach the backend server. Please try again later.")
        st.stop()

    # While the model is still deploying, the endpoint returns this bare string.
    if response == "exception.ModelDeployingException()":
        st.error("Model is deploying in the backend servers. Please try again after some time.")
        st.stop()

    if response["status"] == "error":
        st.error("Please refresh the page and try uploading the file again.")
        st.stop()

    answer = response["result"]["answer"]
    with st.expander("Source documents"):
        display_source_document(response["result"]["source_documents"])
    return answer


# User-provided prompt
if prompt := st.chat_input(
    disabled=st.session_state.messages[-1]["role"] != "assistant",
    placeholder="Hello, please ask me a question!",
):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)

# Generate a new response if the last message is not from the assistant
if st.session_state.messages[-1]["role"] != "assistant":
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            response = generate_mistral_response(prompt)
        # Simulate streaming by rendering the answer incrementally,
        # one character at a time.
        placeholder = st.empty()
        full_response = ""
        for item in response:
            full_response += item
            placeholder.markdown(full_response)
        placeholder.markdown(full_response)
    message = {"role": "assistant", "content": full_response}
    st.session_state.messages.append(message)