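"""Streamlit chat demo for TeapotAI.

Fetches web context from the Brave Search API and answers questions with
TeapotAI's retrieval-augmented `query()` call. Documents pasted into the
sidebar are used for RAG; the model is cached in `st.session_state` and only
reloaded when the documents change.
"""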
import streamlit as st
from teapotai import TeapotAI, TeapotAISettings
import hashlib
import os
import requests
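
# Default documents used to seed the sidebar text area (and as a fallback if it is left empty)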
default_documents = []
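
# Brave Search API key, read from the `brave_api_key` environment variable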
API_KEY = os.environ.get("brave_api_key")
def brave_search(query, count=3):
    """Query the Brave Search API and return a list of (title, description, url) tuples."""
    url = "https://api.search.brave.com/res/v1/web/search"
    headers = {"Accept": "application/json", "X-Subscription-Token": API_KEY}
    params = {
        "q": query,
        "count": count,
        "extra_snippets": True
    }
    response = requests.get(url, headers=headers, params=params)
    if response.status_code == 200:
        results = response.json().get("web", {}).get("results", [])
        return [(res["title"], res["description"], res["url"]) for res in results]
    else:
        print(f"Error: {response.status_code}, {response.text}")
        return []
def handle_chat(user_input, teapot_ai):
    """Fetch web context for the user's question via Brave Search, then answer with TeapotAI."""
    results = brave_search(user_input)

    # Use the result descriptions (with Brave's <strong> highlighting stripped) as RAG context
    documents = [
        description.replace("<strong>", "").replace("</strong>", "")
        for _title, description, _url in results
    ]
    context = "\n".join(documents)

    response = teapot_ai.query(
        context=context,
        query=user_input
    )

    # Alternative: use the chat API with an explicit system prompt instead of query()
    # response = teapot_ai.chat([
    #     {
    #         "role": "system",
    #         "content": "You are Teapot, an open-source AI assistant optimized for running efficiently on low-end devices. You provide short, accurate responses without hallucinating and excel at extracting information and summarizing text."
    #     },
    #     {
    #         "role": "user",
    #         "content": user_input
    #     }
    # ])

    return response
def suggestion_button(suggestion_text, teapot_ai):
    if st.button(suggestion_text):
        response = handle_chat(suggestion_text, teapot_ai)
        # Store the exchange so it appears in the chat history on the next rerun
        st.session_state.messages.append({"role": "user", "content": suggestion_text})
        st.session_state.messages.append({"role": "assistant", "content": response})
def hash_documents(documents):
    """Hash the document list so the model is only reloaded when the documents change."""
    return hashlib.sha256("\n".join(documents).encode("utf-8")).hexdigest()
# Streamlit app
def main():
    st.set_page_config(page_title="TeapotAI Chat", page_icon=":robot_face:", layout="wide")

    # Sidebar for document input
    st.sidebar.header("Document Input (for RAG)")
    user_documents = st.sidebar.text_area(
        "Enter documents, each on a new line",
        value="\n".join(default_documents)
    )

    # Parse the sidebar input into documents (one per line), dropping empty lines
    documents = [doc for doc in user_documents.split("\n") if doc.strip()]

    # Reload the model only if the documents have changed; otherwise reuse it from session_state
    new_documents_hash = hash_documents(documents)
    if "documents_hash" not in st.session_state or st.session_state.documents_hash != new_documents_hash:
        with st.spinner('Loading Model and Embeddings...'):
            teapot_ai = TeapotAI(documents=documents or default_documents, settings=TeapotAISettings(rag_num_results=3))
        # Store the new hash and model in session state
        st.session_state.documents_hash = new_documents_hash
        st.session_state.teapot_ai = teapot_ai
    else:
        # Reuse the existing model
        teapot_ai = st.session_state.teapot_ai
    # Initialize session state with the welcome message
    if "messages" not in st.session_state:
        st.session_state.messages = [{"role": "assistant", "content": "Hi, I am Teapot AI, how can I help you?"}]

    # Display previous messages from the chat history
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
    # Accept user input
    user_input = st.chat_input("Ask about famous landmarks")

    # Suggested questions as one-click buttons
    st.markdown("### Suggested Questions")
    s1, s2, s3 = st.columns([1, 2, 3])
    with s1:
        suggestion_button("How tall is the Eiffel Tower?", teapot_ai)
    with s2:
        suggestion_button("Extract the year the Eiffel Tower was constructed.", teapot_ai)
    with s3:
        suggestion_button("How large is the Death Star?", teapot_ai)
    if user_input:
        # Display the user message in a chat message container
        with st.chat_message("user"):
            st.markdown(user_input)
        # Add the user message to session state
        st.session_state.messages.append({"role": "user", "content": user_input})

        with st.spinner('Generating Response...'):
            # Get the answer from TeapotAI, grounded in the Brave Search context
            response = handle_chat(user_input, teapot_ai)

        # Display the assistant response in a chat message container
        with st.chat_message("assistant"):
            st.markdown(response)
        # Add the assistant response to session state
        st.session_state.messages.append({"role": "assistant", "content": response})
# Run the app
if __name__ == "__main__":
    main()