import os

import streamlit as st

from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
from llama_index.embeddings.gemini import GeminiEmbedding
from llama_index.llms.gemini import Gemini
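
# Gemini reads GOOGLE_API_KEY from the environment. The "google_api_key"
# secrets entry is an assumed name; point it at your own key in
# .streamlit/secrets.toml. (Requires the llama-index-llms-gemini and
# llama-index-embeddings-gemini packages.)
if "google_api_key" in st.secrets:
    os.environ["GOOGLE_API_KEY"] = st.secrets["google_api_key"]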

st.set_page_config(
    page_title="Chat with the Streamlit docs, powered by LlamaIndex",
    page_icon="🦙",
    layout="centered",
    initial_sidebar_state="auto",
    menu_items=None,
)

st.title("Chat with the Streamlit docs, powered by LlamaIndex 💬🦙")

st.info(
    "Check out the full tutorial to build this app in our "
    "[blog post](https://blog.streamlit.io/build-a-chatbot-with-custom-data-sources-powered-by-llamaindex/)",
    icon="📃",
)
if "messages" not in st.session_state.keys(): |
|
st.session_state.messages = [ |
|
{ |
|
"role": "assistant", |
|
"content": "Ask me a question about Streamlit's open-source Python library!", |
|
} |
|
] |
|
|
|


@st.cache_resource(show_spinner=False)
def load_data():
    """Read the Streamlit docs from ./data and build an in-memory vector index."""
    reader = SimpleDirectoryReader(input_dir="./data", recursive=True)
    docs = reader.load_data()
    Settings.llm = Gemini(
        model="gemini-2.0-flash-exp",
        temperature=1,
        system_prompt="""You are an expert on the Streamlit Python library
        and your job is to answer technical questions. Assume that all
        questions are related to the Streamlit Python library. Keep your
        answers technical and based on facts; do not hallucinate features.""",
    )
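    # Assumption: use LlamaIndex's Gemini embedding integration so indexing
    # does not fall back to the default OpenAI embed_model (which would need
    # an OpenAI key); swap in any embedding model you prefer.
    Settings.embed_model = GeminiEmbedding(model_name="models/embedding-001")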
    index = VectorStoreIndex.from_documents(docs)
    return index


index = load_data()
if "chat_engine" not in st.session_state.keys(): |
|
st.session_state.chat_engine = index.as_chat_engine( |
|
chat_mode="condense_question", verbose=True, streaming=True |
|
) |
|
|
|

# Capture new user input and record it in the chat history.
if prompt := st.chat_input("Ask a question"):
    st.session_state.messages.append({"role": "user", "content": prompt})

# Replay the conversation so far, including the message just submitted.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])

# If the last message is not from the assistant, stream a new response and
# append the completed text to the history.
if st.session_state.messages[-1]["role"] != "assistant":
    with st.chat_message("assistant"):
        response_stream = st.session_state.chat_engine.stream_chat(prompt)
        st.write_stream(response_stream.response_gen)
        message = {"role": "assistant", "content": response_stream.response}
        st.session_state.messages.append(message)