import streamlit as st
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
import os
import dotenv
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_chroma import Chroma
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import HumanMessage, AIMessage
from langchain.memory import ConversationBufferMemory
from langchain_community.document_loaders import PyPDFLoader
from fuzzywuzzy import process
# Set page config
st.set_page_config(page_title="Tbank Assistant", layout="wide")
# Streamlit app header
st.title("Tbank Customer Support Chatbot")
# Sidebar for API Key input
with st.sidebar:
    st.header("Configuration")
    api_key = st.text_input("Enter your OpenAI API Key:", type="password")
    if api_key:
        os.environ["OPENAI_API_KEY"] = api_key
# Main app logic
if "OPENAI_API_KEY" in os.environ:
    # Initialize components
    @st.cache_resource
    def initialize_components():
        dotenv.load_dotenv()
        chat = ChatOpenAI(model="gpt-3.5-turbo-1106", temperature=0.2)

        # Load the source PDFs and combine their pages
        loader1 = PyPDFLoader("Tbank resources.pdf")
        loader2 = PyPDFLoader("International Banking Services.pdf")
        data1 = loader1.load()
        data2 = loader2.load()
        data = data1 + data2

        # Split the documents into overlapping chunks and index them in Chroma
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
        all_splits = text_splitter.split_documents(data)
        embeddings = OpenAIEmbeddings(model="text-embedding-ada-002")
        vectorstore = Chroma.from_documents(documents=all_splits, embedding=embeddings)
        # score_threshold is only honored with the similarity_score_threshold search type
        retriever = vectorstore.as_retriever(
            search_type="similarity_score_threshold",
            search_kwargs={"k": 6, "score_threshold": 0.5},
        )
        SYSTEM_TEMPLATE = """
        You are Tbank's AI assistant, a chatbot whose knowledge comes exclusively from Tbank's website content and provided PDF documents. Follow these guidelines:
        1. Greet users warmly, e.g., "Hello! Welcome to Tbank. How can I assist you today?"
        2. If asked about your identity, state you're Tbank's AI assistant and ask how you can help.
        3. Use only information from the website content and provided PDFs. Do not infer or make up information.
        4. Provide clear, concise responses using only the given information. Keep answers brief and relevant.
        5. For questions outside your knowledge base, respond:
        "I apologize, but I don't have information about that. My knowledge is limited to Tbank's products/services and our website/document content. Is there anything specific about Tbank I can help with?"
        6. Maintain a friendly, professional tone.
        7. If unsure, say:
        "I'm not certain about that. For accurate information, please check our website or contact our customer support team."
        8. For requests for opinions or subjective information, remind users you're an AI that provides only factual information from Tbank sources.
        9. End each interaction by asking if there's anything else you can help with regarding Tbank.
        10. Do not hallucinate or provide information from sources other than the website and provided PDFs.
        11. If the information isn't in your knowledge base, clearly state that you don't have that information rather than guessing.
        12. Regularly refer to the provided PDFs for accurate, up-to-date information about Tbank's products and services.
        13. Check for basic grammar and spelling, and understand the query even if the spelling or grammar is slightly incorrect.
        14. Consider the user's query from different angles, analyze it carefully, review the possible answers, and then respond.
        15. Be forgiving of minor spelling mistakes and grammatical errors in user queries. Try to understand the intent behind the question.
        16. Maintain context from previous messages in the conversation. If a user asks about a person or topic mentioned earlier, refer back to that information.
        17. If a user asks about a person using only a name or title, try to identify who they're referring to based on previous context or your knowledge base.
        18. When answering questions about specific people, provide their full name and title if available.
        Your primary goal is to assist users with information directly related to Tbank, using only the website content and provided PDF documents. Avoid speculation and stick strictly to the provided information.
        <context>
        {context}
        </context>
        Chat History:
        {chat_history}
        """
        question_answering_prompt = ChatPromptTemplate.from_messages(
            [
                ("system", SYSTEM_TEMPLATE),
                MessagesPlaceholder(variable_name="chat_history"),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )
        document_chain = create_stuff_documents_chain(chat, question_answering_prompt)
        important_terms = ["Tbank", "Chairman", "CEO", "products", "services"]  # Add more terms as needed
        return retriever, document_chain, important_terms
    # Load components
    with st.spinner("Initializing Tbank Assistant..."):
        retriever, document_chain, important_terms = initialize_components()

    # Initialize memory for each session
    if "memory" not in st.session_state:
        st.session_state.memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

    # Chat interface
    st.subheader("Chat with Tbank Assistant")

    # Initialize chat history
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Display chat messages from history on app rerun
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
    def fuzzy_match(query, choices, threshold=80):
        # Return the best fuzzy match for the query among the given choices,
        # or None if no choice scores at or above the threshold
        result = process.extractOne(query, choices)
        if result and result[1] >= threshold:
            return result[0]
        return None
    # React to user input
    if prompt := st.chat_input("What would you like to know about Tbank?"):
        # Display user message in chat message container
        st.chat_message("user").markdown(prompt)
        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})

        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            try:
                # Fuzzy match important terms
                matched_term = fuzzy_match(prompt.lower(), important_terms)
                if matched_term:
                    prompt = f"{prompt} (Matched term: {matched_term})"
                # Retrieve relevant documents for the (possibly augmented) prompt
                docs = retriever.invoke(prompt)
                # Include previous messages for context (the current prompt was already appended above)
                previous_messages = st.session_state.messages[:-1][-5:]  # Last 5 prior messages
                # Generate response, labelling prior turns with the correct message types
                response = document_chain.invoke(
                    {
                        "context": docs,
                        "chat_history": st.session_state.memory.load_memory_variables({})["chat_history"],
                        "messages": [
                            HumanMessage(content=msg["content"]) if msg["role"] == "user"
                            else AIMessage(content=msg["content"])
                            for msg in previous_messages
                        ] + [HumanMessage(content=prompt)],
                    }
                )
                full_response = response
                message_placeholder.markdown(full_response)
            except Exception as e:
                error_message = f"I apologize, but I encountered an error while processing your request. Please try rephrasing your question or ask something else. Error details: {str(e)}"
                message_placeholder.markdown(error_message)
                full_response = error_message

        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": full_response})

        # Update memory
        st.session_state.memory.save_context({"input": prompt}, {"output": full_response})
else:
    st.warning("Please enter your OpenAI API Key in the sidebar to start the chatbot.")
# Add a footer
st.markdown("---")
st.markdown("By AI Planet") |