# ASMR_Query_Bot / app.py
import os

import nest_asyncio
import streamlit as st
from langchain.chains import create_history_aware_retriever, create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.output_parsers import ResponseSchema, StructuredOutputParser
from langchain_community.chat_message_histories import StreamlitChatMessageHistory
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import MongoDBAtlasVectorSearch
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_groq import ChatGroq

# Streamlit already runs an asyncio event loop; nest_asyncio lets the async
# clients used further down run inside it without RuntimeErrors.
nest_asyncio.apply()
st.title('ASMR Query Bot 🔔')
# config
database = "AlertSimAndRemediation"
collection = "alert_embed"
index_name = "alert_index"
# llm
chat = ChatGroq(temperature=0, model_name="mixtral-8x7b-32768")
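# ChatGroq authenticates via the GROQ_API_KEY environment variable.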
# embedding model
embedding_args = {
    "model_name": "BAAI/bge-large-en-v1.5",
    "model_kwargs": {"device": "cpu"},
    "encode_kwargs": {"normalize_embeddings": True},
}
embedding_model = HuggingFaceEmbeddings(**embedding_args)
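# normalize_embeddings=True makes dot-product scores equivalent to cosine
# similarity, the recommended setting for the bge model family.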
# chat history is handled below via StreamlitChatMessageHistory
# vector search
vector_search = MongoDBAtlasVectorSearch.from_connection_string(
    os.environ["MONGO_URI"],
    f"{database}.{collection}",
    embedding_model,
    index_name=index_name,
)
qa_retriever = vector_search.as_retriever(
    search_type="similarity",
    search_kwargs={"k": 5},
)
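# Hypothetical usage: qa_retriever.invoke("disk pressure alert") would return
# the 5 most similar alert documents from the Atlas index.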
# contextualize previous turns: rewrite follow-ups as standalone questions
contextualize_q_system_prompt = """Given a chat history and the latest user question \
which might reference context in the chat history, formulate a standalone question \
which can be understood without the chat history. Do NOT answer the question, \
just reformulate it if needed and otherwise return it as is."""
contextualize_q_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", contextualize_q_system_prompt),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
    ]
)
history_aware_retriever = create_history_aware_retriever(
    chat, qa_retriever, contextualize_q_prompt
)
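# create_history_aware_retriever first has the LLM rewrite the incoming
# question into a standalone query (using contextualize_q_prompt), then feeds
# that query to qa_retriever.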
# prompt
system_prompt = """
You are a helpful query assistant for Alertmanager, an open-source system for monitoring and alerting on system metrics. Your goal is to accurately answer questions related to alerts triggered within the Alertmanager system based on the alert information provided to you. \
You will be given details about specific alerts, including the alert source, severity, category, and any other relevant metadata. Using this information, you should be able to respond to queries about the nature of the alert, what it signifies, potential causes, and recommended actions or troubleshooting steps. \
Your responses should be clear, concise, and tailored to the specific alert details provided, while also drawing from your broader knowledge about Alertmanager and monitoring best practices when relevant. If you cannot provide a satisfactory answer due to insufficient information, politely indicate that and ask for any additional context needed. \
<context>
{context}
</context>
"""
qa_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
    ]
)
question_answer_chain = create_stuff_documents_chain(chat, qa_prompt)
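# "Stuff" strategy: all retrieved documents are inserted verbatim into the
# prompt's {context} slot in a single LLM call.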
# output parser
response_schemas = [
    ResponseSchema(name="answer", description="answer to the user's question"),
    ResponseSchema(
        name="source",
        description="source used to answer the user's question, should be a website.",
    ),
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
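# Note: output_parser is built but never attached to the chain; wiring it in
# would require adding output_parser.get_format_instructions() to qa_prompt.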
rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)
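# rag_chain.invoke(...) returns a dict whose keys include "context"
# (the retrieved documents) and "answer" (the generated response).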
# --- alternative: per-session in-memory message store (kept for reference) ---
# Requires: from langchain.memory import ChatMessageHistory
#           from langchain_core.chat_history import BaseChatMessageHistory
#
# store = {}
# def get_session_history(session_id: str) -> BaseChatMessageHistory:
#     if session_id not in store:
#         store[session_id] = ChatMessageHistory()
#     return store[session_id]
#
# conversational_rag_chain = RunnableWithMessageHistory(
#     rag_chain,
#     get_session_history,
#     input_messages_key="input",
#     history_messages_key="chat_history",
#     output_messages_key="answer",
# )
#
# schema inspection:
# print(conversational_rag_chain.input_schema.schema())
# print(conversational_rag_chain.output_schema.schema())
#
# retrieval-only smoke test:
# retriever_chain = create_history_aware_retriever(chat, qa_retriever, contextualize_q_prompt)
# retriever_chain.invoke({
#     "chat_history": [],
#     "input": "Tell me about the latest alert",
# })
#
# conversational_rag_chain.invoke(
#     {"input": "What is the remedy to the latest alert"},
#     config={"configurable": {"session_id": "abc123"}},  # creates key "abc123" in `store`
# )
if "chat_messages" not in st.session_state:
st.session_state.chat_messages = []
# streamlit history
history = StreamlitChatMessageHistory(key="chat_messages")
# Initialize chat history
if len(history.messages) == 0:
history.add_ai_message("How can I help you?")
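# Messages are stored in st.session_state["chat_messages"], so the
# conversation persists across Streamlit reruns within one browser session.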
conversational_rag_chain = RunnableWithMessageHistory(
    rag_chain,
    lambda session_id: history,
    input_messages_key="input",
    history_messages_key="chat_history",
    output_messages_key="answer",
)
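# The lambda ignores session_id and always returns the Streamlit-backed
# history, so every session_id maps to the current browser session.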
# replay prior messages
for msg in history.messages:
    st.chat_message(msg.type).write(msg.content)

if prompt := st.chat_input():
    st.chat_message("human").write(prompt)
    # New messages are appended to StreamlitChatMessageHistory automatically
    # when the chain runs; the session_id value itself is unused (see above).
    config = {"configurable": {"session_id": "any"}}
    response = conversational_rag_chain.invoke({"input": prompt}, config)
    st.chat_message("ai").write(response["answer"])