Commit: bcecdab
Parent(s): 9f7d04a

updated ui added sidebar

Files changed:
- app.py +84 -85
- requirements.txt +2 -1
app.py
CHANGED
@@ -15,18 +15,34 @@ from langchain.output_parsers import ResponseSchema, StructuredOutputParser
 from langchain_core.runnables.history import RunnableWithMessageHistory
 from langchain_core.chat_history import BaseChatMessageHistory
 from langchain.chains import RetrievalQA
-
-
-
-
+import nest_asyncio
+import pymongo
+import logging
+from langchain.docstore.document import Document
+import redis
+import threading
 
 # config
+nest_asyncio.apply()
+logging.basicConfig(level=logging.INFO)
 database = "AlertSimAndRemediation"
 collection = "alert_embed"
 index_name = "alert_index"
+stream_name = "alerts"
+redis_port = 16652
+
+# Streamlit Application
+st.set_page_config(
+    page_title="ASMR Query Bot 🚨",
+    page_icon="🚨",
+    layout="wide",
+    initial_sidebar_state="auto",
+    menu_items={
+        'About': "https://github.com/ankush-003/alerts-simulation-and-remediation"
+    }
+)
 
-
-chat = ChatGroq(temperature=0, model_name="mixtral-8x7b-32768")
+st.title('ASMR Query Bot 🚨')
 
 # embedding model
 embedding_args = {
@@ -36,9 +52,6 @@ embedding_args = {
 }
 embedding_model = HuggingFaceEmbeddings(**embedding_args)
 
-# chat history
-# chat_history = ChatMessageHistory()
-
 # vector search
 vector_search = MongoDBAtlasVectorSearch.from_connection_string(
     os.environ["MONGO_URI"],
@@ -47,11 +60,6 @@ vector_search = MongoDBAtlasVectorSearch.from_connection_string(
     index_name=index_name,
 )
 
-qa_retriever = vector_search.as_retriever(
-    search_type="similarity",
-    search_kwargs={"k": 5},
-)
-
 # contextualising prev chats
 contextualize_q_system_prompt = """Given a chat history and the latest user question \
 which might reference context in the chat history, formulate a standalone question \
@@ -64,9 +72,6 @@ contextualize_q_prompt = ChatPromptTemplate.from_messages(
         ("human", "{input}"),
     ]
 )
-history_aware_retriever = create_history_aware_retriever(
-    chat, qa_retriever, contextualize_q_prompt
-)
 
 # prompt
 system_prompt = """
@@ -79,8 +84,6 @@ Your responses should be clear, concise, and tailored to the specific alert details
 </context>
 """
 
-chat_history = []
-
 qa_prompt = ChatPromptTemplate.from_messages(
     [
         ("system", system_prompt),
@@ -88,58 +91,6 @@ qa_prompt = ChatPromptTemplate.from_messages(
         ("human", "{input}"),
     ]
 )
-question_answer_chain = create_stuff_documents_chain(chat, qa_prompt)
-
-# output parser
-response_schemas = [
-    ResponseSchema(name="answer", description="answer to the user's question"),
-    ResponseSchema(
-        name="source",
-        description="source used to answer the user's question, should be a website.",
-    )
-]
-output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
-
-
-rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)
-
-# managing message history
-# store = {}
-
-# def get_session_history(session_id: str) -> BaseChatMessageHistory:
-#     if session_id not in store:
-#         store[session_id] = ChatMessageHistory()
-#     return store[session_id]
-
-
-
-# conversational_rag_chain = RunnableWithMessageHistory(
-#     rag_chain,
-#     get_session_history,
-#     input_messages_key="input",
-#     history_messages_key="chat_history",
-#     output_messages_key="answer",
-# )
-
-# schema
-# print(conversational_rag_chain.input_schema.schema())
-# print(conversational_rag_chain.output_schema.schema())
-
-
-# Retrieves documents
-# retriever_chain = create_history_aware_retriever(chat, qa_retriever, prompt)
-
-# retriever_chain.invoke({
-#     "chat_history": chat_history,
-#     "input": "Tell me about the latest alert"
-# })
-
-# conversational_rag_chain.invoke(
-#     {"input": "What is the remedy to the latest alert"},
-#     config={
-#         "configurable": {"session_id": "abc123"}
-#     }, # constructs a key "abc123" in `store`.
-# )
 
 if "chat_messages" not in st.session_state:
     st.session_state.chat_messages = []
@@ -148,27 +99,75 @@ if "chat_messages" not in st.session_state:
 history = StreamlitChatMessageHistory(key="chat_messages")
 
 # Initialize chat history
-
-
 if len(history.messages) == 0:
-    history.add_ai_message("
-
-
-
-
-
-
-
-
+    history.add_ai_message("Hey I am ASMR Query Bot, how can i help you ?")
+
+with st.sidebar:
+    st.title('Settings ⚙️')
+    st.subheader('Models and parameters')
+    selected_model = st.sidebar.selectbox('Choose a model', ['Llama3-8B', 'Llama3-70B', 'Mixtral-8x7B'], key='selected_model')
+    if selected_model == 'Mixtral-8x7B':
+        model_name="mixtral-8x7b-32768"
+    elif selected_model == 'Llama3-70B':
+        model_name='Llama3-70b-8192'
+    elif selected_model == 'Llama3-8B':
+        model_name='Llama3-8b-8192'
+    temp = st.sidebar.slider('temperature', min_value=0.01, max_value=1.0, value=0.0, step=0.01)
+    k = st.sidebar.slider('number of docs retrieved', min_value=1, max_value=20, value=2, step=1)
+
+def get_response(query, config):
+    chat = ChatGroq(temperature=temp, model_name=model_name)
+    qa_retriever = vector_search.as_retriever(
+        search_type="similarity",
+        search_kwargs={"k": k},
+    )
+    history_aware_retriever = create_history_aware_retriever(
+        chat, qa_retriever, contextualize_q_prompt
+    )
+    question_answer_chain = create_stuff_documents_chain(chat, qa_prompt)
+    rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)
+    conversational_rag_chain = RunnableWithMessageHistory(
+        rag_chain,
+        lambda session_id: history,
+        input_messages_key="input",
+        history_messages_key="chat_history",
+        output_messages_key="answer",
+    )
+    return conversational_rag_chain.invoke({"input": prompt}, config=config)
+
+def clear_chat_history():
+    st.session_state.chat_messages = []
+    history.add_ai_message("Hey I am ASMR Query Bot, how can i help you ?")
+
+st.sidebar.button('Clear Chat History', on_click=clear_chat_history)
 
 for msg in history.messages:
     st.chat_message(msg.type).write(msg.content)
 
+# preprocessing context
+def format_docs_with_metadata(docs):
+    formatted_docs = []
+    for i, doc in enumerate(docs, start=1):
+        metadata_str = "\n".join([f"**{key}**: `{value}`\n" for key, value in doc.metadata.items() if key != "embedding"])
+        formatted_doc = f"- {doc.page_content}\n\n**Metadata:**\n{metadata_str}"
+        formatted_docs.append(formatted_doc)
+    return "\n\n".join(formatted_docs)
+
+def stream_data(response):
+    for word in response.split(" "):
+        yield word + " "
+        time.sleep(0.05)
 
 if prompt := st.chat_input():
-    st.chat_message("
+    with st.chat_message("Human"):
+        st.markdown(prompt)
 
     # As usual, new messages are added to StreamlitChatMessageHistory when the Chain is called.
    config = {"configurable": {"session_id": "any"}}
-
-
+    res = get_response(prompt, config)
+
+    with st.chat_message("AI"):
+        st.write_stream(stream_data(res['answer']))
+        with st.popover("View Source"):
+            st.markdown("### Source Alerts 🟢")
+            st.markdown(format_docs_with_metadata(res['context']))
requirements.txt
CHANGED
@@ -5,4 +5,5 @@ dnspython
 langchain
 langchain-groq
 motor
-streamlit
+streamlit
+nest-asyncio
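
The new nest-asyncio pin pairs with the nest_asyncio.apply() call added at the top of app.py, presumably so synchronous run_until_complete() calls can be made from inside an already-running event loop (e.g. when driving the async motor client from Streamlit script code). A minimal sketch of the behaviour it patches in (illustrative only, not from the commit):

    # nest_asyncio patches asyncio so a loop can be re-entered, i.e.
    # run_until_complete() works even while that loop is already running.
    import asyncio
    import nest_asyncio

    nest_asyncio.apply()

    async def fetch():
        return "result"

    async def handler():
        loop = asyncio.get_event_loop()
        # Without nest_asyncio.apply() this raises:
        # RuntimeError: This event loop is already running
        return loop.run_until_complete(fetch())

    print(asyncio.get_event_loop().run_until_complete(handler()))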