Spaces:
Running
Running
Commit
·
e179c46
1
Parent(s):
e9df37a
add history limit
Browse files
app.py
CHANGED
@@ -80,7 +80,7 @@ def init_docsearch(texts, _embeddings):
|
|
80 |
|
81 |
@st.cache_resource
|
82 |
def init_qa_chain():
|
83 |
-
chain = load_qa_chain(OpenAI(temperature=
|
84 |
return chain
|
85 |
|
86 |
|
@@ -172,7 +172,8 @@ def template(history, query):
|
|
172 |
You are an assistant and expert in the EU AI Act. Based on your expertise,
|
173 |
you need to assist and provide the answer to the business questions about the EU AI Act.
|
174 |
Your answer has to be clear and easy to understand for the user.
|
175 |
-
Your answer has to be detailed and
|
|
|
176 |
Be sure to ask any additional information you may need, to provide an accurate answer.
|
177 |
Refer to the conversation history if necessary.
|
178 |
Be friendly and polite to the user.
|
@@ -190,8 +191,8 @@ def generate_response(question):
|
|
190 |
docs = docsearch.similarity_search(question)
|
191 |
response = chain.run(input_documents=docs, question=question)
|
192 |
st.session_state["generated"].append({"role": "assistant", "content": response})
|
193 |
-
st.session_state["history"] += "User question : " + question + "
|
194 |
-
st.session_state["history"] += "Assistant : " + response + "
|
195 |
|
196 |
|
197 |
response_container = st.container()
|
@@ -205,7 +206,16 @@ prompt = st.session_state["text_input"]
|
|
205 |
send_button = st.button("Send", disabled=st.session_state["disabled"])
|
206 |
if send_button and prompt:
|
207 |
st.session_state["messages"].append({"role": "user", "content": prompt})
|
208 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
209 |
with st.spinner("Generating response..."):
|
210 |
generate_response(question)
|
211 |
|
|
|
80 |
|
81 |
@st.cache_resource
|
82 |
def init_qa_chain():
|
83 |
+
chain = load_qa_chain(OpenAI(temperature=1), chain_type="stuff")
|
84 |
return chain
|
85 |
|
86 |
|
|
|
172 |
You are an assistant and expert in the EU AI Act. Based on your expertise,
|
173 |
you need to assist and provide the answer to the business questions about the EU AI Act.
|
174 |
Your answer has to be clear and easy to understand for the user.
|
175 |
+
Your answer has to be detailed, with fact-checked information based on the act.
|
176 |
+
Don't hesitate, if necessary, to create a very detailed answer that exceeds 300 words.
|
177 |
Be sure to ask any additional information you may need, to provide an accurate answer.
|
178 |
Refer to the conversation history if necessary.
|
179 |
Be friendly and polite to the user.
|
|
|
191 |
docs = docsearch.similarity_search(question)
|
192 |
response = chain.run(input_documents=docs, question=question)
|
193 |
st.session_state["generated"].append({"role": "assistant", "content": response})
|
194 |
+
st.session_state["history"] += "User question : " + question + "/"
|
195 |
+
st.session_state["history"] += "Assistant : " + response + "/"
|
196 |
|
197 |
|
198 |
response_container = st.container()
|
|
|
206 |
send_button = st.button("Send", disabled=st.session_state["disabled"])
|
207 |
if send_button and prompt:
|
208 |
st.session_state["messages"].append({"role": "user", "content": prompt})
|
209 |
+
history = st.session_state["history"]
|
210 |
+
# keep only the last 6000 chars (~1200 words) of the history
|
211 |
+
if len(history) > 6000:
|
212 |
+
# idx of the closest full message
|
213 |
+
idx = history.find('/')
|
214 |
+
# reduce the history to its last 6000 chars
|
215 |
+
history = history[len(history)-6000:]
|
216 |
+
history = history[idx:]
|
217 |
+
|
218 |
+
question = template(history, prompt)
|
219 |
with st.spinner("Generating response..."):
|
220 |
generate_response(question)
|
221 |
|