tracking retrieved context
app.py
CHANGED
@@ -55,10 +55,12 @@ else:
    feedback_option = (
        "thumbs" if st.sidebar.toggle(label="`Faces` ⇄ `Thumbs`", value=False) else "faces"
    )
+
    with st.sidebar:
        model_name = st.selectbox("**Model**", options=["llama-3.1-70b-versatile","gemma2-9b-it","gemma-7b-it","llama-3.2-3b-preview", "llama3-70b-8192", "mixtral-8x7b-32768"])
        temp = st.slider("**Temperature**", min_value=0.0, max_value=1.0, step=0.001)
        n_docs = st.number_input("**Number of retireved documents**", min_value=0, max_value=10, value=5, step=1)
+
    if st.sidebar.button("Clear message history"):
        print("Clearing message history")
        memory.clear()
@@ -103,7 +105,8 @@ else:
            for i, doc in enumerate(used_docs)
        ]
    )
-    st.
+    with st.sidebar:
+        st.download_button(
            label="Consulted Documents",
            data=docs_content,
            file_name="Consulted_documents.txt",
@@ -119,7 +122,7 @@ else:
        collection_name="chat-history",
        payload=[
            {"text": prompt, "type": "question", "question_ID": run_id},
-           {"text": full_response, "type": "answer", "question_ID": run_id}
+           {"text": full_response, "type": "answer", "question_ID": run_id, "used_docs":used_docs}
        ],
        vectors=[
            question_embedding,
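
The second hunk moves the download button into the sidebar. For reference, a minimal self-contained sketch of that pattern, assuming used_docs is a list of retrieved chunks that expose their text via a page_content attribute (the retriever and document class actually used in app.py are not shown in this diff):

import streamlit as st

# Stand-in for the chunks returned by the retriever in app.py (assumption:
# each item exposes its text via .page_content, as LangChain documents do).
class Doc:
    def __init__(self, page_content):
        self.page_content = page_content

used_docs = [Doc("first retrieved chunk ..."), Doc("second retrieved chunk ...")]

# Concatenate the retrieved chunks into one numbered text blob.
docs_content = "\n\n".join(
    f"Document {i + 1}:\n{doc.page_content}" for i, doc in enumerate(used_docs)
)

# Offer the consulted documents as a downloadable text file from the sidebar,
# mirroring the `with st.sidebar:` / `st.download_button(` lines added above.
with st.sidebar:
    st.download_button(
        label="Consulted Documents",
        data=docs_content,
        file_name="Consulted_documents.txt",
    )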
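The third hunk attaches used_docs to the answer payload so the retrieved context is stored next to each answer in the "chat-history" collection. The helper that receives collection_name, payload, and vectors is defined elsewhere in app.py and is not part of this diff; below is a hedged sketch of an equivalent write using the official qdrant-client API, where the in-memory client, embedding size, point IDs, the answer_embedding name, and the assumption that used_docs is serialized as plain strings are all illustrative choices, not taken from the source:

from uuid import uuid4
from qdrant_client import QdrantClient
from qdrant_client.models import Distance, PointStruct, VectorParams

client = QdrantClient(":memory:")  # assumption: a local in-memory instance for the sketch
client.create_collection(
    collection_name="chat-history",
    vectors_config=VectorParams(size=384, distance=Distance.COSINE),  # assumed embedding size
)

def log_exchange(question_embedding, answer_embedding, prompt, full_response, used_docs, run_id):
    # Store the question and the answer as two points; the answer payload now
    # carries the retrieved context ("used_docs"), as introduced in the diff.
    # used_docs should be JSON-serializable here, e.g. a list of chunk strings.
    client.upsert(
        collection_name="chat-history",
        points=[
            PointStruct(
                id=str(uuid4()),
                vector=question_embedding,
                payload={"text": prompt, "type": "question", "question_ID": run_id},
            ),
            PointStruct(
                id=str(uuid4()),
                vector=answer_embedding,
                payload={
                    "text": full_response,
                    "type": "answer",
                    "question_ID": run_id,
                    "used_docs": used_docs,
                },
            ),
        ],
    )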