Update app.py
app.py
CHANGED
@@ -187,7 +187,6 @@ def generate_response(db, query_text, previous_context):
 Using the context provided below, answer the following question. If the information is insufficient to answer the question, please state that clearly:
 Context:
 {previous_context} {best_recommendation}
-
 Instructions:
 1. Cross-Reference: Use all provided context to define variables and identify any unknown entities.
 2. Mathematical Calculations: Perform any necessary calculations based on the context and available data.
@@ -202,23 +201,15 @@ def generate_response(db, query_text, previous_context):
         filename="unsloth.BF16.gguf",
     )
 
-    output_stream = llm(
+    # Directly get the output without streaming
+    output = llm(
         prompt_template,
-        stream=True,
         temperature=0.1,
         top_p=0.9,
         top_k=20
     )
 
-    full_response = ""
-
-    response_placeholder = st.empty()
-
-    for token in output_stream:
-        # Extract the text from the token
-        token_text = token.get("choices", [{}])[0].get("text", "")
-        full_response += token_text
-        response_placeholder.text(full_response)  # Print token output in real-time
+    full_response = output["choices"][0]["text"]
 
     return full_response
 
@@ -271,24 +262,15 @@ def streamlit_app():
         st.session_state.messages = []
         return st.session_state.messages
 
-
-
-
-
-
-
-
-    if prompt := st.chat_input("Ask a question about the models:"):
-        st.chat_message("user").markdown(prompt)
-        st.session_state.messages.append({"role": "user", "content": prompt})
-
-        if st.session_state.db is None:
-            st.error("Database is not initialized. Please process the models first.")
+    # Query input and response generation
+    query = st.text_area("Ask a question about the models:")
+
+    if st.button("Get Response"):
+        if st.session_state.db:
+            response = generate_response(st.session_state.db, query, "")
+            st.write("Response: ", response)
         else:
-
-
-            st.chat_message("assistant").markdown(response)  # Directly display the final response
-            st.session_state.messages.append({"role": "assistant", "content": response})
+            st.warning("Database is not initialized. Please search and analyze models first.")
 
 if __name__ == "__main__":
     streamlit_app()
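For reference, a minimal sketch of the non-streaming pattern this change adopts, assuming llama-cpp-python's Llama API; the model path and the standalone wiring below are placeholders for illustration, not the repository's exact loader or app structure.

# Sketch only: placeholder GGUF path, not the repository's actual model loading.
import streamlit as st
from llama_cpp import Llama

llm = Llama(model_path="model.BF16.gguf")  # placeholder model file

query = st.text_area("Ask a question about the models:")

if st.button("Get Response"):
    # Without stream=True the call returns a completion dict, not a token iterator
    output = llm(
        query,
        temperature=0.1,
        top_p=0.9,
        top_k=20,
    )
    # The generated text is read from choices[0]["text"], as in the change above
    st.write("Response: ", output["choices"][0]["text"])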