TheBobBob committed
Commit 0da151e · verified · 1 Parent(s): bf7ff37

Update app.py

Files changed (1): app.py +12 -17
app.py CHANGED
@@ -2,7 +2,6 @@ import os
 import requests
 import tellurium as te
 import tempfile
-import ollama
 import streamlit as st
 import chromadb
 from langchain_text_splitters import RecursiveCharacterTextSplitter
@@ -158,7 +157,7 @@ def create_vector_db(final_items):
     for item in final_items:
         item2 = str(item)
         item_id = f"id_{item2[:45].replace(' ', '_')}"
-
+
         item_id_already_created = db.get(item_id) #referenced db here, but it is already initialized?
 
         if item_id_already_created is None: # If the ID does not exist
@@ -249,21 +248,17 @@ def generate_response(db, query_text, previous_context):
     # Use Streamlit to stream the response in real-time
     full_response = ""
 
-    response_placeholder = st.empty() # Create a placeholder for streaming output
+    response_placeholder = st.empty()
 
-    # Stream the response token by token
     for token in output_stream:
-        token_text = token["choices"][0]["text"]
-        full_response += token_text
-
-        # Continuously update the placeholder in real-time with the new token
-        response_placeholder.write(full_response)
-
+        full_response += token
+        response_placeholder.text(full_response)
+
     return full_response
 
+
 def streamlit_app():
-    global db
-
+    global db
     st.title("BioModelsRAG")
 
     search_str = st.text_input("Enter search query:")
@@ -294,18 +289,18 @@ def streamlit_app():
 
     final_items = split_biomodels(antimony_file_path)
 
-    db = create_vector_db(final_items) # Create or update the database with final items
+    db = create_vector_db(final_items)
 
     st.write("Models have been processed and added to the database.")
 
-    # Cache the chat messages
+    # Cache the chat messages without arguments
     @st.cache_resource
     def get_messages():
         if "messages" not in st.session_state:
             st.session_state.messages = []
         return st.session_state.messages
 
-    st.session_state.messages = get_messages(db)
+    st.session_state.messages = get_messages()
 
     # Display chat history
     for message in st.session_state.messages:
@@ -319,7 +314,7 @@ def streamlit_app():
     st.session_state.messages.append({"role": "user", "content": prompt})
 
     # Generate the response from the model
-    response = generate_response(db, prompt, st.session_state.messages) # Pass the prompt to generate_response
+    response = generate_response(db, prompt, st.session_state.messages)
 
     # Display assistant response
     with st.chat_message("assistant"):
@@ -327,6 +322,6 @@
 
     # Add the assistant response to the chat history
     st.session_state.messages.append({"role": "assistant", "content": response})
-
+
 if __name__ == "__main__":
     streamlit_app()
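A note on the db.get(item_id) check that the diff leaves in create_vector_db: if db is a chromadb collection, as the chromadb import suggests, get() returns a result dict rather than None, so the is None branch would never fire. A minimal sketch of an existence check under that assumption (the collection name and document text below are illustrative only):

import chromadb

# Sketch only: assumes `db` is a chromadb collection, matching app.py's import.
client = chromadb.Client()
db = client.get_or_create_collection(name="biomodels")  # hypothetical name

item2 = "example Antimony chunk"
item_id = f"id_{item2[:45].replace(' ', '_')}"

# collection.get() returns a dict like {"ids": [...], "documents": [...]},
# never None, so test whether the ids list came back empty instead.
existing = db.get(ids=[item_id])
if not existing["ids"]:  # the ID does not exist yet
    db.add(ids=[item_id], documents=[item2])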
 
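The rewritten loop in generate_response assumes output_stream yields plain strings, whereas the removed token["choices"][0]["text"] indexing assumed llama-cpp-style completion dicts. A self-contained sketch of the st.empty() streaming pattern, with a dummy generator standing in for the real model stream:

import time
import streamlit as st

# Dummy stand-in for the model's output_stream; yields plain string tokens.
def fake_output_stream():
    for token in ["Bio", "Models", "RAG ", "streams ", "token ", "by ", "token."]:
        time.sleep(0.05)  # simulate generation latency
        yield token

full_response = ""
response_placeholder = st.empty()  # one slot, overwritten in place each update

for token in fake_output_stream():
    full_response += token
    # Each .text() call replaces the placeholder's previous content, so the
    # reader watches one response grow instead of seeing stacked fragments.
    response_placeholder.text(full_response)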
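The get_messages(db) to get_messages() change matches the function's signature: it is defined with no parameters and only touches st.session_state, so the old call with db would have raised a TypeError. A sketch of the chat round trip the later hunks implement, with a hypothetical stub in place of the real generate_response(db, query_text, previous_context):

import streamlit as st

# Hypothetical stub standing in for generate_response(db, prompt, history).
def generate_response_stub(prompt, history):
    return f"(echo) {prompt}"

if "messages" not in st.session_state:
    st.session_state.messages = []  # lazily initialize the chat history

# Replay the stored history on every rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])

if prompt := st.chat_input("Ask about the processed models"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    response = generate_response_stub(prompt, st.session_state.messages)
    with st.chat_message("assistant"):
        st.write(response)
    st.session_state.messages.append({"role": "assistant", "content": response})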