muhammadsalmanalfaridzi committed on
Commit
2a58f15
·
verified ·
1 Parent(s): b6349d0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -9
app.py CHANGED
@@ -20,7 +20,7 @@ if "id" not in st.session_state:
20
  session_id = st.session_state.id
21
  client = None
22
 
23
- # Initialize Cerebras LLM
24
  def load_llm():
25
  # Ensure you have the API Key set in your environment or via input
26
  api_key = os.getenv("CEREBRAS_API_KEY")
@@ -33,6 +33,10 @@ def load_llm():
33
  st.error("API Key is required.")
34
  return None
35
 
 
 
 
 
36
  def reset_chat():
37
  st.session_state.messages = []
38
  st.session_state.context = None
@@ -64,7 +68,6 @@ with st.sidebar:
64
  st.write("Indexing your document...")
65
 
66
  if file_key not in st.session_state.get('file_cache', {}):
67
-
68
  if os.path.exists(temp_dir):
69
  reader = DoclingReader()
70
  loader = SimpleDirectoryReader(
@@ -78,7 +81,7 @@ with st.sidebar:
78
  docs = loader.load_data()
79
 
80
  # setup llm & embedding model
81
- llm = load_llm() # Load the Cerebras model
82
  if not llm:
83
  st.stop() # Stop execution if model initialization failed
84
  embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-large-en-v1.5", trust_remote_code=True)
@@ -111,7 +114,7 @@ with st.sidebar:
111
  else:
112
  query_engine = st.session_state.file_cache[file_key]
113
 
114
- # Inform the user that the file is processed and Display the PDF uploaded
115
  st.success("Ready to Chat!")
116
  display_excel(uploaded_file)
117
 
@@ -150,16 +153,13 @@ if prompt := st.chat_input("What's up?"):
150
  full_response = ""
151
 
152
  # Ensure llm is loaded
153
- if 'file_cache' in st.session_state and len(st.session_state.file_cache) > 0:
154
- query_engine = list(st.session_state.file_cache.values())[0] # Get the first query engine
155
-
156
  # Using Cerebras stream_chat for streaming response
157
  messages = [
158
  ChatMessage(role="user", content=prompt)
159
  ]
160
 
161
- response = query_engine.query(prompt)
162
- st.write(response) # Display raw query response for debugging
163
  for r in response:
164
  full_response += r.delta
165
  message_placeholder.markdown(full_response + "▌")
 
20
  session_id = st.session_state.id
21
  client = None
22
 
23
+ # Initialize Cerebras LLM (ensure it is available across the app)
24
  def load_llm():
25
  # Ensure you have the API Key set in your environment or via input
26
  api_key = os.getenv("CEREBRAS_API_KEY")
 
33
  st.error("API Key is required.")
34
  return None
35
 
36
+ # Load llm at the beginning of the session
37
+ if "llm" not in st.session_state:
38
+ st.session_state.llm = load_llm()
39
+
40
  def reset_chat():
41
  st.session_state.messages = []
42
  st.session_state.context = None
 
68
  st.write("Indexing your document...")
69
 
70
  if file_key not in st.session_state.get('file_cache', {}):
 
71
  if os.path.exists(temp_dir):
72
  reader = DoclingReader()
73
  loader = SimpleDirectoryReader(
 
81
  docs = loader.load_data()
82
 
83
  # setup llm & embedding model
84
+ llm = st.session_state.llm # Load the Cerebras model from session state
85
  if not llm:
86
  st.stop() # Stop execution if model initialization failed
87
  embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-large-en-v1.5", trust_remote_code=True)
 
114
  else:
115
  query_engine = st.session_state.file_cache[file_key]
116
 
117
+ # Inform the user that the file is processed and Display the Excel uploaded
118
  st.success("Ready to Chat!")
119
  display_excel(uploaded_file)
120
 
 
153
  full_response = ""
154
 
155
  # Ensure llm is loaded
156
+ if st.session_state.llm:
 
 
157
  # Using Cerebras stream_chat for streaming response
158
  messages = [
159
  ChatMessage(role="user", content=prompt)
160
  ]
161
 
162
+ response = st.session_state.llm.stream_chat(messages)
 
163
  for r in response:
164
  full_response += r.delta
165
  message_placeholder.markdown(full_response + "▌")