TheBobBob committed on
Commit
20c9dd9
·
verified ·
1 Parent(s): 5e8f0eb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -6
app.py CHANGED
@@ -179,6 +179,7 @@ def create_vector_db(final_items):
179
  top_p=0.9,
180
  top_k=20,
181
  stream=False,
 
182
  )
183
 
184
  # Extract the generated summary text
@@ -241,21 +242,22 @@ def generate_response(db, query_text, previous_context):
241
  stream=True, # Enable streaming
242
  temperature=0.1,
243
  top_p=0.9,
244
- top_k=20
 
245
  )
246
 
247
  # Use Streamlit to stream the response in real-time
248
- temp_response = ""
249
  full_response = ""
250
 
251
  response_placeholder = st.empty() # Create a placeholder for streaming output
 
 
252
  for token in output_stream:
253
  token_text = token["choices"][0]["text"]
254
- full_response += token_text # Keep the entire response for future context
255
- temp_response += token_text
256
 
257
- # Update the placeholder in real-time with the new token
258
- response_placeholder.write(temp_response)
259
 
260
  return full_response
261
 
 
179
  top_p=0.9,
180
  top_k=20,
181
  stream=False,
182
+ verbose = True
183
  )
184
 
185
  # Extract the generated summary text
 
242
  stream=True, # Enable streaming
243
  temperature=0.1,
244
  top_p=0.9,
245
+ top_k=20,
246
+ verbose = True
247
  )
248
 
249
  # Use Streamlit to stream the response in real-time
 
250
  full_response = ""
251
 
252
  response_placeholder = st.empty() # Create a placeholder for streaming output
253
+
254
+ # Stream the response token by token
255
  for token in output_stream:
256
  token_text = token["choices"][0]["text"]
257
+ full_response += token_text
 
258
 
259
+ # Continuously update the placeholder in real-time with the new token
260
+ response_placeholder.write(full_response)
261
 
262
  return full_response
263