Update app.py
app.py (CHANGED)
@@ -186,19 +186,24 @@ def create_deepseek_interface():
 
         if not api_key:
             error_msg = "Environment variable FW_API_KEY is not set. Please check the environment variables on the server."
+            # Convert to messages format
+            new_history = history + [
+                {"role": "user", "content": message},
+                {"role": "assistant", "content": error_msg}
+            ]
+            yield new_history, error_msg
             return
 
         search_context = ""
         search_info = ""
 
-        # Create new history with the user message
-        new_history = history + [
+        # Create new history with the user message in messages format
+        new_history = history + [{"role": "user", "content": message}]
 
         if use_deep_research:
             try:
-                #
-                new_history
+                # Add assistant message with search status
+                new_history.append({"role": "assistant", "content": "🔍 Extracting optimal keywords and searching the web..."})
                 yield new_history, ""
 
                 # Execute search
@@ -220,17 +225,25 @@ When citing search results, mention the source, and ensure your answer reflects
             except Exception as e:
                 print(f"Exception occurred during Deep Research: {str(e)}")
                 search_info = f"🔍 Deep Research feature error: {str(e)}\n\n"
+        else:
+            # Add empty assistant message to start
+            new_history.append({"role": "assistant", "content": ""})
+            yield new_history, ""
 
-        # Update
+        # Update last assistant message with search info
+        if search_info:
+            new_history[-1]["content"] = search_info
+            yield new_history, ""
 
         # Prepare conversation history for API request
         messages = []
+
+        # Convert history to API format
+        for msg in history:
+            if msg["role"] == "user":
+                messages.append({"role": "user", "content": msg["content"]})
+            elif msg["role"] == "assistant":
+                messages.append({"role": "assistant", "content": msg["content"]})
 
         # Add system message with search context if available
         if search_context:
@@ -298,8 +311,8 @@ When citing search results, mention the source, and ensure your answer reflects
                         chunk_count += 1
                         full_response += chunk_content
 
-                        # Update
-                        new_history[-1] =
+                        # Update last assistant message
+                        new_history[-1]["content"] = full_response
 
                         # Debug: Print first few chunks
                         if chunk_count <= 5:
@@ -320,7 +333,7 @@ When citing search results, mention the source, and ensure your answer reflects
             if not full_response or full_response == search_info:
                 # If no response was generated, add an error message
                 full_response = search_info + "Error: No response generated from the model. Please try again."
-                new_history[-1] =
+                new_history[-1]["content"] = full_response
 
             print(f"Final response length: {len(full_response)}")
             # Always yield the final state
@@ -332,22 +345,24 @@ When citing search results, mention the source, and ensure your answer reflects
                 error_msg = "Authentication failed. Please check your FW_API_KEY environment variable."
 
             print(f"Request error: {error_msg}")
-            new_history[-1] =
+            new_history[-1]["content"] = search_info + error_msg
             yield new_history, error_msg
         except Exception as e:
             error_msg = f"Unexpected error: {str(e)}"
             print(f"Unexpected error: {error_msg}")
             import traceback
             traceback.print_exc()
-            new_history[-1] =
+            new_history[-1]["content"] = search_info + error_msg
             yield new_history, error_msg
 
     # Test function without streaming
     def test_simple_response(message, history):
         """Simple test function to verify UI is working"""
         print(f"Test function called with message: {message}")
+        # Convert to messages format
+        new_message = {"role": "user", "content": message}
+        response = {"role": "assistant", "content": f"Echo: {message}"}
+        return history + [new_message, response], ""
     with gr.Blocks(theme="soft", fill_height=True) as demo:
         # Header section
         gr.Markdown(
@@ -365,8 +380,9 @@ When citing search results, mention the source, and ensure your answer reflects
         chatbot = gr.Chatbot(
             height=500,
             show_label=False,
-            container=True
-            #
+            container=True,
+            type="messages"  # Explicitly set to messages format
+        )
         )
 
         # Add Deep Research toggle and status display
@@ -455,12 +471,11 @@ When citing search results, mention the source, and ensure your answer reflects
 # Run interface
 if __name__ == "__main__":
     demo = create_deepseek_interface()
-    demo.queue(
+    demo.queue()  # Enable queue with default settings
     demo.launch(
         debug=True,
         share=False,
         server_name="0.0.0.0",
         server_port=7860,
-        ssr_mode=False,
         show_error=True  # Show detailed errors
     )
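
For reference, the pattern this commit migrates to looks roughly like the sketch below: a messages-format chat history (a list of {"role": ..., "content": ...} dicts), a gr.Chatbot created with type="messages", and a generator handler that streams partial replies by mutating the last assistant entry and yielding. This is a minimal illustration, not the Space's actual code; the fake token list stands in for the Fireworks API call, and it assumes a Gradio release recent enough to support type="messages".

import time

import gradio as gr

def stream_reply(message, history):
    # history arrives as a list of {"role", "content"} dicts because the
    # Chatbot below uses type="messages"
    new_history = history + [{"role": "user", "content": message}]
    new_history.append({"role": "assistant", "content": ""})
    for token in ["Hello", ", ", "world", "!"]:  # placeholder for real streamed chunks
        new_history[-1]["content"] += token
        time.sleep(0.1)
        yield new_history, ""  # update the chat, clear the textbox

with gr.Blocks(theme="soft") as demo:
    chatbot = gr.Chatbot(type="messages", height=500, show_label=False)
    msg = gr.Textbox(placeholder="Type a message...")
    msg.submit(stream_reply, inputs=[msg, chatbot], outputs=[chatbot, msg])

if __name__ == "__main__":
    demo.queue()  # streamed yields are delivered through the request queue
    demo.launch()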
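Two design points behind the diff: the messages-format history mirrors the OpenAI-style messages list the API expects, so the new for msg in history: loop can forward it with almost no conversion, and recent Gradio releases deprecate the older tuple-based history in favor of type="messages". Dropping the arguments from demo.queue() keeps the default queue settings; the queue is what lets each yield from the streaming handler reach the browser as a partial update.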