Spaces:
Running
Running
langfuse fix
Browse files- agent_gradio_chat.py → app.py +12 -30
agent_gradio_chat.py → app.py
RENAMED
@@ -8,7 +8,7 @@ import xml.etree.ElementTree as ET
|
|
8 |
from typing import Any, Dict, List, Optional
|
9 |
from openai import OpenAI
|
10 |
from dotenv import load_dotenv
|
11 |
-
from langfuse import
|
12 |
|
13 |
load_dotenv()
|
14 |
|
@@ -30,11 +30,6 @@ BASE_URL = "https://router.huggingface.co/v1"
|
|
30 |
|
31 |
client = OpenAI(base_url=BASE_URL, api_key=HF_TOKEN)
|
32 |
|
33 |
-
langfuse = Langfuse(
|
34 |
-
public_key=os.getenv("LANGFUSE_PUBLIC_KEY"),
|
35 |
-
secret_key=os.getenv("LANGFUSE_SECRET_KEY")
|
36 |
-
)
|
37 |
-
|
38 |
# ---------- Tools ----------
|
39 |
def fetch_google_news_rss(num: int = 10) -> List[Dict[str, Any]]:
|
40 |
"""Fetch general news from Google News RSS feed."""
|
@@ -317,52 +312,39 @@ def run_agent(user_prompt: str, site_limit: Optional[str] = None, model: str = D
|
|
317 |
return "I could not complete the task within the step limit. Try refining your query."
|
318 |
|
319 |
# ---------- Gradio Interface ----------
|
|
|
320 |
def chat_with_agent(message, history, model):
|
321 |
"""Handle chat messages and return agent responses."""
|
322 |
if not message.strip():
|
323 |
return history
|
324 |
|
325 |
-
|
326 |
-
|
327 |
-
name="chat_interaction",
|
328 |
input={"user_message": message, "model": model, "history_length": len(history)}
|
329 |
)
|
330 |
-
|
331 |
try:
|
332 |
response = run_agent(message, None, model)
|
333 |
-
|
334 |
-
|
335 |
-
trace.update(
|
336 |
output={"agent_response": response},
|
337 |
metadata={
|
338 |
"model": model,
|
339 |
"message_length": len(message),
|
340 |
"response_length": len(response),
|
341 |
-
"success": True
|
342 |
-
}
|
343 |
)
|
344 |
-
|
345 |
-
# Flush the trace to send it to Langfuse
|
346 |
-
langfuse.flush()
|
347 |
-
|
348 |
history.append({"role": "user", "content": message})
|
349 |
history.append({"role": "assistant", "content": response})
|
350 |
return history
|
351 |
|
352 |
except Exception as e:
|
353 |
-
|
354 |
-
trace.update(
|
355 |
output={"error": str(e)},
|
356 |
-
|
357 |
-
metadata={
|
358 |
-
"error": str(e),
|
359 |
-
"success": False
|
360 |
-
}
|
361 |
)
|
362 |
-
|
363 |
-
# Flush the trace to send it to Langfuse
|
364 |
-
langfuse.flush()
|
365 |
-
|
366 |
error_msg = f"Sorry, I encountered an error: {str(e)}"
|
367 |
history.append({"role": "user", "content": message})
|
368 |
history.append({"role": "assistant", "content": error_msg})
|
|
|
8 |
from typing import Any, Dict, List, Optional
|
9 |
from openai import OpenAI
|
10 |
from dotenv import load_dotenv
|
11 |
+
from langfuse import observe, get_client
|
12 |
|
13 |
load_dotenv()
|
14 |
|
|
|
30 |
|
31 |
client = OpenAI(base_url=BASE_URL, api_key=HF_TOKEN)
|
32 |
|
|
|
|
|
|
|
|
|
|
|
33 |
# ---------- Tools ----------
|
34 |
def fetch_google_news_rss(num: int = 10) -> List[Dict[str, Any]]:
|
35 |
"""Fetch general news from Google News RSS feed."""
|
|
|
312 |
return "I could not complete the task within the step limit. Try refining your query."
|
313 |
|
314 |
# ---------- Gradio Interface ----------
|
315 |
+
@observe()
|
316 |
def chat_with_agent(message, history, model):
|
317 |
"""Handle chat messages and return agent responses."""
|
318 |
if not message.strip():
|
319 |
return history
|
320 |
|
321 |
+
lf = get_client()
|
322 |
+
lf.update_current_trace(
|
|
|
323 |
input={"user_message": message, "model": model, "history_length": len(history)}
|
324 |
)
|
325 |
+
|
326 |
try:
|
327 |
response = run_agent(message, None, model)
|
328 |
+
|
329 |
+
lf.update_current_trace(
|
|
|
330 |
output={"agent_response": response},
|
331 |
metadata={
|
332 |
"model": model,
|
333 |
"message_length": len(message),
|
334 |
"response_length": len(response),
|
335 |
+
"success": True,
|
336 |
+
},
|
337 |
)
|
338 |
+
|
|
|
|
|
|
|
339 |
history.append({"role": "user", "content": message})
|
340 |
history.append({"role": "assistant", "content": response})
|
341 |
return history
|
342 |
|
343 |
except Exception as e:
|
344 |
+
lf.update_current_trace(
|
|
|
345 |
output={"error": str(e)},
|
346 |
+
metadata={"success": False, "error": str(e)},
|
|
|
|
|
|
|
|
|
347 |
)
|
|
|
|
|
|
|
|
|
348 |
error_msg = f"Sorry, I encountered an error: {str(e)}"
|
349 |
history.append({"role": "user", "content": message})
|
350 |
history.append({"role": "assistant", "content": error_msg})
|