""" Enhanced Multi-LLM Agent Evaluation Runner with Agno Integration"""
import os
import gradio as gr
import requests
import pandas as pd
from langchain_core.messages import HumanMessage

# Import the enhanced classes from veryfinal.py in the same directory
try:
    from veryfinal import (
        build_graph,
        UnifiedAgnoEnhancedSystem,
        AgnoEnhancedAgentSystem,
        AgnoEnhancedModelManager,
    )
    VERYFINAL_AVAILABLE = True
except ImportError as e:
    print(f"Error importing from veryfinal.py: {e}")
    VERYFINAL_AVAILABLE = False
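
# If this import fails, VERYFINAL_AVAILABLE gates agent construction below; the
# Gradio app still launches so the error surfaces in the UI instead of crashing.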

# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"


# --- Enhanced Agent Definition ---
class EnhancedMultiLLMAgent:
    """A multi-provider Agno agent with NVIDIA + open-source model integration."""

    def __init__(self):
        print("Enhanced Multi-LLM Agent with Agno Integration initialized.")
        if not VERYFINAL_AVAILABLE:
            print("Error: veryfinal.py not properly imported")
            self.system = None
            self.graph = None
            return
        try:
            # Use the unified Agno enhanced system
            self.system = UnifiedAgnoEnhancedSystem()
            self.graph = self.system.graph

            # Display system information
            if self.system.agno_system:
                info = self.system.get_system_info()
                print(f"System initialized with {info.get('total_models', 0)} models")
                if info.get('nvidia_available'):
                    print("✅ NVIDIA NIM models available")
                print(f"Active agents: {info.get('active_agents', [])}")

            print("Enhanced Agno Multi-LLM System built successfully.")
        except Exception as e:
            print(f"Error building enhanced system: {e}")
            self.graph = None
            self.system = None

    def __call__(self, question: str) -> str:
        print(f"Agent received question: {question[:100]}...")
        if self.system is None:
            return "Error: Agent not properly initialized"
        try:
            # Use the enhanced system's process_query method
            answer = self.system.process_query(question)

            # Additional validation
            if not answer or answer == question or len(answer.strip()) == 0:
                return "Information not available"

            # Clean up the answer
            answer = answer.strip()

            # Ensure proper formatting for evaluation
            if "FINAL ANSWER:" in answer:
                answer = answer.split("FINAL ANSWER:")[-1].strip()

            return answer
        except Exception as e:
            error_msg = f"Error: {str(e)}"
            print(error_msg)
            return error_msg


def run_and_submit_all(profile: gr.OAuthProfile | None):
    """Fetch questions, run the enhanced Agno agent, and submit answers."""
    space_id = os.getenv("SPACE_ID")

    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Instantiate Enhanced Agent
    try:
        agent = EnhancedMultiLLMAgent()
        if agent.system is None:
            return "Error: Failed to initialize enhanced agent properly", None
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None

    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main" if space_id else "No space ID available"
    print(f"Agent code URL: {agent_code}")

    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except Exception as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    # 3. Run Enhanced Agno Agent
    results_log = []
    answers_payload = []
    print(f"Running Enhanced Agno Multi-LLM agent on {len(questions_data)} questions...")

    for i, item in enumerate(questions_data):
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue

        print(f"Processing question {i+1}/{len(questions_data)}: {task_id}")
        try:
            submitted_answer = agent(question_text)

            # Additional validation to prevent question repetition
            if submitted_answer == question_text or submitted_answer.startswith(question_text):
                submitted_answer = "Information not available"

            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({
                "Task ID": task_id,
                "Question": question_text[:100] + "..." if len(question_text) > 100 else question_text,
                "Submitted Answer": submitted_answer[:200] + "..." if len(submitted_answer) > 200 else submitted_answer
            })
        except Exception as e:
            error_msg = f"AGENT ERROR: {e}"
            print(f"Error running agent on task {task_id}: {e}")
            answers_payload.append({"task_id": task_id, "submitted_answer": error_msg})
            results_log.append({
                "Task ID": task_id,
                "Question": question_text[:100] + "..." if len(question_text) > 100 else question_text,
                "Submitted Answer": error_msg
            })

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
    # 4. Prepare Submission
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Enhanced Agno Multi-LLM Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)

    # 5. Submit
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print("Submission successful.")
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except Exception as e:
        status_message = f"Submission Failed: {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df


# --- Build Gradio Interface ---
with gr.Blocks() as demo:
    gr.Markdown("# Enhanced Multi-LLM Agent with Agno + NVIDIA Integration")
    gr.Markdown(
        """
        **Instructions:**

        1. Log in to your Hugging Face account using the button below.
        2. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.

        **Enhanced Agent Features:**
        - **NVIDIA NIM Models**: Enterprise-grade optimized models for maximum accuracy
        - **Open-Source Models**: Groq, Ollama, Together AI, Anyscale, Hugging Face
        - **Specialized Agents**: Enterprise research, advanced math, coding, fast response
        - **Intelligent Routing**: Automatically selects the best model/agent for each task
        - **Advanced Tools**: DuckDuckGo search, Wikipedia, calculator, reasoning tools
        - **Agno Framework**: Professional agent framework with memory and tool integration

        **Available Model Providers:**
        - **NVIDIA NIM**: meta/llama3-70b-instruct, meta/codellama-70b-instruct, etc.
        - **Groq (Free)**: llama3-70b-8192, llama3-8b-8192, mixtral-8x7b-32768
        - **Ollama (Local)**: llama3, mistral, phi3, codellama, gemma, qwen
        - **Together AI**: Meta-Llama models, Mistral, Qwen
        - **Anyscale**: Enterprise hosting for open-source models
        - **Hugging Face**: Direct model access

        **Routing Examples:**
        - Enterprise: "Enterprise analysis of quantum computing" → NVIDIA NIM
        - Math: "Calculate 25 × 17" → Advanced Math Agent
        - Code: "Write Python factorial function" → Advanced Coding Agent
        - Research: "Find Mercedes Sosa discography" → Enterprise Research Agent
        - Quick: "Capital of France?" → Fast Response Agent

        **Setup Requirements:**
        - NVIDIA_API_KEY for enterprise models (optional)
        - GROQ_API_KEY for free tier models
        - Other API keys optional for additional providers
        """
    )

    gr.LoginButton()
    run_button = gr.Button("Run Evaluation & Submit All Answers", variant="primary")

    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
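
    # No explicit inputs are wired here: because run_and_submit_all is type-hinted
    # with gr.OAuthProfile, Gradio injects the logged-in profile (or None) automatically.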
    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )


if __name__ == "__main__":
    print("\n" + "-" * 30 + " Enhanced Agno Multi-LLM Agent Starting " + "-" * 30)

    # Display system status
    if VERYFINAL_AVAILABLE:
        try:
            test_system = UnifiedAgnoEnhancedSystem()
            info = test_system.get_system_info()
            print(f"✅ System ready with {info.get('total_models', 0)} models")
            print(f"📊 Model breakdown: {len(info.get('model_breakdown', {}).get('nvidia_models', []))} NVIDIA, "
                  f"{len(info.get('model_breakdown', {}).get('groq_models', []))} Groq, "
                  f"{len(info.get('model_breakdown', {}).get('ollama_models', []))} Ollama")
        except Exception as e:
            print(f"⚠️ System initialization warning: {e}")
    else:
        print("❌ veryfinal.py not properly imported")

    demo.launch(debug=True, share=False)