ChockqOteewy committed on
Commit
808eedd
·
verified ·
1 Parent(s): 4c42a76

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +70 -60
app.py CHANGED
@@ -2,122 +2,132 @@ import os
2
  import gradio as gr
3
  import requests
4
  import pandas as pd
5
- from datasets import load_dataset
6
- from duckduckgo_search import DDGS
7
- from llama_index.llms.huggingface import HuggingFaceLLM
8
- from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
9
  from huggingface_hub import InferenceClient
 
 
10
  import wikipediaapi
11
 
12
# Constants
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
HF_TOKEN = os.environ.get("HF_TOKEN")  # Space secret; None when unset

# Advanced LLM via Hugging Face Inference API
llm_model_id = "deepseek-ai/DeepSeek-R1"
hf_client = InferenceClient(llm_model_id, token=HF_TOKEN)

# Wikipedia API setup.
# FIX: wikipedia-api >= 0.6 requires an explicit user_agent (its first
# positional parameter is the user agent, not the language), so the old
# `wikipediaapi.Wikipedia('en')` call raises at import time.
wiki_api = wikipediaapi.Wikipedia(
    language='en',
    user_agent='SmartAgent/1.0'
)

# Load Wikipedia dataset from Hugging Face (first 10k articles only).
wiki_dataset = load_dataset(
    "wikipedia", "20220301.en", split="train[:10000]", trust_remote_code=True
)
27
 
28
- # DuckDuckGo search function
 
 
 
29
def duckduckgo_search(query):
    """Search DuckDuckGo and return up to three result snippets.

    Args:
        query: free-text search query.

    Returns:
        Newline-joined snippet bodies, or "No results found." when the
        search yields nothing usable.
    """
    with DDGS() as ddgs:
        results = list(ddgs.text(query, max_results=3))
    # FIX: the original only checked `if results:` and then joined the
    # bodies — when results existed but none carried a "body" field it
    # returned "" instead of the fallback message. `or` covers both cases.
    return "\n".join(r["body"] for r in results if r.get("body")) or "No results found."
 
36
 
37
- # Smart Agent combining multiple sources
38
class SmartAgent:
    """Answer questions by routing between DuckDuckGo, Wikipedia, and an LLM."""

    def __init__(self):
        # FIX: the original built a VectorStoreIndex here via
        # `SimpleDirectoryReader.input_to_document(...)`, which is not a real
        # llama_index API, and `ServiceContext` was removed in llama-index
        # >= 0.10 — construction could never succeed. The fallback path now
        # queries the Inference API client directly, so no per-instance
        # state is needed.
        pass

    def __call__(self, question: str) -> str:
        """Return a best-effort answer for `question` as a string."""
        question_lower = question.lower()

        # Use DuckDuckGo for recent events, dates, or temporal queries.
        if any(term in question_lower for term in ["current", "latest", "2024", "2025", "recent", "today", "president"]):
            return duckduckgo_search(question)

        # Check if a Wikipedia page exists for the topic.
        page = wiki_api.page(question)
        if page.exists():
            return page.summary[:1000] + "..."

        # Fallback: ask the hosted LLM directly.
        try:
            return str(hf_client.text_generation(question, max_new_tokens=512))
        except Exception as e:
            return f"LLM query error: {e}"
69
 
70
- # Run and submit evaluation
71
def run_and_submit_all(profile: gr.OAuthProfile | None):
    """Fetch all questions, answer them with SmartAgent, and submit for scoring.

    Args:
        profile: OAuth profile injected by the Gradio login button;
            None when the user is not logged in.

    Returns:
        (status_message, results_dataframe); the dataframe is None when the
        run aborts before producing any answers.
    """
    space_id = os.getenv("SPACE_ID")
    if profile:
        username = f"{profile.username}"
    else:
        return "Please Login to Hugging Face with the button.", None

    questions_url = f"{DEFAULT_API_URL}/questions"
    submit_url = f"{DEFAULT_API_URL}/submit"

    # Instantiate agent; link this Space's source so the scorer can audit it.
    agent = SmartAgent()
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"

    # Fetch questions.
    try:
        # FIX: added timeout + raise_for_status — the bare
        # `requests.get(...).json()` hung indefinitely on a stalled server
        # and surfaced HTTP errors as confusing JSON decode failures.
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
    except Exception as e:
        return f"Error fetching questions: {e}", None

    results_log, answers_payload = [], []
    for item in questions_data:
        task_id, question_text = item.get("task_id"), item.get("question")
        if not task_id or not question_text:
            continue  # skip malformed entries instead of answering None
        answer = agent(question_text)
        answers_payload.append({"task_id": task_id, "submitted_answer": answer})
        results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": answer})

    if not answers_payload:
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    submission_data = {"username": username, "agent_code": agent_code, "answers": answers_payload}

    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score')}%\n"
            f"({result_data.get('correct_count')}/{result_data.get('total_attempted')}) correct\n"
            f"Message: {result_data.get('message')}"
        )
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except Exception as e:
        return f"Submission Failed: {e}", pd.DataFrame(results_log)
113
 
114
- # Gradio interface setup
115
# Gradio interface setup.
with gr.Blocks() as demo:
    # Page header.
    gr.Markdown("# 🚀 Smart Multi-Source Agent Evaluation")
    # OAuth login — required before a submission can be made.
    gr.LoginButton()
    run_button = gr.Button("Run Evaluation & Submit All Answers")
    status_output = gr.Textbox(label="Status & Results", lines=6, interactive=False)
    results_table = gr.DataFrame(label="Agent Answers")
    # The logged-in profile is injected automatically as the fn's argument.
    run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])
123
 
 
2
  import gradio as gr
3
  import requests
4
  import pandas as pd
 
 
 
 
5
  from huggingface_hub import InferenceClient
6
+ from duckduckgo_search import DDGS
7
+ from datasets import load_dataset
8
  import wikipediaapi
9
 
10
# Constants
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
HF_TOKEN = os.getenv("HF_TOKEN")  # Space secret; None when unset

# Hugging Face Inference API client for the fallback LLM.
llm_model_id = "deepseek-ai/DeepSeek-R1"
hf_client = InferenceClient(llm_model_id, token=HF_TOKEN)

# Wikipedia client — the library requires an explicit user agent string.
wiki_api = wikipediaapi.Wikipedia(
    language='en',
    user_agent='SmartAgent/1.0 ([email protected])'
)

# Subset of the Wikipedia dump (first 10k articles; adjust as needed).
wiki_dataset = load_dataset("wikipedia", "20220301.en", split="train[:10000]", trust_remote_code=True)
27
+ # Search functions
28
def duckduckgo_search(query):
    """Return up to three DuckDuckGo snippet bodies for `query`, newline-joined.

    Falls back to "No results found." when nothing usable comes back.
    """
    with DDGS() as ddgs:
        hits = list(ddgs.text(query, max_results=3))
    snippets = [hit["body"] for hit in hits if hit.get("body")]
    return "\n".join(snippets) or "No results found."
32
+
33
def wikipedia_search(query):
    """Return the summary of the Wikipedia page for `query`, if one exists."""
    page = wiki_api.page(query)
    if not page.exists():
        return "No Wikipedia page found."
    return page.summary
36
 
37
+ # Comprehensive Agent
38
class SmartAgent:
    """Route each question to DuckDuckGo, Wikipedia, or the hosted LLM."""

    def __init__(self):
        # Stateless: all tools are module-level singletons.
        pass

    def __call__(self, question: str) -> str:
        """Return a best-effort answer for `question` as a string."""
        lowered = question.lower()

        # Temporal / current-events questions go to live web search.
        temporal_terms = ["current", "latest", "2024", "2025", "who is the president", "recent", "live"]
        if any(term in lowered for term in temporal_terms):
            return duckduckgo_search(question)

        # Next preference: a direct Wikipedia hit.
        summary = wikipedia_search(question)
        if "No Wikipedia page found" not in summary:
            return summary

        # Last resort: ask the Inference API model directly.
        try:
            return hf_client.text_generation(question, max_new_tokens=512)
        except Exception as e:
            return f"HF LLM error: {e}"
57
 
58
+ # Submission logic
59
def run_and_submit_all(profile: gr.OAuthProfile | None):
    """Fetch the evaluation questions, answer them with SmartAgent, and submit.

    Args:
        profile: OAuth profile injected by the Gradio login button;
            None when the user is not logged in.

    Returns:
        (status_message, results_dataframe); the dataframe is None when the
        run aborts before producing any answers.
    """
    space_id = os.getenv("SPACE_ID")
    if profile:
        username = profile.username
        print(f"User logged in: {username}")
    else:
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    agent = SmartAgent()
    # Link to this Space's source so the scorer can audit the agent.
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"

    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
    except Exception as e:
        return f"Error fetching questions: {e}", None

    results_log = []
    answers_payload = []
    # FIX: removed dead `correct_answers = 0` — it was never incremented
    # or read; the score comes back from the server.

    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or not question_text:
            continue  # skip malformed entries

        submitted_answer = agent(question_text)
        answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
        results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})

    if not answers_payload:
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")

    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except Exception as e:
        return f"Submission Failed: {e}", pd.DataFrame(results_log)
116
 
117
+ # Gradio Interface
118
# Gradio Interface
with gr.Blocks() as demo:
    # Header and usage instructions.
    gr.Markdown("# Smart Agent Evaluation Runner")
    gr.Markdown("""
    **Instructions:**
    1. Clone this space, define your agent logic, tools, packages, etc.
    2. Log in to Hugging Face.
    3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
    """)

    # OAuth login — required before a submission can be made.
    gr.LoginButton()

    run_button = gr.Button("Run Evaluation & Submit All Answers")
    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    # The logged-in profile is injected automatically as the fn's argument.
    run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])
133