jarguello76 committed on
Commit
9f18da5
·
verified ·
1 Parent(s): 569f551

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +68 -36
app.py CHANGED
@@ -1,6 +1,6 @@
1
  import os
2
- import json
3
  import requests
 
4
  import pandas as pd
5
  import gradio as gr
6
 
@@ -9,51 +9,71 @@ from langchain.docstore.document import Document
9
  from langchain.text_splitter import RecursiveCharacterTextSplitter
10
  from langchain_community.retrievers import BM25Retriever
11
 
12
- from huggingface_hub.inference_api import InferenceApi
13
  from smolagents import Tool, CodeAgent
14
 
15
- # ----- HF Inference Wrapper -----
 
 
 
 
 
 
 
 
 
 
16
  class HuggingFaceInferenceWrapper:
17
  def __init__(self, inference_api):
18
  self.inference_api = inference_api
19
 
20
  def generate(self, prompt: str, **kwargs) -> str:
21
- raw_response = self.inference_api(inputs=prompt, raw_response=True)
22
- json_data = json.loads(raw_response.content)
 
 
 
 
 
 
 
 
 
 
 
 
 
23
  if isinstance(json_data, dict) and "generated_text" in json_data:
24
  return json_data["generated_text"].strip()
25
- elif isinstance(json_data, list) and len(json_data) > 0 and "generated_text" in json_data[0]:
 
 
 
 
26
  return json_data[0]["generated_text"].strip()
27
  else:
 
28
  return str(json_data)
29
 
30
-
31
- # ----- Setup HF API -----
32
- hf_token = os.getenv("HUGGINGFACE_API_KEY")
33
- if not hf_token:
34
- raise ValueError("HUGGINGFACE_API_KEY environment variable is not set")
35
-
36
- inference_api = InferenceApi(repo_id="Qwen/Qwen2.5-VL-7B-Instruct", token=hf_token)
37
- model = HuggingFaceInferenceWrapper(inference_api)
38
-
39
-
40
  def run_and_submit_all(profile: gr.OAuthProfile | None):
41
- space_id = os.getenv("SPACE_ID")
42
 
43
  if profile:
44
- username = profile.username
45
  print(f"User logged in: {username}")
46
  else:
47
  print("User not logged in.")
48
  return "Please Login to Hugging Face with the button.", None
49
 
50
- api_url = "https://agents-course-unit4-scoring.hf.space"
51
  questions_url = f"{api_url}/questions"
52
  submit_url = f"{api_url}/submit"
53
 
54
  try:
 
55
  knowledge_base = datasets.load_dataset("m-ric/huggingface_doc", split="train")
56
- knowledge_base = knowledge_base.filter(lambda row: row["source"].startswith("huggingface/transformers"))
 
 
57
 
58
  source_docs = [
59
  Document(page_content=doc["text"], metadata={"source": doc["source"].split("/")[1]})
@@ -72,15 +92,13 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
72
  class RetrieverTool(Tool):
73
  name = "retriever"
74
  description = (
75
- "Uses lexical search to retrieve the parts of transformers documentation "
76
- "that could be most relevant to answer your query."
77
  )
78
  inputs = {
79
  "query": {
80
  "type": "string",
81
  "description": (
82
- "The query to perform. This should be lexically close to your target documents. "
83
- "Use the affirmative form rather than a question."
84
  ),
85
  }
86
  }
@@ -99,22 +117,27 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
99
 
100
  retriever_tool = RetrieverTool(docs_processed)
101
 
102
- # Instantiate CodeAgent with our wrapped model
 
 
 
 
103
  agent = CodeAgent(
104
  tools=[retriever_tool],
105
- model=model,
106
  max_steps=4,
107
  verbosity_level=2,
108
- stream_outputs=False, # Must be False for this wrapper
109
  )
110
 
111
  except Exception as e:
112
  return f"Error initializing agent: {e}", None
113
 
114
- agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main" if space_id else "Code repo URL not available"
 
 
115
  print(agent_code)
116
 
117
- # Fetch Questions
118
  try:
119
  response = requests.get(questions_url, timeout=15)
120
  response.raise_for_status()
@@ -143,6 +166,8 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
143
  return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
144
 
145
  submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
 
 
146
  try:
147
  response = requests.post(submit_url, json=submission_data, timeout=60)
148
  response.raise_for_status()
@@ -157,21 +182,24 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
157
  results_df = pd.DataFrame(results_log)
158
  return final_status, results_df
159
  except Exception as e:
 
160
  results_df = pd.DataFrame(results_log)
161
- return f"Submission Failed: {e}", results_df
162
-
163
 
164
- # --- Gradio UI ---
165
 
 
166
  with gr.Blocks() as demo:
167
  gr.Markdown("# Basic Agent Evaluation Runner")
168
  gr.Markdown(
169
  """
170
  **Instructions:**
171
- 1. Clone this space and modify the code to define your agent's logic, tools, packages, etc.
172
- 2. Log in with the Hugging Face button.
173
- 3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
174
  ---
 
 
 
175
  """
176
  )
177
 
@@ -182,7 +210,11 @@ with gr.Blocks() as demo:
182
  status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
183
  results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
184
 
185
- run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])
 
 
 
 
186
 
187
  if __name__ == "__main__":
188
  print("\n" + "-" * 30 + " App Starting " + "-" * 30)
 
1
  import os
 
2
  import requests
3
+ import json
4
  import pandas as pd
5
  import gradio as gr
6
 
 
9
  from langchain.text_splitter import RecursiveCharacterTextSplitter
10
  from langchain_community.retrievers import BM25Retriever
11
 
 
12
  from smolagents import Tool, CodeAgent
13
 
14
from huggingface_hub.inference_api import InferenceApi

# Load the HF token from the environment; it is required for the Inference API
# client constructed later in run_and_submit_all().
hf_token = os.getenv("HUGGINGFACE_API_KEY")
if hf_token:
    # SECURITY: do not print the token value itself — secrets must never be
    # written to logs (the original printed the raw token here).
    print("HUGGINGFACE_API_KEY found in environment.")
    os.environ["HUGGINGFACE_API_KEY"] = hf_token
else:
    print("No HUGGINGFACE_API_KEY found in env.")
25
class HuggingFaceInferenceWrapper:
    """Minimal text-generation adapter over huggingface_hub's InferenceApi.

    Exposes a `generate(prompt) -> str` method so the client can be passed
    to smolagents' CodeAgent as its model.
    """

    def __init__(self, inference_api):
        # Callable Inference API client; invoked with raw_response=True below.
        self.inference_api = inference_api

    def generate(self, prompt: str, **kwargs) -> str:
        """Call the inference endpoint and return the generated text.

        Handles three response shapes: a requests.Response-like object with
        `.content`, a raw JSON string, or anything else (stringified as-is).
        Never raises on a malformed payload — falls back to returning the
        raw body so the agent loop keeps running.
        """
        response = self.inference_api(inputs=prompt, raw_response=True)

        # Normalize the response into parsed JSON where possible.
        if hasattr(response, "content"):
            # requests.Response-like object.
            try:
                json_data = json.loads(response.content)
            except (json.JSONDecodeError, TypeError, ValueError):
                # Non-JSON payload (e.g. an HTML error page): return the raw
                # body instead of crashing. The original raised here.
                raw = response.content
                if isinstance(raw, bytes):
                    return raw.decode("utf-8", errors="replace")
                return str(raw)
        else:
            # Sometimes the response might already be a string.
            try:
                json_data = json.loads(response)
            except Exception:
                # Fallback: return raw string response.
                return str(response)

        # Extract generated_text from the parsed JSON.
        if isinstance(json_data, dict) and "generated_text" in json_data:
            return json_data["generated_text"].strip()
        elif (
            isinstance(json_data, list)
            and len(json_data) > 0
            and "generated_text" in json_data[0]
        ):
            return json_data[0]["generated_text"].strip()
        else:
            # Fallback: return the entire JSON as a string.
            return str(json_data)
56
 
 
 
 
 
 
 
 
 
 
 
57
  def run_and_submit_all(profile: gr.OAuthProfile | None):
58
+ space_id = os.getenv("SPACE_ID") # For linking repo code
59
 
60
  if profile:
61
+ username = f"{profile.username}"
62
  print(f"User logged in: {username}")
63
  else:
64
  print("User not logged in.")
65
  return "Please Login to Hugging Face with the button.", None
66
 
67
+ api_url = "https://agents-course-unit4-scoring.hf.space" # Change if needed
68
  questions_url = f"{api_url}/questions"
69
  submit_url = f"{api_url}/submit"
70
 
71
  try:
72
+ # Load knowledge base and filter for retriever
73
  knowledge_base = datasets.load_dataset("m-ric/huggingface_doc", split="train")
74
+ knowledge_base = knowledge_base.filter(
75
+ lambda row: row["source"].startswith("huggingface/transformers")
76
+ )
77
 
78
  source_docs = [
79
  Document(page_content=doc["text"], metadata={"source": doc["source"].split("/")[1]})
 
92
  class RetrieverTool(Tool):
93
  name = "retriever"
94
  description = (
95
+ "Uses lexical search to retrieve relevant parts of transformers docs."
 
96
  )
97
  inputs = {
98
  "query": {
99
  "type": "string",
100
  "description": (
101
+ "The query to perform. Should be lexically close to your target documents."
 
102
  ),
103
  }
104
  }
 
117
 
118
  retriever_tool = RetrieverTool(docs_processed)
119
 
120
+ # Instantiate HuggingFace Inference API wrapper
121
+ inference_api = InferenceApi(repo_id="Qwen/Qwen2.5-VL-7B-Instruct", token=hf_token)
122
+ model_wrapper = HuggingFaceInferenceWrapper(inference_api)
123
+
124
+ # Instantiate the agent with our wrapped model
125
  agent = CodeAgent(
126
  tools=[retriever_tool],
127
+ model=model_wrapper,
128
  max_steps=4,
129
  verbosity_level=2,
130
+ stream_outputs=False, # must be False for this wrapper
131
  )
132
 
133
  except Exception as e:
134
  return f"Error initializing agent: {e}", None
135
 
136
+ agent_code = (
137
+ f"https://huggingface.co/spaces/{space_id}/tree/main" if space_id else "Code repo URL not available"
138
+ )
139
  print(agent_code)
140
 
 
141
  try:
142
  response = requests.get(questions_url, timeout=15)
143
  response.raise_for_status()
 
166
  return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
167
 
168
  submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
169
+ print(f"Submitting {len(answers_payload)} answers...")
170
+
171
  try:
172
  response = requests.post(submit_url, json=submission_data, timeout=60)
173
  response.raise_for_status()
 
182
  results_df = pd.DataFrame(results_log)
183
  return final_status, results_df
184
  except Exception as e:
185
+ status_message = f"Submission Failed: {e}"
186
  results_df = pd.DataFrame(results_log)
187
+ return status_message, results_df
 
188
 
 
189
 
190
+ # Gradio UI code unchanged from your original snippet
191
  with gr.Blocks() as demo:
192
  gr.Markdown("# Basic Agent Evaluation Runner")
193
  gr.Markdown(
194
  """
195
  **Instructions:**
196
+ 1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
197
+ 2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
198
+ 3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
199
  ---
200
+ **Disclaimers:**
201
+ Once clicking on the "submit" button, it can take quite some time (this is the time for the agent to go through all the questions).
202
+ This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a separate action or even to answer the questions asynchronously.
203
  """
204
  )
205
 
 
210
  status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
211
  results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
212
 
213
+ run_button.click(
214
+ fn=run_and_submit_all,
215
+ outputs=[status_output, results_table]
216
+ )
217
+
218
 
219
  if __name__ == "__main__":
220
  print("\n" + "-" * 30 + " App Starting " + "-" * 30)