jarguello76 commited on
Commit
569f551
·
verified ·
1 Parent(s): 7bae319

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -39
app.py CHANGED
@@ -1,4 +1,5 @@
1
  import os
 
2
  import requests
3
  import pandas as pd
4
  import gradio as gr
@@ -8,37 +9,33 @@ from langchain.docstore.document import Document
8
  from langchain.text_splitter import RecursiveCharacterTextSplitter
9
  from langchain_community.retrievers import BM25Retriever
10
 
11
- from smolagents import Tool, CodeAgent
12
  from huggingface_hub.inference_api import InferenceApi
 
13
 
14
-
15
- # Wrapper class to adapt HuggingFace Inference API to have .generate()
16
  class HuggingFaceInferenceWrapper:
17
  def __init__(self, inference_api):
18
  self.inference_api = inference_api
19
 
20
  def generate(self, prompt: str, **kwargs) -> str:
21
- # Call the inference API with prompt, return generated text
22
- response = self.inference_api(inputs=prompt)
23
- if isinstance(response, dict) and "generated_text" in response:
24
- return response["generated_text"]
25
- elif isinstance(response, str):
26
- return response
27
  else:
28
- raise ValueError(f"Unexpected response format: {response}")
29
 
30
 
 
31
  hf_token = os.getenv("HUGGINGFACE_API_KEY")
32
- print("Token from env var:", hf_token)
 
33
 
34
- if hf_token:
35
- os.environ["HUGGINGFACE_API_KEY"] = hf_token
36
- print("Set HUGGINGFACE_API_KEY in env.")
37
- else:
38
- print("No HUGGINGFACE_API_KEY found in env.")
39
 
40
- print("HUGGINGFACE_API_KEY in env:", "HUGGINGFACE_API_KEY" in os.environ)
41
- print("HUGGINGFACE_API_KEY value (masked):", os.environ.get("HUGGINGFACE_API_KEY", "")[:5] + "...")
42
 
43
  def run_and_submit_all(profile: gr.OAuthProfile | None):
44
  space_id = os.getenv("SPACE_ID")
@@ -102,17 +99,13 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
102
 
103
  retriever_tool = RetrieverTool(docs_processed)
104
 
105
- # Initialize HuggingFace InferenceApi
106
- inference_api = InferenceApi(repo_id="Qwen/Qwen2.5-VL-7B-Instruct", token=hf_token)
107
- # Wrap it so it supports .generate()
108
- model_wrapper = HuggingFaceInferenceWrapper(inference_api)
109
-
110
  agent = CodeAgent(
111
  tools=[retriever_tool],
112
- model=model_wrapper,
113
  max_steps=4,
114
  verbosity_level=2,
115
- stream_outputs=False, # Set False because this model doesn't support streaming here
116
  )
117
 
118
  except Exception as e:
@@ -121,6 +114,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
121
  agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main" if space_id else "Code repo URL not available"
122
  print(agent_code)
123
 
 
124
  try:
125
  response = requests.get(questions_url, timeout=15)
126
  response.raise_for_status()
@@ -132,6 +126,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
132
 
133
  results_log = []
134
  answers_payload = []
 
135
  for item in questions_data:
136
  task_id = item.get("task_id")
137
  question_text = item.get("question")
@@ -148,7 +143,6 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
148
  return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
149
 
150
  submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
151
-
152
  try:
153
  response = requests.post(submit_url, json=submission_data, timeout=60)
154
  response.raise_for_status()
@@ -163,23 +157,21 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
163
  results_df = pd.DataFrame(results_log)
164
  return final_status, results_df
165
  except Exception as e:
166
- status_message = f"Submission Failed: {e}"
167
  results_df = pd.DataFrame(results_log)
168
- return status_message, results_df
 
169
 
 
170
 
171
  with gr.Blocks() as demo:
172
  gr.Markdown("# Basic Agent Evaluation Runner")
173
  gr.Markdown(
174
  """
175
  **Instructions:**
176
- 1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
177
- 2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
178
- 3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
179
  ---
180
- **Disclaimers:**
181
- Once clicking on the "submit" button, it can take quite some time (this is the time for the agent to go through all the questions).
182
- This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a separate action or even to answer the questions asynchronously.
183
  """
184
  )
185
 
@@ -190,11 +182,7 @@ with gr.Blocks() as demo:
190
  status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
191
  results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
192
 
193
- run_button.click(
194
- fn=run_and_submit_all,
195
- outputs=[status_output, results_table]
196
- )
197
-
198
 
199
  if __name__ == "__main__":
200
  print("\n" + "-" * 30 + " App Starting " + "-" * 30)
 
1
  import os
2
+ import json
3
  import requests
4
  import pandas as pd
5
  import gradio as gr
 
9
  from langchain.text_splitter import RecursiveCharacterTextSplitter
10
  from langchain_community.retrievers import BM25Retriever
11
 
 
12
  from huggingface_hub.inference_api import InferenceApi
13
+ from smolagents import Tool, CodeAgent
14
 
15
+ # ----- HF Inference Wrapper -----
 
16
class HuggingFaceInferenceWrapper:
    """Adapt a huggingface_hub InferenceApi client to expose a .generate() method."""

    def __init__(self, inference_api):
        # Underlying client; invoked as inference_api(inputs=..., raw_response=True).
        self.inference_api = inference_api

    def generate(self, prompt: str, **kwargs) -> str:
        """Send *prompt* to the inference API and return the generated text.

        The raw HTTP response body is decoded as JSON. Known payload shapes:
        a dict with "generated_text", or a list whose first element carries
        "generated_text". Anything else (e.g. an error body) is stringified.
        """
        resp = self.inference_api(inputs=prompt, raw_response=True)
        payload = json.loads(resp.content)
        # Single-result payload: {"generated_text": ...}
        if isinstance(payload, dict) and "generated_text" in payload:
            return payload["generated_text"].strip()
        # Batch-style payload: [{"generated_text": ...}, ...] — take the first.
        if isinstance(payload, list) and len(payload) > 0 and "generated_text" in payload[0]:
            return payload[0]["generated_text"].strip()
        # Fallback: return the payload verbatim as a string (no exception raised).
        return str(payload)
29
 
30
 
31
# ----- Setup HF API -----
# Fail fast at import time: the app cannot run without a token.
hf_token = os.environ.get("HUGGINGFACE_API_KEY")
if not hf_token:
    raise ValueError("HUGGINGFACE_API_KEY environment variable is not set")

# NOTE(review): InferenceApi is the legacy huggingface_hub client — confirm it
# still supports this repo before upgrading to InferenceClient.
inference_api = InferenceApi(repo_id="Qwen/Qwen2.5-VL-7B-Instruct", token=hf_token)
# Module-level model handle consumed later by the CodeAgent construction.
model = HuggingFaceInferenceWrapper(inference_api)
 
 
 
38
 
 
 
39
 
40
  def run_and_submit_all(profile: gr.OAuthProfile | None):
41
  space_id = os.getenv("SPACE_ID")
 
99
 
100
  retriever_tool = RetrieverTool(docs_processed)
101
 
102
+ # Instantiate CodeAgent with our wrapped model
 
 
 
 
103
  agent = CodeAgent(
104
  tools=[retriever_tool],
105
+ model=model,
106
  max_steps=4,
107
  verbosity_level=2,
108
+ stream_outputs=False, # Must be False for this wrapper
109
  )
110
 
111
  except Exception as e:
 
114
  agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main" if space_id else "Code repo URL not available"
115
  print(agent_code)
116
 
117
+ # Fetch Questions
118
  try:
119
  response = requests.get(questions_url, timeout=15)
120
  response.raise_for_status()
 
126
 
127
  results_log = []
128
  answers_payload = []
129
+
130
  for item in questions_data:
131
  task_id = item.get("task_id")
132
  question_text = item.get("question")
 
143
  return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
144
 
145
  submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
 
146
  try:
147
  response = requests.post(submit_url, json=submission_data, timeout=60)
148
  response.raise_for_status()
 
157
  results_df = pd.DataFrame(results_log)
158
  return final_status, results_df
159
  except Exception as e:
 
160
  results_df = pd.DataFrame(results_log)
161
+ return f"Submission Failed: {e}", results_df
162
+
163
 
164
+ # --- Gradio UI ---
165
 
166
  with gr.Blocks() as demo:
167
  gr.Markdown("# Basic Agent Evaluation Runner")
168
  gr.Markdown(
169
  """
170
  **Instructions:**
171
+ 1. Clone this space and modify the code to define your agent's logic, tools, packages, etc.
172
+ 2. Log in with the Hugging Face button.
173
+ 3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
174
  ---
 
 
 
175
  """
176
  )
177
 
 
182
  status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
183
  results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
184
 
185
+ run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])
 
 
 
 
186
 
187
  if __name__ == "__main__":
188
  print("\n" + "-" * 30 + " App Starting " + "-" * 30)