josondev committed · Commit 1a8d658 · verified · 1 Parent(s): 163f5c1

Update app.py

Files changed (1)
  1. app.py +69 -33
app.py CHANGED
@@ -2,43 +2,82 @@ import os
 import gradio as gr
 import requests
 import pandas as pd
+import base64
 from dotenv import load_dotenv
-from langchain_openai import ChatOpenAI
-from langchain_nvidia_ai_endpoints import ChatNVIDIA
+from groq import Groq
 
 # Load environment variables
 load_dotenv()
 
-# --- Constants ---
-DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
-
-# --- Basic Agent Definition ---
-class BasicAgent:
-    def __init__(self):
-        self.llm=ChatNVIDIA(model="meta/llama-3.3-70b-instruct",nvidia_api_key=os.getenv("NVIDIA_API_KEY"))
-        self.instructions = """You are a helpful assistant. For every question, reply with only the answer—no explanation, "
+# --- Groq Multimodal Agent ---
+class GroqMultimodalAgent:
+    def __init__(self):
+        self.client = Groq(api_key=os.getenv("GROQ_API_KEY"))
+        self.llava_model = "llava-v1.5-7b-4096-preview"  # For image Q&A
+        self.llama_model = "llama-3-70b-8192"  # For text Q&A
+        self.whisper_model = "whisper-large-v3"  # For audio transcription
+        self.instructions = (
+            "You are a helpful assistant. For every question or media, reply with only the answer—no explanation, "
             "no units, and no extra words. If the answer is a number, just return the number. "
            "If it is a word or phrase, return only that. If it is a list, return a comma-separated list with no extra words. "
-            "Do not include any prefix, suffix, or explanation."""
-
+            "Do not include any prefix, suffix, or explanation."
+        )
 
-        print("BasicAgent initialized.")
+    def _encode_image(self, image_path):
+        with open(image_path, "rb") as img_file:
+            return base64.b64encode(img_file.read()).decode("utf-8")
 
-    def __call__(self, question: str) -> str:
+    def _process_image(self, image_path, question):
+        base64_image = self._encode_image(image_path)
+        prompt = f"{self.instructions}\n\n{question}"
+        chat_completion = self.client.chat.completions.create(
+            model=self.llava_model,
+            messages=[
+                {"role": "user", "content": [
+                    {"type": "text", "text": prompt},
+                    {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}}
+                ]}
+            ]
+        )
+        answer = chat_completion.choices[0].message.content.strip()
+        return self._extract_final_answer(answer)
+
+    def _process_audio(self, audio_path):
+        with open(audio_path, "rb") as audio_file:
+            transcript = self.client.audio.transcriptions.create(
+                model=self.whisper_model,
+                file=audio_file
+            )
+        return transcript.text.strip()
+
+    def _process_text(self, question):
         prompt = f"{self.instructions}\n\n{question}"
-        print(f"Agent received question (first 50 chars): {question[:50]}...")
-        response = self.llm.invoke(prompt)
-        answer = response.content.strip() if hasattr(response, "content") else str(response)
-        print(f"Agent returning answer: {answer}")
-        return answer
+        chat_completion = self.client.chat.completions.create(
+            model=self.llama_model,
+            messages=[{"role": "user", "content": prompt}]
+        )
+        answer = chat_completion.choices[0].message.content.strip()
+        return self._extract_final_answer(answer)
+
+    def _extract_final_answer(self, llm_output: str) -> str:
+        for prefix in ["FINAL ANSWER:", "Final answer:", "final answer:"]:
+            if llm_output.lower().startswith(prefix.lower()):
+                return llm_output[len(prefix):].strip()
+        return llm_output
+
+    def __call__(self, question: str, image_path: str = None, audio_path: str = None) -> str:
+        if image_path:
+            return self._process_image(image_path, question)
+        elif audio_path:
+            return self._process_audio(audio_path)
+        else:
+            return self._process_text(question)
+
+# --- Gradio Leaderboard Submission App ---
+DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
 def run_and_submit_all(profile: gr.OAuthProfile | None):
-    """
-    Fetches all questions, runs the BasicAgent on them, submits all answers,
-    and displays the results.
-    """
-    space_id = os.getenv("SPACE_ID") # For codebase link
-
+    space_id = os.getenv("SPACE_ID")
     if profile:
         username = f"{profile.username}"
         print(f"User logged in: {username}")
@@ -50,9 +89,8 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
     questions_url = f"{api_url}/questions"
     submit_url = f"{api_url}/submit"
 
-    # 1. Instantiate Agent
     try:
-        agent = BasicAgent()
+        agent = GroqMultimodalAgent()
     except Exception as e:
         print(f"Error instantiating agent: {e}")
         return f"Error initializing agent: {e}", None
@@ -60,7 +98,6 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
     agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
     print(agent_code)
 
-    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
@@ -81,18 +118,19 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None
 
-    # 3. Run your Agent
    results_log = []
    answers_payload = []
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
+        image_path = item.get("image_path", None)
+        audio_path = item.get("audio_path", None)
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
-            submitted_answer = agent(question_text)
+            submitted_answer = agent(question_text, image_path=image_path, audio_path=audio_path)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
@@ -103,12 +141,10 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
 
-    # 4. Prepare Submission
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)
 
-    # 5. Submit
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
@@ -163,7 +199,7 @@ with gr.Blocks() as demo:
        ---
        **Disclaimers:**
        Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
-        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
+        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a separate action or even to answer the questions in async.
        """
    )
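
For reference, a minimal sketch of how the new GroqMultimodalAgent introduced by this commit could be exercised locally, outside the Gradio app. It assumes GROQ_API_KEY is set in the environment, the groq package is installed, and the class is importable from app.py; the media file paths are hypothetical placeholders, not files shipped with the Space.

# Minimal local smoke test for GroqMultimodalAgent (sketch, not part of the commit).
# Assumes GROQ_API_KEY is set and app.py is importable; file paths are hypothetical.
import os
from dotenv import load_dotenv
from app import GroqMultimodalAgent

load_dotenv()
assert os.getenv("GROQ_API_KEY"), "GROQ_API_KEY must be set for the Groq client"

agent = GroqMultimodalAgent()

# Text-only question: dispatched to the Llama chat model via _process_text.
print(agent("What is 2 + 2?"))

# Image question: dispatched to the LLaVA model (path is a placeholder).
# print(agent("How many birds are in the picture?", image_path="example.jpg"))

# Audio task: dispatched to Whisper transcription (path is a placeholder).
# print(agent("", audio_path="example.mp3"))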
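The disclaimer in the diff suggests caching answers or answering the questions asynchronously to shorten the wait after clicking submit. Below is a rough sketch of the async idea only: because the Groq client calls in this agent are synchronous, each blocking call is pushed to a thread pool. The answer_all helper, its parameters, and max_workers are illustrative choices, not part of this commit.

# Illustrative sketch of the "answer the questions in async" suggestion from the
# disclaimer; not part of this commit. `agent` and `questions_data` mirror the
# names used in run_and_submit_all.
import asyncio

async def answer_all(agent, questions_data, max_workers: int = 4):
    loop = asyncio.get_running_loop()
    sem = asyncio.Semaphore(max_workers)

    async def answer_one(item):
        async with sem:
            # The Groq calls are blocking, so run each one in the default executor.
            answer = await loop.run_in_executor(None, agent, item["question"])
            return {"task_id": item["task_id"], "submitted_answer": answer}

    return list(await asyncio.gather(*(answer_one(item) for item in questions_data)))

# Usage (answers could then be cached and submitted in a separate action):
# answers_payload = asyncio.run(answer_all(agent, questions_data))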