keenthinker committed on
Commit
46a1848
·
verified ·
1 Parent(s): c45d156

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -41
app.py CHANGED
@@ -3,54 +3,20 @@ import gradio as gr
3
  import requests
4
  import inspect
5
  import pandas as pd
6
- from transformers import pipeline
7
- #from PIL import Image
8
- import requests
9
- import io
10
-
11
  # (Keep Constants as is)
12
  # --- Constants ---
13
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
14
 
15
  # --- Basic Agent Definition ---
16
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
17
- #class BasicAgent:
18
- # def __init__(self):
19
- # print("BasicAgent initialized.")
20
- # def __call__(self, question: str) -> str:
21
- # print(f"Agent received question (first 50 chars): {question[:50]}...")
22
- # fixed_answer = "This is a default answer."
23
- # print(f"Agent returning fixed answer: {fixed_answer}")
24
- # return fixed_answer
25
-
26
  class BasicAgent:
27
  def __init__(self):
28
- # Text model (can be improved to Mistral/Zephyr if used in API mode)
29
- self.text_model = pipeline("text2text-generation", model="google/flan-t5-large")
30
- # Image captioning model for questions with files
31
- self.image_model = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
32
-
33
- def run(self, question: str, context: dict = None) -> str:
34
- """
35
- :param question: A text question
36
- :param context: Optional dict containing 'file' (BytesIO) if present
37
- """
38
- try:
39
- prompt = f"Answer the following question concisely:\n{question}"
40
-
41
- # If file provided (e.g., image), extract caption and append to prompt
42
- #if context and "file" in context:
43
- # image_bytes = context["file"]
44
- # image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
45
- # image_caption = self.image_model(image)[0]["generated_text"]
46
- # prompt += f"\n\nThis image may help:\n{image_caption}"
47
-
48
- result = self.text_model(prompt, max_new_tokens=100, temperature=0.3)[0]["generated_text"]
49
- return result.strip()
50
-
51
- except Exception as e:
52
- print(f"[ERROR]: {e}")
53
- return "I'm unable to answer this question."
54
 
55
  def run_and_submit_all( profile: gr.OAuthProfile | None):
56
  """
@@ -113,7 +79,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
113
  print(f"Skipping item with missing task_id or question: {item}")
114
  continue
115
  try:
116
- submitted_answer = agent.run(question_text)
117
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
118
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
119
  except Exception as e:
 
3
  import requests
4
  import inspect
5
  import pandas as pd
6
+ from CustomAgent import alfred
 
 
 
 
7
  # (Keep Constants as is)
8
  # --- Constants ---
9
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
10
 
11
  # --- Basic Agent Definition ---
12
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
 
 
 
 
 
 
 
 
 
13
  class BasicAgent:
14
  def __init__(self):
15
+ print("BasicAgent initialized.")
16
+ def __call__(self, question: str) -> str:
17
+ print(f"Agent received question (first 50 chars): {question[:50]}...")
18
+ response = alfred.invoke({"messages": [{"role": "user", "content": question}]})
19
+ return response['messages'][-1].content
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
 
21
  def run_and_submit_all( profile: gr.OAuthProfile | None):
22
  """
 
79
  print(f"Skipping item with missing task_id or question: {item}")
80
  continue
81
  try:
82
+ submitted_answer = agent(question_text)
83
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
84
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
85
  except Exception as e: