keenthinker committed on
Commit
3be31f7
·
verified ·
1 Parent(s): 5b6c2d5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -6
app.py CHANGED
@@ -10,14 +10,43 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
10
 
11
  # --- Basic Agent Definition ---
12
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
 
 
 
 
 
 
 
 
 
13
class BasicAgent:
    """Trivial stub agent: logs each incoming question and replies with a canned string."""

    def __init__(self):
        # Nothing to configure; just announce construction for debugging.
        print("BasicAgent initialized.")

    def __call__(self, question: str) -> str:
        """Log a truncated preview of *question* and return the stock answer."""
        preview = question[:50]
        print(f"Agent received question (first 50 chars): {preview}...")
        answer = "This is a default answer."
        print(f"Agent returning fixed answer: {answer}")
        return answer
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
  def run_and_submit_all( profile: gr.OAuthProfile | None):
23
  """
 
10
 
11
  # --- Basic Agent Definition ---
12
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
13
+ #class BasicAgent:
14
+ # def __init__(self):
15
+ # print("BasicAgent initialized.")
16
+ # def __call__(self, question: str) -> str:
17
+ # print(f"Agent received question (first 50 chars): {question[:50]}...")
18
+ # fixed_answer = "This is a default answer."
19
+ # print(f"Agent returning fixed answer: {fixed_answer}")
20
+ # return fixed_answer
21
+
22
  class BasicAgent:
23
  def __init__(self):
24
+ # Text model (can be improved to Mistral/Zephyr if used in API mode)
25
+ self.text_model = pipeline("text2text-generation", model="google/flan-t5-large")
26
+ # Image captioning model for questions with files
27
+ self.image_model = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
28
+
29
+ def run(self, question: str, context: dict = None) -> str:
30
+ """
31
+ :param question: A text question
32
+ :param context: Optional dict containing 'file' (BytesIO) if present
33
+ """
34
+ try:
35
+ prompt = f"Answer the following question concisely:\n{question}"
36
+
37
+ # If file provided (e.g., image), extract caption and append to prompt
38
+ #if context and "file" in context:
39
+ # image_bytes = context["file"]
40
+ # image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
41
+ # image_caption = self.image_model(image)[0]["generated_text"]
42
+ # prompt += f"\n\nThis image may help:\n{image_caption}"
43
+
44
+ result = self.text_model(prompt, max_new_tokens=100, temperature=0.3)[0]["generated_text"]
45
+ return result.strip()
46
+
47
+ except Exception as e:
48
+ print(f"[ERROR]: {e}")
49
+ return "I'm unable to answer this question."
50
 
51
  def run_and_submit_all( profile: gr.OAuthProfile | None):
52
  """