import os
import gradio as gr
import requests
import inspect
import pandas as pd
from typing import Any

# (Keep Constants as is)
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

# --- Advanced Modular Agent Implementation ---
import json
import logging
import mimetypes
import openpyxl
import numpy as np
from datetime import datetime
from io import BytesIO
from PIL import Image
import subprocess
import tempfile
from huggingface_hub import InferenceClient
import cv2
import torch
from bs4 import BeautifulSoup
import openai
import magic  # for robust file type detection

logging.basicConfig(filename='gaia_agent.log', level=logging.INFO, format='%(asctime)s %(levelname)s:%(message)s')
logger = logging.getLogger(__name__)

HF_TOKEN = os.environ.get("HF_TOKEN", "")
def llama3_chat(prompt):
    try:
        client = InferenceClient(provider="fireworks-ai", api_key=HF_TOKEN)
        completion = client.chat.completions.create(
            model="meta-llama/Llama-3.1-8B-Instruct",
            messages=[{"role": "user", "content": prompt}],
        )
        return completion.choices[0].message.content
    except Exception as e:
        logging.error(f"llama3_chat error: {e}")
        return f"LLM error: {e}"
def mixtral_chat(prompt):
    try:
        client = InferenceClient(provider="hf-inference", api_key=HF_TOKEN)
        completion = client.chat.completions.create(
            model="mistralai/Mixtral-8x7B-Instruct-v0.1",
            messages=[{"role": "user", "content": prompt}],
        )
        return completion.choices[0].message.content
    except Exception as e:
        logging.error(f"mixtral_chat error: {e}")
        return f"LLM error: {e}"
def extractive_qa(question, context):
    try:
        client = InferenceClient(provider="hf-inference", api_key=HF_TOKEN)
        answer = client.question_answering(
            question=question,
            context=context,
            model="deepset/roberta-base-squad2",
        )
        return answer["answer"]
    except Exception as e:
        logging.error(f"extractive_qa error: {e}")
        return f"QA error: {e}"
def table_qa(query, table):
    try:
        client = InferenceClient(provider="hf-inference", api_key=HF_TOKEN)
        answer = client.table_question_answering(
            query=query,
            table=table,
            model="google/tapas-large-finetuned-wtq",
        )
        return answer["answer"]
    except Exception as e:
        logging.error(f"table_qa error: {e}")
        return f"Table QA error: {e}"
def asr_transcribe(audio_path):
    try:
        import torchaudio
        from transformers import pipeline
        asr = pipeline("automatic-speech-recognition", model="openai/whisper-base.en")
        result = asr(audio_path)
        return result["text"]
    except Exception as e:
        logging.error(f"asr_transcribe error: {e}")
        return f"ASR error: {e}"
def image_caption(image_path):
    try:
        from transformers import BlipProcessor, BlipForConditionalGeneration
        from PIL import Image
        processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
        model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
        raw_image = Image.open(image_path).convert('RGB')
        inputs = processor(raw_image, return_tensors="pt")
        out = model.generate(**inputs)
        return processor.decode(out[0], skip_special_tokens=True)
    except Exception as e:
        logging.error(f"image_caption error: {e}")
        return f"Image captioning error: {e}"
def code_analysis(py_path):
    try:
        with open(py_path) as f:
            code = f.read()
        with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as tmp:
            tmp.write(code)
            tmp_path = tmp.name
        try:
            result = subprocess.run(
                ["python3", tmp_path],
                capture_output=True, text=True, timeout=5)
            if result.returncode == 0:
                output = result.stdout.strip().split('\n')
                return output[-1] if output else ''
            else:
                logging.error(f"code_analysis subprocess error: {result.stderr}")
                return f"Code error: {result.stderr}"
        except subprocess.TimeoutExpired:
            logging.error("code_analysis timeout")
            return "Code execution timed out"
        finally:
            os.remove(tmp_path)
    except Exception as e:
        logging.error(f"code_analysis error: {e}")
        return f"Code analysis error: {e}"
def youtube_video_qa(youtube_url, question):
    import subprocess
    import tempfile
    import os
    from transformers import pipeline
    try:
        with tempfile.TemporaryDirectory() as tmpdir:
            # Download video
            video_path = os.path.join(tmpdir, "video.mp4")
            cmd = ["yt-dlp", "-f", "mp4", "-o", video_path, youtube_url]
            subprocess.run(cmd, check=True)
            # Extract audio for ASR. Use an %(ext)s output template so the mp3
            # post-processor does not double up the file extension.
            audio_template = os.path.join(tmpdir, "audio.%(ext)s")
            audio_path = os.path.join(tmpdir, "audio.mp3")
            cmd_audio = ["yt-dlp", "-f", "bestaudio", "--extract-audio", "--audio-format", "mp3", "-o", audio_template, youtube_url]
            subprocess.run(cmd_audio, check=True)
            # Transcribe audio
            asr = pipeline("automatic-speech-recognition", model="openai/whisper-base.en")
            result = asr(audio_path)
            transcript = result["text"]
            # Extract frames for vision QA (roughly one frame every five seconds)
            cap = cv2.VideoCapture(video_path)
            frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            fps = int(cap.get(cv2.CAP_PROP_FPS))
            frames = []
            for i in range(0, frame_count, max(1, fps * 5)):
                cap.set(cv2.CAP_PROP_POS_FRAMES, i)
                ret, frame = cap.read()
                if not ret:
                    break
                img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                frames.append(img)
            cap.release()
            # Object detection (YOLOv8)
            try:
                from ultralytics import YOLO
                yolo = YOLO("yolov8n.pt")
                detections = []
                for img in frames:
                    results = yolo(np.array(img))
                    for r in results:
                        for c in r.boxes.cls:
                            detections.append(yolo.model.names[int(c)])
                detection_summary = {}
                for obj in detections:
                    detection_summary[obj] = detection_summary.get(obj, 0) + 1
            except Exception as e:
                logging.error(f"YOLOv8 error: {e}")
                detection_summary = {}
            # Image captioning (BLIP)
            try:
                from transformers import BlipProcessor, BlipForConditionalGeneration
                processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
                model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
                captions = []
                for img in frames:
                    inputs = processor(img, return_tensors="pt")
                    out = model.generate(**inputs)
                    captions.append(processor.decode(out[0], skip_special_tokens=True))
            except Exception as e:
                logging.error(f"BLIP error: {e}")
                captions = []
            context = f"Transcript: {transcript}\nCaptions: {' | '.join(captions)}\nDetections: {detection_summary}"
            answer = extractive_qa(question, context)
            return answer
    except Exception as e:
        logging.error(f"YouTube video QA error: {e}")
        return f"Video analysis error: {e}"
def web_search_duckduckgo(query, max_results=5):
    """DuckDuckGo web search tool: returns top snippets and URLs."""
    try:
        # The duckduckgo_search package exposes its text search through DDGS;
        # each result is a dict with 'title', 'body', and 'href' keys.
        from duckduckgo_search import DDGS
        results = DDGS().text(query, max_results=max_results)
        snippets = []
        for r in results:
            snippet = f"Title: {r['title']}\nSnippet: {r['body']}\nURL: {r['href']}"
            snippets.append(snippet)
        return '\n---\n'.join(snippets)
    except Exception as e:
        logging.error(f"web_search_duckduckgo error: {e}")
        return f"Web search error: {e}"
def gpt4_chat(prompt, api_key=None):
    """OpenAI GPT-4 Turbo chat completion (legacy openai<1.0 SDK interface)."""
    try:
        api_key = api_key or os.environ.get("OPENAI_API_KEY", "")
        if not api_key:
            return "No OpenAI API key provided."
        response = openai.ChatCompletion.create(
            model="gpt-4-1106-preview",
            messages=[{"role": "system", "content": "You are a general AI assistant. Answer using as few words as possible, in the required format. Use tools as needed, and only output the answer."},
                      {"role": "user", "content": prompt}],
            api_key=api_key,
        )
        return response.choices[0].message['content'].strip()
    except Exception as e:
        logging.error(f"gpt4_chat error: {e}")
        return f"GPT-4 error: {e}"
TOOL_REGISTRY = {
    "llama3_chat": llama3_chat,
    "mixtral_chat": mixtral_chat,
    "extractive_qa": extractive_qa,
    "table_qa": table_qa,
    "asr_transcribe": asr_transcribe,
    "image_caption": image_caption,
    "code_analysis": code_analysis,
    "youtube_video_qa": youtube_video_qa,
    "web_search_duckduckgo": web_search_duckduckgo,
    "gpt4_chat": gpt4_chat,
}
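# The registry lets tools be dispatched by name, which is how the planner's
# output is mapped back onto callables:
#   TOOL_REGISTRY["llama3_chat"]("Hello")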
# --- Utility: Robust file type detection ---
def detect_file_type_magic(file_name):
    try:
        mime = magic.Magic(mime=True)
        filetype = mime.from_file(file_name)
        if 'audio' in filetype:
            return 'audio'
        elif 'image' in filetype:
            return 'image'
        elif 'python' in filetype or file_name.endswith('.py'):
            return 'code'
        elif 'spreadsheet' in filetype or file_name.endswith('.xlsx'):
            return 'excel'
        elif 'csv' in filetype or file_name.endswith('.csv'):
            return 'csv'
        elif 'json' in filetype or file_name.endswith('.json'):
            return 'json'
        elif 'text' in filetype or file_name.endswith(('.txt', '.md')):
            return 'text'
        else:
            return 'unknown'
    except Exception as e:
        logger.error(f"magic file type detection error: {e}")
        return 'unknown'
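# Usage sketch (libmagic inspects file contents rather than trusting the extension):
#   detect_file_type_magic("data.xlsx")  # -> 'excel'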
# --- Improved prompt template for LLMs ---
def build_prompt(context, question):
    return f"""
Context:
{context}
Question:
{question}
Answer:
"""
# --- Refactored ModularGAIAAgent ---
class ModularGAIAAgent:
    def __init__(self, api_url=DEFAULT_API_URL, tool_registry=None):
        self.api_url = api_url
        self.tools = tool_registry or TOOL_REGISTRY
        self.reasoning_trace = []
        self.file_cache = set(os.listdir('.'))
    def fetch_questions(self, from_api=True, questions_path="Hugging Face Questions"):
        """Fetch questions from API or local file."""
        try:
            if from_api:
                r = requests.get(f"{self.api_url}/questions")
                r.raise_for_status()
                return r.json()
            else:
                with open(questions_path) as f:
                    data = f.read()
                start = data.find("[")
                end = data.rfind("]") + 1
                questions = json.loads(data[start:end])
                return questions
        except Exception as e:
            logger.error(f"fetch_questions error: {e}")
            return []
    def download_file(self, file_id, file_name=None):
        """Download file if not present locally."""
        try:
            if not file_name:
                file_name = file_id
            if file_name in self.file_cache:
                return file_name
            url = f"{self.api_url}/files/{file_id}"
            r = requests.get(url)
            if r.status_code == 200:
                with open(file_name, "wb") as f:
                    f.write(r.content)
                self.file_cache.add(file_name)
                return file_name
            else:
                self.reasoning_trace.append(f"Failed to download file {file_id} (status {r.status_code})")
                logger.error(f"Failed to download file {file_id} (status {r.status_code})")
                return None
        except Exception as e:
            logger.error(f"download_file error: {e}")
            self.reasoning_trace.append(f"Download error: {e}")
            return None
    def detect_file_type(self, file_name):
        """Detect file type using magic and extension as fallback."""
        file_type = detect_file_type_magic(file_name)
        if file_type == 'unknown':
            ext = os.path.splitext(file_name)[-1].lower()
            if ext in ['.mp3', '.wav', '.flac']:
                return 'audio'
            elif ext in ['.png', '.jpg', '.jpeg', '.bmp']:
                return 'image'
            elif ext in ['.py']:
                return 'code'
            elif ext in ['.xlsx']:
                return 'excel'
            elif ext in ['.csv']:
                return 'csv'
            elif ext in ['.json']:
                return 'json'
            elif ext in ['.txt', '.md']:
                return 'text'
            else:
                return 'unknown'
        return file_type
    def analyze_file(self, file_name, file_type):
        """Analyze file and return context for the question."""
        try:
            if file_type == 'audio':
                transcript = self.tools['asr_transcribe'](file_name)
                self.reasoning_trace.append(f"Transcribed audio: {transcript[:100]}...")
                return transcript
            elif file_type == 'image':
                caption = self.tools['image_caption'](file_name)
                self.reasoning_trace.append(f"Image caption: {caption}")
                return caption
            elif file_type == 'code':
                result = self.tools['code_analysis'](file_name)
                self.reasoning_trace.append(f"Code analysis result: {result}")
                return result
            elif file_type == 'excel':
                wb = openpyxl.load_workbook(file_name)
                ws = wb.active
                data = list(ws.values)
                headers = data[0]
                table = [dict(zip(headers, row)) for row in data[1:]]
                self.reasoning_trace.append(f"Excel table loaded: {table[:2]}...")
                return table
            elif file_type == 'csv':
                df = pd.read_csv(file_name)
                table = df.to_dict(orient='records')
                self.reasoning_trace.append(f"CSV table loaded: {table[:2]}...")
                return table
            elif file_type == 'json':
                with open(file_name) as f:
                    data = json.load(f)
                self.reasoning_trace.append(f"JSON loaded: {str(data)[:100]}...")
                return data
            elif file_type == 'text':
                with open(file_name) as f:
                    text = f.read()
                self.reasoning_trace.append(f"Text loaded: {text[:100]}...")
                return text
            else:
                self.reasoning_trace.append(f"Unknown file type: {file_name}")
                logger.warning(f"Unknown file type: {file_name}")
                return None
        except Exception as e:
            logger.error(f"analyze_file error: {e}")
            self.reasoning_trace.append(f"Analyze file error: {e}")
            return None
    def smart_tool_select(self, question, file_type=None):
        """Select the best tool(s) for the question, optionally using GPT-4 for planning."""
        api_key = os.environ.get("OPENAI_API_KEY", "")
        try:
            if api_key:
                plan_prompt = f"""
You are an expert AI agent. Given the following question and file type, suggest the best tool(s) to use from this list: {list(self.tools.keys())}.
Question: {question}
File type: {file_type}
Respond with a comma-separated list of tool names only, in order of use. If unsure, start with web_search_duckduckgo.
"""
                plan = gpt4_chat(plan_prompt, api_key=api_key)
                tool_names = [t.strip() for t in plan.split(',') if t.strip() in self.tools]
                if tool_names:
                    return tool_names
        except Exception as e:
            logger.error(f"smart_tool_select planning error: {e}")
        # Fallback: heuristic
        if file_type == 'audio':
            return ['asr_transcribe']
        elif file_type == 'image':
            return ['image_caption']
        elif file_type == 'code':
            return ['code_analysis']
        elif file_type in ['excel', 'csv']:
            return ['table_qa']
        elif 'youtube.com' in question or 'youtu.be' in question:
            return ['youtube_video_qa']
        elif any(w in question.lower() for w in ['wikipedia', 'who', 'when', 'where', 'what', 'how', 'find', 'search']):
            return ['web_search_duckduckgo']
        else:
            return ['llama3_chat']
    def answer_question(self, question_obj):
        """Answer a question using the best tool(s) and context."""
        self.reasoning_trace = []
        q = question_obj["question"]
        file_name = question_obj.get("file_name", "")
        file_content = None
        file_type = None
        if file_name:
            # GAIA file names follow "<task_id>.<ext>", so the stem doubles as the download id.
            file_id = os.path.splitext(file_name)[0]
            local_file = self.download_file(file_id, file_name)
            if local_file:
                file_type = self.detect_file_type(local_file)
                file_content = self.analyze_file(local_file, file_type)
        # Smart tool selection
        tool_names = self.smart_tool_select(q, file_type)
        answer = None
        context = file_content
        for tool_name in tool_names:
            tool = self.tools[tool_name]
            try:
                logger.info(f"Using tool: {tool_name} | Question: {q} | Context: {str(context)[:200]}")
                if tool_name == 'web_search_duckduckgo':
                    context = tool(q)
                    answer = llama3_chat(build_prompt(context, q))
                elif tool_name == 'gpt4_chat':
                    answer = tool(build_prompt(context, q))
                elif tool_name == 'table_qa' and file_content:
                    answer = tool(q, file_content)
                elif tool_name in ['asr_transcribe', 'image_caption', 'code_analysis'] and file_content:
                    answer = tool(file_name)
                elif tool_name == 'youtube_video_qa':
                    # Pass the actual video URL, not the whole question text.
                    import re
                    m = re.search(r"https?://(?:www\.)?(?:youtube\.com|youtu\.be)/\S+", q)
                    answer = tool(m.group(0) if m else q, q)
                else:
                    # Always pass context if available
                    if context:
                        answer = llama3_chat(build_prompt(context, q))
                    else:
                        answer = tool(q)
                if answer:
                    break
            except Exception as e:
                logger.error(f"Tool {tool_name} error: {e}")
                self.reasoning_trace.append(f"Tool {tool_name} error: {e}")
                continue
        self.reasoning_trace.append(f"Tools used: {tool_names}")
        self.reasoning_trace.append(f"Final answer: {answer}")
        return self.format_answer(answer), self.reasoning_trace
    def format_answer(self, answer):
        """Strict GAIA: only the answer, no extra text, no prefix."""
        if isinstance(answer, str):
            return answer.strip().split('\n')[0]
        return str(answer)
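# Usage sketch: answer a single GAIA-style question object directly:
#   agent = ModularGAIAAgent()
#   answer, trace = agent.answer_question(
#       {"task_id": "demo", "question": "What is 2 + 2?", "file_name": ""})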
# --- Basic Agent Definition (now wraps ModularGAIAAgent) ---
class BasicAgent:
    def __init__(self):
        print("BasicAgent (GAIA Modular Agent) initialized.")
        self.agent = ModularGAIAAgent()

    def __call__(self, question: str, file_name: str = "") -> str:
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        try:
            answer, trace = self.agent.answer_question({"task_id": "manual", "question": question, "file_name": file_name})
            print(f"Agent returning answer: {answer}")
            return answer
        except Exception as e:
            print(f"Agent error: {e}")
            return f"AGENT ERROR: {e}"
def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the BasicAgent on them, submits all answers,
    and displays the results.
    """
    space_id = os.getenv("SPACE_ID")

    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    try:
        agent = BasicAgent()
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None

    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)

    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    except requests.exceptions.JSONDecodeError as e:
        print(f"Error decoding JSON response from questions endpoint: {e}")
        print(f"Response text: {response.text[:500]}")
        return f"Error decoding server response for questions: {e}", None
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None
    results_log = []
    answers_payload = []
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        file_name = item.get("file_name", "")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
            submitted_answer = agent(question_text, file_name)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)
print(f"Submitting {len(answers_payload)} answers to: {submit_url}") | |
try: | |
response = requests.post(submit_url, json=submission_data, timeout=60) | |
response.raise_for_status() | |
result_data = response.json() | |
final_status = ( | |
f"Submission Successful!\n" | |
f"User: {result_data.get('username')}\n" | |
f"Overall Score: {result_data.get('score', 'N/A')}% " | |
f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n" | |
f"Message: {result_data.get('message', 'No message received.')}") | |
print("Submission successful.") | |
results_df = pd.DataFrame(results_log) | |
return final_status, results_df | |
except requests.exceptions.HTTPError as e: | |
error_detail = f"Server responded with status {e.response.status_code}." | |
try: | |
error_json = e.response.json() | |
error_detail += f" Detail: {error_json.get('detail', e.response.text)}" | |
except requests.exceptions.JSONDecodeError: | |
error_detail += f" Response: {e.response.text[:500]}" | |
status_message = f"Submission Failed: {error_detail}" | |
print(status_message) | |
results_df = pd.DataFrame(results_log) | |
return status_message, results_df | |
except requests.exceptions.Timeout: | |
status_message = "Submission Failed: The request timed out." | |
print(status_message) | |
results_df = pd.DataFrame(results_log) | |
return status_message, results_df | |
except requests.exceptions.RequestException as e: | |
status_message = f"Submission Failed: Network error - {e}" | |
print(status_message) | |
results_df = pd.DataFrame(results_log) | |
return status_message, results_df | |
except Exception as e: | |
status_message = f"An unexpected error occurred during submission: {e}" | |
print(status_message) | |
results_df = pd.DataFrame(results_log) | |
return status_message, results_df | |
# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown(
        """
        **Instructions:**

        1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc.
        2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
        3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.

        ---
        **Disclaimers:**
        Once you click the "Run Evaluation & Submit All Answers" button, it can take quite some time (this is the time the agent needs to work through all the questions).
        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to avoid the long-running submit button, you could cache the answers and submit them in a separate action, or even answer the questions asynchronously.
        """
    )

    gr.LoginButton()

    run_button = gr.Button("Run Evaluation & Submit All Answers")

    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    # Removed max_rows=10 from DataFrame constructor
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )
if __name__ == "__main__":
    print("\n" + "-"*30 + " App Starting " + "-"*30)
    # Check for SPACE_HOST and SPACE_ID at startup for information
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID")  # Get SPACE_ID at startup

    if space_host_startup:
        print(f"✅ SPACE_HOST found: {space_host_startup}")
        print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
    else:
        print("ℹ️ SPACE_HOST environment variable not found (running locally?).")

    if space_id_startup:  # Print repo URLs if SPACE_ID is found
        print(f"✅ SPACE_ID found: {space_id_startup}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
    else:
        print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    print("-"*(60 + len(" App Starting ")) + "\n")
    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo.launch(debug=True, share=False)