SushantGautam committed
Commit e09eeb5 · 1 Parent(s): 1ba23a3

Add challenge evaluation support by patching submission file and processing predictions

medvqa/competitions/gi-2025/task_2.py CHANGED
@@ -1,5 +1,5 @@
  from gradio_client import Client, handle_file
- from huggingface_hub import snapshot_download, login, whoami
+ from huggingface_hub import snapshot_download, login, whoami, hf_hub_download
  import argparse
  import os
  import subprocess as sp
@@ -8,6 +8,7 @@ from datetime import datetime, timezone
  import shutil # Add this import
  import json
  from huggingface_hub import HfApi, grant_access
+ import re
 
  HF_GATE_ACESSLIST = ["SushantGautam",
                       "stevenah", "vlbthambawita"]
@@ -69,6 +70,50 @@ if os.path.isfile(os.path.join(snap_dir, "requirements.txt")):
      sp.run(["python", "-m", "pip", "install", "-q", "-r",
              f"{snap_dir}/requirements.txt"], cwd=snap_dir, check=True)
 
+ if os.environ.get("_MEDVQA_CHALLENGE_EVALUATE_FLAG_", "FALSE") == "TRUE":
+     # Patch submission file for challenge evaluation
+     challenge_file = submission_file.replace(".py", "_challenge.py")
+     submission_path = os.path.join(snap_dir, submission_file)
+     challenge_path = os.path.join(snap_dir, challenge_file)
+     with open(submission_path, "r", encoding="utf-8") as f:
+         code = f.read()
+     # Replace only the dataset string
+     if "SimulaMet/Kvasir-VQA-test" in code:
+         code = code.replace("SimulaMet/Kvasir-VQA-test",
+                             "SimulaMet/Kvasir-VQA-private")
+         # Comment out specific lines
+         lines = code.splitlines()
+         for i, line in enumerate(lines):
+             if ("huggingface.co/datasets" in line or
+                     re.search(r'^\s*prompt_to_real\s*=', line) or
+                     re.search(r'^\s*jsons__\s*=', line)):
+                 if not line.lstrip().startswith("#"):
+                     leading_ws = len(line) - len(line.lstrip())
+                     lines[i] = line[:leading_ws] + "# " + line[leading_ws:]
+         # Insert new code block after 'import requests'
+         for i, line in enumerate(lines):
+             if "import requests" in line:
+                 insert_idx = i + 1
+                 break
+         else:
+             insert_idx = None
+         new_block = [
+             'prompt_to_real = json.load(open(hf_hub_download("SimulaMet/Kvasir-VQA-private", "real_mapping", repo_type="dataset")))',
+             'jsons__ = json.load(open(hf_hub_download("SimulaMet/Kvasir-VQA-private", "imagen-test", repo_type="dataset")))',
+         ]
+         if insert_idx is not None:
+             lines[insert_idx:insert_idx] = new_block
+         code = "\n".join(lines)
+         with open(challenge_path, "w", encoding="utf-8") as f:
+             f.write(code)
+         submission_file = challenge_file
+         print(f"🔄 Challenge file created at: {challenge_path}")
+     else:
+         print(
+             "⚠️ Challenge patch not applied: expected string not found in submission file.")
+         raise SystemExit(
+             "Please check the submission file for compatibility with challenge evaluation.")
+ 
  print("🔍 Starting your script and loading submission details...")
  sp.run(["python", f"{snap_dir}/{submission_file}"],
         cwd=snap_dir, check=True)
@@ -115,3 +160,25 @@ else:
  print(result)
  print("Visit this URL to see the entry: 👇")
  Client("SimulaMet/medvqa")
+ 
+ if os.environ.get("_MEDVQA_CHALLENGE_EVALUATE_FLAG_", "FALSE") == "TRUE":
+     src_json = os.path.join(snap_dir, "predictions_2.json")
+     if os.path.isfile(src_json):
+         with open(src_json, "r", encoding="utf-8") as f:
+             data = json.load(f)
+         # Remove 'debug' key if present
+         data.pop("debug", None)
+         # Rename 'public_scores' to 'challenge_scores' if present
+         if "public_scores" in data:
+             data["challenge_scores"] = data.pop("public_scores")
+         # Get Team_Name from submission_info
+         team_name = data.get("submission_info", {}).get(
+             "Team_Name", "unknown_team")
+         team_name_safe = re.sub(r'[^a-zA-Z0-9_\-]', '_', team_name)
+         out_json = os.path.join(os.getcwd(), f"task2_{team_name_safe}.json")
+         with open(out_json, "w", encoding="utf-8") as f:
+             json.dump(data, f, ensure_ascii=False, indent=2)
+         print(f"✅ Copied and processed predictions to: {out_json}")
+     else:
+         print("❌ predictions_2.json not found in snapshot directory!")
+     # === End: Post-processing predictions_2.json ===
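
For context, a minimal sketch of how this challenge-evaluation path might be driven end to end. The `_MEDVQA_CHALLENGE_EVALUATE_FLAG_` variable and the `task2_<team>.json` output come from the diff above; the direct `python task_2.py` invocation with no arguments is an assumption for illustration, since the script's argparse options are not shown in this commit.

import json
import os
import subprocess

# The flag is inherited by the child process, so task_2.py takes the
# challenge branch: it patches the submission onto the private dataset,
# runs it, and post-processes the predictions.
env = dict(os.environ, _MEDVQA_CHALLENGE_EVALUATE_FLAG_="TRUE")

# Hypothetical invocation; the script's real CLI arguments are not shown here.
subprocess.run(["python", "task_2.py"], env=env, check=True)

# On success, the post-processing step writes task2_<Team_Name>.json
# (team name sanitized to [a-zA-Z0-9_-]) into the current directory,
# with 'debug' removed and 'public_scores' renamed to 'challenge_scores'.
for name in os.listdir("."):
    if name.startswith("task2_") and name.endswith(".json"):
        with open(name, encoding="utf-8") as f:
            print(name, "->", sorted(json.load(f)))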