SushantGautam committed on
Commit
1ba23a3
Β·
1 Parent(s): be00b48

Add challenge evaluation support by patching submission file and processing predictions

Browse files
medvqa/competitions/gi-2025/task_1.py CHANGED
@@ -8,6 +8,7 @@ from datetime import datetime, timezone
8
  import shutil # Add this import
9
  import json
10
  from huggingface_hub import HfApi, grant_access
 
11
 
12
  HF_GATE_ACESSLIST = ["SushantGautam",
13
  "stevenah", "vlbthambawita"]
@@ -68,6 +69,29 @@ if os.path.isfile(os.path.join(snap_dir, "requirements.txt")):
68
  sp.run(["python", "-m", "pip", "install", "-q", "-r",
69
  f"{snap_dir}/requirements.txt"], cwd=snap_dir, check=True)
70
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
  sp.run(["python", f"{snap_dir}/{submission_file}"],
72
  cwd=snap_dir, check=True)
73
  print(
@@ -113,3 +137,26 @@ else:
113
  print(result)
114
  print("Visit this URL to see the entry: πŸ‘‡")
115
  Client("SimulaMet/medvqa")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
  import shutil # Add this import
9
  import json
10
  from huggingface_hub import HfApi, grant_access
11
+ import re
12
 
13
  HF_GATE_ACESSLIST = ["SushantGautam",
14
  "stevenah", "vlbthambawita"]
 
69
  sp.run(["python", "-m", "pip", "install", "-q", "-r",
70
  f"{snap_dir}/requirements.txt"], cwd=snap_dir, check=True)
71
 
72
+
73
# Challenge-evaluation mode: rewrite the submission so it loads the private
# test split instead of the public validation split, then run the patched copy.
if os.environ.get("_MEDVQA_CHALLENGE_EVALUATE_FLAG_", "FALSE") == "TRUE":
    # Derive the patched file name and absolute paths inside the snapshot dir.
    challenge_file = submission_file.replace(".py", "_challenge.py")
    submission_path = os.path.join(snap_dir, submission_file)
    challenge_path = os.path.join(snap_dir, challenge_file)
    with open(submission_path, "r", encoding="utf-8") as f:
        code = f.read()
    # Match the expected dataset-loading line, tolerating arbitrary whitespace
    # and either quote style.
    pattern = r'val_dataset\s*=\s*load_dataset\(\s*["\']SimulaMet/Kvasir-VQA-test["\']\s*,\s*split\s*=\s*["\']validation["\']\s*\)'
    new_line = 'val_dataset = load_dataset("SimulaMet/Kvasir-VQA-private", split="test")'
    if re.search(pattern, code):
        code = re.sub(pattern, new_line, code)
        with open(challenge_path, "w", encoding="utf-8") as f:
            f.write(code)
        # Downstream `sp.run` executes `submission_file`, so point it at the patch.
        submission_file = challenge_file
        print(f"🔄 Challenge file created at: {challenge_path}")
    else:
        print("⚠️ Challenge patch not applied: expected line not found in submission file.")
        # BUGFIX: original called os.exit(), which does not exist (AttributeError).
        # raise SystemExit is equivalent to sys.exit() and needs no extra import.
        raise SystemExit(
            "Please check the submission file for compatibility with challenge evaluation.")
93
+
94
+
95
  sp.run(["python", f"{snap_dir}/{submission_file}"],
96
  cwd=snap_dir, check=True)
97
  print(
 
137
  print(result)
138
  print("Visit this URL to see the entry: πŸ‘‡")
139
  Client("SimulaMet/medvqa")
140
+
141
+
142
# Challenge-evaluation mode: post-process the produced predictions_1.json —
# drop the debug payload, rename the score key, and save a per-team copy
# into the current working directory.
if os.environ.get("_MEDVQA_CHALLENGE_EVALUATE_FLAG_", "FALSE") == "TRUE":
    src_json = os.path.join(snap_dir, "predictions_1.json")
    if not os.path.isfile(src_json):
        print("❌ predictions_1.json not found in snapshot directory!")
    else:
        with open(src_json, "r", encoding="utf-8") as f:
            data = json.load(f)
        # Strip internal debug information before publishing.
        data.pop("debug", None)
        # Expose public scores under the challenge-specific key.
        if "public_scores" in data:
            data["challenge_scores"] = data.pop("public_scores")
        # Build a filesystem-safe file name from the team name in the metadata.
        submission_info = data.get("submission_info", {})
        team_name = submission_info.get("Team_Name", "unknown_team")
        team_name_safe = re.sub(r'[^a-zA-Z0-9_\-]', '_', team_name)
        out_json = os.path.join(os.getcwd(), f"task1_{team_name_safe}.json")
        with open(out_json, "w", encoding="utf-8") as f:
            json.dump(data, f, ensure_ascii=False, indent=2)
        print(f"✅ Copied and processed predictions to: {out_json}")
# === End: Post-processing predictions_1.json ===