SushantGautam committed on
Commit 8bd97dc · 1 Parent(s): fe6255d

Refactor submission_task1.py to include metric calculations and enhance output data structure; update pyproject.toml for improved dependency management.

medvqa/submission_samples/gi-2025/submission_task1.py CHANGED
@@ -5,13 +5,23 @@ import torch
 import json
 import time
 from tqdm import tqdm
+import subprocess
+import platform
+import sys
+
+from evaluate import load
+
+bleu = load("bleu")
+rouge = load("rouge")
+meteor = load("meteor")
+

 val_dataset = load_dataset("SimulaMet-HOST/Kvasir-VQA")['raw'].select(range(5))
 predictions = []  # List to store predictions

 gpu_name = torch.cuda.get_device_name(
     0) if torch.cuda.is_available() else "cpu"
-device = "CUDA" if torch.cuda.is_available() else "cpu"
+device = "cuda" if torch.cuda.is_available() else "cpu"


 def get_mem(): return torch.cuda.memory_allocated(device) / \
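
The "CUDA" → "cuda" fix in this hunk is more than cosmetic: PyTorch device strings are lowercase and case-sensitive, and `get_mem()` passes `device` straight into `torch.cuda.memory_allocated`. A minimal sketch of the difference; the tensor allocation is illustrative only:

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
x = torch.zeros(8, device=device)   # fine: "cuda" and "cpu" are valid device strings
# torch.zeros(8, device="CUDA")     # RuntimeError: "CUDA" is not a recognized device type
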
@@ -34,10 +44,10 @@ SUBMISSION_INFO = {
     "Country": "Norway",
     "Notes_to_organizers": '''
     eg, We have finetuned XXX model
-    This is optional . .
-    Used data augmentations . .
+    This is optional . .
+    Used data augmentations . .
     Custom info about the model . .
-    Any insights. .
+    Any insights. .
     Any informal things you like to share about this submission.
     '''
 }
@@ -88,14 +98,44 @@ for idx, ex in enumerate(tqdm(val_dataset, desc="Validating")):
 # Ensure all predictions match dataset length
 assert len(predictions) == len(
     val_dataset), "Mismatch between predictions and dataset length"
-# Saves predictions to a JSON file
+
 total_time, final_mem = round(
     time.time() - start_time, 4), round(get_mem() - post_model_mem, 2)
 model_mem_used = round(post_model_mem - initial_mem, 2)

+# Calculates metrics
+references = [[e] for e in val_dataset['answer']]
+preds = [pred['answer'] for pred in predictions]
+
+bleu_result = bleu.compute(predictions=preds, references=references)
+rouge_result = rouge.compute(predictions=preds, references=references)
+meteor_result = meteor.compute(predictions=preds, references=references)
+bleu_score = bleu_result['bleu']
+rouge1_score = float(rouge_result['rouge1'])
+rouge2_score = float(rouge_result['rouge2'])
+rougeL_score = float(rouge_result['rougeL'])
+meteor_score = float(meteor_result['meteor'])
+public_scores = {
+    'bleu': bleu_score,
+    'rouge1': rouge1_score,
+    'rouge2': rouge2_score,
+    'rougeL': rougeL_score,
+    'meteor': meteor_score
+}
+
+# Saves predictions to a JSON file
+
-output_data = {"submission_info": SUBMISSION_INFO,
+output_data = {"submission_info": SUBMISSION_INFO, "public_scores": public_scores,
                "predictions": predictions, "total_time": total_time, "time_per_item": total_time / len(val_dataset),
-               "memory_used_mb": final_mem, "model_memory_mb": model_mem_used, "gpu_name": gpu_name, }
+               "memory_used_mb": final_mem, "model_memory_mb": model_mem_used, "gpu_name": gpu_name,
+               "debug": {
+                   "packages": json.loads(subprocess.check_output([sys.executable, "-m", "pip", "list", "--format=json"])),
+                   "system": {
+                       "python": platform.python_version(),
+                       "os": platform.system(),
+                       "platform": platform.platform(),
+                       "arch": platform.machine()
+                   }}}


 with open("predictions_1.json", "w") as f:
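
The new metric block follows the Hugging Face `evaluate` API: `predictions` is a flat list of strings, while `references` is a list of reference lists (hence the `[e]` wrapping), since BLEU, ROUGE, and METEOR all support multiple references per prediction. A self-contained sketch with toy strings; the answers below are illustrative, not drawn from Kvasir-VQA:

import evaluate

preds = ["a polyp in the sigmoid colon", "no abnormalities"]
references = [["a polyp in the sigmoid colon"], ["no abnormality found"]]

bleu = evaluate.load("bleu")
rouge = evaluate.load("rouge")

# Each compute() returns a dict; the script above reads 'bleu' and the rougeN keys.
print(bleu.compute(predictions=preds, references=references)["bleu"])
print(rouge.compute(predictions=preds, references=references)["rougeL"])
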
 
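The new `debug` entry captures the runtime environment for reproducibility: `pip list --format=json` prints a JSON array of `{"name": ..., "version": ...}` objects, so `json.loads(subprocess.check_output(...))` yields a plain list of dicts that serializes cleanly into the output file. The same capture in isolation (printed values vary by machine):

import json
import platform
import subprocess
import sys

packages = json.loads(subprocess.check_output(
    [sys.executable, "-m", "pip", "list", "--format=json"]))
print(packages[:2])               # e.g. [{'name': 'absl-py', 'version': '2.1.0'}, ...]
print(platform.python_version())  # interpreter version, e.g. '3.11.9'
print(platform.platform())        # full OS/kernel string
print(platform.machine())         # CPU architecture, e.g. 'x86_64'
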
pyproject.toml CHANGED
@@ -1,29 +1,31 @@
 [project]
-name = "medvqa"
-version = "0.14.8"
-description = "Competition Submission CLI for ImageCLEFmed-MedVQA-GI-2025 (https://github.com/simula/ImageCLEFmed-MEDVQA-GI-2025)"
-readme = "README.md"
-requires-python = ">=3.6"
-dependencies = [
-    "huggingface_hub",
-    "huggingface_hub[hf_transfer]",
-    "gradio_client==1.3.0"
-]
 authors = [
-    { name="Sushant Gautam", email="[email protected]" },
+    {name = "Sushant Gautam", email = "[email protected]"},
 ]
 classifiers = [
-    "Programming Language :: Python :: 3",
-    "Operating System :: OS Independent",
+    "Programming Language :: Python :: 3",
+    "Operating System :: OS Independent",
+]
+dependencies = [
+    "huggingface_hub",
+    "huggingface_hub[hf_transfer]",
+    "gradio_client==1.3.0",
+    "evaluate",
+    "rouge_score",
 ]
-license = { text = "MIT" }
+description = "Competition Submission CLI for ImageCLEFmed-MedVQA-GI-2025 (https://github.com/simula/ImageCLEFmed-MEDVQA-GI-2025)"
+license = {text = "MIT"}
+name = "medvqa"
+readme = "README.md"
+requires-python = ">=3.6"
+version = "0.14.8"

 [project.scripts]
 medvqa = "medvqa.cli:main"

 [tool.setuptools.packages.find]
-where = ["."]
 include = ["*", "competitions/**/"]
+where = ["."]

 [project.urls]
 Homepage = "https://github.com/SushantGautam/MedVQA"
 
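The two added dependencies back the new metric code: `evaluate` provides the `load()` entry point, and `rouge_score` is the backend that `evaluate.load("rouge")` imports when the metric is loaded. A quick post-install sanity check, assuming the package was installed with `pip install -e .` or equivalent (note that in current `evaluate` versions the METEOR metric additionally needs `nltk`, which this pyproject does not pin):

import evaluate

# Loading each metric imports its backend lazily, so a missing package
# (e.g. rouge_score for "rouge", nltk for "meteor") fails here rather
# than later at scoring time.
for name in ("bleu", "rouge", "meteor"):
    metric = evaluate.load(name)
    print(name, "loaded:", type(metric).__name__)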