SushantGautam committed on
Commit
ba8c7e3
·
1 Parent(s): 1507e7a

add submission sample

medvqa/submission_samples/gi-2025/submission_task1.py ADDED
@@ -0,0 +1,106 @@
from transformers import AutoModelForCausalLM, AutoProcessor
from datasets import load_dataset
import torch
import json
import time
from tqdm import tqdm

val_dataset = load_dataset("SimulaMet-HOST/Kvasir-VQA")['raw'].select(range(5))
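# Note: .select(range(5)) keeps only 5 examples for a quick local check.
# Assumption-based tip, not part of the official sample: dropping .select(...)
# would evaluate the full 'raw' split, e.g.
# val_dataset = load_dataset("SimulaMet-HOST/Kvasir-VQA")['raw']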
predictions = []  # list to store predictions

gpu_name = torch.cuda.get_device_name(0) if torch.cuda.is_available() else "cpu"
device = "cuda" if torch.cuda.is_available() else "cpu"


def get_mem():
    # Current GPU memory allocation in MB (0 when running on CPU)
    return torch.cuda.memory_allocated(device) / (1024 ** 2) if torch.cuda.is_available() else 0


initial_mem = get_mem()

# ✏️✏️-------- EDIT SECTION 1: SUBMISSION DETAILS and MODEL LOADING --------✏️✏️#

SUBMISSION_INFO = {
    # 🔹 TODO: PARTICIPANTS MUST ADD PROPER SUBMISSION INFO FOR THE SUBMISSION 🔹
    # This will be visible to the organizers.
    # Don't change the keys; only fill in your info.
    "Participant_Names": "Sushant Gautam, Steven Hicks and Vajira Thambawita",
    "Affiliations": "SimulaMet",
    "Contact_emails": ["[email protected]", "[email protected]"],
    # Only the first email will be used for correspondence.
    "Team_Name": "SimulaMetmedVQA Rangers",
    "Country": "Norway",
    "Notes_to_organizers": '''
        e.g., We have fine-tuned the XXX model.
        This note is optional.
        Used data augmentations ...
        Custom info about the model ...
        Any insights ...
        + Any informal things you would like to share about this submission.
    '''
}
# 🔹 TODO: PARTICIPANTS MUST LOAD THEIR MODEL HERE; EDIT AS NECESSARY FOR YOUR MODEL 🔹
# You can add any necessary library imports here.

model_hf = AutoModelForCausalLM.from_pretrained(
    "SushantGautam/Florence-2-vqa-demo", trust_remote_code=True).to(device)
processor = AutoProcessor.from_pretrained(
    "microsoft/Florence-2-base-ft", trust_remote_code=True)

model_hf.eval()  # ensure the model is in evaluation mode
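# A minimal sketch of how a participant might swap in their own checkpoint
# (assumptions: "your-org/your-vqa-model" is a placeholder repo id and the
# model supports fp16 weights); commented out so the sample stays runnable:
# model_hf = AutoModelForCausalLM.from_pretrained(
#     "your-org/your-vqa-model", trust_remote_code=True,
#     torch_dtype=torch.float16).to(device)
# processor = AutoProcessor.from_pretrained(
#     "your-org/your-vqa-model", trust_remote_code=True)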
# 🏁---------------- END SUBMISSION DETAILS and MODEL LOADING -----------------🏁#

start_time, post_model_mem = time.time(), get_mem()
model_mem_used = round(post_model_mem - initial_mem, 2)

for idx, ex in enumerate(tqdm(val_dataset, desc="Validating")):
    question = ex["question"]
    image = ex["image"].convert("RGB") if ex["image"].mode != "RGB" else ex["image"]
    # You have access to the 'question' and 'image' variables for each example.

    # ✏️✏️___________ EDIT SECTION 2: ANSWER GENERATION ___________✏️✏️#
    # 🔹 TODO: PARTICIPANTS CAN MODIFY THIS TOKENIZATION STEP IF NEEDED 🔹
    inputs = processor(text=[question], images=[image],
                       return_tensors="pt", padding=True)
    inputs = {k: v.to(device) for k, v in inputs.items()
              if k not in ['labels', 'attention_mask']}
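    # A hypothetical tokenization tweak (assumption: the model prefers an
    # instruction-style prompt; the template below is illustrative only):
    # prompt = f"Question: {question} Answer:"
    # inputs = processor(text=[prompt], images=[image],
    #                    return_tensors="pt", padding=True)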

    # 🔹 TODO: PARTICIPANTS CAN MODIFY THE GENERATION AND DECODING METHOD HERE 🔹
    with torch.no_grad():
        output = model_hf.generate(**inputs)
    answer = processor.tokenizer.decode(output[0], skip_special_tokens=True)
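    # A hedged example of customizing decoding (values are assumptions, not
    # challenge requirements): beam search with a cap on generated tokens,
    # using standard transformers generate() kwargs:
    # output = model_hf.generate(**inputs, max_new_tokens=64, num_beams=3)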
    # Make sure the 'answer' variable holds the answer (sentence/word) as a str.
    # 🏁________________ END ANSWER GENERATION ________________🏁#

    # ⛔ DO NOT EDIT any lines below from here; edit only up to the decoding step above as required. ⛔
    # Ensure the answer is a string
    assert isinstance(answer, str), f"Generated answer at index {idx} is not a string"
    # Append the prediction
    predictions.append(
        {"index": idx, "img_id": ex["img_id"], "question": ex["question"], "answer": answer})

# Ensure all predictions match the dataset length
assert len(predictions) == len(val_dataset), "Mismatch between predictions and dataset length"
total_time, final_mem = round(time.time() - start_time, 4), round(get_mem() - post_model_mem, 2)

output_data = {"submission_info": SUBMISSION_INFO,
               "predictions": predictions, "total_time": total_time,
               "time_per_item": total_time / len(val_dataset),
               "memory_used_mb": final_mem, "model_memory_mb": model_mem_used,
               "gpu_name": gpu_name}

# Save predictions and run metadata to a JSON file
with open("predictions_1.json", "w") as f:
    json.dump(output_data, f, indent=4)
print(f"Time: {total_time}s | Mem: {final_mem}MB | Model Load Mem: {model_mem_used}MB | GPU: {gpu_name}")
print("✅ Script looks good! Generation completed successfully. Results saved to 'predictions_1.json'.")
print("Next steps:\n 1) Upload this submission_task1.py script file to a HuggingFace model repository.")
print('''\n 2) Make a submission to the competition:\n   Run:: medvqa validate_and_submit --competition=gi-2025 --task=1 --repo_id=...''')
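# Tip (an assumption, not part of the original sample): run this script locally
# first, e.g. `python submission_task1.py`, and inspect predictions_1.json
# before uploading it to your model repository.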