harpreetsahota committed
Commit a563046 · verified · Parent: bdcc619

Update script.py

Files changed (1): script.py (+0, −70)
script.py CHANGED
@@ -93,76 +93,6 @@ def train_model(training_dataset, training_config):
     return best_model
 
 
-def run_inference_on_eval_set(eval_dataset, best_model):
-    """
-    Run inference on the evaluation set using the best trained model.
-
-    Args:
-        eval_dataset (fiftyone.core.dataset.Dataset): The evaluation dataset.
-        best_model (YOLO): The best trained YOLO model.
-
-    Returns:
-        The evaluation dataset with predictions added.
-    """
-    eval_dataset.apply_model(best_model, label_field="predictions")
-    eval_dataset.save()
-    return eval_dataset
-
-
-def eval_model(dataset_to_evaluate):
-    """
-    Evaluate the model on the evaluation dataset.
-
-    Args:
-        dataset_to_evaluate (fiftyone.core.dataset.Dataset): The evaluation dataset.
-
-    Returns:
-        The mean average precision (mAP) of the model on the evaluation dataset.
-    """
-    current_datetime = datetime.now().strftime("%Y%m%d_%H%M%S")
-
-    detection_results = dataset_to_evaluate.evaluate_detections(
-        gt_field="ground_truth",
-        pred_field="predictions",
-        eval_key=f"evalrun_{current_datetime}",
-        compute_mAP=True,
-    )
-
-    return detection_results.mAP()
-
-def run():
-    """
-    Main function to run the entire training and evaluation process.
-
-    Returns:
-        None
-    """
-    os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
-    script_dir = os.path.dirname(os.path.abspath(__file__))
-    config_path = os.path.join(script_dir, 'training_config.yaml')
-    with open(config_path, 'r') as file:
-        training_config = yaml.safe_load(file)
-
-    # train set
-    curated_train_dataset = prepare_dataset(name="Voxel51/Data-Centric-Visual-AI-Challenge-Train-Set")
-
-    # public eval set
-    public_eval_dataset = fouh.load_from_hub("Voxel51/DCVAI-Challenge-Public-Eval-Set")
-
-    N = len(curated_train_dataset)
-
-    best_trained_model = train_model(training_dataset=curated_train_dataset, training_config=training_config)
-
-    model_predictions = run_inference_on_eval_set(eval_dataset=public_eval_dataset, best_model=best_trained_model)
-
-    mAP_on_public_eval_set = eval_model(dataset_to_evaluate=model_predictions)
-
-    adjusted_mAP = (mAP_on_public_eval_set * log(N)) / N
-
-    # TODO: log the score to the leaderboard; for now, just print
-
-    print(f"The adjusted mean Average Precision (mAP) on the public evaluation set is: {adjusted_mAP:.4f}")
-
 
 if __name__=="__main__":
     run()
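
For reference, the removed helpers map onto standard FiftyOne calls. Below is a minimal, self-contained sketch of the same evaluation flow; it swaps in FiftyOne's quickstart zoo dataset (an illustration-only substitute, chosen because it already ships with "ground_truth" and "predictions" detection fields, so no model inference is needed):

import fiftyone.zoo as foz

# The quickstart dataset stands in for the challenge eval set plus model
# output: it already has "ground_truth" and "predictions" detection fields
dataset = foz.load_zoo_dataset("quickstart")

# Same pattern as the removed eval_model(): compute_mAP=True is required
# for results.mAP() to be available afterwards
results = dataset.evaluate_detections(
    "predictions",
    gt_field="ground_truth",
    eval_key="eval_demo",
    compute_mAP=True,
)

print(f"mAP: {results.mAP():.4f}")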
 
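One more note on the removed run(): the score it printed was not raw mAP but mAP * log(N) / N, where N = len(curated_train_dataset). A short sketch of that scoring rule (assuming the script's log is math.log, i.e. the natural logarithm, imported elsewhere in script.py):

from math import log  # assumption: script.py's `log` is math.log

def adjusted_map(raw_map: float, n_train: int) -> float:
    # Scoring rule from the removed run(): mAP * log(N) / N
    return (raw_map * log(n_train)) / n_train

# For a fixed raw mAP, log(N)/N shrinks as N grows, so a smaller,
# better-curated training set earns a higher adjusted score
for n in (100, 1_000, 10_000):
    print(f"N={n:>6}: adjusted mAP = {adjusted_map(0.50, n):.6f}")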