# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import random

import datasets
import evaluate
import wandb
from seametrics.user_friendly.utils import calculate_from_payload

_CITATION = """\
@InProceedings{huggingface:module,
title = {A great new module},
authors={huggingface, Inc.},
year={2020}
}

@article{milan2016mot16,
title={MOT16: A benchmark for multi-object tracking},
author={Milan, Anton and Leal-Taix{\'e}, Laura and Reid, Ian and Roth, Stefan and Schindler, Konrad},
journal={arXiv preprint arXiv:1603.00831},
year={2016}
}
"""
_DESCRIPTION = """\
The MOT Metrics module is designed to evaluate multi-object tracking (MOT)
algorithms by computing various metrics based on predicted and ground truth bounding
boxes. It serves as a crucial tool in assessing the performance of MOT systems,
aiding in the iterative improvement of tracking algorithms."""
_KWARGS_DESCRIPTION = """
Computes multi-object tracking metrics from predictions and references.
Args:
    predictions: list of predictions to score. Each prediction is a sequence of
        bounding boxes, with each box given as a sequence of floats.
    references: list of references, one per prediction. Each reference is a
        sequence of ground-truth bounding boxes in the same float format.
    max_iou (`float`, *optional*):
        The minimum Intersection over Union (IoU) threshold required to count a
        detection as a true positive. Defaults to 0.5.
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UserFriendlyMetrics(evaluate.Metric):
    """Evaluates multi-object tracking (MOT) predictions against references and reports user-friendly metrics."""
def _info(self):
        # Returns the evaluate.MetricInfo object that describes this module
return evaluate.MetricInfo(
# This is the description that will appear on the modules page.
module_type="metric",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
# This defines the format of each prediction and reference
features=datasets.Features(
{
"predictions": datasets.Sequence(
datasets.Sequence(datasets.Value("float"))
),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("float"))
),
}
),
# Additional links to the codebase or references
codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
reference_urls=["http://path.to.reference.url/new_module"],
)
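
    # Illustrative sketch (an assumption for illustration, not part of the module's
    # API) of the input shapes implied by the feature schema above: each element of
    # `predictions` and `references` is a sequence of boxes, and each box is a
    # sequence of floats, e.g. an [x1, y1, x2, y2] bounding box:
    #
    #     predictions = [[[10.0, 20.0, 50.0, 80.0], [30.0, 35.0, 60.0, 90.0]]]
    #     references  = [[[11.0, 21.0, 51.0, 81.0]]]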
def _download_and_prepare(self, dl_manager):
"""Optional: download external resources useful to compute the scores"""
# TODO: Download external resources if needed
pass
    def compute_from_payload(
        self,
        payload,
        max_iou: float = 0.5,
        filters=None,
        recognition_thresholds=None,
        area_ranges_tuples=None,  # optional list of (name, [min_area, max_area]) pairs
        debug: bool = False,
    ):
        """
        Call the required functions to compute the metrics and return them.

        Returns:
            dict: A dictionary containing the computed metrics for each area
                range provided in area_ranges_tuples.
        """
        # Avoid mutable default arguments; keep the original default values.
        if filters is None:
            filters = {}
        if recognition_thresholds is None:
            recognition_thresholds = [0.3, 0.5, 0.8]
        # Placeholder implementation: returns dummy values for the requested ranges.
        return self.dummy_values(area_ranges_tuples)
    def dummy_values(self, area_ranges_tuples=None):
        """Return dummy random values in the expected format that all new metrics need to return."""
# Use default ranges if none are provided
if area_ranges_tuples is None:
area_ranges_tuples = [
("all", [0, 1e5**2]),
("small", [0**2, 6**2]),
("medium", [6**2, 12**2]),
("large", [12**2, 1e5**2]),
]
# Generate random dummy values
def generate_random_values():
return {
"tp": random.randint(0, 100), # Random integer between 0 and 100
"fp": random.randint(0, 50), # Random integer between 0 and 50
"fn": random.randint(0, 50), # Random integer between 0 and 50
"precision": round(random.uniform(0.5, 1.0), 2), # Random float between 0.5 and 1.0
"recall": round(random.uniform(0.5, 1.0), 2), # Random float between 0.5 and 1.0
"f1": round(random.uniform(0.5, 1.0), 2) # Random float between 0.5 and 1.0
}
# Initialize output structure
dummy_output = {
"model_1": {
"overall": {},
"per_sequence": {
"sequence_1": {}
}
}
}
# Populate only the ranges specified in area_ranges_tuples with random values
for range_name, _ in area_ranges_tuples:
dummy_output["model_1"]["overall"][range_name] = generate_random_values()
dummy_output["model_1"]["per_sequence"]["sequence_1"][range_name] = generate_random_values()
return dummy_output
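

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only). Metrics on the Hub are typically loaded
# with `evaluate.load("<repo-id>")`; the direct instantiation below is an
# assumption made purely to demonstrate the dummy output format. `payload` is
# passed as None because the placeholder implementation above ignores it.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    metric = UserFriendlyMetrics()
    results = metric.compute_from_payload(
        payload=None,
        max_iou=0.5,
        area_ranges_tuples=[("all", [0, 1e5**2]), ("small", [0, 36])],
    )
    # Expected top-level shape:
    # {"model_1": {"overall": {...}, "per_sequence": {"sequence_1": {...}}}}
    print(results)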