# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random

import datasets
import evaluate

from seametrics.user_friendly.utils import calculate_from_payload
_CITATION = """\
@InProceedings{huggingface:module,
title = {A great new module},
authors={huggingface, Inc.},
year={2020}
}
@article{milan2016mot16,
title={MOT16: A benchmark for multi-object tracking},
author={Milan, Anton and Leal-Taix{\\'e}, Laura and Reid, Ian and Roth, Stefan and Schindler, Konrad},
journal={arXiv preprint arXiv:1603.00831},
year={2016}
}
"""

_DESCRIPTION = """\
The MOT Metrics module is designed to evaluate multi-object tracking (MOT)
algorithms by computing various metrics based on predicted and ground truth bounding
boxes. It serves as a crucial tool in assessing the performance of MOT systems,
aiding in the iterative improvement of tracking algorithms."""

_KWARGS_DESCRIPTION = """
Calculates how good the predictions are compared to some references, using several scores.
Args:
    predictions: list of predictions to score. Each prediction is a sequence
        of bounding boxes, with each box given as a sequence of floats.
    references: list of references, one per prediction. Each reference is a
        sequence of bounding boxes, with each box given as a sequence of floats.
    max_iou (`float`, *optional*):
        If specified, the minimum Intersection over Union (IoU) required to
        count a detection as a true positive. Defaults to 0.5.
"""


class UserFriendlyMetrics(evaluate.Metric):
    """User-friendly evaluation module for multi-object tracking (MOT) predictions."""

    def _info(self):
        # Specifies the evaluate.EvaluationModuleInfo object for this module.
        return evaluate.MetricInfo(
            # This is the description that will appear on the modules page.
            module_type="metric",
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            # This defines the format of each prediction and reference.
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(
                        datasets.Sequence(datasets.Value("float"))
                    ),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("float"))
                    ),
                }
            ),
            # Additional links to the codebase or references.
            codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
            reference_urls=["http://path.to.reference.url/new_module"],
        )

    def _download_and_prepare(self, dl_manager):
        """Optional: download external resources useful to compute the scores."""
        # TODO: Download external resources if needed
        pass
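
    # Sketch (an assumption, not part of the original script): evaluate.Metric
    # subclasses normally implement `_compute`, which `.compute()` calls after
    # batching inputs; without it, calling `.compute()` raises
    # NotImplementedError. This placeholder returns the same dummy structure
    # used elsewhere in this module so the standard API stays usable while the
    # real metric computation is being developed.
    def _compute(self, predictions, references, max_iou=0.5):
        """Returns placeholder scores in the documented output format."""
        return self.dummy_values()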

    def compute_from_payload(
        self,
        payload,
        area_ranges_tuples=None,  # optional mapping of area-range names to ranges
    ):
        """
        Compute the metric from the payload.

        Args:
            payload (Payload): The payload to compute the metric from.
            area_ranges_tuples (dict, *optional*): Area ranges to evaluate,
                mapping a range name to its definition.

        Returns:
            dict: The computed metric results with the following format:
                {
                    "model_name": {
                        "overall": {
                            "all": {"tp": ..., "fp": ..., "fn": ..., "f1": ...},
                            ...  # more area ranges
                        },
                        "per_sequence": {
                            "sequence_name": {
                                "all": {...},
                                ...  # more area ranges
                            },
                            ...  # more sequences
                        },
                    },
                    ...  # more models
                }

        Note:
            - If the metric does not support area ranges, the results should be
              stored under the `all` key.
            - If area ranges are provided, only those ranges appear in the
              output; if area_ranges_tuples is None, all area ranges are shown.
        """
        return self.dummy_values(area_ranges_tuples)

    def dummy_values(self, area_ranges_tuples=None):
        """Dummy random values in the expected format that all new metrics need to return."""
        # Use default ranges if none are provided; otherwise keep only the
        # named ranges that actually define a range. `area_ranges_tuples` is
        # expected to map a range name to a dict with a "range" entry. A list
        # (rather than a set) preserves the caller's ordering.
        if area_ranges_tuples is None:
            area_names = ["all", "small", "medium", "large"]
        else:
            area_names = [
                key
                for key, value in area_ranges_tuples.items()
                if value["range"] is not None
            ]

        # Generate random dummy values
        def generate_random_values():
            return {
                "tp": random.randint(0, 100),  # random integer between 0 and 100
                "fp": random.randint(0, 50),  # random integer between 0 and 50
                "fn": random.randint(0, 50),  # random integer between 0 and 50
                "precision": round(random.uniform(0.5, 1.0), 2),  # random float between 0.5 and 1.0
                "recall": round(random.uniform(0.5, 1.0), 2),  # random float between 0.5 and 1.0
                "f1": round(random.uniform(0.5, 1.0), 2),  # random float between 0.5 and 1.0
            }

        # Initialize output structure
        dummy_output = {"model_1": {"overall": {}, "per_sequence": {"sequence_1": {}}}}

        # Populate only the specified area ranges with random values
        for area_name in area_names:
            dummy_output["model_1"]["overall"][area_name] = generate_random_values()
            dummy_output["model_1"]["per_sequence"]["sequence_1"][
                area_name
            ] = generate_random_values()

        return dummy_output
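

# Minimal smoke test (a sketch, not part of the original script): instantiate
# the module directly and print the dummy output. `payload` is ignored by the
# placeholder implementation above, so None is enough here; a real run would
# pass a seametrics Payload object instead.
if __name__ == "__main__":
    metric = UserFriendlyMetrics()
    results = metric.compute_from_payload(payload=None)
    print(results)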