# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
import os
from typing import Optional

import datasets
import evaluate
import wandb
from seametrics.user_friendly.utils import calculate_from_payload

_CITATION = """\
@InProceedings{huggingface:module,
title = {A great new module},
authors={huggingface, Inc.},
year={2020}
}
@article{milan2016mot16,
title={MOT16: A benchmark for multi-object tracking},
author={Milan, Anton and Leal-Taix{\\'e}, Laura and Reid, Ian and Roth, Stefan and Schindler, Konrad},
journal={arXiv preprint arXiv:1603.00831},
year={2016}
}
"""

_DESCRIPTION = """\
The MOT Metrics module is designed to evaluate multi-object tracking (MOT)
algorithms by computing various metrics based on predicted and ground truth bounding
boxes. It serves as a crucial tool in assessing the performance of MOT systems,
aiding in the iterative improvement of tracking algorithms."""

_KWARGS_DESCRIPTION = """
Computes multi-object tracking scores from a payload of predicted and ground-truth bounding boxes.
Args:
    payload: payload holding the ground-truth and predicted bounding boxes, in the
        format expected by `seametrics.user_friendly.utils.calculate_from_payload`.
    max_iou (`float`, *optional*):
        Minimum Intersection over Union (IoU) threshold to consider a detection as a
        true positive. Defaults to 0.5.
    filters (`dict`, *optional*):
        Filters forwarded to `calculate_from_payload`. Defaults to no filtering.
    recognition_thresholds (`list` of `float`, *optional*):
        Recognition thresholds forwarded to `calculate_from_payload`.
        Defaults to [0.3, 0.5, 0.8].
    debug (`bool`, *optional*):
        If True, prints additional diagnostic output. Defaults to False.
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UserFriendlyMetrics(evaluate.Metric):
    """User-friendly multi-object tracking (MOT) metrics computed from predicted and
    ground-truth bounding boxes."""

def _info(self):
# TODO: Specifies the evaluate.EvaluationModuleInfo object
return evaluate.MetricInfo(
# This is the description that will appear on the modules page.
module_type="metric",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
# This defines the format of each prediction and reference
features=datasets.Features(
{
"predictions": datasets.Sequence(
datasets.Sequence(datasets.Value("float"))
),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("float"))
),
}
),
# Additional links to the codebase or references
codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
reference_urls=["http://path.to.reference.url/new_module"],
        )

    def _download_and_prepare(self, dl_manager):
"""Optional: download external resources useful to compute the scores"""
# TODO: Download external resources if needed
pass

    def _compute(
        self,
        payload,
        max_iou: float = 0.5,
        filters=None,
        recognition_thresholds=None,
        debug: bool = False,
    ):
        """Returns the tracking scores computed by `calculate_from_payload`."""
        # Avoid mutable default arguments; fall back to the documented defaults.
        if filters is None:
            filters = {}
        if recognition_thresholds is None:
            recognition_thresholds = [0.3, 0.5, 0.8]
        return calculate_from_payload(
            payload, max_iou, filters, recognition_thresholds, debug
        )
# return calculate(predictions, references, max_iou)

    def wandb(
        self,
        results,
        wandb_section: Optional[str] = None,
        wandb_project: str = "user_friendly_metrics",
        log_plots: bool = True,
        debug: bool = False,
    ):
"""
Logs metrics to Weights and Biases (wandb) for tracking and visualization, including categorized bar charts for global metrics.
Args:
results (dict): Results dictionary with 'global' and 'per_sequence' keys.
wandb_section (str, optional): W&B section for metric grouping. Defaults to None.
wandb_project (str, optional): The name of the wandb project. Defaults to 'user_friendly_metrics'.
            log_plots (bool, optional): If True, logs categorized bar charts for the global metrics. Defaults to True.
            debug (bool, optional): If True, prints detailed logging information to the console and disables silent wandb output. Defaults to False.
"""
current_datetime = datetime.datetime.now()
formatted_datetime = current_datetime.strftime("%Y-%m-%d_%H-%M-%S")
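        # Assumes the WANDB_API_KEY environment variable is set; if it is unset, `key`
        # is None and wandb falls back to its regular login flow (cached credentials or
        # an interactive prompt).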
wandb.login(key=os.getenv("WANDB_API_KEY"))
run = wandb.init(
project=wandb_project,
name=f"evaluation-{formatted_datetime}",
reinit=True,
settings=wandb.Settings(silent=not debug),
)
categories = {
"user_friendly_metrics": {
"mostly_tracked_score_0.3",
"mostly_tracked_score_0.5",
"mostly_tracked_score_0.8",
},
"evaluation_metrics_dev": {
"f1",
"recall",
"precision",
},
"user_friendly_metrics_dev": {
"mostly_tracked_count_0.3",
"mostly_tracked_count_0.5",
"mostly_tracked_count_0.8",
"unique_obj_count",
},
"predictions_summary": {
"fp",
"tp",
"fn",
},
}
chart_data = {key: [] for key in categories.keys()}
# Log global metrics
if "global" in results:
for global_key, global_metrics in results["global"].items():
for metric, value in global_metrics["all"].items():
log_key = (
f"{wandb_section}/global/{global_key}/{metric}"
if wandb_section
else f"global/{global_key}/{metric}"
)
run.log({log_key: value})
if debug:
print(f" {log_key} = {value}")
for category, metrics in categories.items():
if metric in metrics:
chart_data[category].append([metric, value])
print("----------------------------------------------------")
if log_plots:
for category, data in chart_data.items():
if data:
table_data = [[label, value] for label, value in data]
table = wandb.Table(data=table_data, columns=["metrics", "value"])
run.log(
{
f"{category}_bar_chart": wandb.plot.bar(
table,
"metrics",
"value",
title=f"{category.replace('_', ' ').title()}",
)
}
)
if "per_sequence" in results:
sorted_sequences = sorted(
results["per_sequence"].items(),
key=lambda x: next(iter(x[1].values()), {}).get("all", {}).get("f1", 0),
reverse=True, # Set to True for descending order
)
for sequence_name, sequence_data in sorted_sequences:
for seq_key, seq_metrics in sequence_data.items():
for metric, value in seq_metrics["all"].items():
log_key = (
f"{wandb_section}/per_sequence/{sequence_name}/{seq_key}/{metric}"
if wandb_section
else f"per_sequence/{sequence_name}/{seq_key}/{metric}"
)
run.log({log_key: value})
if debug:
print(f" {log_key} = {value}")
print("----------------------------------------------------")
if debug:
print("\nDebug Mode: Logging Summary and History")
print(f"Results Summary:\n{results}")
print(f"WandB Settings:\n{run.settings}")
print("All metrics have been logged.")
run.finish()
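
# Illustrative sketch of the `results` structure that `wandb()` traverses above. The
# model names, sequence names, and metric values are examples (assumptions), not an
# exhaustive schema:
#
#     results = {
#         "global": {
#             "model_a": {"all": {"f1": 0.80, "recall": 0.75, "precision": 0.86}},
#         },
#         "per_sequence": {
#             "sequence_01": {"model_a": {"all": {"f1": 0.90, "recall": 0.88, "precision": 0.92}}},
#         },
#     }
#     UserFriendlyMetrics().wandb(results, wandb_section="eval")  # hypothetical call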