horizon-metrics / horizonmetrics.py
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: Add a description here."""
import evaluate
import datasets
import numpy as np
# Utility functions from seametrics.horizon.utils are inlined below ("begin utils" / "end utils")
# TODO: Add BibTeX citation
_CITATION = """\
@InProceedings{huggingface:module,
title = {A great new module},
authors={huggingface, Inc.},
year={2020}
}
"""
_DESCRIPTION = """\
Horizon metrics for evaluating horizon-line predictions: compares predicted
horizon lines against annotated ground truth and reports slope and midpoint
error statistics across a sequence of frames.
"""
_KWARGS_DESCRIPTION = """
Calculates horizon prediction errors given predicted and annotated horizon lines.
Args:
    predictions: list of predicted horizons. Each horizon is a list of two
        points [[x1, y1], [x2, y2]], with x and y normalized to [0, 1].
    references: list of annotated horizons, one per prediction, in the same
        format as predictions.
Returns:
    average_slope_error: mean absolute slope difference,
    average_midpoint_error: mean absolute midpoint-y difference,
    stddev_slope_error / stddev_midpoint_error: standard deviations of the errors,
    max_slope_error / max_midpoint_error: maximum errors,
    num_slope_error_jumps / num_midpoint_error_jumps: number of frame-to-frame
        error changes above the configured jump thresholds.
Examples:
    >>> horizon_metrics = evaluate.load("horizon-metrics")
    >>> predictions = [[[0.0, 0.5], [1.0, 0.6]], [[0.0, 0.5], [1.0, 0.5]]]
    >>> references = [[[0.0, 0.5], [1.0, 0.5]], [[0.0, 0.5], [1.0, 0.5]]]
    >>> horizon_metrics.add(predictions=predictions, references=references)
    >>> results = horizon_metrics.compute()
"""
# begin utils
def xy_points_to_slope_midpoint(xy_points):
"""
Given two points, return the slope and midpoint of the line
Args:
xy_points: list of two points, each point is a list of two elements
Points are in the form of [x, y], where x and y are normalized to [0, 1]
Returns:
slope: Slope of the line
midpoint : Midpoint is in the form of [x,y], and is also normalized to [0, 1]
"""
    # Unpack the two [x, y] endpoints; assumes the horizon is not a vertical
    # line (x1 != x2)
    x1, y1 = xy_points[0]
    x2, y2 = xy_points[1]
slope = (y2 - y1) / (x2 - x1)
midpoint_x = 0.5
midpoint_y = slope * (0.5 - x1) + y1
midpoint = [midpoint_x, midpoint_y]
return slope, midpoint
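# Quick sanity check (illustrative values): a horizon from [0.0, 0.4] to
# [1.0, 0.6] has slope (0.6 - 0.4) / (1.0 - 0.0) = 0.2 and midpoint
# [0.5, 0.2 * (0.5 - 0.0) + 0.4] = [0.5, 0.5]:
#   xy_points_to_slope_midpoint([[0.0, 0.4], [1.0, 0.6]])  # (0.2, [0.5, 0.5])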
def calculate_horizon_error(annotated_horizon, proposed_horizon):
"""
Calculate the error between the annotated horizon and the proposed horizon
Args:
annotated_horizon: list of two points, each point is a list of two elements
Points are in the form of [x, y], where x and y are normalized to [0, 1]
proposed_horizon: list of two points, each point is a list of two elements
Points are in the form of [x, y], where x and y are normalized to [0, 1]
Returns:
slope_error: Error in the slope of the lines
midpoint_error: Error in the midpoint_y of the lines
"""
slope_annotated, midpoint_annotated = xy_points_to_slope_midpoint(
annotated_horizon)
slope_proposed, midpoint_proposed = xy_points_to_slope_midpoint(
proposed_horizon)
slope_error = abs(slope_annotated - slope_proposed)
midpoint_error = abs(midpoint_annotated[1] - midpoint_proposed[1])
return slope_error, midpoint_error
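# Illustrative example: a flat annotation [[0.0, 0.5], [1.0, 0.5]] against a
# tilted prediction [[0.0, 0.5], [1.0, 0.6]] gives slope_error = 0.1 and
# midpoint_error = |0.55 - 0.5| = 0.05.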
def calculate_horizon_error_across_sequence(slope_error_list,
midpoint_error_list,
slope_error_jump_threshold,
midpoint_error_jump_threshold):
"""
Calculate the error statistics across a sequence of frames
Args:
slope_error_list: List of errors in the slope of the lines
midpoint_error_list: List of errors in the midpoint_y of the lines
Returns:
average_slope_error: Average error in the slope of the lines
average_midpoint_error: Average error in the midpoint_y of the lines
"""
# Calculate the average and standard deviation of the errors
average_slope_error = np.mean(slope_error_list)
average_midpoint_error = np.mean(midpoint_error_list)
stddev_slope_error = np.std(slope_error_list)
stddev_midpoint_error = np.std(midpoint_error_list)
# Calculate the maximum errors
max_slope_error = np.max(slope_error_list)
max_midpoint_error = np.max(midpoint_error_list)
# Calculate the differences between errors in successive frames
diff_slope_error = np.abs(np.diff(slope_error_list))
diff_midpoint_error = np.abs(np.diff(midpoint_error_list))
# Calculate the number of jumps in the errors
num_slope_error_jumps = np.sum(
diff_slope_error > slope_error_jump_threshold)
num_midpoint_error_jumps = np.sum(
diff_midpoint_error > midpoint_error_jump_threshold)
# Create a dictionary to store the results
sequence_results = {
'average_slope_error': average_slope_error,
'average_midpoint_error': average_midpoint_error,
'stddev_slope_error': stddev_slope_error,
'stddev_midpoint_error': stddev_midpoint_error,
'max_slope_error': max_slope_error,
'max_midpoint_error': max_midpoint_error,
'num_slope_error_jumps': num_slope_error_jumps,
'num_midpoint_error_jumps': num_midpoint_error_jumps
}
return sequence_results
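# Illustrative example, assuming per-frame errors have already been collected:
#   calculate_horizon_error_across_sequence([0.1, 0.1, 0.4], [0.05, 0.05, 0.05],
#                                           slope_error_jump_threshold=0.2,
#                                           midpoint_error_jump_threshold=0.1)
# reports average_slope_error = 0.2, one slope error jump (0.1 -> 0.4), and no
# midpoint error jumps.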
def slope_to_roll(slope):
"""
Convert the slope of the horizon to roll
Args:
slope: Slope of the horizon
Returns:
roll: Roll in degrees
"""
roll = np.arctan(slope) * 180 / np.pi
return roll
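# Example: slope_to_roll(1.0) == 45.0, since arctan(1) is 45 degrees.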
def roll_to_slope(roll):
"""
Convert the roll of the horizon to slope
Args:
roll: Roll of the horizon in degrees
Returns:
slope: Slope of the horizon
"""
slope = np.tan(roll * np.pi / 180)
return slope
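# Example: roll_to_slope(45.0) == 1.0 (up to floating point precision); this
# is the inverse of slope_to_roll.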
def midpoint_to_pitch(midpoint, vertical_fov_degrees):
"""
Convert the midpoint of the horizon to pitch
Args:
        midpoint: y-coordinate of the horizon midpoint, normalized to [0, 1]
vertical_fov_degrees: Vertical field of view of the camera in degrees
Returns:
pitch: Pitch in degrees
"""
pitch = midpoint * vertical_fov_degrees
return pitch
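# Example with the 25.6 degree default field of view used below:
#   midpoint_to_pitch(0.5, 25.6)  # 12.8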
def pitch_to_midpoint(pitch, vertical_fov_degrees):
"""
Convert the pitch of the horizon to midpoint
Args:
pitch: Pitch of the horizon in degrees
vertical_fov_degrees: Vertical field of view of the camera in degrees
Returns:
        midpoint: y-coordinate of the horizon midpoint, normalized to [0, 1]
"""
midpoint = pitch / vertical_fov_degrees
return midpoint
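# Example: pitch_to_midpoint(12.8, 25.6) == 0.5; this is the inverse of
# midpoint_to_pitch.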
# end utils
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class horizonmetrics(evaluate.Metric):
    """Slope and midpoint error metrics for horizon-line predictions."""
def __init__(self,
slope_threshold=0.1,
midpoint_threshold=0.1,
vertical_fov_degrees=25.6,
**kwargs):
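        """
        Args:
            slope_threshold: Frame-to-frame slope error change above which a
                slope error jump is counted.
            midpoint_threshold: Frame-to-frame midpoint error change above
                which a midpoint error jump is counted.
            vertical_fov_degrees: Vertical field of view of the camera in
                degrees; stored for use with the pitch/midpoint helpers.
        """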
super().__init__(**kwargs)
self.slope_threshold = slope_threshold
self.midpoint_threshold = midpoint_threshold
self.vertical_fov_degrees = vertical_fov_degrees
self.predictions = None
self.ground_truth_det = None
self.slope_error_list = None
self.midpoint_error_list = None
def _info(self):
        # Specifies the evaluate.EvaluationModuleInfo object
return evaluate.MetricInfo(
# This is the description that will appear on the modules page.
module_type="metric",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
            # This defines the format of each prediction and reference:
            # one horizon per example, i.e. two [x, y] points with
            # coordinates normalized to [0, 1]
            features=datasets.Features({
                'predictions':
                datasets.Sequence(datasets.Sequence(datasets.Value('float32'))),
                'references':
                datasets.Sequence(datasets.Sequence(datasets.Value('float32'))),
            }),
# Homepage of the module for documentation
homepage="http://module.homepage",
# Additional links to the codebase or references
codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
reference_urls=["http://path.to.reference.url/new_module"])
def add(self, *, predictions, references, **kwargs):
"""
Update the predictions and ground truth detections.
Parameters
----------
predictions : list
List of predicted horizons.
ground_truth_det : list
List of ground truth horizons.
"""
self.predictions = predictions
self.ground_truth_det = references
self.slope_error_list = []
self.midpoint_error_list = []
for annotated_horizon, proposed_horizon in zip(self.ground_truth_det,
self.predictions):
slope_error, midpoint_error = calculate_horizon_error(
annotated_horizon, proposed_horizon)
self.slope_error_list.append(slope_error)
self.midpoint_error_list.append(midpoint_error)
        # Also feed the examples through the standard evaluate interface; this
        # does not affect the computed metric. EvaluationModule.add() expects
        # one example at a time with singular keyword names.
        for prediction, reference in zip(predictions, references):
            super(evaluate.Metric, self).add(prediction=prediction,
                                             reference=reference,
                                             **kwargs)
    def _compute(self, predictions=None, references=None):
        """
        Compute the horizon error statistics across the sequence.
        The `predictions` and `references` passed in by `compute()` are
        ignored here; the per-frame errors were already collected in `add()`.
        Returns
        -------
        dict
            The computed horizon error statistics.
        """
        return calculate_horizon_error_across_sequence(
            self.slope_error_list, self.midpoint_error_list,
            self.slope_threshold, self.midpoint_threshold)
def _download_and_prepare(self, dl_manager):
"""Optional: download external resources useful to compute the scores"""
# TODO: Download external resources if needed
pass
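

if __name__ == "__main__":
    # Minimal usage sketch with made-up horizons (each horizon is two [x, y]
    # points with coordinates normalized to [0, 1]); on the hub,
    # evaluate.load("horizon-metrics") would be the usual entry point.
    metric = horizonmetrics()
    predictions = [[[0.0, 0.50], [1.0, 0.60]], [[0.0, 0.48], [1.0, 0.52]]]
    references = [[[0.0, 0.50], [1.0, 0.50]], [[0.0, 0.50], [1.0, 0.50]]]
    metric.add(predictions=predictions, references=references)
    print(metric.compute())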