| Code (string, length 103–85.9k) | Summary (list of strings, length 0–94) |
---|---|
Please provide a description of the function:def information_gain(reference_beats,
estimated_beats,
bins=41):
validate(reference_beats, estimated_beats)
# If an even number of bins is provided,
# there will be no bin centered at zero, so warn the user.
if not bins % 2:
warnings.warn("bins parameter is even, "
"so there will not be a bin centered at zero.")
# Warn when only one beat is provided for either the estimated or
# reference sequence, since beat intervals cannot be computed
if reference_beats.size == 1:
warnings.warn("Only one reference beat was provided, so beat intervals"
" cannot be computed.")
if estimated_beats.size == 1:
warnings.warn("Only one estimated beat was provided, so beat intervals"
" cannot be computed.")
# When estimated or reference beats have <= 1 beats, can't compute the
# metric, so return 0
if estimated_beats.size <= 1 or reference_beats.size <= 1:
return 0.
# Get entropy for reference beats->estimated beats
# and estimated beats->reference beats
forward_entropy = _get_entropy(reference_beats, estimated_beats, bins)
backward_entropy = _get_entropy(estimated_beats, reference_beats, bins)
# Pick the larger of the entropies
norm = np.log2(bins)
if forward_entropy > backward_entropy:
# Note that the beat evaluation toolbox does not normalize
information_gain_score = (norm - forward_entropy)/norm
else:
information_gain_score = (norm - backward_entropy)/norm
return information_gain_score | [
"Get the information gain - K-L divergence of the beat error histogram\n to a uniform histogram\n\n Examples\n --------\n >>> reference_beats = mir_eval.io.load_events('reference.txt')\n >>> reference_beats = mir_eval.beat.trim_beats(reference_beats)\n >>> estimated_beats = mir_eval.io.load_events('estimated.txt')\n >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)\n >>> information_gain = mir_eval.beat.information_gain(reference_beats,\n estimated_beats)\n\n Parameters\n ----------\n reference_beats : np.ndarray\n reference beat times, in seconds\n estimated_beats : np.ndarray\n query beat times, in seconds\n bins : int\n Number of bins in the beat error histogram\n (Default value = 41)\n\n Returns\n -------\n information_gain_score : float\n Entropy of beat error histogram\n "
] |
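A minimal usage sketch for the function above, using synthetic beat times in place of the files shown in the docstring example (the values are illustrative):
>>> import numpy as np
>>> import mir_eval
>>> # Ten reference beats at 120 BPM (0.5 s apart) and an identical estimate
>>> reference_beats = np.arange(10) * 0.5
>>> estimated_beats = reference_beats.copy()
>>> # All beat errors are zero, so the error histogram collapses into the
>>> # single bin centered at zero and the normalized score should be 1.0
>>> mir_eval.beat.information_gain(reference_beats, estimated_beats)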
Please provide a description of the function:def _get_entropy(reference_beats, estimated_beats, bins):
beat_error = np.zeros(estimated_beats.shape[0])
for n in range(estimated_beats.shape[0]):
# Get index of closest annotation to this beat
beat_distances = estimated_beats[n] - reference_beats
closest_beat = np.argmin(np.abs(beat_distances))
absolute_error = beat_distances[closest_beat]
# If the first annotation is closest...
if closest_beat == 0:
# Inter-annotation interval - space between first two beats
interval = .5*(reference_beats[1] - reference_beats[0])
# If last annotation is closest...
if closest_beat == (reference_beats.shape[0] - 1):
interval = .5*(reference_beats[-1] - reference_beats[-2])
else:
if absolute_error < 0:
# The estimated beat falls before its closest annotation,
# so use the preceding inter-annotation interval
start = reference_beats[closest_beat]
end = reference_beats[closest_beat - 1]
interval = .5*(start - end)
else:
# The estimated beat falls at or after its closest annotation,
# so use the following inter-annotation interval
start = reference_beats[closest_beat + 1]
end = reference_beats[closest_beat]
interval = .5*(start - end)
# The actual error of this beat
beat_error[n] = .5*absolute_error/interval
# Put beat errors in range (-.5, .5)
beat_error = np.mod(beat_error + .5, -1) + .5
# Note: these bin edges differ slightly from the beat evaluation toolbox
# (here they are uniformly spaced)
histogram_bin_edges = np.linspace(-.5, .5, bins + 1)
# Get the histogram
raw_bin_values = np.histogram(beat_error, histogram_bin_edges)[0]
# Turn into a proper probability distribution
raw_bin_values = raw_bin_values/(1.0*np.sum(raw_bin_values))
# Set zero-valued bins to 1 to make the entropy calculation well-behaved
raw_bin_values[raw_bin_values == 0] = 1
# Calculate entropy
return -np.sum(raw_bin_values * np.log2(raw_bin_values)) | [
"Helper function for information gain\n (needs to be run twice - once backwards, once forwards)\n\n Parameters\n ----------\n reference_beats : np.ndarray\n reference beat times, in seconds\n estimated_beats : np.ndarray\n query beat times, in seconds\n bins : int\n Number of bins in the beat error histogram\n\n Returns\n -------\n entropy : float\n Entropy of beat error histogram\n\n "
] |
Please provide a description of the function:def evaluate(reference_beats, estimated_beats, **kwargs):
# Trim beat times at the beginning of the annotations
reference_beats = util.filter_kwargs(trim_beats, reference_beats, **kwargs)
estimated_beats = util.filter_kwargs(trim_beats, estimated_beats, **kwargs)
# Now compute all the metrics
scores = collections.OrderedDict()
# F-Measure
scores['F-measure'] = util.filter_kwargs(f_measure, reference_beats,
estimated_beats, **kwargs)
# Cemgil
scores['Cemgil'], scores['Cemgil Best Metric Level'] = \
util.filter_kwargs(cemgil, reference_beats, estimated_beats, **kwargs)
# Goto
scores['Goto'] = util.filter_kwargs(goto, reference_beats,
estimated_beats, **kwargs)
# P-Score
scores['P-score'] = util.filter_kwargs(p_score, reference_beats,
estimated_beats, **kwargs)
# Continuity metrics
(scores['Correct Metric Level Continuous'],
scores['Correct Metric Level Total'],
scores['Any Metric Level Continuous'],
scores['Any Metric Level Total']) = util.filter_kwargs(continuity,
reference_beats,
estimated_beats,
**kwargs)
# Information gain
scores['Information gain'] = util.filter_kwargs(information_gain,
reference_beats,
estimated_beats,
**kwargs)
return scores | [
"Compute all metrics for the given reference and estimated annotations.\n\n Examples\n --------\n >>> reference_beats = mir_eval.io.load_events('reference.txt')\n >>> estimated_beats = mir_eval.io.load_events('estimated.txt')\n >>> scores = mir_eval.beat.evaluate(reference_beats, estimated_beats)\n\n Parameters\n ----------\n reference_beats : np.ndarray\n Reference beat times, in seconds\n estimated_beats : np.ndarray\n Query beat times, in seconds\n kwargs\n Additional keyword arguments which will be passed to the\n appropriate metric or preprocessing functions.\n\n Returns\n -------\n scores : dict\n Dictionary of scores, where the key is the metric name (str) and\n the value is the (float) score achieved.\n\n "
] |
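A sketch of running the aggregate beat evaluation on synthetic data (values illustrative); every value in the returned OrderedDict is a float keyed by metric name:
>>> import numpy as np
>>> import mir_eval
>>> reference_beats = np.arange(0., 30., 0.5)   # a steady 120 BPM reference
>>> estimated_beats = reference_beats + 0.01    # estimates 10 ms late
>>> scores = mir_eval.beat.evaluate(reference_beats, estimated_beats)
>>> for name, value in scores.items():
...     print('{}: {:.3f}'.format(name, value))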
Please provide a description of the function:def index_labels(labels, case_sensitive=False):
label_to_index = {}
index_to_label = {}
# If we're not case-sensitive,
if not case_sensitive:
labels = [str(s).lower() for s in labels]
# First, build the unique label mapping
for index, s in enumerate(sorted(set(labels))):
label_to_index[s] = index
index_to_label[index] = s
# Remap the labels to indices
indices = [label_to_index[s] for s in labels]
# Return the converted labels, and the inverse mapping
return indices, index_to_label | [
"Convert a list of string identifiers into numerical indices.\n\n Parameters\n ----------\n labels : list of strings, shape=(n,)\n A list of annotations, e.g., segment or chord labels from an\n annotation file.\n\n case_sensitive : bool\n Set to True to enable case-sensitive label indexing\n (Default value = False)\n\n Returns\n -------\n indices : list, shape=(n,)\n Numerical representation of ``labels``\n index_to_label : dict\n Mapping to convert numerical indices back to labels.\n ``labels[i] == index_to_label[indices[i]]``\n\n "
] |
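A small illustration of the indexing behavior described above (hypothetical labels), assuming the function lives in mir_eval.util as in the library:
>>> indices, index_to_label = mir_eval.util.index_labels(['Verse', 'chorus', 'verse'])
>>> # indices        -> [1, 0, 1]   (case-insensitive by default, unique labels sorted)
>>> # index_to_label -> {0: 'chorus', 1: 'verse'}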
Please provide a description of the function:def generate_labels(items, prefix='__'):
return ['{}{}'.format(prefix, n) for n in range(len(items))] | [
"Given an array of items (e.g. events, intervals), create a synthetic label\n for each event of the form '(label prefix)(item number)'\n\n Parameters\n ----------\n items : list-like\n A list or array of events or intervals\n prefix : str\n This prefix will be prepended to all synthetically generated labels\n (Default value = '__')\n\n Returns\n -------\n labels : list of str\n Synthetically generated labels\n\n "
] |
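For example, three intervals get three synthetic labels built from the prefix and the item index:
>>> intervals = np.array([[0., 1.], [1., 2.], [2., 3.]])
>>> mir_eval.util.generate_labels(intervals)
>>> # -> ['__0', '__1', '__2']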
Please provide a description of the function:def intervals_to_samples(intervals, labels, offset=0, sample_size=0.1,
fill_value=None):
# Round intervals to the sample size
num_samples = int(np.floor(intervals.max() / sample_size))
sample_indices = np.arange(num_samples, dtype=np.float32)
sample_times = (sample_indices*sample_size + offset).tolist()
sampled_labels = interpolate_intervals(
intervals, labels, sample_times, fill_value)
return sample_times, sampled_labels | [
"Convert an array of labeled time intervals to annotated samples.\n\n Parameters\n ----------\n intervals : np.ndarray, shape=(n, d)\n An array of time intervals, as returned by\n :func:`mir_eval.io.load_intervals()` or\n :func:`mir_eval.io.load_labeled_intervals()`.\n The ``i`` th interval spans time ``intervals[i, 0]`` to\n ``intervals[i, 1]``.\n\n labels : list, shape=(n,)\n The annotation for each interval\n\n offset : float > 0\n Phase offset of the sampled time grid (in seconds)\n (Default value = 0)\n\n sample_size : float > 0\n duration of each sample to be generated (in seconds)\n (Default value = 0.1)\n\n fill_value : type(labels[0])\n Object to use for the label with out-of-range time points.\n (Default value = None)\n\n Returns\n -------\n sample_times : list\n list of sample times\n\n sample_labels : list\n array of labels for each generated sample\n\n Notes\n -----\n Intervals will be rounded down to the nearest multiple\n of ``sample_size``.\n\n "
] |
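A sketch of sampling two labeled intervals on a 0.5 s grid (hypothetical data):
>>> intervals = np.array([[0., 1.], [1., 2.]])
>>> times, labels = mir_eval.util.intervals_to_samples(
...     intervals, ['a', 'b'], sample_size=0.5)
>>> # times  -> [0.0, 0.5, 1.0, 1.5]
>>> # labels -> ['a', 'a', 'b', 'b']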
Please provide a description of the function:def interpolate_intervals(intervals, labels, time_points, fill_value=None):
# Verify that time_points is sorted
time_points = np.asarray(time_points)
if np.any(time_points[1:] < time_points[:-1]):
raise ValueError('time_points must be in non-decreasing order')
aligned_labels = [fill_value] * len(time_points)
starts = np.searchsorted(time_points, intervals[:, 0], side='left')
ends = np.searchsorted(time_points, intervals[:, 1], side='right')
for (start, end, lab) in zip(starts, ends, labels):
aligned_labels[start:end] = [lab] * (end - start)
return aligned_labels | [
"Assign labels to a set of points in time given a set of intervals.\n\n Time points that do not lie within an interval are mapped to `fill_value`.\n\n Parameters\n ----------\n intervals : np.ndarray, shape=(n, 2)\n An array of time intervals, as returned by\n :func:`mir_eval.io.load_intervals()`.\n The ``i`` th interval spans time ``intervals[i, 0]`` to\n ``intervals[i, 1]``.\n\n Intervals are assumed to be disjoint.\n\n labels : list, shape=(n,)\n The annotation for each interval\n\n time_points : array_like, shape=(m,)\n Points in time to assign labels. These must be in\n non-decreasing order.\n\n fill_value : type(labels[0])\n Object to use for the label with out-of-range time points.\n (Default value = None)\n\n Returns\n -------\n aligned_labels : list\n Labels corresponding to the given time points.\n\n Raises\n ------\n ValueError\n If `time_points` is not in non-decreasing order.\n "
] |
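For instance, a time point outside every interval is mapped to the fill value:
>>> intervals = np.array([[0., 1.], [1., 2.]])
>>> mir_eval.util.interpolate_intervals(
...     intervals, ['a', 'b'], [0.5, 1.5, 3.0], fill_value='N')
>>> # -> ['a', 'b', 'N']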
Please provide a description of the function:def sort_labeled_intervals(intervals, labels=None):
'''Sort intervals, and optionally, their corresponding labels
according to start time.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
The input intervals
labels : list, optional
Labels for each interval
Returns
-------
intervals_sorted or (intervals_sorted, labels_sorted)
Labels are only returned if provided as input
'''
idx = np.argsort(intervals[:, 0])
intervals_sorted = intervals[idx]
if labels is None:
return intervals_sorted
else:
return intervals_sorted, [labels[_] for _ in idx] | [] |
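A small sketch of sorting intervals together with their labels (hypothetical values):
>>> intervals = np.array([[1., 2.], [0., 1.]])
>>> sorted_ivs, sorted_labels = mir_eval.util.sort_labeled_intervals(
...     intervals, labels=['b', 'a'])
>>> # sorted_ivs    -> [[0., 1.], [1., 2.]]
>>> # sorted_labels -> ['a', 'b']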
Please provide a description of the function:def f_measure(precision, recall, beta=1.0):
if precision == 0 and recall == 0:
return 0.0
return (1 + beta**2)*precision*recall/((beta**2)*precision + recall) | [
"Compute the f-measure from precision and recall scores.\n\n Parameters\n ----------\n precision : float in (0, 1]\n Precision\n recall : float in (0, 1]\n Recall\n beta : float > 0\n Weighting factor for f-measure\n (Default value = 1.0)\n\n Returns\n -------\n f_measure : float\n The weighted f-measure\n\n "
] |
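The weighted harmonic mean can be checked by hand; with precision 0.8, recall 0.6 and beta = 1, F = 2 * 0.8 * 0.6 / (0.8 + 0.6) = 0.96 / 1.4 ≈ 0.686:
>>> mir_eval.util.f_measure(0.8, 0.6)
>>> # -> 0.6857142857142857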
Please provide a description of the function:def intervals_to_boundaries(intervals, q=5):
return np.unique(np.ravel(np.round(intervals, decimals=q))) | [
"Convert interval times into boundaries.\n\n Parameters\n ----------\n intervals : np.ndarray, shape=(n_events, 2)\n Array of interval start and end-times\n q : int\n Number of decimals to round to. (Default value = 5)\n\n Returns\n -------\n boundaries : np.ndarray\n Interval boundary times, including the end of the final interval\n\n "
] |
Please provide a description of the function:def boundaries_to_intervals(boundaries):
if not np.allclose(boundaries, np.unique(boundaries)):
raise ValueError('Boundary times are not unique or not ascending.')
intervals = np.asarray(list(zip(boundaries[:-1], boundaries[1:])))
return intervals | [
"Convert an array of event times into intervals\n\n Parameters\n ----------\n boundaries : list-like\n List-like of event times. These are assumed to be unique\n timestamps in ascending order.\n\n Returns\n -------\n intervals : np.ndarray, shape=(n_intervals, 2)\n Start and end time for each interval\n "
] |
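A round-trip sketch between boundary times and intervals, using the two functions above (hypothetical boundary times):
>>> boundaries = np.array([0., 1., 2.5, 4.])
>>> intervals = mir_eval.util.boundaries_to_intervals(boundaries)
>>> # intervals -> [[0., 1.], [1., 2.5], [2.5, 4.]]
>>> mir_eval.util.intervals_to_boundaries(intervals)
>>> # -> [0., 1., 2.5, 4.], recovering the original boundaries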
Please provide a description of the function:def adjust_intervals(intervals,
labels=None,
t_min=0.0,
t_max=None,
start_label='__T_MIN',
end_label='__T_MAX'):
# When supplied intervals are empty and t_max and t_min are supplied,
# create one interval from t_min to t_max with the label start_label
if t_min is not None and t_max is not None and intervals.size == 0:
return np.array([[t_min, t_max]]), [start_label]
# When intervals are empty and either t_min or t_max are not supplied,
# we can't append new intervals
elif (t_min is None or t_max is None) and intervals.size == 0:
raise ValueError("Supplied intervals are empty, can't append new"
" intervals")
if t_min is not None:
# Find the intervals that end at or after t_min
first_idx = np.argwhere(intervals[:, 1] >= t_min)
if len(first_idx) > 0:
# If we have events below t_min, crop them out
if labels is not None:
labels = labels[int(first_idx[0]):]
# Clip to the range (t_min, +inf)
intervals = intervals[int(first_idx[0]):]
intervals = np.maximum(t_min, intervals)
if intervals.min() > t_min:
# Lowest boundary is higher than t_min:
# add a new boundary and label
intervals = np.vstack(([t_min, intervals.min()], intervals))
if labels is not None:
labels.insert(0, start_label)
if t_max is not None:
# Find the intervals that begin after t_max
last_idx = np.argwhere(intervals[:, 0] > t_max)
if len(last_idx) > 0:
# We have boundaries above t_max.
# Trim to only boundaries <= t_max
if labels is not None:
labels = labels[:int(last_idx[0])]
# Clip to the range (-inf, t_max)
intervals = intervals[:int(last_idx[0])]
intervals = np.minimum(t_max, intervals)
if intervals.max() < t_max:
# Last boundary is below t_max: add a new boundary and label
intervals = np.vstack((intervals, [intervals.max(), t_max]))
if labels is not None:
labels.append(end_label)
return intervals, labels | [
"Adjust a list of time intervals to span the range ``[t_min, t_max]``.\n\n Any intervals lying completely outside the specified range will be removed.\n\n Any intervals lying partially outside the specified range will be cropped.\n\n If the specified range exceeds the span of the provided data in either\n direction, additional intervals will be appended. If an interval is\n appended at the beginning, it will be given the label ``start_label``; if\n an interval is appended at the end, it will be given the label\n ``end_label``.\n\n Parameters\n ----------\n intervals : np.ndarray, shape=(n_events, 2)\n Array of interval start and end-times\n labels : list, len=n_events or None\n List of labels\n (Default value = None)\n t_min : float or None\n Minimum interval start time.\n (Default value = 0.0)\n t_max : float or None\n Maximum interval end time.\n (Default value = None)\n start_label : str or float or int\n Label to give any intervals appended at the beginning\n (Default value = '__T_MIN')\n end_label : str or float or int\n Label to give any intervals appended at the end\n (Default value = '__T_MAX')\n\n Returns\n -------\n new_intervals : np.ndarray\n Intervals spanning ``[t_min, t_max]``\n new_labels : list\n List of labels for ``new_labels``\n\n "
] |
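A sketch of padding a short annotation out to a target range (hypothetical values); note that labels must be a mutable list, because new labels are inserted in place:
>>> intervals = np.array([[1., 2.], [2., 3.]])
>>> new_ivs, new_labels = mir_eval.util.adjust_intervals(
...     intervals, ['verse', 'chorus'], t_min=0.0, t_max=4.0)
>>> # new_ivs    -> [[0., 1.], [1., 2.], [2., 3.], [3., 4.]]
>>> # new_labels -> ['__T_MIN', 'verse', 'chorus', '__T_MAX']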
Please provide a description of the function:def adjust_events(events, labels=None, t_min=0.0,
t_max=None, label_prefix='__'):
if t_min is not None:
first_idx = np.argwhere(events >= t_min)
if len(first_idx) > 0:
# We have events below t_min
# Crop them out
if labels is not None:
labels = labels[int(first_idx[0]):]
events = events[int(first_idx[0]):]
if events[0] > t_min:
# Lowest boundary is higher than t_min:
# add a new boundary and label
events = np.concatenate(([t_min], events))
if labels is not None:
labels.insert(0, '%sT_MIN' % label_prefix)
if t_max is not None:
last_idx = np.argwhere(events > t_max)
if len(last_idx) > 0:
# We have boundaries above t_max.
# Trim to only boundaries <= t_max
if labels is not None:
labels = labels[:int(last_idx[0])]
events = events[:int(last_idx[0])]
if events[-1] < t_max:
# Last boundary is below t_max: add a new boundary and label
events = np.concatenate((events, [t_max]))
if labels is not None:
labels.append('%sT_MAX' % label_prefix)
return events, labels | [
"Adjust the given list of event times to span the range\n ``[t_min, t_max]``.\n\n Any event times outside of the specified range will be removed.\n\n If the times do not span ``[t_min, t_max]``, additional events will be\n added with the prefix ``label_prefix``.\n\n Parameters\n ----------\n events : np.ndarray\n Array of event times (seconds)\n labels : list or None\n List of labels\n (Default value = None)\n t_min : float or None\n Minimum valid event time.\n (Default value = 0.0)\n t_max : float or None\n Maximum valid event time.\n (Default value = None)\n label_prefix : str\n Prefix string to use for synthetic labels\n (Default value = '__')\n\n Returns\n -------\n new_times : np.ndarray\n Event times corrected to the given range.\n\n "
] |
Please provide a description of the function:def intersect_files(flist1, flist2):
def fname(abs_path):
return os.path.splitext(os.path.split(abs_path)[-1])[0]
fmap = dict([(fname(f), f) for f in flist1])
pairs = [list(), list()]
for f in flist2:
if fname(f) in fmap:
pairs[0].append(fmap[fname(f)])
pairs[1].append(f)
return pairs | [
"Return the intersection of two sets of filepaths, based on the file name\n (after the final '/') and ignoring the file extension.\n\n Examples\n --------\n >>> flist1 = ['/a/b/abc.lab', '/c/d/123.lab', '/e/f/xyz.lab']\n >>> flist2 = ['/g/h/xyz.npy', '/i/j/123.txt', '/k/l/456.lab']\n >>> sublist1, sublist2 = mir_eval.util.intersect_files(flist1, flist2)\n >>> print sublist1\n ['/e/f/xyz.lab', '/c/d/123.lab']\n >>> print sublist2\n ['/g/h/xyz.npy', '/i/j/123.txt']\n\n Parameters\n ----------\n flist1 : list\n first list of filepaths\n flist2 : list\n second list of filepaths\n\n Returns\n -------\n sublist1 : list\n subset of filepaths with matching stems from ``flist1``\n sublist2 : list\n corresponding filepaths from ``flist2``\n\n ",
"Returns the filename given an absolute path.\n\n Parameters\n ----------\n abs_path :\n\n\n Returns\n -------\n\n "
] |
Please provide a description of the function:def merge_labeled_intervals(x_intervals, x_labels, y_intervals, y_labels):
align_check = [x_intervals[0, 0] == y_intervals[0, 0],
x_intervals[-1, 1] == y_intervals[-1, 1]]
if False in align_check:
raise ValueError(
"Time intervals do not align; did you mean to call "
"'adjust_intervals()' first?")
time_boundaries = np.unique(
np.concatenate([x_intervals, y_intervals], axis=0))
output_intervals = np.array(
[time_boundaries[:-1], time_boundaries[1:]]).T
x_labels_out, y_labels_out = [], []
x_label_range = np.arange(len(x_labels))
y_label_range = np.arange(len(y_labels))
for t0, _ in output_intervals:
x_idx = x_label_range[(t0 >= x_intervals[:, 0])]
x_labels_out.append(x_labels[x_idx[-1]])
y_idx = y_label_range[(t0 >= y_intervals[:, 0])]
y_labels_out.append(y_labels[y_idx[-1]])
return output_intervals, x_labels_out, y_labels_out | [
"Merge the time intervals of two sequences.\n\n Parameters\n ----------\n x_intervals : np.ndarray\n Array of interval times (seconds)\n x_labels : list or None\n List of labels\n y_intervals : np.ndarray\n Array of interval times (seconds)\n y_labels : list or None\n List of labels\n\n Returns\n -------\n new_intervals : np.ndarray\n New interval times of the merged sequences.\n new_x_labels : list\n New labels for the sequence ``x``\n new_y_labels : list\n New labels for the sequence ``y``\n\n "
] |
Please provide a description of the function:def _bipartite_match(graph):
# Adapted from:
#
# Hopcroft-Karp bipartite max-cardinality matching and max independent set
# David Eppstein, UC Irvine, 27 Apr 2002
# initialize greedy matching (redundant, but faster than full search)
matching = {}
for u in graph:
for v in graph[u]:
if v not in matching:
matching[v] = u
break
while True:
# structure residual graph into layers
# pred[u] gives the neighbor in the previous layer for u in U
# preds[v] gives a list of neighbors in the previous layer for v in V
# unmatched gives a list of unmatched vertices in final layer of V,
# and is also used as a flag value for pred[u] when u is in the first
# layer
preds = {}
unmatched = []
pred = dict([(u, unmatched) for u in graph])
for v in matching:
del pred[matching[v]]
layer = list(pred)
# repeatedly extend layering structure by another pair of layers
while layer and not unmatched:
new_layer = {}
for u in layer:
for v in graph[u]:
if v not in preds:
new_layer.setdefault(v, []).append(u)
layer = []
for v in new_layer:
preds[v] = new_layer[v]
if v in matching:
layer.append(matching[v])
pred[matching[v]] = v
else:
unmatched.append(v)
# did we finish layering without finding any alternating paths?
if not unmatched:
unlayered = {}
for u in graph:
for v in graph[u]:
if v not in preds:
unlayered[v] = None
return matching
def recurse(v):
if v in preds:
L = preds[v]
del preds[v]
for u in L:
if u in pred:
pu = pred[u]
del pred[u]
if pu is unmatched or recurse(pu):
matching[v] = u
return True
return False
for v in unmatched:
recurse(v) | [
"Find maximum cardinality matching of a bipartite graph (U,V,E).\n The input format is a dictionary mapping members of U to a list\n of their neighbors in V.\n\n The output is a dict M mapping members of V to their matches in U.\n\n Parameters\n ----------\n graph : dictionary : left-vertex -> list of right vertices\n The input bipartite graph. Each edge need only be specified once.\n\n Returns\n -------\n matching : dictionary : right-vertex -> left vertex\n A maximal bipartite matching.\n\n ",
"Recursively search backward through layers to find alternating\n paths. recursion returns true if found path, false otherwise\n "
] |
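A tiny sketch of the private helper above (importable as mir_eval.util._bipartite_match in the library): both left vertices can match right vertex 1, but only 'a' can also match 2, so the maximum matching pairs 1 with 'b' and 2 with 'a':
>>> from mir_eval.util import _bipartite_match
>>> _bipartite_match({'a': [1, 2], 'b': [1]})
>>> # -> {1: 'b', 2: 'a'}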
Please provide a description of the function:def _outer_distance_mod_n(ref, est, modulus=12):
ref_mod_n = np.mod(ref, modulus)
est_mod_n = np.mod(est, modulus)
abs_diff = np.abs(np.subtract.outer(ref_mod_n, est_mod_n))
return np.minimum(abs_diff, modulus - abs_diff) | [
"Compute the absolute outer distance modulo n.\n Using this distance, d(11, 0) = 1 (modulo 12)\n\n Parameters\n ----------\n ref : np.ndarray, shape=(n,)\n Array of reference values.\n est : np.ndarray, shape=(m,)\n Array of estimated values.\n modulus : int\n The modulus.\n 12 by default for octave equivalence.\n\n Returns\n -------\n outer_distance : np.ndarray, shape=(n, m)\n The outer circular distance modulo n.\n\n "
] |
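A quick check of the circular distance (the helper lives in mir_eval.util in the library):
>>> from mir_eval.util import _outer_distance_mod_n
>>> _outer_distance_mod_n(np.array([0., 11.]), np.array([0., 1.]))
>>> # -> [[0., 1.],
>>> #     [1., 2.]]   e.g. d(11, 0) = 1 under mod-12 (octave) equivalence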
Please provide a description of the function:def match_events(ref, est, window, distance=None):
if distance is not None:
# Compute the indices of feasible pairings
hits = np.where(distance(ref, est) <= window)
else:
hits = _fast_hit_windows(ref, est, window)
# Construct the graph input
G = {}
for ref_i, est_i in zip(*hits):
if est_i not in G:
G[est_i] = []
G[est_i].append(ref_i)
# Compute the maximum matching
matching = sorted(_bipartite_match(G).items())
return matching | [
"Compute a maximum matching between reference and estimated event times,\n subject to a window constraint.\n\n Given two lists of event times ``ref`` and ``est``, we seek the largest set\n of correspondences ``(ref[i], est[j])`` such that\n ``distance(ref[i], est[j]) <= window``, and each\n ``ref[i]`` and ``est[j]`` is matched at most once.\n\n This is useful for computing precision/recall metrics in beat tracking,\n onset detection, and segmentation.\n\n Parameters\n ----------\n ref : np.ndarray, shape=(n,)\n Array of reference values\n est : np.ndarray, shape=(m,)\n Array of estimated values\n window : float > 0\n Size of the window.\n distance : function\n function that computes the outer distance of ref and est.\n By default uses ``|ref[i] - est[j]|``\n\n Returns\n -------\n matching : list of tuples\n A list of matched reference and event numbers.\n ``matching[i] == (i, j)`` where ``ref[i]`` matches ``est[j]``.\n\n "
] |
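A sketch of matching events within a 0.2 s tolerance window (hypothetical event times):
>>> ref = np.array([0.0, 1.0, 2.0])
>>> est = np.array([0.05, 0.95, 3.0])
>>> mir_eval.util.match_events(ref, est, window=0.2)
>>> # -> [(0, 0), (1, 1)]   (the estimate at 3.0 s has no reference within 0.2 s)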
Please provide a description of the function:def _fast_hit_windows(ref, est, window):
'''Fast calculation of windowed hits for time events.
Given two lists of event times ``ref`` and ``est``, and a
tolerance window, computes a list of pairings
``(i, j)`` where ``|ref[i] - est[j]| <= window``.
This is equivalent to, but more efficient than the following:
>>> hit_ref, hit_est = np.where(np.abs(np.subtract.outer(ref, est))
... <= window)
Parameters
----------
ref : np.ndarray, shape=(n,)
Array of reference values
est : np.ndarray, shape=(m,)
Array of estimated values
window : float >= 0
Size of the tolerance window
Returns
-------
hit_ref : np.ndarray
hit_est : np.ndarray
indices such that ``|hit_ref[i] - hit_est[i]| <= window``
'''
ref = np.asarray(ref)
est = np.asarray(est)
ref_idx = np.argsort(ref)
ref_sorted = ref[ref_idx]
left_idx = np.searchsorted(ref_sorted, est - window, side='left')
right_idx = np.searchsorted(ref_sorted, est + window, side='right')
hit_ref, hit_est = [], []
for j, (start, end) in enumerate(zip(left_idx, right_idx)):
hit_ref.extend(ref_idx[start:end])
hit_est.extend([j] * (end - start))
return hit_ref, hit_est | [] |
Please provide a description of the function:def validate_intervals(intervals):
# Validate interval shape
if intervals.ndim != 2 or intervals.shape[1] != 2:
raise ValueError('Intervals should be n-by-2 numpy ndarray, '
'but shape={}'.format(intervals.shape))
# Make sure no times are negative
if (intervals < 0).any():
raise ValueError('Negative interval times found')
# Make sure all intervals have strictly positive duration
if (intervals[:, 1] <= intervals[:, 0]).any():
raise ValueError('All interval durations must be strictly positive') | [
"Checks that an (n, 2) interval ndarray is well-formed, and raises errors\n if not.\n\n Parameters\n ----------\n intervals : np.ndarray, shape=(n, 2)\n Array of interval start/end locations.\n\n "
] |
Please provide a description of the function:def validate_events(events, max_time=30000.):
# Make sure no event times are huge
if (events > max_time).any():
raise ValueError('An event at time {} was found which is greater than '
'the maximum allowable time of max_time = {} (did you'
' supply event times in '
'seconds?)'.format(events.max(), max_time))
# Make sure event locations are 1-d np ndarrays
if events.ndim != 1:
raise ValueError('Event times should be 1-d numpy ndarray, '
'but shape={}'.format(events.shape))
# Make sure event times are increasing
if (np.diff(events) < 0).any():
raise ValueError('Events should be in increasing order.') | [
"Checks that a 1-d event location ndarray is well-formed, and raises\n errors if not.\n\n Parameters\n ----------\n events : np.ndarray, shape=(n,)\n Array of event times\n max_time : float\n If an event is found above this time, a ValueError will be raised.\n (Default value = 30000.)\n\n "
] |
Please provide a description of the function:def validate_frequencies(frequencies, max_freq, min_freq,
allow_negatives=False):
# If flag is true, map frequencies to their absolute value.
if allow_negatives:
frequencies = np.abs(frequencies)
# Make sure no frequency values are huge
if (np.abs(frequencies) > max_freq).any():
raise ValueError('A frequency of {} was found which is greater than '
'the maximum allowable value of max_freq = {} (did '
'you supply frequency values in '
'Hz?)'.format(frequencies.max(), max_freq))
# Make sure no frequency values are tiny
if (np.abs(frequencies) < min_freq).any():
raise ValueError('A frequency of {} was found which is less than the '
'minimum allowable value of min_freq = {} (did you '
'supply frequency values in '
'Hz?)'.format(frequencies.min(), min_freq))
# Make sure frequency values are 1-d np ndarrays
if frequencies.ndim != 1:
raise ValueError('Frequencies should be 1-d numpy ndarray, '
'but shape={}'.format(frequencies.shape)) | [
"Checks that a 1-d frequency ndarray is well-formed, and raises\n errors if not.\n\n Parameters\n ----------\n frequencies : np.ndarray, shape=(n,)\n Array of frequency values\n max_freq : float\n If a frequency is found above this pitch, a ValueError will be raised.\n (Default value = 5000.)\n min_freq : float\n If a frequency is found below this pitch, a ValueError will be raised.\n (Default value = 20.)\n allow_negatives : bool\n Whether or not to allow negative frequency values.\n "
] |
Please provide a description of the function:def has_kwargs(function):
r'''Determine whether a function has \*\*kwargs.
Parameters
----------
function : callable
The function to test
Returns
-------
True if function accepts arbitrary keyword arguments.
False otherwise.
'''
if six.PY2:
return inspect.getargspec(function).keywords is not None
else:
sig = inspect.signature(function)
for param in sig.parameters.values():
if param.kind == param.VAR_KEYWORD:
return True
return False | [] |
Please provide a description of the function:def filter_kwargs(_function, *args, **kwargs):
if has_kwargs(_function):
return _function(*args, **kwargs)
# Get the list of function arguments
func_code = six.get_function_code(_function)
function_args = func_code.co_varnames[:func_code.co_argcount]
# Construct a dict of those kwargs which appear in the function
filtered_kwargs = {}
for kwarg, value in list(kwargs.items()):
if kwarg in function_args:
filtered_kwargs[kwarg] = value
# Call the function with the supplied args and the filtered kwarg dict
return _function(*args, **filtered_kwargs) | [
"Given a function and args and keyword args to pass to it, call the function\n but using only the keyword arguments which it accepts. This is equivalent\n to redefining the function with an additional \\*\\*kwargs to accept slop\n keyword args.\n\n If the target function already accepts \\*\\*kwargs parameters, no filtering\n is performed.\n\n Parameters\n ----------\n _function : callable\n Function to call. Can take in any number of args or kwargs\n\n "
] |
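A sketch of the keyword filtering: unknown keyword arguments are silently dropped before the call (the function add below is hypothetical):
>>> def add(x, y=0):
...     return x + y
>>> mir_eval.util.filter_kwargs(add, 2, y=3, z='ignored')
>>> # -> 5   ('z' is not an argument of add, so it is filtered out)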
Please provide a description of the function:def intervals_to_durations(intervals):
validate_intervals(intervals)
return np.abs(np.diff(intervals, axis=-1)).flatten() | [
"Converts an array of n intervals to their n durations.\n\n Parameters\n ----------\n intervals : np.ndarray, shape=(n, 2)\n An array of time intervals, as returned by\n :func:`mir_eval.io.load_intervals()`.\n The ``i`` th interval spans time ``intervals[i, 0]`` to\n ``intervals[i, 1]``.\n\n Returns\n -------\n durations : np.ndarray, shape=(n,)\n Array of the duration of each interval.\n\n "
] |
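For example:
>>> mir_eval.util.intervals_to_durations(np.array([[0., 1.], [1., 3.5]]))
>>> # -> [1., 2.5]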
Please provide a description of the function:def validate(reference_sources, estimated_sources):
if reference_sources.shape != estimated_sources.shape:
raise ValueError('The shape of estimated sources and the true '
'sources should match. reference_sources.shape '
'= {}, estimated_sources.shape '
'= {}'.format(reference_sources.shape,
estimated_sources.shape))
if reference_sources.ndim > 3 or estimated_sources.ndim > 3:
raise ValueError('The number of dimensions is too high (must be less '
'than 3). reference_sources.ndim = {}, '
'estimated_sources.ndim '
'= {}'.format(reference_sources.ndim,
estimated_sources.ndim))
if reference_sources.size == 0:
warnings.warn("reference_sources is empty, should be of size "
"(nsrc, nsample). sdr, sir, sar, and perm will all "
"be empty np.ndarrays")
elif _any_source_silent(reference_sources):
raise ValueError('All the reference sources should be non-silent (not '
'all-zeros), but at least one of the reference '
'sources is all 0s, which introduces ambiguity to the'
' evaluation. (Otherwise we can add infinitely many '
'all-zero sources.)')
if estimated_sources.size == 0:
warnings.warn("estimated_sources is empty, should be of size "
"(nsrc, nsample). sdr, sir, sar, and perm will all "
"be empty np.ndarrays")
elif _any_source_silent(estimated_sources):
raise ValueError('All the estimated sources should be non-silent (not '
'all-zeros), but at least one of the estimated '
'sources is all 0s. Since we require each reference '
'source to be non-silent, having a silent estimated '
'source will result in an underdetermined system.')
if (estimated_sources.shape[0] > MAX_SOURCES or
reference_sources.shape[0] > MAX_SOURCES):
raise ValueError('The supplied matrices should be of shape (nsrc,'
' nsampl) but reference_sources.shape[0] = {} and '
'estimated_sources.shape[0] = {} which is greater '
'than mir_eval.separation.MAX_SOURCES = {}. To '
'override this check, set '
'mir_eval.separation.MAX_SOURCES to a '
'larger value.'.format(reference_sources.shape[0],
estimated_sources.shape[0],
MAX_SOURCES)) | [
"Checks that the input data to a metric are valid, and throws helpful\n errors if not.\n\n Parameters\n ----------\n reference_sources : np.ndarray, shape=(nsrc, nsampl)\n matrix containing true sources\n estimated_sources : np.ndarray, shape=(nsrc, nsampl)\n matrix containing estimated sources\n\n "
] |
Please provide a description of the function:def _any_source_silent(sources):
return np.any(np.all(np.sum(
sources, axis=tuple(range(2, sources.ndim))) == 0, axis=1)) | [
"Returns true if the parameter sources has any silent first dimensions"
] |
Please provide a description of the function:def bss_eval_sources(reference_sources, estimated_sources,
compute_permutation=True):
# make sure the input is of shape (nsrc, nsampl)
if estimated_sources.ndim == 1:
estimated_sources = estimated_sources[np.newaxis, :]
if reference_sources.ndim == 1:
reference_sources = reference_sources[np.newaxis, :]
validate(reference_sources, estimated_sources)
# If empty matrices were supplied, return empty lists (special case)
if reference_sources.size == 0 or estimated_sources.size == 0:
return np.array([]), np.array([]), np.array([]), np.array([])
nsrc = estimated_sources.shape[0]
# does user desire permutations?
if compute_permutation:
# compute criteria for all possible pair matches
sdr = np.empty((nsrc, nsrc))
sir = np.empty((nsrc, nsrc))
sar = np.empty((nsrc, nsrc))
for jest in range(nsrc):
for jtrue in range(nsrc):
s_true, e_spat, e_interf, e_artif = \
_bss_decomp_mtifilt(reference_sources,
estimated_sources[jest],
jtrue, 512)
sdr[jest, jtrue], sir[jest, jtrue], sar[jest, jtrue] = \
_bss_source_crit(s_true, e_spat, e_interf, e_artif)
# select the best ordering
perms = list(itertools.permutations(list(range(nsrc))))
mean_sir = np.empty(len(perms))
dum = np.arange(nsrc)
for (i, perm) in enumerate(perms):
mean_sir[i] = np.mean(sir[perm, dum])
popt = perms[np.argmax(mean_sir)]
idx = (popt, dum)
return (sdr[idx], sir[idx], sar[idx], np.asarray(popt))
else:
# compute criteria for only the simple correspondence
# (estimate 1 is estimate corresponding to reference source 1, etc.)
sdr = np.empty(nsrc)
sir = np.empty(nsrc)
sar = np.empty(nsrc)
for j in range(nsrc):
s_true, e_spat, e_interf, e_artif = \
_bss_decomp_mtifilt(reference_sources,
estimated_sources[j],
j, 512)
sdr[j], sir[j], sar[j] = \
_bss_source_crit(s_true, e_spat, e_interf, e_artif)
# return the default permutation for compatibility
popt = np.arange(nsrc)
return (sdr, sir, sar, popt) | [
"\n Ordering and measurement of the separation quality for estimated source\n signals in terms of filtered true source, interference and artifacts.\n\n The decomposition allows a time-invariant filter distortion of length\n 512, as described in Section III.B of [#vincent2006performance]_.\n\n Passing ``False`` for ``compute_permutation`` will improve the computation\n performance of the evaluation; however, it is not always appropriate and\n is not the way that the BSS_EVAL Matlab toolbox computes bss_eval_sources.\n\n Examples\n --------\n >>> # reference_sources[n] should be an ndarray of samples of the\n >>> # n'th reference source\n >>> # estimated_sources[n] should be the same for the n'th estimated\n >>> # source\n >>> (sdr, sir, sar,\n ... perm) = mir_eval.separation.bss_eval_sources(reference_sources,\n ... estimated_sources)\n\n Parameters\n ----------\n reference_sources : np.ndarray, shape=(nsrc, nsampl)\n matrix containing true sources (must have same shape as\n estimated_sources)\n estimated_sources : np.ndarray, shape=(nsrc, nsampl)\n matrix containing estimated sources (must have same shape as\n reference_sources)\n compute_permutation : bool, optional\n compute permutation of estimate/source combinations (True by default)\n\n Returns\n -------\n sdr : np.ndarray, shape=(nsrc,)\n vector of Signal to Distortion Ratios (SDR)\n sir : np.ndarray, shape=(nsrc,)\n vector of Source to Interference Ratios (SIR)\n sar : np.ndarray, shape=(nsrc,)\n vector of Sources to Artifacts Ratios (SAR)\n perm : np.ndarray, shape=(nsrc,)\n vector containing the best ordering of estimated sources in\n the mean SIR sense (estimated source number ``perm[j]`` corresponds to\n true source number ``j``). Note: ``perm`` will be ``[0, 1, ...,\n nsrc-1]`` if ``compute_permutation`` is ``False``.\n\n References\n ----------\n .. [#] Emmanuel Vincent, Shoko Araki, Fabian J. Theis, Guido Nolte, Pau\n Bofill, Hiroshi Sawada, Alexey Ozerov, B. Vikrham Gowreesunker, Dominik\n Lutter and Ngoc Q.K. Duong, \"The Signal Separation Evaluation Campaign\n (2007-2010): Achievements and remaining challenges\", Signal Processing,\n 92, pp. 1928-1936, 2012.\n\n "
] |
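A sketch of the permutation recovery on synthetic data (random noise stands in for real sources; runtime grows with signal length and source count):
>>> import numpy as np
>>> import mir_eval
>>> rng = np.random.RandomState(0)
>>> reference_sources = rng.randn(2, 2 * 44100)   # two 2-second "sources"
>>> estimated_sources = reference_sources[::-1]   # same signals, order swapped
>>> sdr, sir, sar, perm = mir_eval.separation.bss_eval_sources(
...     reference_sources, estimated_sources)
>>> # With exact but swapped estimates the ratios should be very large and
>>> # perm should recover the swap, i.e. perm -> [1, 0]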
Please provide a description of the function:def bss_eval_sources_framewise(reference_sources, estimated_sources,
window=30*44100, hop=15*44100,
compute_permutation=False):
# make sure the input is of shape (nsrc, nsampl)
if estimated_sources.ndim == 1:
estimated_sources = estimated_sources[np.newaxis, :]
if reference_sources.ndim == 1:
reference_sources = reference_sources[np.newaxis, :]
validate(reference_sources, estimated_sources)
# If empty matrices were supplied, return empty lists (special case)
if reference_sources.size == 0 or estimated_sources.size == 0:
return np.array([]), np.array([]), np.array([]), np.array([])
nsrc = reference_sources.shape[0]
nwin = int(
np.floor((reference_sources.shape[1] - window + hop) / hop)
)
# if fewer than 2 windows would be evaluated, return the sources result
if nwin < 2:
result = bss_eval_sources(reference_sources,
estimated_sources,
compute_permutation)
return [np.expand_dims(score, -1) for score in result]
# compute the criteria across all windows
sdr = np.empty((nsrc, nwin))
sir = np.empty((nsrc, nwin))
sar = np.empty((nsrc, nwin))
perm = np.empty((nsrc, nwin))
# k iterates across all the windows
for k in range(nwin):
win_slice = slice(k * hop, k * hop + window)
ref_slice = reference_sources[:, win_slice]
est_slice = estimated_sources[:, win_slice]
# check for a silent frame
if (not _any_source_silent(ref_slice) and
not _any_source_silent(est_slice)):
sdr[:, k], sir[:, k], sar[:, k], perm[:, k] = bss_eval_sources(
ref_slice, est_slice, compute_permutation
)
else:
# if we have a silent frame set results as np.nan
sdr[:, k] = sir[:, k] = sar[:, k] = perm[:, k] = np.nan
return sdr, sir, sar, perm | [
"Framewise computation of bss_eval_sources\n\n Please be aware that this function does not compute permutations (by\n default) on the possible relations between reference_sources and\n estimated_sources due to the dangers of a changing permutation. Therefore\n (by default), it assumes that ``reference_sources[i]`` corresponds to\n ``estimated_sources[i]``. To enable computing permutations please set\n ``compute_permutation`` to be ``True`` and check that the returned ``perm``\n is identical for all windows.\n\n NOTE: if ``reference_sources`` and ``estimated_sources`` would be evaluated\n using only a single window or are shorter than the window length, the\n result of :func:`mir_eval.separation.bss_eval_sources` called on\n ``reference_sources`` and ``estimated_sources`` (with the\n ``compute_permutation`` parameter passed to\n :func:`mir_eval.separation.bss_eval_sources`) is returned.\n\n Examples\n --------\n >>> # reference_sources[n] should be an ndarray of samples of the\n >>> # n'th reference source\n >>> # estimated_sources[n] should be the same for the n'th estimated\n >>> # source\n >>> (sdr, sir, sar,\n ... perm) = mir_eval.separation.bss_eval_sources_framewise(\n reference_sources,\n ... estimated_sources)\n\n Parameters\n ----------\n reference_sources : np.ndarray, shape=(nsrc, nsampl)\n matrix containing true sources (must have the same shape as\n ``estimated_sources``)\n estimated_sources : np.ndarray, shape=(nsrc, nsampl)\n matrix containing estimated sources (must have the same shape as\n ``reference_sources``)\n window : int, optional\n Window length for framewise evaluation (default value is 30s at a\n sample rate of 44.1kHz)\n hop : int, optional\n Hop size for framewise evaluation (default value is 15s at a\n sample rate of 44.1kHz)\n compute_permutation : bool, optional\n compute permutation of estimate/source combinations for all windows\n (False by default)\n\n Returns\n -------\n sdr : np.ndarray, shape=(nsrc, nframes)\n vector of Signal to Distortion Ratios (SDR)\n sir : np.ndarray, shape=(nsrc, nframes)\n vector of Source to Interference Ratios (SIR)\n sar : np.ndarray, shape=(nsrc, nframes)\n vector of Sources to Artifacts Ratios (SAR)\n perm : np.ndarray, shape=(nsrc, nframes)\n vector containing the best ordering of estimated sources in\n the mean SIR sense (estimated source number ``perm[j]`` corresponds to\n true source number ``j``). Note: ``perm`` will be ``range(nsrc)`` for\n all windows if ``compute_permutation`` is ``False``\n\n "
] |
Please provide a description of the function:def bss_eval_images(reference_sources, estimated_sources,
compute_permutation=True):
# make sure the input has 3 dimensions
# assuming input is in shape (nsampl) or (nsrc, nsampl)
estimated_sources = np.atleast_3d(estimated_sources)
reference_sources = np.atleast_3d(reference_sources)
# we will ensure input doesn't have more than 3 dimensions in validate
validate(reference_sources, estimated_sources)
# If empty matrices were supplied, return empty lists (special case)
if reference_sources.size == 0 or estimated_sources.size == 0:
return np.array([]), np.array([]), np.array([]), \
np.array([]), np.array([])
# determine size parameters
nsrc = estimated_sources.shape[0]
nsampl = estimated_sources.shape[1]
nchan = estimated_sources.shape[2]
# does the user desire permutation?
if compute_permutation:
# compute criteria for all possible pair matches
sdr = np.empty((nsrc, nsrc))
isr = np.empty((nsrc, nsrc))
sir = np.empty((nsrc, nsrc))
sar = np.empty((nsrc, nsrc))
for jest in range(nsrc):
for jtrue in range(nsrc):
s_true, e_spat, e_interf, e_artif = \
_bss_decomp_mtifilt_images(
reference_sources,
np.reshape(
estimated_sources[jest],
(nsampl, nchan),
order='F'
),
jtrue,
512
)
sdr[jest, jtrue], isr[jest, jtrue], \
sir[jest, jtrue], sar[jest, jtrue] = \
_bss_image_crit(s_true, e_spat, e_interf, e_artif)
# select the best ordering
perms = list(itertools.permutations(range(nsrc)))
mean_sir = np.empty(len(perms))
dum = np.arange(nsrc)
for (i, perm) in enumerate(perms):
mean_sir[i] = np.mean(sir[perm, dum])
popt = perms[np.argmax(mean_sir)]
idx = (popt, dum)
return (sdr[idx], isr[idx], sir[idx], sar[idx], np.asarray(popt))
else:
# compute criteria for only the simple correspondence
# (estimate 1 is estimate corresponding to reference source 1, etc.)
sdr = np.empty(nsrc)
isr = np.empty(nsrc)
sir = np.empty(nsrc)
sar = np.empty(nsrc)
Gj = [0] * nsrc # prepare G matrics with zeroes
G = np.zeros(1)
for j in range(nsrc):
# save G matrix to avoid recomputing it every call
s_true, e_spat, e_interf, e_artif, Gj_temp, G = \
_bss_decomp_mtifilt_images(reference_sources,
np.reshape(estimated_sources[j],
(nsampl, nchan),
order='F'),
j, 512, Gj[j], G)
Gj[j] = Gj_temp
sdr[j], isr[j], sir[j], sar[j] = \
_bss_image_crit(s_true, e_spat, e_interf, e_artif)
# return the default permutation for compatibility
popt = np.arange(nsrc)
return (sdr, isr, sir, sar, popt) | [
"Implementation of the bss_eval_images function from the\n BSS_EVAL Matlab toolbox.\n\n Ordering and measurement of the separation quality for estimated source\n signals in terms of filtered true source, interference and artifacts.\n This method also provides the ISR measure.\n\n The decomposition allows a time-invariant filter distortion of length\n 512, as described in Section III.B of [#vincent2006performance]_.\n\n Passing ``False`` for ``compute_permutation`` will improve the computation\n performance of the evaluation; however, it is not always appropriate and\n is not the way that the BSS_EVAL Matlab toolbox computes bss_eval_images.\n\n Examples\n --------\n >>> # reference_sources[n] should be an ndarray of samples of the\n >>> # n'th reference source\n >>> # estimated_sources[n] should be the same for the n'th estimated\n >>> # source\n >>> (sdr, isr, sir, sar,\n ... perm) = mir_eval.separation.bss_eval_images(reference_sources,\n ... estimated_sources)\n\n Parameters\n ----------\n reference_sources : np.ndarray, shape=(nsrc, nsampl, nchan)\n matrix containing true sources\n estimated_sources : np.ndarray, shape=(nsrc, nsampl, nchan)\n matrix containing estimated sources\n compute_permutation : bool, optional\n compute permutation of estimate/source combinations (True by default)\n\n Returns\n -------\n sdr : np.ndarray, shape=(nsrc,)\n vector of Signal to Distortion Ratios (SDR)\n isr : np.ndarray, shape=(nsrc,)\n vector of source Image to Spatial distortion Ratios (ISR)\n sir : np.ndarray, shape=(nsrc,)\n vector of Source to Interference Ratios (SIR)\n sar : np.ndarray, shape=(nsrc,)\n vector of Sources to Artifacts Ratios (SAR)\n perm : np.ndarray, shape=(nsrc,)\n vector containing the best ordering of estimated sources in\n the mean SIR sense (estimated source number ``perm[j]`` corresponds to\n true source number ``j``). Note: ``perm`` will be ``(1,2,...,nsrc)``\n if ``compute_permutation`` is ``False``.\n\n References\n ----------\n .. [#] Emmanuel Vincent, Shoko Araki, Fabian J. Theis, Guido Nolte, Pau\n Bofill, Hiroshi Sawada, Alexey Ozerov, B. Vikrham Gowreesunker, Dominik\n Lutter and Ngoc Q.K. Duong, \"The Signal Separation Evaluation Campaign\n (2007-2010): Achievements and remaining challenges\", Signal Processing,\n 92, pp. 1928-1936, 2012.\n\n "
] |
Please provide a description of the function:def bss_eval_images_framewise(reference_sources, estimated_sources,
window=30*44100, hop=15*44100,
compute_permutation=False):
# make sure the input has 3 dimensions
# assuming input is in shape (nsampl) or (nsrc, nsampl)
estimated_sources = np.atleast_3d(estimated_sources)
reference_sources = np.atleast_3d(reference_sources)
# we will ensure input doesn't have more than 3 dimensions in validate
validate(reference_sources, estimated_sources)
# If empty matrices were supplied, return empty lists (special case)
if reference_sources.size == 0 or estimated_sources.size == 0:
return np.array([]), np.array([]), np.array([]), np.array([])
nsrc = reference_sources.shape[0]
nwin = int(
np.floor((reference_sources.shape[1] - window + hop) / hop)
)
# if fewer than 2 windows would be evaluated, return the images result
if nwin < 2:
result = bss_eval_images(reference_sources,
estimated_sources,
compute_permutation)
return [np.expand_dims(score, -1) for score in result]
# compute the criteria across all windows
sdr = np.empty((nsrc, nwin))
isr = np.empty((nsrc, nwin))
sir = np.empty((nsrc, nwin))
sar = np.empty((nsrc, nwin))
perm = np.empty((nsrc, nwin))
# k iterates across all the windows
for k in range(nwin):
win_slice = slice(k * hop, k * hop + window)
ref_slice = reference_sources[:, win_slice, :]
est_slice = estimated_sources[:, win_slice, :]
# check for a silent frame
if (not _any_source_silent(ref_slice) and
not _any_source_silent(est_slice)):
sdr[:, k], isr[:, k], sir[:, k], sar[:, k], perm[:, k] = \
bss_eval_images(
ref_slice, est_slice, compute_permutation
)
else:
# if we have a silent frame set results as np.nan
sdr[:, k] = isr[:, k] = sir[:, k] = sar[:, k] = perm[:, k] = np.nan
return sdr, isr, sir, sar, perm | [
"Framewise computation of bss_eval_images\n\n Please be aware that this function does not compute permutations (by\n default) on the possible relations between ``reference_sources`` and\n ``estimated_sources`` due to the dangers of a changing permutation.\n Therefore (by default), it assumes that ``reference_sources[i]``\n corresponds to ``estimated_sources[i]``. To enable computing permutations\n please set ``compute_permutation`` to be ``True`` and check that the\n returned ``perm`` is identical for all windows.\n\n NOTE: if ``reference_sources`` and ``estimated_sources`` would be evaluated\n using only a single window or are shorter than the window length, the\n result of ``bss_eval_images`` called on ``reference_sources`` and\n ``estimated_sources`` (with the ``compute_permutation`` parameter passed to\n ``bss_eval_images``) is returned\n\n Examples\n --------\n >>> # reference_sources[n] should be an ndarray of samples of the\n >>> # n'th reference source\n >>> # estimated_sources[n] should be the same for the n'th estimated\n >>> # source\n >>> (sdr, isr, sir, sar,\n ... perm) = mir_eval.separation.bss_eval_images_framewise(\n reference_sources,\n ... estimated_sources,\n window,\n .... hop)\n\n Parameters\n ----------\n reference_sources : np.ndarray, shape=(nsrc, nsampl, nchan)\n matrix containing true sources (must have the same shape as\n ``estimated_sources``)\n estimated_sources : np.ndarray, shape=(nsrc, nsampl, nchan)\n matrix containing estimated sources (must have the same shape as\n ``reference_sources``)\n window : int\n Window length for framewise evaluation\n hop : int\n Hop size for framewise evaluation\n compute_permutation : bool, optional\n compute permutation of estimate/source combinations for all windows\n (False by default)\n\n Returns\n -------\n sdr : np.ndarray, shape=(nsrc, nframes)\n vector of Signal to Distortion Ratios (SDR)\n isr : np.ndarray, shape=(nsrc, nframes)\n vector of source Image to Spatial distortion Ratios (ISR)\n sir : np.ndarray, shape=(nsrc, nframes)\n vector of Source to Interference Ratios (SIR)\n sar : np.ndarray, shape=(nsrc, nframes)\n vector of Sources to Artifacts Ratios (SAR)\n perm : np.ndarray, shape=(nsrc, nframes)\n vector containing the best ordering of estimated sources in\n the mean SIR sense (estimated source number perm[j] corresponds to\n true source number j)\n Note: perm will be range(nsrc) for all windows if compute_permutation\n is False\n\n "
] |
Please provide a description of the function:def _bss_decomp_mtifilt(reference_sources, estimated_source, j, flen):
nsampl = estimated_source.size
# decomposition
# true source image
s_true = np.hstack((reference_sources[j], np.zeros(flen - 1)))
# spatial (or filtering) distortion
e_spat = _project(reference_sources[j, np.newaxis, :], estimated_source,
flen) - s_true
# interference
e_interf = _project(reference_sources,
estimated_source, flen) - s_true - e_spat
# artifacts
e_artif = -s_true - e_spat - e_interf
e_artif[:nsampl] += estimated_source
return (s_true, e_spat, e_interf, e_artif) | [
"Decomposition of an estimated source image into four components\n representing respectively the true source image, spatial (or filtering)\n distortion, interference and artifacts, derived from the true source\n images using multichannel time-invariant filters.\n "
] |
Please provide a description of the function:def _bss_decomp_mtifilt_images(reference_sources, estimated_source, j, flen,
Gj=None, G=None):
nsampl = np.shape(estimated_source)[0]
nchan = np.shape(estimated_source)[1]
# are we saving the Gj and G parameters?
saveg = Gj is not None and G is not None
# decomposition
# true source image
s_true = np.hstack((np.reshape(reference_sources[j],
(nsampl, nchan),
order="F").transpose(),
np.zeros((nchan, flen - 1))))
# spatial (or filtering) distortion
if saveg:
e_spat, Gj = _project_images(reference_sources[j, np.newaxis, :],
estimated_source, flen, Gj)
else:
e_spat = _project_images(reference_sources[j, np.newaxis, :],
estimated_source, flen)
e_spat = e_spat - s_true
# interference
if saveg:
e_interf, G = _project_images(reference_sources,
estimated_source, flen, G)
else:
e_interf = _project_images(reference_sources,
estimated_source, flen)
e_interf = e_interf - s_true - e_spat
# artifacts
e_artif = -s_true - e_spat - e_interf
e_artif[:, :nsampl] += estimated_source.transpose()
# return Gj and G only if they were passed in
if saveg:
return (s_true, e_spat, e_interf, e_artif, Gj, G)
else:
return (s_true, e_spat, e_interf, e_artif) | [
"Decomposition of an estimated source image into four components\n representing respectively the true source image, spatial (or filtering)\n distortion, interference and artifacts, derived from the true source\n images using multichannel time-invariant filters.\n Adapted version to work with multichannel sources.\n Improved performance can be gained by passing Gj and G parameters initially\n as all zeros. These parameters store the results from the computation of\n the G matrix in _project_images and then return them for subsequent calls\n to this function. This only works when not computing permuations.\n "
] |
Please provide a description of the function:def _project(reference_sources, estimated_source, flen):
nsrc = reference_sources.shape[0]
nsampl = reference_sources.shape[1]
# computing coefficients of least squares problem via FFT ##
# zero padding and FFT of input data
reference_sources = np.hstack((reference_sources,
np.zeros((nsrc, flen - 1))))
estimated_source = np.hstack((estimated_source, np.zeros(flen - 1)))
n_fft = int(2**np.ceil(np.log2(nsampl + flen - 1.)))
sf = scipy.fftpack.fft(reference_sources, n=n_fft, axis=1)
sef = scipy.fftpack.fft(estimated_source, n=n_fft)
# inner products between delayed versions of reference_sources
G = np.zeros((nsrc * flen, nsrc * flen))
for i in range(nsrc):
for j in range(nsrc):
ssf = sf[i] * np.conj(sf[j])
ssf = np.real(scipy.fftpack.ifft(ssf))
ss = toeplitz(np.hstack((ssf[0], ssf[-1:-flen:-1])),
r=ssf[:flen])
G[i * flen: (i+1) * flen, j * flen: (j+1) * flen] = ss
G[j * flen: (j+1) * flen, i * flen: (i+1) * flen] = ss.T
# inner products between estimated_source and delayed versions of
# reference_sources
D = np.zeros(nsrc * flen)
for i in range(nsrc):
ssef = sf[i] * np.conj(sef)
ssef = np.real(scipy.fftpack.ifft(ssef))
D[i * flen: (i+1) * flen] = np.hstack((ssef[0], ssef[-1:-flen:-1]))
# Computing projection
# Distortion filters
try:
C = np.linalg.solve(G, D).reshape(flen, nsrc, order='F')
except np.linalg.linalg.LinAlgError:
C = np.linalg.lstsq(G, D)[0].reshape(flen, nsrc, order='F')
# Filtering
sproj = np.zeros(nsampl + flen - 1)
for i in range(nsrc):
sproj += fftconvolve(C[:, i], reference_sources[i])[:nsampl + flen - 1]
return sproj | [
"Least-squares projection of estimated source on the subspace spanned by\n delayed versions of reference sources, with delays between 0 and flen-1\n "
] |
Please provide a description of the function:def _project_images(reference_sources, estimated_source, flen, G=None):
nsrc = reference_sources.shape[0]
nsampl = reference_sources.shape[1]
nchan = reference_sources.shape[2]
reference_sources = np.reshape(np.transpose(reference_sources, (2, 0, 1)),
(nchan*nsrc, nsampl), order='F')
# computing coefficients of least squares problem via FFT ##
# zero padding and FFT of input data
reference_sources = np.hstack((reference_sources,
np.zeros((nchan*nsrc, flen - 1))))
estimated_source = \
np.hstack((estimated_source.transpose(), np.zeros((nchan, flen - 1))))
n_fft = int(2**np.ceil(np.log2(nsampl + flen - 1.)))
sf = scipy.fftpack.fft(reference_sources, n=n_fft, axis=1)
sef = scipy.fftpack.fft(estimated_source, n=n_fft)
# inner products between delayed versions of reference_sources
if G is None:
saveg = False
G = np.zeros((nchan * nsrc * flen, nchan * nsrc * flen))
for i in range(nchan * nsrc):
for j in range(i+1):
ssf = sf[i] * np.conj(sf[j])
ssf = np.real(scipy.fftpack.ifft(ssf))
ss = toeplitz(np.hstack((ssf[0], ssf[-1:-flen:-1])),
r=ssf[:flen])
G[i * flen: (i+1) * flen, j * flen: (j+1) * flen] = ss
G[j * flen: (j+1) * flen, i * flen: (i+1) * flen] = ss.T
else: # avoid recomputing G (only works if no permutation is desired)
saveg = True # return G
if np.all(G == 0): # only compute G if passed as 0
G = np.zeros((nchan * nsrc * flen, nchan * nsrc * flen))
for i in range(nchan * nsrc):
for j in range(i+1):
ssf = sf[i] * np.conj(sf[j])
ssf = np.real(scipy.fftpack.ifft(ssf))
ss = toeplitz(np.hstack((ssf[0], ssf[-1:-flen:-1])),
r=ssf[:flen])
G[i * flen: (i+1) * flen, j * flen: (j+1) * flen] = ss
G[j * flen: (j+1) * flen, i * flen: (i+1) * flen] = ss.T
# inner products between estimated_source and delayed versions of
# reference_sources
D = np.zeros((nchan * nsrc * flen, nchan))
for k in range(nchan * nsrc):
for i in range(nchan):
ssef = sf[k] * np.conj(sef[i])
ssef = np.real(scipy.fftpack.ifft(ssef))
D[k * flen: (k+1) * flen, i] = \
np.hstack((ssef[0], ssef[-1:-flen:-1])).transpose()
# Computing projection
# Distortion filters
try:
C = np.linalg.solve(G, D).reshape(flen, nchan*nsrc, nchan, order='F')
except np.linalg.linalg.LinAlgError:
C = np.linalg.lstsq(G, D)[0].reshape(flen, nchan*nsrc, nchan,
order='F')
# Filtering
sproj = np.zeros((nchan, nsampl + flen - 1))
for k in range(nchan * nsrc):
for i in range(nchan):
sproj[i] += fftconvolve(C[:, k, i].transpose(),
reference_sources[k])[:nsampl + flen - 1]
# return G only if it was passed in
if saveg:
return sproj, G
else:
return sproj | [
"Least-squares projection of estimated source on the subspace spanned by\n delayed versions of reference sources, with delays between 0 and flen-1.\n Passing G as all zeros will populate the G matrix and return it so it can\n be passed into the next call to avoid recomputing G (this will only works\n if not computing permutations).\n "
] |
Please provide a description of the function:def _bss_source_crit(s_true, e_spat, e_interf, e_artif):
# energy ratios
s_filt = s_true + e_spat
sdr = _safe_db(np.sum(s_filt**2), np.sum((e_interf + e_artif)**2))
sir = _safe_db(np.sum(s_filt**2), np.sum(e_interf**2))
sar = _safe_db(np.sum((s_filt + e_interf)**2), np.sum(e_artif**2))
return (sdr, sir, sar) | [
"Measurement of the separation quality for a given source in terms of\n filtered true source, interference and artifacts.\n "
] |
Please provide a description of the function:def _bss_image_crit(s_true, e_spat, e_interf, e_artif):
# energy ratios
sdr = _safe_db(np.sum(s_true**2), np.sum((e_spat+e_interf+e_artif)**2))
isr = _safe_db(np.sum(s_true**2), np.sum(e_spat**2))
sir = _safe_db(np.sum((s_true+e_spat)**2), np.sum(e_interf**2))
sar = _safe_db(np.sum((s_true+e_spat+e_interf)**2), np.sum(e_artif**2))
return (sdr, isr, sir, sar) | [
"Measurement of the separation quality for a given image in terms of\n filtered true source, spatial error, interference and artifacts.\n "
] |
Please provide a description of the function:def _safe_db(num, den):
if den == 0:
return np.Inf
return 10 * np.log10(num / den) | [
"Properly handle the potential +Inf db SIR, instead of raising a\n RuntimeWarning. Only denominator is checked because the numerator can never\n be 0.\n "
] |
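As a quick, hedged illustration of the helper above, the energy values below are made up rather than taken from any separation result:
import mir_eval.separation

print(mir_eval.separation._safe_db(100.0, 1.0))  # 20.0 -- a 100:1 energy ratio corresponds to 20 dB
print(mir_eval.separation._safe_db(1.0, 0.0))    # inf -- zero denominator handled without a RuntimeWarning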
Please provide a description of the function:def evaluate(reference_sources, estimated_sources, **kwargs):
# Compute all the metrics
scores = collections.OrderedDict()
sdr, isr, sir, sar, perm = util.filter_kwargs(
bss_eval_images,
reference_sources,
estimated_sources,
**kwargs
)
scores['Images - Source to Distortion'] = sdr.tolist()
scores['Images - Image to Spatial'] = isr.tolist()
scores['Images - Source to Interference'] = sir.tolist()
scores['Images - Source to Artifact'] = sar.tolist()
scores['Images - Source permutation'] = perm.tolist()
sdr, isr, sir, sar, perm = util.filter_kwargs(
bss_eval_images_framewise,
reference_sources,
estimated_sources,
**kwargs
)
scores['Images Frames - Source to Distortion'] = sdr.tolist()
scores['Images Frames - Image to Spatial'] = isr.tolist()
scores['Images Frames - Source to Interference'] = sir.tolist()
scores['Images Frames - Source to Artifact'] = sar.tolist()
scores['Images Frames - Source permutation'] = perm.tolist()
# Verify we can compute sources on this input
if reference_sources.ndim < 3 and estimated_sources.ndim < 3:
sdr, sir, sar, perm = util.filter_kwargs(
bss_eval_sources_framewise,
reference_sources,
estimated_sources,
**kwargs
)
scores['Sources Frames - Source to Distortion'] = sdr.tolist()
scores['Sources Frames - Source to Interference'] = sir.tolist()
scores['Sources Frames - Source to Artifact'] = sar.tolist()
scores['Sources Frames - Source permutation'] = perm.tolist()
sdr, sir, sar, perm = util.filter_kwargs(
bss_eval_sources,
reference_sources,
estimated_sources,
**kwargs
)
scores['Sources - Source to Distortion'] = sdr.tolist()
scores['Sources - Source to Interference'] = sir.tolist()
scores['Sources - Source to Artifact'] = sar.tolist()
scores['Sources - Source permutation'] = perm.tolist()
return scores | [
"Compute all metrics for the given reference and estimated signals.\n\n NOTE: This will always compute :func:`mir_eval.separation.bss_eval_images`\n for any valid input and will additionally compute\n :func:`mir_eval.separation.bss_eval_sources` for valid input with fewer\n than 3 dimensions.\n\n Examples\n --------\n >>> # reference_sources[n] should be an ndarray of samples of the\n >>> # n'th reference source\n >>> # estimated_sources[n] should be the same for the n'th estimated source\n >>> scores = mir_eval.separation.evaluate(reference_sources,\n ... estimated_sources)\n\n Parameters\n ----------\n reference_sources : np.ndarray, shape=(nsrc, nsampl[, nchan])\n matrix containing true sources\n estimated_sources : np.ndarray, shape=(nsrc, nsampl[, nchan])\n matrix containing estimated sources\n kwargs\n Additional keyword arguments which will be passed to the\n appropriate metric or preprocessing functions.\n\n Returns\n -------\n scores : dict\n Dictionary of scores, where the key is the metric name (str) and\n the value is the (float) score achieved.\n\n "
] |
Please provide a description of the function:def clicks(times, fs, click=None, length=None):
# Create default click signal
if click is None:
# 1 kHz tone, 100ms
click = np.sin(2*np.pi*np.arange(fs*.1)*1000/(1.*fs))
# Exponential decay
click *= np.exp(-np.arange(fs*.1)/(fs*.01))
# Set default length
if length is None:
length = int(times.max()*fs + click.shape[0] + 1)
# Pre-allocate click signal
click_signal = np.zeros(length)
# Place clicks
for time in times:
# Compute the boundaries of the click
start = int(time*fs)
end = start + click.shape[0]
# Make sure we don't try to output past the end of the signal
if start >= length:
break
if end >= length:
click_signal[start:] = click[:length - start]
break
# Normally, just add a click here
click_signal[start:end] = click
return click_signal | [
"Returns a signal with the signal 'click' placed at each specified time\n\n Parameters\n ----------\n times : np.ndarray\n times to place clicks, in seconds\n fs : int\n desired sampling rate of the output signal\n click : np.ndarray\n click signal, defaults to a 1 kHz blip\n length : int\n desired number of samples in the output signal,\n defaults to ``times.max()*fs + click.shape[0] + 1``\n\n Returns\n -------\n click_signal : np.ndarray\n Synthesized click signal\n\n "
] |
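A minimal usage sketch of the click synthesizer above; the onset times and sampling rate are arbitrary placeholders chosen for illustration:
import numpy as np
import mir_eval.sonify

times = np.array([0.5, 1.0, 1.5, 2.0])  # hypothetical click positions, in seconds
fs = 22050                              # hypothetical sampling rate
signal = mir_eval.sonify.clicks(times, fs)
# signal now contains a 1 kHz decaying blip starting at each of the four times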
Please provide a description of the function:def time_frequency(gram, frequencies, times, fs, function=np.sin, length=None,
n_dec=1):
# Default value for length
if times.ndim == 1:
# Convert to intervals
times = util.boundaries_to_intervals(times)
if length is None:
length = int(times[-1, 1] * fs)
times, _ = util.adjust_intervals(times, t_max=length)
# Truncate times so that the shape matches gram
n_times = gram.shape[1]
times = times[:n_times]
def _fast_synthesize(frequency):
# hack so that we can ensure an integer number of periods and samples
# rounds frequency to 1st decimal, s.t. 10 * frequency will be an int
frequency = np.round(frequency, n_dec)
# Generate 10*frequency periods at this frequency
# Equivalent to n_samples = int(n_periods * fs / frequency)
# n_periods = 10*frequency is the smallest integer that guarantees
# that n_samples will be an integer, since assuming 10*frequency
# is an integer
n_samples = int(10.0**n_dec * fs)
short_signal = function(2.0 * np.pi * np.arange(n_samples) *
frequency / fs)
# Calculate the number of loops we need to fill the duration
n_repeats = int(np.ceil(length/float(short_signal.shape[0])))
# Simulate tiling the short buffer by using stride tricks
long_signal = as_strided(short_signal,
shape=(n_repeats, len(short_signal)),
strides=(0, short_signal.itemsize))
# Use a flatiter to simulate a long 1D buffer
return long_signal.flat
def _const_interpolator(value):
def __interpolator(x):
return value
return __interpolator
# Threshold the tfgram to remove non-positive values
gram = np.maximum(gram, 0)
# Pre-allocate output signal
output = np.zeros(length)
time_centers = np.mean(times, axis=1) * float(fs)
for n, frequency in enumerate(frequencies):
# Get a waveform of length samples at this frequency
wave = _fast_synthesize(frequency)
# Interpolate the values in gram over the time grid
if len(time_centers) > 1:
gram_interpolator = interp1d(
time_centers, gram[n, :],
kind='linear', bounds_error=False,
fill_value=0.0)
# If only one time point, create constant interpolator
else:
gram_interpolator = _const_interpolator(gram[n, 0])
# Scale each time interval by the piano roll magnitude
for m, (start, end) in enumerate((times * fs).astype(int)):
# Clip the timings to make sure the indices are valid
start, end = max(start, 0), min(end, length)
# add to waveform
output[start:end] += (
wave[start:end] * gram_interpolator(np.arange(start, end)))
# Normalize, but only if there's non-zero values
norm = np.abs(output).max()
if norm >= np.finfo(output.dtype).tiny:
output /= norm
return output | [
"Reverse synthesis of a time-frequency representation of a signal\n\n Parameters\n ----------\n gram : np.ndarray\n ``gram[n, m]`` is the magnitude of ``frequencies[n]``\n from ``times[m]`` to ``times[m + 1]``\n\n Non-positive magnitudes are interpreted as silence.\n\n frequencies : np.ndarray\n array of size ``gram.shape[0]`` denoting the frequency of\n each row of gram\n times : np.ndarray, shape= ``(gram.shape[1],)`` or ``(gram.shape[1], 2)``\n Either the start time of each column in the gram,\n or the time interval corresponding to each column.\n fs : int\n desired sampling rate of the output signal\n function : function\n function to use to synthesize notes, should be :math:`2\\pi`-periodic\n length : int\n desired number of samples in the output signal,\n defaults to ``times[-1]*fs``\n n_dec : int\n the number of decimals used to approximate each sonfied frequency.\n Defaults to 1 decimal place. Higher precision will be slower.\n\n Returns\n -------\n output : np.ndarray\n synthesized version of the piano roll\n\n ",
"A faster way to synthesize a signal.\n Generate one cycle, and simulate arbitrary repetitions\n using array indexing tricks.\n ",
"Return a function that returns `value`\n no matter the input.\n "
] |
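A small sketch of sonifying a toy time-frequency matrix with the function above; the two-note "piano roll" and sampling rate are invented for illustration:
import numpy as np
import mir_eval.sonify

# Hypothetical gram: 440 Hz active in the first interval, 660 Hz in the second
gram = np.array([[1.0, 0.0],
                 [0.0, 1.0]])
frequencies = np.array([440.0, 660.0])
times = np.array([[0.0, 1.0],
                  [1.0, 2.0]])  # one (start, end) interval per gram column
fs = 8000
audio = mir_eval.sonify.time_frequency(gram, frequencies, times, fs)
# audio is a normalized signal of length times[-1, 1] * fs == 16000 samples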
Please provide a description of the function:def pitch_contour(times, frequencies, fs, amplitudes=None, function=np.sin,
length=None, kind='linear'):
'''Sonify a pitch contour.
Parameters
----------
times : np.ndarray
time indices for each frequency measurement, in seconds
frequencies : np.ndarray
frequency measurements, in Hz.
Non-positive measurements will be interpreted as un-voiced samples.
fs : int
desired sampling rate of the output signal
amplitudes : np.ndarray
        amplitude measurements, nonnegative
defaults to ``np.ones((length,))``
function : function
function to use to synthesize notes, should be :math:`2\pi`-periodic
length : int
desired number of samples in the output signal,
defaults to ``max(times)*fs``
kind : str
Interpolation mode for the frequency and amplitude values.
See: ``scipy.interpolate.interp1d`` for valid settings.
Returns
-------
output : np.ndarray
synthesized version of the pitch contour
'''
fs = float(fs)
if length is None:
length = int(times.max() * fs)
# Squash the negative frequencies.
# wave(0) = 0, so clipping here will un-voice the corresponding instants
frequencies = np.maximum(frequencies, 0.0)
# Build a frequency interpolator
f_interp = interp1d(times * fs, 2 * np.pi * frequencies / fs, kind=kind,
fill_value=0.0, bounds_error=False, copy=False)
# Estimate frequency at sample points
f_est = f_interp(np.arange(length))
if amplitudes is None:
a_est = np.ones((length, ))
else:
# build an amplitude interpolator
a_interp = interp1d(
times * fs, amplitudes, kind=kind,
fill_value=0.0, bounds_error=False, copy=False)
a_est = a_interp(np.arange(length))
# Sonify the waveform
return a_est * function(np.cumsum(f_est)) | [] |
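A hedged usage sketch of the contour sonifier above, with an invented linear glide as input:
import numpy as np
import mir_eval.sonify

# Hypothetical contour: a one-second glide from 220 Hz to 440 Hz
times = np.linspace(0.0, 1.0, 11)
frequencies = np.linspace(220.0, 440.0, 11)
fs = 8000
audio = mir_eval.sonify.pitch_contour(times, frequencies, fs)
# audio has int(times.max() * fs) == 8000 samples; amplitudes default to 1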
Please provide a description of the function:def chroma(chromagram, times, fs, **kwargs):
# We'll just use time_frequency with a Shepard tone-gram
# To create the Shepard tone-gram, we copy the chromagram across 7 octaves
n_octaves = 7
# starting from C2
base_note = 24
# and weight each octave by a normal distribution
# The normal distribution has mean 72 (one octave above middle C)
# and std 6 (one half octave)
mean = 72
std = 6
notes = np.arange(12*n_octaves) + base_note
shepard_weight = np.exp(-(notes - mean)**2./(2.*std**2.))
# Copy the chromagram matrix vertically n_octaves times
gram = np.tile(chromagram.T, n_octaves).T
# This fixes issues if the supplied chromagram is int type
gram = gram.astype(float)
    # Apply Shepard weighting
gram *= shepard_weight.reshape(-1, 1)
# Compute frequencies
frequencies = 440.0*(2.0**((notes - 69)/12.0))
return time_frequency(gram, frequencies, times, fs, **kwargs) | [
"Reverse synthesis of a chromagram (semitone matrix)\n\n Parameters\n ----------\n chromagram : np.ndarray, shape=(12, times.shape[0])\n Chromagram matrix, where each row represents a semitone [C->Bb]\n i.e., ``chromagram[3, j]`` is the magnitude of D# from ``times[j]`` to\n ``times[j + 1]``\n times: np.ndarray, shape=(len(chord_labels),) or (len(chord_labels), 2)\n Either the start time of each column in the chromagram,\n or the time interval corresponding to each column.\n fs : int\n Sampling rate to synthesize audio data at\n kwargs\n Additional keyword arguments to pass to\n :func:`mir_eval.sonify.time_frequency`\n\n Returns\n -------\n output : np.ndarray\n Synthesized chromagram\n\n "
] |
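For illustration, a sketch that sonifies a two-column toy chromagram with the function above; the pitch classes and timing are arbitrary:
import numpy as np
import mir_eval.sonify

chromagram = np.zeros((12, 2))
chromagram[0, 0] = 1.0  # C active in the first interval
chromagram[7, 1] = 1.0  # G active in the second interval
intervals = np.array([[0.0, 1.0],
                      [1.0, 2.0]])
fs = 8000
audio = mir_eval.sonify.chroma(chromagram, intervals, fs)
# Each active pitch class is rendered as a Shepard tone spanning seven octaves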
Please provide a description of the function:def chords(chord_labels, intervals, fs, **kwargs):
util.validate_intervals(intervals)
# Convert from labels to chroma
roots, interval_bitmaps, _ = chord.encode_many(chord_labels)
chromagram = np.array([np.roll(interval_bitmap, root)
for (interval_bitmap, root)
in zip(interval_bitmaps, roots)]).T
return chroma(chromagram, intervals, fs, **kwargs) | [
"Synthesizes chord labels\n\n Parameters\n ----------\n chord_labels : list of str\n List of chord label strings.\n intervals : np.ndarray, shape=(len(chord_labels), 2)\n Start and end times of each chord label\n fs : int\n Sampling rate to synthesize at\n kwargs\n Additional keyword arguments to pass to\n :func:`mir_eval.sonify.time_frequency`\n\n Returns\n -------\n output : np.ndarray\n Synthesized chord labels\n\n "
] |
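A minimal sketch of the chord sonifier above, using a made-up two-chord progression:
import numpy as np
import mir_eval.sonify

chord_labels = ['C:maj', 'G:maj']   # hypothetical progression
intervals = np.array([[0.0, 1.0],
                      [1.0, 2.0]])  # one (start, end) pair per label
fs = 8000
audio = mir_eval.sonify.chords(chord_labels, intervals, fs)
# The labels are decoded to chroma vectors and rendered via mir_eval.sonify.chroma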
Please provide a description of the function:def validate(reference_onsets, estimated_onsets):
# If reference or estimated onsets are empty, warn because metric will be 0
if reference_onsets.size == 0:
warnings.warn("Reference onsets are empty.")
if estimated_onsets.size == 0:
warnings.warn("Estimated onsets are empty.")
for onsets in [reference_onsets, estimated_onsets]:
util.validate_events(onsets, MAX_TIME) | [
"Checks that the input annotations to a metric look like valid onset time\n arrays, and throws helpful errors if not.\n\n Parameters\n ----------\n reference_onsets : np.ndarray\n reference onset locations, in seconds\n estimated_onsets : np.ndarray\n estimated onset locations, in seconds\n\n "
] |
Please provide a description of the function:def f_measure(reference_onsets, estimated_onsets, window=.05):
validate(reference_onsets, estimated_onsets)
# If either list is empty, return 0s
if reference_onsets.size == 0 or estimated_onsets.size == 0:
return 0., 0., 0.
# Compute the best-case matching between reference and estimated onset
# locations
matching = util.match_events(reference_onsets, estimated_onsets, window)
precision = float(len(matching))/len(estimated_onsets)
recall = float(len(matching))/len(reference_onsets)
# Compute F-measure and return all statistics
return util.f_measure(precision, recall), precision, recall | [
"Compute the F-measure of correct vs incorrectly predicted onsets.\n \"Corectness\" is determined over a small window.\n\n Examples\n --------\n >>> reference_onsets = mir_eval.io.load_events('reference.txt')\n >>> estimated_onsets = mir_eval.io.load_events('estimated.txt')\n >>> F, P, R = mir_eval.onset.f_measure(reference_onsets,\n ... estimated_onsets)\n\n Parameters\n ----------\n reference_onsets : np.ndarray\n reference onset locations, in seconds\n estimated_onsets : np.ndarray\n estimated onset locations, in seconds\n window : float\n Window size, in seconds\n (Default value = .05)\n\n Returns\n -------\n f_measure : float\n 2*precision*recall/(precision + recall)\n precision : float\n (# true positives)/(# true positives + # false positives)\n recall : float\n (# true positives)/(# true positives + # false negatives)\n\n "
] |
Please provide a description of the function:def evaluate(reference_onsets, estimated_onsets, **kwargs):
# Compute all metrics
scores = collections.OrderedDict()
(scores['F-measure'],
scores['Precision'],
scores['Recall']) = util.filter_kwargs(f_measure, reference_onsets,
estimated_onsets, **kwargs)
return scores | [
"Compute all metrics for the given reference and estimated annotations.\n\n Examples\n --------\n >>> reference_onsets = mir_eval.io.load_events('reference.txt')\n >>> estimated_onsets = mir_eval.io.load_events('estimated.txt')\n >>> scores = mir_eval.onset.evaluate(reference_onsets,\n ... estimated_onsets)\n\n Parameters\n ----------\n reference_onsets : np.ndarray\n reference onset locations, in seconds\n estimated_onsets : np.ndarray\n estimated onset locations, in seconds\n kwargs\n Additional keyword arguments which will be passed to the\n appropriate metric or preprocessing functions.\n\n Returns\n -------\n scores : dict\n Dictionary of scores, where the key is the metric name (str) and\n the value is the (float) score achieved.\n\n "
] |
Please provide a description of the function:def validate(ref_intervals, ref_pitches, est_intervals, est_pitches):
# Validate intervals
validate_intervals(ref_intervals, est_intervals)
# Make sure intervals and pitches match in length
if not ref_intervals.shape[0] == ref_pitches.shape[0]:
raise ValueError('Reference intervals and pitches have different '
'lengths.')
if not est_intervals.shape[0] == est_pitches.shape[0]:
raise ValueError('Estimated intervals and pitches have different '
'lengths.')
# Make sure all pitch values are positive
if ref_pitches.size > 0 and np.min(ref_pitches) <= 0:
raise ValueError("Reference contains at least one non-positive pitch "
"value")
if est_pitches.size > 0 and np.min(est_pitches) <= 0:
raise ValueError("Estimate contains at least one non-positive pitch "
"value") | [
"Checks that the input annotations to a metric look like time intervals\n and a pitch list, and throws helpful errors if not.\n\n Parameters\n ----------\n ref_intervals : np.ndarray, shape=(n,2)\n Array of reference notes time intervals (onset and offset times)\n ref_pitches : np.ndarray, shape=(n,)\n Array of reference pitch values in Hertz\n est_intervals : np.ndarray, shape=(m,2)\n Array of estimated notes time intervals (onset and offset times)\n est_pitches : np.ndarray, shape=(m,)\n Array of estimated pitch values in Hertz\n "
] |
Please provide a description of the function:def validate_intervals(ref_intervals, est_intervals):
# If reference or estimated notes are empty, warn
if ref_intervals.size == 0:
warnings.warn("Reference notes are empty.")
if est_intervals.size == 0:
warnings.warn("Estimated notes are empty.")
# Validate intervals
util.validate_intervals(ref_intervals)
util.validate_intervals(est_intervals) | [
"Checks that the input annotations to a metric look like time intervals,\n and throws helpful errors if not.\n\n Parameters\n ----------\n ref_intervals : np.ndarray, shape=(n,2)\n Array of reference notes time intervals (onset and offset times)\n est_intervals : np.ndarray, shape=(m,2)\n Array of estimated notes time intervals (onset and offset times)\n "
] |
Please provide a description of the function:def match_note_offsets(ref_intervals, est_intervals, offset_ratio=0.2,
offset_min_tolerance=0.05, strict=False):
# set the comparison function
if strict:
cmp_func = np.less
else:
cmp_func = np.less_equal
# check for offset matches
offset_distances = np.abs(np.subtract.outer(ref_intervals[:, 1],
est_intervals[:, 1]))
# Round distances to a target precision to avoid the situation where
# if the distance is exactly 50ms (and strict=False) it erroneously
# doesn't match the notes because of precision issues.
offset_distances = np.around(offset_distances, decimals=N_DECIMALS)
ref_durations = util.intervals_to_durations(ref_intervals)
offset_tolerances = np.maximum(offset_ratio * ref_durations,
offset_min_tolerance)
offset_hit_matrix = (
cmp_func(offset_distances, offset_tolerances.reshape(-1, 1)))
# check for hits
hits = np.where(offset_hit_matrix)
# Construct the graph input
# Flip graph so that 'matching' is a list of tuples where the first item
# in each tuple is the reference note index, and the second item is the
# estimated note index.
G = {}
for ref_i, est_i in zip(*hits):
if est_i not in G:
G[est_i] = []
G[est_i].append(ref_i)
# Compute the maximum matching
matching = sorted(util._bipartite_match(G).items())
return matching | [
"Compute a maximum matching between reference and estimated notes,\n only taking note offsets into account.\n\n Given two note sequences represented by ``ref_intervals`` and\n ``est_intervals`` (see :func:`mir_eval.io.load_valued_intervals`), we seek\n the largest set of correspondences ``(i, j)`` such that the offset of\n reference note ``i`` has to be within ``offset_tolerance`` of the offset of\n estimated note ``j``, where ``offset_tolerance`` is equal to\n ``offset_ratio`` times the reference note's duration, i.e. ``offset_ratio\n * ref_duration[i]`` where ``ref_duration[i] = ref_intervals[i, 1] -\n ref_intervals[i, 0]``. If the resulting ``offset_tolerance`` is less than\n ``offset_min_tolerance`` (50 ms by default) then ``offset_min_tolerance``\n is used instead.\n\n Every reference note is matched against at most one estimated note.\n\n Note there are separate functions :func:`match_note_onsets` and\n :func:`match_notes` for matching notes based on onsets only or based on\n onset, offset, and pitch, respectively. This is because the rules for\n matching note onsets and matching note offsets are different.\n\n Parameters\n ----------\n ref_intervals : np.ndarray, shape=(n,2)\n Array of reference notes time intervals (onset and offset times)\n est_intervals : np.ndarray, shape=(m,2)\n Array of estimated notes time intervals (onset and offset times)\n offset_ratio : float > 0\n The ratio of the reference note's duration used to define the\n ``offset_tolerance``. Default is 0.2 (20%), meaning the\n ``offset_tolerance`` will equal the ``ref_duration * 0.2``, or 0.05 (50\n ms), whichever is greater.\n offset_min_tolerance : float > 0\n The minimum tolerance for offset matching. See ``offset_ratio``\n description for an explanation of how the offset tolerance is\n determined.\n strict : bool\n If ``strict=False`` (the default), threshold checks for offset\n matching are performed using ``<=`` (less than or equal). If\n ``strict=True``, the threshold checks are performed using ``<`` (less\n than).\n\n Returns\n -------\n matching : list of tuples\n A list of matched reference and estimated notes.\n ``matching[i] == (i, j)`` where reference note ``i`` matches estimated\n note ``j``.\n "
] |
Please provide a description of the function:def match_note_onsets(ref_intervals, est_intervals, onset_tolerance=0.05,
strict=False):
# set the comparison function
if strict:
cmp_func = np.less
else:
cmp_func = np.less_equal
# check for onset matches
onset_distances = np.abs(np.subtract.outer(ref_intervals[:, 0],
est_intervals[:, 0]))
# Round distances to a target precision to avoid the situation where
# if the distance is exactly 50ms (and strict=False) it erroneously
# doesn't match the notes because of precision issues.
onset_distances = np.around(onset_distances, decimals=N_DECIMALS)
onset_hit_matrix = cmp_func(onset_distances, onset_tolerance)
# find hits
hits = np.where(onset_hit_matrix)
# Construct the graph input
# Flip graph so that 'matching' is a list of tuples where the first item
# in each tuple is the reference note index, and the second item is the
# estimated note index.
G = {}
for ref_i, est_i in zip(*hits):
if est_i not in G:
G[est_i] = []
G[est_i].append(ref_i)
# Compute the maximum matching
matching = sorted(util._bipartite_match(G).items())
return matching | [
"Compute a maximum matching between reference and estimated notes,\n only taking note onsets into account.\n\n Given two note sequences represented by ``ref_intervals`` and\n ``est_intervals`` (see :func:`mir_eval.io.load_valued_intervals`), we see\n the largest set of correspondences ``(i,j)`` such that the onset of\n reference note ``i`` is within ``onset_tolerance`` of the onset of\n estimated note ``j``.\n\n Every reference note is matched against at most one estimated note.\n\n Note there are separate functions :func:`match_note_offsets` and\n :func:`match_notes` for matching notes based on offsets only or based on\n onset, offset, and pitch, respectively. This is because the rules for\n matching note onsets and matching note offsets are different.\n\n Parameters\n ----------\n ref_intervals : np.ndarray, shape=(n,2)\n Array of reference notes time intervals (onset and offset times)\n est_intervals : np.ndarray, shape=(m,2)\n Array of estimated notes time intervals (onset and offset times)\n onset_tolerance : float > 0\n The tolerance for an estimated note's onset deviating from the\n reference note's onset, in seconds. Default is 0.05 (50 ms).\n strict : bool\n If ``strict=False`` (the default), threshold checks for onset matching\n are performed using ``<=`` (less than or equal). If ``strict=True``,\n the threshold checks are performed using ``<`` (less than).\n\n Returns\n -------\n matching : list of tuples\n A list of matched reference and estimated notes.\n ``matching[i] == (i, j)`` where reference note ``i`` matches estimated\n note ``j``.\n "
] |
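To make the onset-only matching concrete, here is a hedged sketch on two invented reference/estimated notes whose onsets fall within the default 50 ms window:
import numpy as np
import mir_eval.transcription

ref_intervals = np.array([[0.10, 0.50], [1.00, 1.40]])  # hypothetical reference notes
est_intervals = np.array([[0.11, 0.60], [1.03, 1.30]])  # onsets 10 ms and 30 ms late
matching = mir_eval.transcription.match_note_onsets(ref_intervals, est_intervals)
print(matching)  # [(0, 0), (1, 1)] -- both onsets are within the 50 ms tolerance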
Please provide a description of the function:def match_notes(ref_intervals, ref_pitches, est_intervals, est_pitches,
onset_tolerance=0.05, pitch_tolerance=50.0, offset_ratio=0.2,
offset_min_tolerance=0.05, strict=False):
# set the comparison function
if strict:
cmp_func = np.less
else:
cmp_func = np.less_equal
# check for onset matches
onset_distances = np.abs(np.subtract.outer(ref_intervals[:, 0],
est_intervals[:, 0]))
# Round distances to a target precision to avoid the situation where
# if the distance is exactly 50ms (and strict=False) it erroneously
# doesn't match the notes because of precision issues.
onset_distances = np.around(onset_distances, decimals=N_DECIMALS)
onset_hit_matrix = cmp_func(onset_distances, onset_tolerance)
# check for pitch matches
pitch_distances = np.abs(1200*np.subtract.outer(np.log2(ref_pitches),
np.log2(est_pitches)))
pitch_hit_matrix = cmp_func(pitch_distances, pitch_tolerance)
# check for offset matches if offset_ratio is not None
if offset_ratio is not None:
offset_distances = np.abs(np.subtract.outer(ref_intervals[:, 1],
est_intervals[:, 1]))
# Round distances to a target precision to avoid the situation where
# if the distance is exactly 50ms (and strict=False) it erroneously
# doesn't match the notes because of precision issues.
offset_distances = np.around(offset_distances, decimals=N_DECIMALS)
ref_durations = util.intervals_to_durations(ref_intervals)
offset_tolerances = np.maximum(offset_ratio * ref_durations,
offset_min_tolerance)
offset_hit_matrix = (
cmp_func(offset_distances, offset_tolerances.reshape(-1, 1)))
else:
offset_hit_matrix = True
# check for overall matches
note_hit_matrix = onset_hit_matrix * pitch_hit_matrix * offset_hit_matrix
hits = np.where(note_hit_matrix)
# Construct the graph input
# Flip graph so that 'matching' is a list of tuples where the first item
# in each tuple is the reference note index, and the second item is the
# estimated note index.
G = {}
for ref_i, est_i in zip(*hits):
if est_i not in G:
G[est_i] = []
G[est_i].append(ref_i)
# Compute the maximum matching
matching = sorted(util._bipartite_match(G).items())
return matching | [
"Compute a maximum matching between reference and estimated notes,\n subject to onset, pitch and (optionally) offset constraints.\n\n Given two note sequences represented by ``ref_intervals``, ``ref_pitches``,\n ``est_intervals`` and ``est_pitches``\n (see :func:`mir_eval.io.load_valued_intervals`), we seek the largest set\n of correspondences ``(i, j)`` such that:\n\n 1. The onset of reference note ``i`` is within ``onset_tolerance`` of the\n onset of estimated note ``j``.\n 2. The pitch of reference note ``i`` is within ``pitch_tolerance`` of the\n pitch of estimated note ``j``.\n 3. If ``offset_ratio`` is not ``None``, the offset of reference note ``i``\n has to be within ``offset_tolerance`` of the offset of estimated note\n ``j``, where ``offset_tolerance`` is equal to ``offset_ratio`` times the\n reference note's duration, i.e. ``offset_ratio * ref_duration[i]`` where\n ``ref_duration[i] = ref_intervals[i, 1] - ref_intervals[i, 0]``. If the\n resulting ``offset_tolerance`` is less than 0.05 (50 ms), 0.05 is used\n instead.\n 4. If ``offset_ratio`` is ``None``, note offsets are ignored, and only\n criteria 1 and 2 are taken into consideration.\n\n Every reference note is matched against at most one estimated note.\n\n This is useful for computing precision/recall metrics for note\n transcription.\n\n Note there are separate functions :func:`match_note_onsets` and\n :func:`match_note_offsets` for matching notes based on onsets only or based\n on offsets only, respectively.\n\n Parameters\n ----------\n ref_intervals : np.ndarray, shape=(n,2)\n Array of reference notes time intervals (onset and offset times)\n ref_pitches : np.ndarray, shape=(n,)\n Array of reference pitch values in Hertz\n est_intervals : np.ndarray, shape=(m,2)\n Array of estimated notes time intervals (onset and offset times)\n est_pitches : np.ndarray, shape=(m,)\n Array of estimated pitch values in Hertz\n onset_tolerance : float > 0\n The tolerance for an estimated note's onset deviating from the\n reference note's onset, in seconds. Default is 0.05 (50 ms).\n pitch_tolerance : float > 0\n The tolerance for an estimated note's pitch deviating from the\n reference note's pitch, in cents. Default is 50.0 (50 cents).\n offset_ratio : float > 0 or None\n The ratio of the reference note's duration used to define the\n offset_tolerance. Default is 0.2 (20%), meaning the\n ``offset_tolerance`` will equal the ``ref_duration * 0.2``, or 0.05 (50\n ms), whichever is greater. If ``offset_ratio`` is set to ``None``,\n offsets are ignored in the matching.\n offset_min_tolerance : float > 0\n The minimum tolerance for offset matching. See offset_ratio description\n for an explanation of how the offset tolerance is determined. Note:\n this parameter only influences the results if ``offset_ratio`` is not\n ``None``.\n strict : bool\n If ``strict=False`` (the default), threshold checks for onset, offset,\n and pitch matching are performed using ``<=`` (less than or equal). If\n ``strict=True``, the threshold checks are performed using ``<`` (less\n than).\n\n Returns\n -------\n matching : list of tuples\n A list of matched reference and estimated notes.\n ``matching[i] == (i, j)`` where reference note ``i`` matches estimated\n note ``j``.\n "
] |
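A hedged sketch of the full onset/pitch/offset matching above, using invented notes where the second estimate is roughly a semitone sharp and therefore fails the 50-cent pitch tolerance:
import numpy as np
import mir_eval.transcription

ref_intervals = np.array([[0.10, 0.50], [1.00, 1.40]])
ref_pitches = np.array([440.0, 220.0])
est_intervals = np.array([[0.11, 0.52], [1.03, 1.35]])
est_pitches = np.array([442.0, 233.1])  # ~8 cents sharp, ~100 cents sharp
matching = mir_eval.transcription.match_notes(
    ref_intervals, ref_pitches, est_intervals, est_pitches)
print(matching)  # [(0, 0)] -- only the first pair satisfies onset, pitch and offset criteria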
Please provide a description of the function:def precision_recall_f1_overlap(ref_intervals, ref_pitches, est_intervals,
est_pitches, onset_tolerance=0.05,
pitch_tolerance=50.0, offset_ratio=0.2,
offset_min_tolerance=0.05, strict=False,
beta=1.0):
validate(ref_intervals, ref_pitches, est_intervals, est_pitches)
# When reference notes are empty, metrics are undefined, return 0's
if len(ref_pitches) == 0 or len(est_pitches) == 0:
return 0., 0., 0., 0.
matching = match_notes(ref_intervals, ref_pitches, est_intervals,
est_pitches, onset_tolerance=onset_tolerance,
pitch_tolerance=pitch_tolerance,
offset_ratio=offset_ratio,
offset_min_tolerance=offset_min_tolerance,
strict=strict)
precision = float(len(matching))/len(est_pitches)
recall = float(len(matching))/len(ref_pitches)
f_measure = util.f_measure(precision, recall, beta=beta)
avg_overlap_ratio = average_overlap_ratio(ref_intervals, est_intervals,
matching)
return precision, recall, f_measure, avg_overlap_ratio | [
"Compute the Precision, Recall and F-measure of correct vs incorrectly\n transcribed notes, and the Average Overlap Ratio for correctly transcribed\n notes (see :func:`average_overlap_ratio`). \"Correctness\" is determined\n based on note onset, pitch and (optionally) offset: an estimated note is\n assumed correct if its onset is within +-50ms of a reference note and its\n pitch (F0) is within +- quarter tone (50 cents) of the corresponding\n reference note. If ``offset_ratio`` is ``None``, note offsets are ignored\n in the comparison. Otherwise, on top of the above requirements, a correct\n returned note is required to have an offset value within 20% (by default,\n adjustable via the ``offset_ratio`` parameter) of the reference note's\n duration around the reference note's offset, or within\n ``offset_min_tolerance`` (50 ms by default), whichever is larger.\n\n Examples\n --------\n >>> ref_intervals, ref_pitches = mir_eval.io.load_valued_intervals(\n ... 'reference.txt')\n >>> est_intervals, est_pitches = mir_eval.io.load_valued_intervals(\n ... 'estimated.txt')\n >>> (precision,\n ... recall,\n ... f_measure) = mir_eval.transcription.precision_recall_f1_overlap(\n ... ref_intervals, ref_pitches, est_intervals, est_pitches)\n >>> (precision_no_offset,\n ... recall_no_offset,\n ... f_measure_no_offset) = (\n ... mir_eval.transcription.precision_recall_f1_overlap(\n ... ref_intervals, ref_pitches, est_intervals, est_pitches,\n ... offset_ratio=None))\n\n Parameters\n ----------\n ref_intervals : np.ndarray, shape=(n,2)\n Array of reference notes time intervals (onset and offset times)\n ref_pitches : np.ndarray, shape=(n,)\n Array of reference pitch values in Hertz\n est_intervals : np.ndarray, shape=(m,2)\n Array of estimated notes time intervals (onset and offset times)\n est_pitches : np.ndarray, shape=(m,)\n Array of estimated pitch values in Hertz\n onset_tolerance : float > 0\n The tolerance for an estimated note's onset deviating from the\n reference note's onset, in seconds. Default is 0.05 (50 ms).\n pitch_tolerance : float > 0\n The tolerance for an estimated note's pitch deviating from the\n reference note's pitch, in cents. Default is 50.0 (50 cents).\n offset_ratio : float > 0 or None\n The ratio of the reference note's duration used to define the\n offset_tolerance. Default is 0.2 (20%), meaning the\n ``offset_tolerance`` will equal the ``ref_duration * 0.2``, or\n ``offset_min_tolerance`` (0.05 by default, i.e. 50 ms), whichever is\n greater. If ``offset_ratio`` is set to ``None``, offsets are ignored in\n the evaluation.\n offset_min_tolerance : float > 0\n The minimum tolerance for offset matching. See ``offset_ratio``\n description for an explanation of how the offset tolerance is\n determined. Note: this parameter only influences the results if\n ``offset_ratio`` is not ``None``.\n strict : bool\n If ``strict=False`` (the default), threshold checks for onset, offset,\n and pitch matching are performed using ``<=`` (less than or equal). If\n ``strict=True``, the threshold checks are performed using ``<`` (less\n than).\n beta : float > 0\n Weighting factor for f-measure (default value = 1.0).\n\n Returns\n -------\n precision : float\n The computed precision score\n recall : float\n The computed recall score\n f_measure : float\n The computed F-measure score\n avg_overlap_ratio : float\n The computed Average Overlap Ratio score\n "
] |
Please provide a description of the function:def average_overlap_ratio(ref_intervals, est_intervals, matching):
ratios = []
for match in matching:
ref_int = ref_intervals[match[0]]
est_int = est_intervals[match[1]]
overlap_ratio = (
(min(ref_int[1], est_int[1]) - max(ref_int[0], est_int[0])) /
(max(ref_int[1], est_int[1]) - min(ref_int[0], est_int[0])))
ratios.append(overlap_ratio)
if len(ratios) == 0:
return 0
else:
return np.mean(ratios) | [
"Compute the Average Overlap Ratio between a reference and estimated\n note transcription. Given a reference and corresponding estimated note,\n their overlap ratio (OR) is defined as the ratio between the duration of\n the time segment in which the two notes overlap and the time segment\n spanned by the two notes combined (earliest onset to latest offset):\n\n >>> OR = ((min(ref_offset, est_offset) - max(ref_onset, est_onset)) /\n ... (max(ref_offset, est_offset) - min(ref_onset, est_onset)))\n\n The Average Overlap Ratio (AOR) is given by the mean OR computed over all\n matching reference and estimated notes. The metric goes from 0 (worst) to 1\n (best).\n\n Note: this function assumes the matching of reference and estimated notes\n (see :func:`match_notes`) has already been performed and is provided by the\n ``matching`` parameter. Furthermore, it is highly recommended to validate\n the intervals (see :func:`validate_intervals`) before calling this\n function, otherwise it is possible (though unlikely) for this function to\n attempt a divide-by-zero operation.\n\n Parameters\n ----------\n ref_intervals : np.ndarray, shape=(n,2)\n Array of reference notes time intervals (onset and offset times)\n est_intervals : np.ndarray, shape=(m,2)\n Array of estimated notes time intervals (onset and offset times)\n matching : list of tuples\n A list of matched reference and estimated notes.\n ``matching[i] == (i, j)`` where reference note ``i`` matches estimated\n note ``j``.\n\n Returns\n -------\n avg_overlap_ratio : float\n The computed Average Overlap Ratio score\n "
] |
Please provide a description of the function:def onset_precision_recall_f1(ref_intervals, est_intervals,
onset_tolerance=0.05, strict=False, beta=1.0):
validate_intervals(ref_intervals, est_intervals)
# When reference notes are empty, metrics are undefined, return 0's
if len(ref_intervals) == 0 or len(est_intervals) == 0:
return 0., 0., 0.
matching = match_note_onsets(ref_intervals, est_intervals,
onset_tolerance=onset_tolerance,
strict=strict)
onset_precision = float(len(matching))/len(est_intervals)
onset_recall = float(len(matching))/len(ref_intervals)
onset_f_measure = util.f_measure(onset_precision, onset_recall, beta=beta)
return onset_precision, onset_recall, onset_f_measure | [
"Compute the Precision, Recall and F-measure of note onsets: an estimated\n onset is considered correct if it is within +-50ms of a reference onset.\n Note that this metric completely ignores note offset and note pitch. This\n means an estimated onset will be considered correct if it matches a\n reference onset, even if the onsets come from notes with completely\n different pitches (i.e. notes that would not match with\n :func:`match_notes`).\n\n\n Examples\n --------\n >>> ref_intervals, _ = mir_eval.io.load_valued_intervals(\n ... 'reference.txt')\n >>> est_intervals, _ = mir_eval.io.load_valued_intervals(\n ... 'estimated.txt')\n >>> (onset_precision,\n ... onset_recall,\n ... onset_f_measure) = mir_eval.transcription.onset_precision_recall_f1(\n ... ref_intervals, est_intervals)\n\n Parameters\n ----------\n ref_intervals : np.ndarray, shape=(n,2)\n Array of reference notes time intervals (onset and offset times)\n est_intervals : np.ndarray, shape=(m,2)\n Array of estimated notes time intervals (onset and offset times)\n onset_tolerance : float > 0\n The tolerance for an estimated note's onset deviating from the\n reference note's onset, in seconds. Default is 0.05 (50 ms).\n strict : bool\n If ``strict=False`` (the default), threshold checks for onset matching\n are performed using ``<=`` (less than or equal). If ``strict=True``,\n the threshold checks are performed using ``<`` (less than).\n beta : float > 0\n Weighting factor for f-measure (default value = 1.0).\n\n Returns\n -------\n precision : float\n The computed precision score\n recall : float\n The computed recall score\n f_measure : float\n The computed F-measure score\n "
] |
Please provide a description of the function:def offset_precision_recall_f1(ref_intervals, est_intervals, offset_ratio=0.2,
offset_min_tolerance=0.05, strict=False,
beta=1.0):
validate_intervals(ref_intervals, est_intervals)
# When reference notes are empty, metrics are undefined, return 0's
if len(ref_intervals) == 0 or len(est_intervals) == 0:
return 0., 0., 0.
matching = match_note_offsets(ref_intervals, est_intervals,
offset_ratio=offset_ratio,
offset_min_tolerance=offset_min_tolerance,
strict=strict)
offset_precision = float(len(matching))/len(est_intervals)
offset_recall = float(len(matching))/len(ref_intervals)
offset_f_measure = util.f_measure(offset_precision, offset_recall,
beta=beta)
return offset_precision, offset_recall, offset_f_measure | [
"Compute the Precision, Recall and F-measure of note offsets: an\n estimated offset is considered correct if it is within +-50ms (or 20% of\n the ref note duration, which ever is greater) of a reference offset. Note\n that this metric completely ignores note onsets and note pitch. This means\n an estimated offset will be considered correct if it matches a\n reference offset, even if the offsets come from notes with completely\n different pitches (i.e. notes that would not match with\n :func:`match_notes`).\n\n\n Examples\n --------\n >>> ref_intervals, _ = mir_eval.io.load_valued_intervals(\n ... 'reference.txt')\n >>> est_intervals, _ = mir_eval.io.load_valued_intervals(\n ... 'estimated.txt')\n >>> (offset_precision,\n ... offset_recall,\n ... offset_f_measure) = mir_eval.transcription.offset_precision_recall_f1(\n ... ref_intervals, est_intervals)\n\n Parameters\n ----------\n ref_intervals : np.ndarray, shape=(n,2)\n Array of reference notes time intervals (onset and offset times)\n est_intervals : np.ndarray, shape=(m,2)\n Array of estimated notes time intervals (onset and offset times)\n offset_ratio : float > 0 or None\n The ratio of the reference note's duration used to define the\n offset_tolerance. Default is 0.2 (20%), meaning the\n ``offset_tolerance`` will equal the ``ref_duration * 0.2``, or\n ``offset_min_tolerance`` (0.05 by default, i.e. 50 ms), whichever is\n greater.\n offset_min_tolerance : float > 0\n The minimum tolerance for offset matching. See ``offset_ratio``\n description for an explanation of how the offset tolerance is\n determined.\n strict : bool\n If ``strict=False`` (the default), threshold checks for onset matching\n are performed using ``<=`` (less than or equal). If ``strict=True``,\n the threshold checks are performed using ``<`` (less than).\n beta : float > 0\n Weighting factor for f-measure (default value = 1.0).\n\n Returns\n -------\n precision : float\n The computed precision score\n recall : float\n The computed recall score\n f_measure : float\n The computed F-measure score\n "
] |
Please provide a description of the function:def evaluate(ref_intervals, ref_pitches, est_intervals, est_pitches, **kwargs):
# Compute all the metrics
scores = collections.OrderedDict()
# Precision, recall and f-measure taking note offsets into account
kwargs.setdefault('offset_ratio', 0.2)
orig_offset_ratio = kwargs['offset_ratio']
if kwargs['offset_ratio'] is not None:
(scores['Precision'],
scores['Recall'],
scores['F-measure'],
scores['Average_Overlap_Ratio']) = util.filter_kwargs(
precision_recall_f1_overlap, ref_intervals, ref_pitches,
est_intervals, est_pitches, **kwargs)
# Precision, recall and f-measure NOT taking note offsets into account
kwargs['offset_ratio'] = None
(scores['Precision_no_offset'],
scores['Recall_no_offset'],
scores['F-measure_no_offset'],
scores['Average_Overlap_Ratio_no_offset']) = (
util.filter_kwargs(precision_recall_f1_overlap,
ref_intervals, ref_pitches,
est_intervals, est_pitches, **kwargs))
# onset-only metrics
(scores['Onset_Precision'],
scores['Onset_Recall'],
scores['Onset_F-measure']) = (
util.filter_kwargs(onset_precision_recall_f1,
ref_intervals, est_intervals, **kwargs))
# offset-only metrics
kwargs['offset_ratio'] = orig_offset_ratio
if kwargs['offset_ratio'] is not None:
(scores['Offset_Precision'],
scores['Offset_Recall'],
scores['Offset_F-measure']) = (
util.filter_kwargs(offset_precision_recall_f1,
ref_intervals, est_intervals, **kwargs))
return scores | [
"Compute all metrics for the given reference and estimated annotations.\n\n Examples\n --------\n >>> ref_intervals, ref_pitches = mir_eval.io.load_valued_intervals(\n ... 'reference.txt')\n >>> est_intervals, est_pitches = mir_eval.io.load_valued_intervals(\n ... 'estimate.txt')\n >>> scores = mir_eval.transcription.evaluate(ref_intervals, ref_pitches,\n ... est_intervals, est_pitches)\n\n Parameters\n ----------\n ref_intervals : np.ndarray, shape=(n,2)\n Array of reference notes time intervals (onset and offset times)\n ref_pitches : np.ndarray, shape=(n,)\n Array of reference pitch values in Hertz\n est_intervals : np.ndarray, shape=(m,2)\n Array of estimated notes time intervals (onset and offset times)\n est_pitches : np.ndarray, shape=(m,)\n Array of estimated pitch values in Hertz\n kwargs\n Additional keyword arguments which will be passed to the\n appropriate metric or preprocessing functions.\n\n Returns\n -------\n scores : dict\n Dictionary of scores, where the key is the metric name (str) and\n the value is the (float) score achieved.\n "
] |
Please provide a description of the function:def validate_key(key):
if len(key.split()) != 2:
raise ValueError("'{}' is not in the form '(key) (mode)'".format(key))
key, mode = key.split()
if key.lower() not in KEY_TO_SEMITONE:
raise ValueError(
"Key {} is invalid; should be e.g. D or C# or Eb".format(key))
if mode not in ['major', 'minor']:
raise ValueError(
"Mode '{}' is invalid; must be 'major' or 'minor'".format(mode)) | [
"Checks that a key is well-formatted, e.g. in the form ``'C# major'``.\n\n Parameters\n ----------\n key : str\n Key to verify\n "
] |
Please provide a description of the function:def split_key_string(key):
key, mode = key.split()
return KEY_TO_SEMITONE[key.lower()], mode | [
"Splits a key string (of the form, e.g. ``'C# major'``), into a tuple of\n ``(key, mode)`` where ``key`` is is an integer representing the semitone\n distance from C.\n\n Parameters\n ----------\n key : str\n String representing a key.\n\n Returns\n -------\n key : int\n Number of semitones above C.\n mode : str\n String representing the mode.\n "
] |
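Two worked examples of the key-string splitter above; the key names are arbitrary:
import mir_eval.key

print(mir_eval.key.split_key_string('C# major'))  # (1, 'major') -- C# is one semitone above C
print(mir_eval.key.split_key_string('D minor'))   # (2, 'minor')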
Please provide a description of the function:def weighted_score(reference_key, estimated_key):
validate(reference_key, estimated_key)
reference_key, reference_mode = split_key_string(reference_key)
estimated_key, estimated_mode = split_key_string(estimated_key)
# If keys are the same, return 1.
if reference_key == estimated_key and reference_mode == estimated_mode:
return 1.
# If keys are the same mode and a perfect fifth (differ by 7 semitones)
if (estimated_mode == reference_mode and
(estimated_key - reference_key) % 12 == 7):
return 0.5
# Estimated key is relative minor of reference key (9 semitones)
if (estimated_mode != reference_mode == 'major' and
(estimated_key - reference_key) % 12 == 9):
return 0.3
# Estimated key is relative major of reference key (3 semitones)
if (estimated_mode != reference_mode == 'minor' and
(estimated_key - reference_key) % 12 == 3):
return 0.3
# If keys are in different modes and parallel (same key name)
if estimated_mode != reference_mode and reference_key == estimated_key:
return 0.2
# Otherwise return 0
return 0. | [
"Computes a heuristic score which is weighted according to the\n relationship of the reference and estimated key, as follows:\n\n +------------------------------------------------------+-------+\n | Relationship | Score |\n +------------------------------------------------------+-------+\n | Same key | 1.0 |\n +------------------------------------------------------+-------+\n | Estimated key is a perfect fifth above reference key | 0.5 |\n +------------------------------------------------------+-------+\n | Relative major/minor | 0.3 |\n +------------------------------------------------------+-------+\n | Parallel major/minor | 0.2 |\n +------------------------------------------------------+-------+\n | Other | 0.0 |\n +------------------------------------------------------+-------+\n\n Examples\n --------\n >>> ref_key = mir_eval.io.load_key('ref.txt')\n >>> est_key = mir_eval.io.load_key('est.txt')\n >>> score = mir_eval.key.weighted_score(ref_key, est_key)\n\n Parameters\n ----------\n reference_key : str\n Reference key string.\n estimated_key : str\n Estimated key string.\n\n Returns\n -------\n score : float\n Score representing how closely related the keys are.\n "
] |
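The score table above can be checked directly with literal key strings; a small hedged sketch:
import mir_eval.key

print(mir_eval.key.weighted_score('C major', 'G major'))  # 0.5 -- estimate is a perfect fifth above
print(mir_eval.key.weighted_score('C major', 'A minor'))  # 0.3 -- relative minor of the reference
print(mir_eval.key.weighted_score('C major', 'C minor'))  # 0.2 -- parallel minor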
Please provide a description of the function:def evaluate(reference_key, estimated_key, **kwargs):
# Compute all metrics
scores = collections.OrderedDict()
scores['Weighted Score'] = util.filter_kwargs(
weighted_score, reference_key, estimated_key)
return scores | [
"Compute all metrics for the given reference and estimated annotations.\n\n Examples\n --------\n >>> ref_key = mir_eval.io.load_key('reference.txt')\n >>> est_key = mir_eval.io.load_key('estimated.txt')\n >>> scores = mir_eval.key.evaluate(ref_key, est_key)\n\n Parameters\n ----------\n ref_key : str\n Reference key string.\n\n ref_key : str\n Estimated key string.\n\n kwargs\n Additional keyword arguments which will be passed to the\n appropriate metric or preprocessing functions.\n\n Returns\n -------\n scores : dict\n Dictionary of scores, where the key is the metric name (str) and\n the value is the (float) score achieved.\n "
] |
Please provide a description of the function:def validate_voicing(ref_voicing, est_voicing):
if ref_voicing.size == 0:
warnings.warn("Reference voicing array is empty.")
if est_voicing.size == 0:
warnings.warn("Estimated voicing array is empty.")
if ref_voicing.sum() == 0:
warnings.warn("Reference melody has no voiced frames.")
if est_voicing.sum() == 0:
warnings.warn("Estimated melody has no voiced frames.")
# Make sure they're the same length
if ref_voicing.shape[0] != est_voicing.shape[0]:
raise ValueError('Reference and estimated voicing arrays should '
'be the same length.')
for voicing in [ref_voicing, est_voicing]:
# Make sure they're (effectively) boolean
if np.logical_and(voicing != 0, voicing != 1).any():
raise ValueError('Voicing arrays must be boolean.') | [
"Checks that voicing inputs to a metric are in the correct format.\n\n Parameters\n ----------\n ref_voicing : np.ndarray\n Reference boolean voicing array\n est_voicing : np.ndarray\n Estimated boolean voicing array\n\n "
] |
Please provide a description of the function:def validate(ref_voicing, ref_cent, est_voicing, est_cent):
if ref_cent.size == 0:
warnings.warn("Reference frequency array is empty.")
if est_cent.size == 0:
warnings.warn("Estimated frequency array is empty.")
# Make sure they're the same length
if ref_voicing.shape[0] != ref_cent.shape[0] or \
est_voicing.shape[0] != est_cent.shape[0] or \
ref_cent.shape[0] != est_cent.shape[0]:
raise ValueError('All voicing and frequency arrays must have the '
'same length.') | [
"Checks that voicing and frequency arrays are well-formed. To be used in\n conjunction with :func:`mir_eval.melody.validate_voicing`\n\n Parameters\n ----------\n ref_voicing : np.ndarray\n Reference boolean voicing array\n ref_cent : np.ndarray\n Reference pitch sequence in cents\n est_voicing : np.ndarray\n Estimated boolean voicing array\n est_cent : np.ndarray\n Estimate pitch sequence in cents\n\n "
] |
Please provide a description of the function:def hz2cents(freq_hz, base_frequency=10.0):
freq_cent = np.zeros(freq_hz.shape[0])
freq_nonz_ind = np.flatnonzero(freq_hz)
normalized_frequency = np.abs(freq_hz[freq_nonz_ind])/base_frequency
freq_cent[freq_nonz_ind] = 1200*np.log2(normalized_frequency)
return freq_cent | [
"Convert an array of frequency values in Hz to cents.\n 0 values are left in place.\n\n Parameters\n ----------\n freq_hz : np.ndarray\n Array of frequencies in Hz.\n base_frequency : float\n Base frequency for conversion.\n (Default value = 10.0)\n\n Returns\n -------\n cent : np.ndarray\n Array of frequencies in cents, relative to base_frequency\n\n "
] |
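A worked example of the Hz-to-cents conversion above, using arbitrary frequencies (zeros, which mark unvoiced frames, are left untouched):
import numpy as np
import mir_eval.melody

freqs = np.array([0.0, 20.0, 440.0])
print(mir_eval.melody.hz2cents(freqs))
# approximately [0., 1200., 6551.3] -- 20 Hz is one octave above the 10 Hz base
# frequency, and the 0 Hz entry stays at 0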
Please provide a description of the function:def constant_hop_timebase(hop, end_time):
# Compute new timebase. Rounding/linspace is to avoid float problems.
end_time = np.round(end_time, 10)
times = np.linspace(0, hop*int(np.floor(end_time/hop)),
int(np.floor(end_time/hop)) + 1)
times = np.round(times, 10)
return times | [
"Generates a time series from 0 to ``end_time`` with times spaced ``hop``\n apart\n\n Parameters\n ----------\n hop : float\n Spacing of samples in the time series\n end_time : float\n Time series will span ``[0, end_time]``\n\n Returns\n -------\n times : np.ndarray\n Generated timebase\n\n "
] |
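A worked example of the timebase generator above, with an arbitrary hop and end time:
import mir_eval.melody

print(mir_eval.melody.constant_hop_timebase(0.5, 2.0))
# [0.  0.5 1.  1.5 2. ] -- samples spaced `hop` apart, spanning [0, end_time]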
Please provide a description of the function:def resample_melody_series(times, frequencies, voicing,
times_new, kind='linear'):
# If the timebases are already the same, no need to interpolate
if times.shape == times_new.shape and np.allclose(times, times_new):
return frequencies, voicing.astype(np.bool)
# Warn when the delta between the original times is not constant,
# unless times[0] == 0. and frequencies[0] == frequencies[1] (see logic at
# the beginning of to_cent_voicing)
if not (np.allclose(np.diff(times), np.diff(times).mean()) or
(np.allclose(np.diff(times[1:]), np.diff(times[1:]).mean()) and
frequencies[0] == frequencies[1])):
warnings.warn(
"Non-uniform timescale passed to resample_melody_series. Pitch "
"will be linearly interpolated, which will result in undesirable "
"behavior if silences are indicated by missing values. Silences "
"should be indicated by nonpositive frequency values.")
# Round to avoid floating point problems
times = np.round(times, 10)
times_new = np.round(times_new, 10)
# Add in an additional sample if we'll be asking for a time too large
if times_new.max() > times.max():
times = np.append(times, times_new.max())
frequencies = np.append(frequencies, 0)
voicing = np.append(voicing, 0)
# We need to fix zero transitions if interpolation is not zero or nearest
if kind != 'zero' and kind != 'nearest':
# Fill in zero values with the last reported frequency
# to avoid erroneous values when resampling
frequencies_held = np.array(frequencies)
for n, frequency in enumerate(frequencies[1:]):
if frequency == 0:
frequencies_held[n + 1] = frequencies_held[n]
# Linearly interpolate frequencies
frequencies_resampled = scipy.interpolate.interp1d(times,
frequencies_held,
kind)(times_new)
# Retain zeros
frequency_mask = scipy.interpolate.interp1d(times,
frequencies,
'zero')(times_new)
frequencies_resampled *= (frequency_mask != 0)
else:
frequencies_resampled = scipy.interpolate.interp1d(times,
frequencies,
kind)(times_new)
# Use nearest-neighbor for voicing if it was used for frequencies
if kind == 'nearest':
voicing_resampled = scipy.interpolate.interp1d(times,
voicing,
kind)(times_new)
# otherwise, always use zeroth order
else:
voicing_resampled = scipy.interpolate.interp1d(times,
voicing,
'zero')(times_new)
return frequencies_resampled, voicing_resampled.astype(np.bool) | [
"Resamples frequency and voicing time series to a new timescale. Maintains\n any zero (\"unvoiced\") values in frequencies.\n\n If ``times`` and ``times_new`` are equivalent, no resampling will be\n performed.\n\n Parameters\n ----------\n times : np.ndarray\n Times of each frequency value\n frequencies : np.ndarray\n Array of frequency values, >= 0\n voicing : np.ndarray\n Boolean array which indicates voiced or unvoiced\n times_new : np.ndarray\n Times to resample frequency and voicing sequences to\n kind : str\n kind parameter to pass to scipy.interpolate.interp1d.\n (Default value = 'linear')\n\n Returns\n -------\n frequencies_resampled : np.ndarray\n Frequency array resampled to new timebase\n voicing_resampled : np.ndarray, dtype=bool\n Boolean voicing array resampled to new timebase\n\n "
] |
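A small usage sketch of the resampler above, assuming it is importable as mir_eval.melody.resample_melody_series (the module path used in the surrounding docstring examples). The toy arrays are chosen so that the unvoiced (zero-frequency) frame is preserved rather than interpolated across:

import numpy as np
import mir_eval

times = np.array([0., 1., 2., 3.])
freqs = np.array([100., 0., 300., 400.])      # cents; 0 marks an unvoiced frame
voicing = np.array([1, 0, 1, 1])
new_times = np.array([0., 0.5, 1., 1.5, 2., 2.5, 3.])

freqs_r, voicing_r = mir_eval.melody.resample_melody_series(
    times, freqs, voicing, new_times, kind='linear')
# Expected per the implementation above: the unvoiced gap around t=1 stays 0,
# freqs_r   -> [100., 100., 0., 0., 300., 350., 400.]
# voicing_r -> [True, True, False, False, True, True, True]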
Please provide a description of the function:def to_cent_voicing(ref_time, ref_freq, est_time, est_freq, base_frequency=10.,
hop=None, kind='linear'):
# Check if missing sample at time 0 and if so add one
if ref_time[0] > 0:
ref_time = np.insert(ref_time, 0, 0)
ref_freq = np.insert(ref_freq, 0, ref_freq[0])
if est_time[0] > 0:
est_time = np.insert(est_time, 0, 0)
est_freq = np.insert(est_freq, 0, est_freq[0])
# Get separated frequency array and voicing boolean array
ref_freq, ref_voicing = freq_to_voicing(ref_freq)
est_freq, est_voicing = freq_to_voicing(est_freq)
# convert both sequences to cents
ref_cent = hz2cents(ref_freq, base_frequency)
est_cent = hz2cents(est_freq, base_frequency)
# If we received a hop, use it to resample both
if hop is not None:
# Resample to common time base
ref_cent, ref_voicing = resample_melody_series(
ref_time, ref_cent, ref_voicing,
constant_hop_timebase(hop, ref_time.max()), kind)
est_cent, est_voicing = resample_melody_series(
est_time, est_cent, est_voicing,
constant_hop_timebase(hop, est_time.max()), kind)
# Otherwise, only resample estimated to the reference time base
else:
est_cent, est_voicing = resample_melody_series(
est_time, est_cent, est_voicing, ref_time, kind)
# ensure the estimated sequence is the same length as the reference
len_diff = ref_cent.shape[0] - est_cent.shape[0]
if len_diff >= 0:
est_cent = np.append(est_cent, np.zeros(len_diff))
est_voicing = np.append(est_voicing, np.zeros(len_diff))
else:
est_cent = est_cent[:ref_cent.shape[0]]
est_voicing = est_voicing[:ref_voicing.shape[0]]
return (ref_voicing.astype(bool), ref_cent,
est_voicing.astype(bool), est_cent) | [
"Converts reference and estimated time/frequency (Hz) annotations to sampled\n frequency (cent)/voicing arrays.\n\n A zero frequency indicates \"unvoiced\".\n\n A negative frequency indicates \"Predicted as unvoiced, but if it's voiced,\n this is the frequency estimate\".\n\n Parameters\n ----------\n ref_time : np.ndarray\n Time of each reference frequency value\n ref_freq : np.ndarray\n Array of reference frequency values\n est_time : np.ndarray\n Time of each estimated frequency value\n est_freq : np.ndarray\n Array of estimated frequency values\n base_frequency : float\n Base frequency in Hz for conversion to cents\n (Default value = 10.)\n hop : float\n Hop size, in seconds, to resample,\n default None which means use ref_time\n kind : str\n kind parameter to pass to scipy.interpolate.interp1d.\n (Default value = 'linear')\n\n Returns\n -------\n ref_voicing : np.ndarray, dtype=bool\n Resampled reference boolean voicing array\n ref_cent : np.ndarray\n Resampled reference frequency (cent) array\n est_voicing : np.ndarray, dtype=bool\n Resampled estimated boolean voicing array\n est_cent : np.ndarray\n Resampled estimated frequency (cent) array\n\n "
] |
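A minimal end-to-end sketch of the conversion above (module path mir_eval.melody assumed, matching the docstring examples); the time and frequency values are made up:

import numpy as np
import mir_eval

ref_time = np.array([0.00, 0.01, 0.02, 0.03])
ref_freq = np.array([440., 440., 0., 220.])     # Hz; 0 = unvoiced
est_time = np.array([0.00, 0.01, 0.02, 0.03])
est_freq = np.array([441., 0., -220., 220.])    # negative = unvoiced, but best guess

ref_v, ref_c, est_v, est_c = mir_eval.melody.to_cent_voicing(
    ref_time, ref_freq, est_time, est_freq)
# ref_v/est_v are boolean voicing arrays; ref_c/est_c are pitches in cents
# above the 10 Hz base frequency, sampled on the reference timebase.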
Please provide a description of the function:def voicing_measures(ref_voicing, est_voicing):
validate_voicing(ref_voicing, est_voicing)
ref_voicing = ref_voicing.astype(bool)
est_voicing = est_voicing.astype(bool)
# When input arrays are empty, return 0 by special case
if ref_voicing.size == 0 or est_voicing.size == 0:
return 0.
# How voicing is computed
# | ref_v | !ref_v |
# -------|-------|--------|
# est_v | TP | FP |
# -------|-------|------- |
# !est_v | FN | TN |
# -------------------------
TP = (ref_voicing*est_voicing).sum()
FP = ((ref_voicing == 0)*est_voicing).sum()
FN = (ref_voicing*(est_voicing == 0)).sum()
TN = ((ref_voicing == 0)*(est_voicing == 0)).sum()
# Voicing recall = fraction of voiced frames according to the reference that
# are declared as voiced by the estimate
if TP + FN == 0:
vx_recall = 0.
else:
vx_recall = TP/float(TP + FN)
# Voicing false alarm = fraction of unvoiced frames according to the
# reference that are declared as voiced by the estimate
if FP + TN == 0:
vx_false_alm = 0.
else:
vx_false_alm = FP/float(FP + TN)
return vx_recall, vx_false_alm | [
"Compute the voicing recall and false alarm rates given two voicing\n indicator sequences, one as reference (truth) and the other as the estimate\n (prediction). The sequences must be of the same length.\n\n Examples\n --------\n >>> ref_time, ref_freq = mir_eval.io.load_time_series('ref.txt')\n >>> est_time, est_freq = mir_eval.io.load_time_series('est.txt')\n >>> (ref_v, ref_c,\n ... est_v, est_c) = mir_eval.melody.to_cent_voicing(ref_time,\n ... ref_freq,\n ... est_time,\n ... est_freq)\n >>> recall, false_alarm = mir_eval.melody.voicing_measures(ref_v,\n ... est_v)\n\n Parameters\n ----------\n ref_voicing : np.ndarray\n Reference boolean voicing array\n est_voicing : np.ndarray\n Estimated boolean voicing array\n\n Returns\n -------\n vx_recall : float\n Voicing recall rate, the fraction of voiced frames in ref\n indicated as voiced in est\n vx_false_alarm : float\n Voicing false alarm rate, the fraction of unvoiced frames in ref\n indicated as voiced in est\n\n "
] |
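A toy check of the confusion-matrix bookkeeping above (mir_eval.melody.voicing_measures assumed importable, as in the docstring example):

import numpy as np
import mir_eval

ref_v = np.array([True, True, True, False, False])
est_v = np.array([True, False, True, True, False])

recall, false_alarm = mir_eval.melody.voicing_measures(ref_v, est_v)
# TP=2, FN=1, FP=1, TN=1, so per the implementation above:
# recall      == 2/3  (voiced reference frames estimated as voiced)
# false_alarm == 1/2  (unvoiced reference frames estimated as voiced)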
Please provide a description of the function:def raw_pitch_accuracy(ref_voicing, ref_cent, est_voicing, est_cent,
cent_tolerance=50):
validate_voicing(ref_voicing, est_voicing)
validate(ref_voicing, ref_cent, est_voicing, est_cent)
ref_voicing = ref_voicing.astype(bool)
est_voicing = est_voicing.astype(bool)
# When input arrays are empty, return 0 by special case
if ref_voicing.size == 0 or est_voicing.size == 0 \
or ref_cent.size == 0 or est_cent.size == 0:
return 0.
# If there are no voiced frames in reference, metric is 0
if ref_voicing.sum() == 0:
return 0.
# Raw pitch = the fraction of voiced frames in the reference for which the
# estimate provides a correct frequency value (within cent_tolerance cents)
# NB: voicing estimation is ignored in this measure
matching_voicing = ref_voicing * (est_cent > 0)
cent_diff = np.abs(ref_cent - est_cent)[matching_voicing]
frame_correct = (cent_diff < cent_tolerance)
raw_pitch = (frame_correct).sum()/float(ref_voicing.sum())
return raw_pitch | [
"Compute the raw pitch accuracy given two pitch (frequency) sequences in\n cents and matching voicing indicator sequences. The first pitch and voicing\n arrays are treated as the reference (truth), and the second two as the\n estimate (prediction). All 4 sequences must be of the same length.\n\n Examples\n --------\n >>> ref_time, ref_freq = mir_eval.io.load_time_series('ref.txt')\n >>> est_time, est_freq = mir_eval.io.load_time_series('est.txt')\n >>> (ref_v, ref_c,\n ... est_v, est_c) = mir_eval.melody.to_cent_voicing(ref_time,\n ... ref_freq,\n ... est_time,\n ... est_freq)\n >>> raw_pitch = mir_eval.melody.raw_pitch_accuracy(ref_v, ref_c,\n ... est_v, est_c)\n\n Parameters\n ----------\n ref_voicing : np.ndarray\n Reference boolean voicing array\n ref_cent : np.ndarray\n Reference pitch sequence in cents\n est_voicing : np.ndarray\n Estimated boolean voicing array\n est_cent : np.ndarray\n Estimate pitch sequence in cents\n cent_tolerance : float\n Maximum absolute deviation for a cent value to be considerd correct\n (Default value = 50)\n\n Returns\n -------\n raw_pitch : float\n Raw pitch accuracy, the fraction of voiced frames in ref_cent for\n which est_cent provides a correct frequency values\n (within cent_tolerance cents).\n\n "
] |
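A toy example of the tolerance logic above (the cent values are made up; mir_eval.melody.raw_pitch_accuracy assumed importable):

import numpy as np
import mir_eval

ref_v = np.array([True, True, True, False])
ref_c = np.array([6000., 6000., 6200., 0.])
est_v = np.array([True, False, True, True])
est_c = np.array([6030., 5900., 6190., 5000.])

rpa = mir_eval.melody.raw_pitch_accuracy(ref_v, ref_c, est_v, est_c)
# Frames 0 and 2 fall within the default 50-cent tolerance, frame 1 does not,
# and the unvoiced reference frame is ignored, so rpa == 2/3.  Note that the
# estimate's voicing flags play no role in this measure.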
Please provide a description of the function:def raw_chroma_accuracy(ref_voicing, ref_cent, est_voicing, est_cent,
cent_tolerance=50):
validate_voicing(ref_voicing, est_voicing)
validate(ref_voicing, ref_cent, est_voicing, est_cent)
ref_voicing = ref_voicing.astype(bool)
est_voicing = est_voicing.astype(bool)
# When input arrays are empty, return 0 by special case
if ref_voicing.size == 0 or est_voicing.size == 0 \
or ref_cent.size == 0 or est_cent.size == 0:
return 0.
# If there are no voiced frames in reference, metric is 0
if ref_voicing.sum() == 0:
return 0.
# Raw chroma = same as raw pitch except that octave errors are ignored.
cent_diff = np.abs(ref_cent - est_cent)
octave = 1200*np.floor(cent_diff/1200.0 + 0.5)
matching_voicing = ref_voicing * (est_cent > 0)
cent_diff = np.abs(cent_diff - octave)[matching_voicing]
frame_correct = (cent_diff < cent_tolerance)
n_voiced = float(ref_voicing.sum())
raw_chroma = (frame_correct).sum()/n_voiced
return raw_chroma | [
"Compute the raw chroma accuracy given two pitch (frequency) sequences\n in cents and matching voicing indicator sequences. The first pitch and\n voicing arrays are treated as the reference (truth), and the second two as\n the estimate (prediction). All 4 sequences must be of the same length.\n\n\n Examples\n --------\n >>> ref_time, ref_freq = mir_eval.io.load_time_series('ref.txt')\n >>> est_time, est_freq = mir_eval.io.load_time_series('est.txt')\n >>> (ref_v, ref_c,\n ... est_v, est_c) = mir_eval.melody.to_cent_voicing(ref_time,\n ... ref_freq,\n ... est_time,\n ... est_freq)\n >>> raw_chroma = mir_eval.melody.raw_chroma_accuracy(ref_v, ref_c,\n ... est_v, est_c)\n\n\n Parameters\n ----------\n ref_voicing : np.ndarray\n Reference boolean voicing array\n ref_cent : np.ndarray\n Reference pitch sequence in cents\n est_voicing : np.ndarray\n Estimated boolean voicing array\n est_cent : np.ndarray\n Estimate pitch sequence in cents\n cent_tolerance : float\n Maximum absolute deviation for a cent value to be considered correct\n (Default value = 50)\n\n\n Returns\n -------\n raw_chroma : float\n Raw chroma accuracy, the fraction of voiced frames in ref_cent for\n which est_cent provides a correct frequency values (within\n cent_tolerance cents), ignoring octave errors\n\n\n References\n ----------\n .. [#] J. Salamon, E. Gomez, D. P. W. Ellis and G. Richard, \"Melody\n Extraction from Polyphonic Music Signals: Approaches, Applications\n and Challenges\", IEEE Signal Processing Magazine, 31(2):118-134,\n Mar. 2014.\n\n\n .. [#] G. E. Poliner, D. P. W. Ellis, A. F. Ehmann, E. Gomez, S.\n Streich, and B. Ong. \"Melody transcription from music audio:\n Approaches and evaluation\", IEEE Transactions on Audio, Speech, and\n Language Processing, 15(4):1247-1256, 2007.\n\n "
] |
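A toy example contrasting chroma accuracy with raw pitch accuracy (same hedged assumptions as the melody examples above); the first two estimates are deliberate octave errors:

import numpy as np
import mir_eval

ref_v = np.array([True, True, True])
ref_c = np.array([6000., 6000., 6200.])
est_v = np.array([True, True, True])
est_c = np.array([4820., 7230., 6500.])   # octave-down, octave-up, genuine miss

rca = mir_eval.melody.raw_chroma_accuracy(ref_v, ref_c, est_v, est_c)
rpa = mir_eval.melody.raw_pitch_accuracy(ref_v, ref_c, est_v, est_c)
# Octave errors are forgiven by the chroma wrapping: rca == 2/3, while rpa == 0.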
Please provide a description of the function:def overall_accuracy(ref_voicing, ref_cent, est_voicing, est_cent,
cent_tolerance=50):
validate_voicing(ref_voicing, est_voicing)
validate(ref_voicing, ref_cent, est_voicing, est_cent)
ref_voicing = ref_voicing.astype(bool)
est_voicing = est_voicing.astype(bool)
# When input arrays are empty, return 0 by special case
if ref_voicing.size == 0 or est_voicing.size == 0 \
or ref_cent.size == 0 or est_cent.size == 0:
return 0.
# True negatives = frames correctly estimated as unvoiced
TN = ((ref_voicing == 0)*(est_voicing == 0)).sum()
cent_diff = np.abs(ref_cent - est_cent)
frame_correct = (cent_diff[ref_voicing*est_voicing] < cent_tolerance)
accuracy = (frame_correct.sum() + TN)/float(ref_cent.shape[0])
return accuracy | [
"Compute the overall accuracy given two pitch (frequency) sequences in cents\n and matching voicing indicator sequences. The first pitch and voicing\n arrays are treated as the reference (truth), and the second two as the\n estimate (prediction). All 4 sequences must be of the same length.\n\n Examples\n --------\n >>> ref_time, ref_freq = mir_eval.io.load_time_series('ref.txt')\n >>> est_time, est_freq = mir_eval.io.load_time_series('est.txt')\n >>> (ref_v, ref_c,\n ... est_v, est_c) = mir_eval.melody.to_cent_voicing(ref_time,\n ... ref_freq,\n ... est_time,\n ... est_freq)\n >>> overall_accuracy = mir_eval.melody.overall_accuracy(ref_v, ref_c,\n ... est_v, est_c)\n\n Parameters\n ----------\n ref_voicing : np.ndarray\n Reference boolean voicing array\n ref_cent : np.ndarray\n Reference pitch sequence in cents\n est_voicing : np.ndarray\n Estimated boolean voicing array\n est_cent : np.ndarray\n Estimate pitch sequence in cents\n cent_tolerance : float\n Maximum absolute deviation for a cent value to be considered correct\n (Default value = 50)\n\n Returns\n -------\n overall_accuracy : float\n Overall accuracy, the total fraction of correctly estimates frames,\n where provides a correct frequency values (within cent_tolerance\n cents).\n\n "
] |
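A toy example showing how true negatives (frames correctly left unvoiced) count toward the score (same assumptions as the melody examples above):

import numpy as np
import mir_eval

ref_v = np.array([True, True, False, False])
ref_c = np.array([6000., 6000., 0., 0.])
est_v = np.array([True, False, False, True])
est_c = np.array([6030., 6010., 0., 5000.])

oa = mir_eval.melody.overall_accuracy(ref_v, ref_c, est_v, est_c)
# One correctly-pitched voiced frame plus one true-negative unvoiced frame out
# of 4 frames total, so oa == 0.5 under the implementation above.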
Please provide a description of the function:def evaluate(ref_time, ref_freq, est_time, est_freq, **kwargs):
# Convert to reference/estimated voicing/frequency (cent) arrays
(ref_voicing, ref_cent,
est_voicing, est_cent) = util.filter_kwargs(
to_cent_voicing, ref_time, ref_freq, est_time, est_freq, **kwargs)
# Compute metrics
scores = collections.OrderedDict()
(scores['Voicing Recall'],
scores['Voicing False Alarm']) = util.filter_kwargs(voicing_measures,
ref_voicing,
est_voicing, **kwargs)
scores['Raw Pitch Accuracy'] = util.filter_kwargs(raw_pitch_accuracy,
ref_voicing, ref_cent,
est_voicing, est_cent,
**kwargs)
scores['Raw Chroma Accuracy'] = util.filter_kwargs(raw_chroma_accuracy,
ref_voicing, ref_cent,
est_voicing, est_cent,
**kwargs)
scores['Overall Accuracy'] = util.filter_kwargs(overall_accuracy,
ref_voicing, ref_cent,
est_voicing, est_cent,
**kwargs)
return scores | [
"Evaluate two melody (predominant f0) transcriptions, where the first is\n treated as the reference (ground truth) and the second as the estimate to\n be evaluated (prediction).\n\n Examples\n --------\n >>> ref_time, ref_freq = mir_eval.io.load_time_series('ref.txt')\n >>> est_time, est_freq = mir_eval.io.load_time_series('est.txt')\n >>> scores = mir_eval.melody.evaluate(ref_time, ref_freq,\n ... est_time, est_freq)\n\n Parameters\n ----------\n ref_time : np.ndarray\n Time of each reference frequency value\n ref_freq : np.ndarray\n Array of reference frequency values\n est_time : np.ndarray\n Time of each estimated frequency value\n est_freq : np.ndarray\n Array of estimated frequency values\n kwargs\n Additional keyword arguments which will be passed to the\n appropriate metric or preprocessing functions.\n\n Returns\n -------\n scores : dict\n Dictionary of scores, where the key is the metric name (str) and\n the value is the (float) score achieved.\n\n "
] |
Please provide a description of the function:def validate_boundary(reference_intervals, estimated_intervals, trim):
if trim:
# If we're trimming, then we need at least 2 intervals
min_size = 2
else:
# If we're not trimming, then we only need one interval
min_size = 1
if len(reference_intervals) < min_size:
warnings.warn("Reference intervals are empty.")
if len(estimated_intervals) < min_size:
warnings.warn("Estimated intervals are empty.")
for intervals in [reference_intervals, estimated_intervals]:
util.validate_intervals(intervals) | [
"Checks that the input annotations to a segment boundary estimation\n metric (i.e. one that only takes in segment intervals) look like valid\n segment times, and throws helpful errors if not.\n\n Parameters\n ----------\n reference_intervals : np.ndarray, shape=(n, 2)\n reference segment intervals, in the format returned by\n :func:`mir_eval.io.load_intervals` or\n :func:`mir_eval.io.load_labeled_intervals`.\n\n estimated_intervals : np.ndarray, shape=(m, 2)\n estimated segment intervals, in the format returned by\n :func:`mir_eval.io.load_intervals` or\n :func:`mir_eval.io.load_labeled_intervals`.\n\n trim : bool\n will the start and end events be trimmed?\n\n "
] |
Please provide a description of the function:def validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels):
for (intervals, labels) in [(reference_intervals, reference_labels),
(estimated_intervals, estimated_labels)]:
util.validate_intervals(intervals)
if intervals.shape[0] != len(labels):
raise ValueError('Number of intervals does not match number '
'of labels')
# Check only when intervals are non-empty
if intervals.size > 0:
# Make sure intervals start at 0
if not np.allclose(intervals.min(), 0.0):
raise ValueError('Segment intervals do not start at 0')
if reference_intervals.size == 0:
warnings.warn("Reference intervals are empty.")
if estimated_intervals.size == 0:
warnings.warn("Estimated intervals are empty.")
# Check only when intervals are non-empty
if reference_intervals.size > 0 and estimated_intervals.size > 0:
if not np.allclose(reference_intervals.max(),
estimated_intervals.max()):
raise ValueError('End times do not match') | [
"Checks that the input annotations to a structure estimation metric (i.e.\n one that takes in both segment boundaries and their labels) look like valid\n segment times and labels, and throws helpful errors if not.\n\n Parameters\n ----------\n reference_intervals : np.ndarray, shape=(n, 2)\n reference segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n\n reference_labels : list, shape=(n,)\n reference segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n\n estimated_intervals : np.ndarray, shape=(m, 2)\n estimated segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n\n estimated_labels : list, shape=(m,)\n estimated segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n\n "
] |
Please provide a description of the function:def detection(reference_intervals, estimated_intervals,
window=0.5, beta=1.0, trim=False):
validate_boundary(reference_intervals, estimated_intervals, trim)
# Convert intervals to boundaries
reference_boundaries = util.intervals_to_boundaries(reference_intervals)
estimated_boundaries = util.intervals_to_boundaries(estimated_intervals)
# Suppress the first and last intervals
if trim:
reference_boundaries = reference_boundaries[1:-1]
estimated_boundaries = estimated_boundaries[1:-1]
# If we have no boundaries, we get no score.
if len(reference_boundaries) == 0 or len(estimated_boundaries) == 0:
return 0.0, 0.0, 0.0
matching = util.match_events(reference_boundaries,
estimated_boundaries,
window)
precision = float(len(matching)) / len(estimated_boundaries)
recall = float(len(matching)) / len(reference_boundaries)
f_measure = util.f_measure(precision, recall, beta=beta)
return precision, recall, f_measure | [
"Boundary detection hit-rate.\n\n A hit is counted whenever an reference boundary is within ``window`` of a\n estimated boundary. Note that each boundary is matched at most once: this\n is achieved by computing the size of a maximal matching between reference\n and estimated boundary points, subject to the window constraint.\n\n Examples\n --------\n >>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')\n >>> # With 0.5s windowing\n >>> P05, R05, F05 = mir_eval.segment.detection(ref_intervals,\n ... est_intervals,\n ... window=0.5)\n >>> # With 3s windowing\n >>> P3, R3, F3 = mir_eval.segment.detection(ref_intervals,\n ... est_intervals,\n ... window=3)\n >>> # Ignoring hits for the beginning and end of track\n >>> P, R, F = mir_eval.segment.detection(ref_intervals,\n ... est_intervals,\n ... window=0.5,\n ... trim=True)\n\n Parameters\n ----------\n reference_intervals : np.ndarray, shape=(n, 2)\n reference segment intervals, in the format returned by\n :func:`mir_eval.io.load_intervals` or\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_intervals : np.ndarray, shape=(m, 2)\n estimated segment intervals, in the format returned by\n :func:`mir_eval.io.load_intervals` or\n :func:`mir_eval.io.load_labeled_intervals`.\n window : float > 0\n size of the window of 'correctness' around ground-truth beats\n (in seconds)\n (Default value = 0.5)\n beta : float > 0\n weighting constant for F-measure.\n (Default value = 1.0)\n trim : boolean\n if ``True``, the first and last boundary times are ignored.\n Typically, these denote start (0) and end-markers.\n (Default value = False)\n\n Returns\n -------\n precision : float\n precision of estimated predictions\n recall : float\n recall of reference reference boundaries\n f_measure : float\n F-measure (weighted harmonic mean of ``precision`` and ``recall``)\n\n "
] |
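A small usage sketch with synthetic intervals (assuming the function is exposed as mir_eval.segment.detection, per its docstring example); it shows how the window size changes which boundaries count as hits:

import numpy as np
import mir_eval

ref_intervals = np.array([[0., 5.], [5., 10.]])
est_intervals = np.array([[0., 4.4], [4.4, 10.]])

p, r, f = mir_eval.segment.detection(ref_intervals, est_intervals, window=0.5)
# The boundary at 4.4 s is 0.6 s from the reference boundary at 5 s, so only
# the start and end boundaries match: p == r == f == 2/3.
p3, r3, f3 = mir_eval.segment.detection(ref_intervals, est_intervals, window=3)
# With a 3 s window all three boundaries match: p3 == r3 == f3 == 1.0.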
Please provide a description of the function:def deviation(reference_intervals, estimated_intervals, trim=False):
validate_boundary(reference_intervals, estimated_intervals, trim)
# Convert intervals to boundaries
reference_boundaries = util.intervals_to_boundaries(reference_intervals)
estimated_boundaries = util.intervals_to_boundaries(estimated_intervals)
# Suppress the first and last intervals
if trim:
reference_boundaries = reference_boundaries[1:-1]
estimated_boundaries = estimated_boundaries[1:-1]
# If we have no boundaries, we get no score.
if len(reference_boundaries) == 0 or len(estimated_boundaries) == 0:
return np.nan, np.nan
dist = np.abs(np.subtract.outer(reference_boundaries,
estimated_boundaries))
estimated_to_reference = np.median(dist.min(axis=0))
reference_to_estimated = np.median(dist.min(axis=1))
return reference_to_estimated, estimated_to_reference | [
"Compute the median deviations between reference\n and estimated boundary times.\n\n Examples\n --------\n >>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')\n >>> r_to_e, e_to_r = mir_eval.boundary.deviation(ref_intervals,\n ... est_intervals)\n\n Parameters\n ----------\n reference_intervals : np.ndarray, shape=(n, 2)\n reference segment intervals, in the format returned by\n :func:`mir_eval.io.load_intervals` or\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_intervals : np.ndarray, shape=(m, 2)\n estimated segment intervals, in the format returned by\n :func:`mir_eval.io.load_intervals` or\n :func:`mir_eval.io.load_labeled_intervals`.\n trim : boolean\n if ``True``, the first and last intervals are ignored.\n Typically, these denote start (0.0) and end-of-track markers.\n (Default value = False)\n\n Returns\n -------\n reference_to_estimated : float\n median time from each reference boundary to the\n closest estimated boundary\n estimated_to_reference : float\n median time from each estimated boundary to the\n closest reference boundary\n\n "
] |
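A toy example of the median-deviation computation (module path mir_eval.segment assumed here; the docstring above refers to it as mir_eval.boundary.deviation):

import numpy as np
import mir_eval

ref_intervals = np.array([[0., 5.], [5., 10.]])
est_intervals = np.array([[0., 2.], [2., 4.4], [4.4, 10.]])

r_to_e, e_to_r = mir_eval.segment.deviation(ref_intervals, est_intervals)
# Reference boundaries {0, 5, 10} vs estimated {0, 2, 4.4, 10}:
# r_to_e == 0.0  (median of [0, 0.6, 0])
# e_to_r == 0.3  (median of [0, 2, 0.6, 0])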
Please provide a description of the function:def pairwise(reference_intervals, reference_labels,
estimated_intervals, estimated_labels,
frame_size=0.1, beta=1.0):
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
return 0., 0., 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
# Build the reference label agreement matrix
agree_ref = np.equal.outer(y_ref, y_ref)
# Count the unique pairs
n_agree_ref = (agree_ref.sum() - len(y_ref)) / 2.0
# Repeat for estimate
agree_est = np.equal.outer(y_est, y_est)
n_agree_est = (agree_est.sum() - len(y_est)) / 2.0
# Find where they agree
matches = np.logical_and(agree_ref, agree_est)
n_matches = (matches.sum() - len(y_ref)) / 2.0
precision = n_matches / n_agree_est
recall = n_matches / n_agree_ref
f_measure = util.f_measure(precision, recall, beta=beta)
return precision, recall, f_measure | [
"Frame-clustering segmentation evaluation by pair-wise agreement.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> # Trim or pad the estimate to match reference timing\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,\n ... ref_labels,\n ... t_min=0)\n >>> (est_intervals,\n ... est_labels) = mir_eval.util.adjust_intervals(\n ... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())\n >>> precision, recall, f = mir_eval.structure.pairwise(ref_intervals,\n ... ref_labels,\n ... est_intervals,\n ... est_labels)\n\n Parameters\n ----------\n reference_intervals : np.ndarray, shape=(n, 2)\n reference segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n reference_labels : list, shape=(n,)\n reference segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_intervals : np.ndarray, shape=(m, 2)\n estimated segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_labels : list, shape=(m,)\n estimated segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n frame_size : float > 0\n length (in seconds) of frames for clustering\n (Default value = 0.1)\n beta : float > 0\n beta value for F-measure\n (Default value = 1.0)\n\n Returns\n -------\n precision : float > 0\n Precision of detecting whether frames belong in the same cluster\n recall : float > 0\n Recall of detecting whether frames belong in the same cluster\n f : float > 0\n F-measure of detecting whether frames belong in the same cluster\n\n "
] |
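A pure-numpy sketch of the pair-counting step above on four toy frame labels, so the precision/recall definitions are concrete (the label values are made up):

import numpy as np

y_ref = np.array([0, 0, 1, 1])
y_est = np.array([0, 0, 0, 1])

agree_ref = np.equal.outer(y_ref, y_ref)
agree_est = np.equal.outer(y_est, y_est)
n_agree_ref = (agree_ref.sum() - len(y_ref)) / 2.0    # 2 same-label pairs in ref
n_agree_est = (agree_est.sum() - len(y_est)) / 2.0    # 3 same-label pairs in est
n_matches = (np.logical_and(agree_ref, agree_est).sum() - len(y_ref)) / 2.0  # 1

precision, recall = n_matches / n_agree_est, n_matches / n_agree_ref   # 1/3, 1/2
f1 = 2 * precision * recall / (precision + recall)                     # 0.4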
Please provide a description of the function:def rand_index(reference_intervals, reference_labels,
estimated_intervals, estimated_labels,
frame_size=0.1, beta=1.0):
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
return 0., 0., 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
# Build the reference label agreement matrix
agree_ref = np.equal.outer(y_ref, y_ref)
# Repeat for estimate
agree_est = np.equal.outer(y_est, y_est)
# Find where they agree
matches_pos = np.logical_and(agree_ref, agree_est)
# Find where they disagree
matches_neg = np.logical_and(~agree_ref, ~agree_est)
n_pairs = len(y_ref) * (len(y_ref) - 1) / 2.0
n_matches_pos = (matches_pos.sum() - len(y_ref)) / 2.0
n_matches_neg = matches_neg.sum() / 2.0
rand = (n_matches_pos + n_matches_neg) / n_pairs
return rand | [
"(Non-adjusted) Rand index.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> # Trim or pad the estimate to match reference timing\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,\n ... ref_labels,\n ... t_min=0)\n >>> (est_intervals,\n ... est_labels) = mir_eval.util.adjust_intervals(\n ... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())\n >>> rand_index = mir_eval.structure.rand_index(ref_intervals,\n ... ref_labels,\n ... est_intervals,\n ... est_labels)\n\n Parameters\n ----------\n reference_intervals : np.ndarray, shape=(n, 2)\n reference segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n reference_labels : list, shape=(n,)\n reference segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_intervals : np.ndarray, shape=(m, 2)\n estimated segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_labels : list, shape=(m,)\n estimated segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n frame_size : float > 0\n length (in seconds) of frames for clustering\n (Default value = 0.1)\n beta : float > 0\n beta value for F-measure\n (Default value = 1.0)\n\n Returns\n -------\n rand_index : float > 0\n Rand index\n\n "
] |
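Continuing the toy labels from the pairwise sketch, the (non-adjusted) Rand index also credits pairs that are separated in both labelings:

import numpy as np

y_ref = np.array([0, 0, 1, 1])
y_est = np.array([0, 0, 0, 1])

agree_ref = np.equal.outer(y_ref, y_ref)
agree_est = np.equal.outer(y_est, y_est)
n_pairs = len(y_ref) * (len(y_ref) - 1) / 2.0                              # 6
n_pos = (np.logical_and(agree_ref, agree_est).sum() - len(y_ref)) / 2.0    # 1
n_neg = np.logical_and(~agree_ref, ~agree_est).sum() / 2.0                 # 2
rand = (n_pos + n_neg) / n_pairs                                           # 0.5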
Please provide a description of the function:def _contingency_matrix(reference_indices, estimated_indices):
ref_classes, ref_class_idx = np.unique(reference_indices,
return_inverse=True)
est_classes, est_class_idx = np.unique(estimated_indices,
return_inverse=True)
n_ref_classes = ref_classes.shape[0]
n_est_classes = est_classes.shape[0]
# Using coo_matrix is faster than histogram2d
return scipy.sparse.coo_matrix((np.ones(ref_class_idx.shape[0]),
(ref_class_idx, est_class_idx)),
shape=(n_ref_classes, n_est_classes),
dtype=np.int).toarray() | [
"Computes the contingency matrix of a true labeling vs an estimated one.\n\n Parameters\n ----------\n reference_indices : np.ndarray\n Array of reference indices\n estimated_indices : np.ndarray\n Array of estimated indices\n\n Returns\n -------\n contingency_matrix : np.ndarray\n Contingency matrix, shape=(#reference indices, #estimated indices)\n .. note:: Based on sklearn.metrics.cluster.contingency_matrix\n\n "
] |
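A toy contingency matrix built the same way as the helper above (numpy and scipy only; the label indices are made up):

import numpy as np
import scipy.sparse

y_ref = np.array([0, 0, 1, 1])
y_est = np.array([0, 0, 0, 1])
ref_classes, ref_idx = np.unique(y_ref, return_inverse=True)
est_classes, est_idx = np.unique(y_est, return_inverse=True)
contingency = scipy.sparse.coo_matrix(
    (np.ones(len(ref_idx)), (ref_idx, est_idx)),
    shape=(len(ref_classes), len(est_classes)), dtype=int).toarray()
# contingency == [[2, 0],
#                 [1, 1]]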
Please provide a description of the function:def _adjusted_rand_index(reference_indices, estimated_indices):
n_samples = len(reference_indices)
ref_classes = np.unique(reference_indices)
est_classes = np.unique(estimated_indices)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each sample is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
ref_classes.shape[0] == est_classes.shape[0] == 0 or
(ref_classes.shape[0] == est_classes.shape[0] ==
len(reference_indices))):
return 1.0
contingency = _contingency_matrix(reference_indices, estimated_indices)
# Compute the ARI using the contingency data
sum_comb_c = sum(scipy.special.comb(n_c, 2, exact=1) for n_c in
contingency.sum(axis=1))
sum_comb_k = sum(scipy.special.comb(n_k, 2, exact=1) for n_k in
contingency.sum(axis=0))
sum_comb = sum((scipy.special.comb(n_ij, 2, exact=1) for n_ij in
contingency.flatten()))
prod_comb = (sum_comb_c * sum_comb_k)/float(scipy.special.comb(n_samples,
2))
mean_comb = (sum_comb_k + sum_comb_c)/2.
return (sum_comb - prod_comb)/(mean_comb - prod_comb) | [
"Compute the Rand index, adjusted for change.\n\n Parameters\n ----------\n reference_indices : np.ndarray\n Array of reference indices\n estimated_indices : np.ndarray\n Array of estimated indices\n\n Returns\n -------\n ari : float\n Adjusted Rand index\n\n .. note:: Based on sklearn.metrics.cluster.adjusted_rand_score\n\n "
] |
Please provide a description of the function:def ari(reference_intervals, reference_labels,
estimated_intervals, estimated_labels,
frame_size=0.1):
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
return 0., 0., 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
return _adjusted_rand_index(y_ref, y_est) | [
"Adjusted Rand Index (ARI) for frame clustering segmentation evaluation.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> # Trim or pad the estimate to match reference timing\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,\n ... ref_labels,\n ... t_min=0)\n >>> (est_intervals,\n ... est_labels) = mir_eval.util.adjust_intervals(\n ... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())\n >>> ari_score = mir_eval.structure.ari(ref_intervals, ref_labels,\n ... est_intervals, est_labels)\n\n Parameters\n ----------\n reference_intervals : np.ndarray, shape=(n, 2)\n reference segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n reference_labels : list, shape=(n,)\n reference segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_intervals : np.ndarray, shape=(m, 2)\n estimated segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_labels : list, shape=(m,)\n estimated segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n frame_size : float > 0\n length (in seconds) of frames for clustering\n (Default value = 0.1)\n\n Returns\n -------\n ari_score : float > 0\n Adjusted Rand index between segmentations.\n\n "
] |
Please provide a description of the function:def _mutual_info_score(reference_indices, estimated_indices, contingency=None):
if contingency is None:
contingency = _contingency_matrix(reference_indices,
estimated_indices).astype(float)
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
log_outer = -np.log(outer[nnz]) + np.log(pi.sum()) + np.log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - np.log(contingency_sum)) +
contingency_nm * log_outer)
return mi.sum() | [
"Compute the mutual information between two sequence labelings.\n\n Parameters\n ----------\n reference_indices : np.ndarray\n Array of reference indices\n estimated_indices : np.ndarray\n Array of estimated indices\n contingency : np.ndarray\n Pre-computed contingency matrix. If None, one will be computed.\n (Default value = None)\n\n Returns\n -------\n mi : float\n Mutual information\n\n .. note:: Based on sklearn.metrics.cluster.mutual_info_score\n\n "
] |
Please provide a description of the function:def _entropy(labels):
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = np.bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - np.log(pi_sum))) | [
"Calculates the entropy for a labeling.\n\n Parameters\n ----------\n labels : list-like\n List of labels.\n\n Returns\n -------\n entropy : float\n Entropy of the labeling.\n\n .. note:: Based on sklearn.metrics.cluster.entropy\n\n "
] |
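A worked check of the entropy helper above; note that it works in nats (natural log), so a balanced two-class labeling gives ln(2):

import numpy as np

labels = np.array([0, 0, 1, 1])
_, label_idx = np.unique(labels, return_inverse=True)
pi = np.bincount(label_idx).astype(float)
pi = pi[pi > 0]
entropy = -np.sum((pi / pi.sum()) * (np.log(pi) - np.log(pi.sum())))
# entropy ≈ 0.6931, i.e. np.log(2)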
Please provide a description of the function:def _adjusted_mutual_info_score(reference_indices, estimated_indices):
n_samples = len(reference_indices)
ref_classes = np.unique(reference_indices)
est_classes = np.unique(estimated_indices)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
ref_classes.shape[0] == est_classes.shape[0] == 0):
return 1.0
contingency = _contingency_matrix(reference_indices,
estimated_indices).astype(float)
# Calculate the MI for the two clusterings
mi = _mutual_info_score(reference_indices, estimated_indices,
contingency=contingency)
# The following code is based on
# sklearn.metrics.cluster.expected_mutual_information
R, C = contingency.shape
N = float(n_samples)
a = np.sum(contingency, axis=1).astype(np.int32)
b = np.sum(contingency, axis=0).astype(np.int32)
# There are three major terms in the EMI equation, which are multiplied
# together and then summed over varying nij values.
# While nijs[0] will never be used, having it simplifies the indexing.
nijs = np.arange(0, max(np.max(a), np.max(b)) + 1, dtype='float')
# Stops divide-by-zero warnings. As it's not used, this is not an issue.
nijs[0] = 1
# term1 is nij / N
term1 = nijs / N
# term2 is log((N*nij) / (a * b)) == log(N * nij) - log(a * b)
# term2 uses the outer product
log_ab_outer = np.log(np.outer(a, b))
# term2 uses N * nij
log_Nnij = np.log(N * nijs)
# term3 is large and involves many factorials. Calculate these in log
# space to stop overflows.
gln_a = scipy.special.gammaln(a + 1)
gln_b = scipy.special.gammaln(b + 1)
gln_Na = scipy.special.gammaln(N - a + 1)
gln_Nb = scipy.special.gammaln(N - b + 1)
gln_N = scipy.special.gammaln(N + 1)
gln_nij = scipy.special.gammaln(nijs + 1)
# start and end values for nij terms for each summation.
start = np.array([[v - N + w for w in b] for v in a], dtype='int')
start = np.maximum(start, 1)
end = np.minimum(np.resize(a, (C, R)).T, np.resize(b, (R, C))) + 1
# emi itself is a summation over the various values.
emi = 0
for i in range(R):
for j in range(C):
for nij in range(start[i, j], end[i, j]):
term2 = log_Nnij[nij] - log_ab_outer[i, j]
# Numerators are positive, denominators are negative.
gln = (gln_a[i] + gln_b[j] + gln_Na[i] + gln_Nb[j] -
gln_N - gln_nij[nij] -
scipy.special.gammaln(a[i] - nij + 1) -
scipy.special.gammaln(b[j] - nij + 1) -
scipy.special.gammaln(N - a[i] - b[j] + nij + 1))
term3 = np.exp(gln)
emi += (term1[nij] * term2 * term3)
# Calculate entropy for each labeling
h_true, h_pred = _entropy(reference_indices), _entropy(estimated_indices)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami | [
"Compute the mutual information between two sequence labelings, adjusted for\n chance.\n\n Parameters\n ----------\n reference_indices : np.ndarray\n Array of reference indices\n\n estimated_indices : np.ndarray\n Array of estimated indices\n\n Returns\n -------\n ami : float <= 1.0\n Mutual information\n\n .. note:: Based on sklearn.metrics.cluster.adjusted_mutual_info_score\n and sklearn.metrics.cluster.expected_mutual_info_score\n\n "
] |
Please provide a description of the function:def _normalized_mutual_info_score(reference_indices, estimated_indices):
ref_classes = np.unique(reference_indices)
est_classes = np.unique(estimated_indices)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
ref_classes.shape[0] == est_classes.shape[0] == 0):
return 1.0
contingency = _contingency_matrix(reference_indices,
estimated_indices).astype(float)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = _mutual_info_score(reference_indices, estimated_indices,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = _entropy(reference_indices), _entropy(estimated_indices)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi | [
"Compute the mutual information between two sequence labelings, adjusted for\n chance.\n\n Parameters\n ----------\n reference_indices : np.ndarray\n Array of reference indices\n\n estimated_indices : np.ndarray\n Array of estimated indices\n\n Returns\n -------\n nmi : float <= 1.0\n Normalized mutual information\n\n .. note:: Based on sklearn.metrics.cluster.normalized_mutual_info_score\n\n "
] |
Please provide a description of the function:def mutual_information(reference_intervals, reference_labels,
estimated_intervals, estimated_labels,
frame_size=0.1):
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
return 0., 0., 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
# Mutual information
mutual_info = _mutual_info_score(y_ref, y_est)
# Adjusted mutual information
adj_mutual_info = _adjusted_mutual_info_score(y_ref, y_est)
# Normalized mutual information
norm_mutual_info = _normalized_mutual_info_score(y_ref, y_est)
return mutual_info, adj_mutual_info, norm_mutual_info | [
"Frame-clustering segmentation: mutual information metrics.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> # Trim or pad the estimate to match reference timing\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,\n ... ref_labels,\n ... t_min=0)\n >>> (est_intervals,\n ... est_labels) = mir_eval.util.adjust_intervals(\n ... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())\n >>> mi, ami, nmi = mir_eval.structure.mutual_information(ref_intervals,\n ... ref_labels,\n ... est_intervals,\n ... est_labels)\n\n Parameters\n ----------\n reference_intervals : np.ndarray, shape=(n, 2)\n reference segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n reference_labels : list, shape=(n,)\n reference segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_intervals : np.ndarray, shape=(m, 2)\n estimated segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_labels : list, shape=(m,)\n estimated segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n frame_size : float > 0\n length (in seconds) of frames for clustering\n (Default value = 0.1)\n\n Returns\n -------\n MI : float > 0\n Mutual information between segmentations\n AMI : float\n Adjusted mutual information between segmentations.\n NMI : float > 0\n Normalize mutual information between segmentations\n\n "
] |
Please provide a description of the function:def nce(reference_intervals, reference_labels, estimated_intervals,
estimated_labels, frame_size=0.1, beta=1.0, marginal=False):
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
return 0., 0., 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
# Make the contingency table: shape = (n_ref, n_est)
contingency = _contingency_matrix(y_ref, y_est).astype(float)
# Normalize by the number of frames
contingency = contingency / len(y_ref)
# Compute the marginals
p_est = contingency.sum(axis=0)
p_ref = contingency.sum(axis=1)
# H(true | prediction) = - sum_j P[estimated = j] *
#     sum_i P[true = i | estimated = j] log P[true = i | estimated = j]
# entropy sums over axis=0, which is true labels
true_given_est = p_est.dot(scipy.stats.entropy(contingency, base=2))
pred_given_ref = p_ref.dot(scipy.stats.entropy(contingency.T, base=2))
if marginal:
# Normalize conditional entropy by marginal entropy
z_ref = scipy.stats.entropy(p_ref, base=2)
z_est = scipy.stats.entropy(p_est, base=2)
else:
z_ref = np.log2(contingency.shape[0])
z_est = np.log2(contingency.shape[1])
score_under = 0.0
if z_ref > 0:
score_under = 1. - true_given_est / z_ref
score_over = 0.0
if z_est > 0:
score_over = 1. - pred_given_ref / z_est
f_measure = util.f_measure(score_over, score_under, beta=beta)
return score_over, score_under, f_measure | [
"Frame-clustering segmentation: normalized conditional entropy\n\n Computes cross-entropy of cluster assignment, normalized by the\n max-entropy.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> # Trim or pad the estimate to match reference timing\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,\n ... ref_labels,\n ... t_min=0)\n >>> (est_intervals,\n ... est_labels) = mir_eval.util.adjust_intervals(\n ... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())\n >>> S_over, S_under, S_F = mir_eval.structure.nce(ref_intervals,\n ... ref_labels,\n ... est_intervals,\n ... est_labels)\n\n Parameters\n ----------\n reference_intervals : np.ndarray, shape=(n, 2)\n reference segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n reference_labels : list, shape=(n,)\n reference segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_intervals : np.ndarray, shape=(m, 2)\n estimated segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_labels : list, shape=(m,)\n estimated segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n frame_size : float > 0\n length (in seconds) of frames for clustering\n (Default value = 0.1)\n beta : float > 0\n beta for F-measure\n (Default value = 1.0)\n\n marginal : bool\n If `False`, normalize conditional entropy by uniform entropy.\n If `True`, normalize conditional entropy by the marginal entropy.\n (Default value = False)\n\n Returns\n -------\n S_over\n Over-clustering score:\n\n - For `marginal=False`, ``1 - H(y_est | y_ref) / log(|y_est|)``\n\n - For `marginal=True`, ``1 - H(y_est | y_ref) / H(y_est)``\n\n If `|y_est|==1`, then `S_over` will be 0.\n\n S_under\n Under-clustering score:\n\n - For `marginal=False`, ``1 - H(y_ref | y_est) / log(|y_ref|)``\n\n - For `marginal=True`, ``1 - H(y_ref | y_est) / H(y_ref)``\n\n If `|y_ref|==1`, then `S_under` will be 0.\n\n S_F\n F-measure for (S_over, S_under)\n\n "
] |
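A sketch of the over/under-segmentation behaviour (assuming the function is importable as mir_eval.segment.nce; the docstring above refers to it under mir_eval.structure). The estimate deliberately over-segments, so under-segmentation is perfect while over-segmentation is penalized:

import numpy as np
import mir_eval

ref_intervals = np.array([[0., 5.], [5., 10.]])
ref_labels = ['A', 'B']
est_intervals = np.array([[0., 2.5], [2.5, 5.], [5., 7.5], [7.5, 10.]])
est_labels = ['a', 'b', 'c', 'd']

s_over, s_under, s_f = mir_eval.segment.nce(ref_intervals, ref_labels,
                                            est_intervals, est_labels)
# Every estimated segment lies inside a single reference segment, so
# s_under == 1.0; the four-way split costs precision, giving s_over ≈ 0.5
# and s_f ≈ 0.67 (exact values depend on the 0.1 s frame quantization).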
Please provide a description of the function:def vmeasure(reference_intervals, reference_labels, estimated_intervals,
estimated_labels, frame_size=0.1, beta=1.0):
return nce(reference_intervals, reference_labels,
estimated_intervals, estimated_labels,
frame_size=frame_size, beta=beta,
marginal=True) | [
"Frame-clustering segmentation: v-measure\n\n Computes cross-entropy of cluster assignment, normalized by the\n marginal-entropy.\n\n This is equivalent to `nce(..., marginal=True)`.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> # Trim or pad the estimate to match reference timing\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,\n ... ref_labels,\n ... t_min=0)\n >>> (est_intervals,\n ... est_labels) = mir_eval.util.adjust_intervals(\n ... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())\n >>> V_precision, V_recall, V_F = mir_eval.structure.vmeasure(ref_intervals,\n ... ref_labels,\n ... est_intervals,\n ... est_labels)\n\n Parameters\n ----------\n reference_intervals : np.ndarray, shape=(n, 2)\n reference segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n reference_labels : list, shape=(n,)\n reference segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_intervals : np.ndarray, shape=(m, 2)\n estimated segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n estimated_labels : list, shape=(m,)\n estimated segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n frame_size : float > 0\n length (in seconds) of frames for clustering\n (Default value = 0.1)\n beta : float > 0\n beta for F-measure\n (Default value = 1.0)\n\n Returns\n -------\n V_precision\n Over-clustering score:\n ``1 - H(y_est | y_ref) / H(y_est)``\n\n If `|y_est|==1`, then `V_precision` will be 0.\n\n V_recall\n Under-clustering score:\n ``1 - H(y_ref | y_est) / H(y_ref)``\n\n If `|y_ref|==1`, then `V_recall` will be 0.\n\n V_F\n F-measure for (V_precision, V_recall)\n\n "
] |
Please provide a description of the function:def evaluate(ref_intervals, ref_labels, est_intervals, est_labels, **kwargs):
# Adjust timespan of estimations relative to ground truth
ref_intervals, ref_labels = \
util.adjust_intervals(ref_intervals, labels=ref_labels, t_min=0.0)
est_intervals, est_labels = \
util.adjust_intervals(est_intervals, labels=est_labels, t_min=0.0,
t_max=ref_intervals.max())
# Now compute all the metrics
scores = collections.OrderedDict()
# Boundary detection
# Force these values for window
kwargs['window'] = .5
scores['[email protected]'], scores['[email protected]'], scores['[email protected]'] = \
util.filter_kwargs(detection, ref_intervals, est_intervals, **kwargs)
kwargs['window'] = 3.0
scores['[email protected]'], scores['[email protected]'], scores['[email protected]'] = \
util.filter_kwargs(detection, ref_intervals, est_intervals, **kwargs)
# Boundary deviation
scores['Ref-to-est deviation'], scores['Est-to-ref deviation'] = \
util.filter_kwargs(deviation, ref_intervals, est_intervals, **kwargs)
# Pairwise clustering
(scores['Pairwise Precision'],
scores['Pairwise Recall'],
scores['Pairwise F-measure']) = util.filter_kwargs(pairwise,
ref_intervals,
ref_labels,
est_intervals,
est_labels, **kwargs)
# Rand index
scores['Rand Index'] = util.filter_kwargs(rand_index, ref_intervals,
ref_labels, est_intervals,
est_labels, **kwargs)
# Adjusted rand index
scores['Adjusted Rand Index'] = util.filter_kwargs(ari, ref_intervals,
ref_labels,
est_intervals,
est_labels, **kwargs)
# Mutual information metrics
(scores['Mutual Information'],
scores['Adjusted Mutual Information'],
scores['Normalized Mutual Information']) = \
util.filter_kwargs(mutual_information, ref_intervals, ref_labels,
est_intervals, est_labels, **kwargs)
# Conditional entropy metrics
scores['NCE Over'], scores['NCE Under'], scores['NCE F-measure'] = \
util.filter_kwargs(nce, ref_intervals, ref_labels, est_intervals,
est_labels, **kwargs)
# V-measure metrics
scores['V Precision'], scores['V Recall'], scores['V-measure'] = \
util.filter_kwargs(vmeasure, ref_intervals, ref_labels, est_intervals,
est_labels, **kwargs)
return scores | [
"Compute all metrics for the given reference and estimated annotations.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> scores = mir_eval.segment.evaluate(ref_intervals, ref_labels,\n ... est_intervals, est_labels)\n\n Parameters\n ----------\n ref_intervals : np.ndarray, shape=(n, 2)\n reference segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n ref_labels : list, shape=(n,)\n reference segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n est_intervals : np.ndarray, shape=(m, 2)\n estimated segment intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n est_labels : list, shape=(m,)\n estimated segment labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n kwargs\n Additional keyword arguments which will be passed to the\n appropriate metric or preprocessing functions.\n\n Returns\n -------\n scores : dict\n Dictionary of scores, where the key is the metric name (str) and\n the value is the (float) score achieved.\n\n "
] |
Please provide a description of the function:def validate_tempi(tempi, reference=True):
if tempi.size != 2:
raise ValueError('tempi must have exactly two values')
if not np.all(np.isfinite(tempi)) or np.any(tempi < 0):
raise ValueError('tempi={} must be non-negative numbers'.format(tempi))
if reference and np.all(tempi == 0):
raise ValueError('reference tempi={} must have one'
' value greater than zero'.format(tempi)) | [
"Checks that there are two non-negative tempi.\n For a reference value, at least one tempo has to be greater than zero.\n\n Parameters\n ----------\n tempi : np.ndarray\n length-2 array of tempo, in bpm\n\n reference : bool\n indicates a reference value\n\n "
] |
Please provide a description of the function:def validate(reference_tempi, reference_weight, estimated_tempi):
validate_tempi(reference_tempi, reference=True)
validate_tempi(estimated_tempi, reference=False)
if reference_weight < 0 or reference_weight > 1:
raise ValueError('Reference weight must lie in range [0, 1]') | [
"Checks that the input annotations to a metric look like valid tempo\n annotations.\n\n Parameters\n ----------\n reference_tempi : np.ndarray\n reference tempo values, in bpm\n\n reference_weight : float\n perceptual weight of slow vs fast in reference\n\n estimated_tempi : np.ndarray\n estimated tempo values, in bpm\n\n "
] |
Please provide a description of the function:def detection(reference_tempi, reference_weight, estimated_tempi, tol=0.08):
validate(reference_tempi, reference_weight, estimated_tempi)
if tol < 0 or tol > 1:
raise ValueError('invalid tolerance {}: must lie in the range '
'[0, 1]'.format(tol))
if tol == 0.:
warnings.warn('A tolerance of 0.0 may not '
'lead to the results you expect.')
hits = [False, False]
for i, ref_t in enumerate(reference_tempi):
if ref_t > 0:
# Compute the relative error for this reference tempo
f_ref_t = float(ref_t)
relative_error = np.min(np.abs(ref_t - estimated_tempi) / f_ref_t)
# Count the hits
hits[i] = relative_error <= tol
p_score = reference_weight * hits[0] + (1.0-reference_weight) * hits[1]
one_correct = bool(np.max(hits))
both_correct = bool(np.min(hits))
return p_score, one_correct, both_correct | [
"Compute the tempo detection accuracy metric.\n\n Parameters\n ----------\n reference_tempi : np.ndarray, shape=(2,)\n Two non-negative reference tempi\n\n reference_weight : float > 0\n The relative strength of ``reference_tempi[0]`` vs\n ``reference_tempi[1]``.\n\n estimated_tempi : np.ndarray, shape=(2,)\n Two non-negative estimated tempi.\n\n tol : float in [0, 1]:\n The maximum allowable deviation from a reference tempo to\n count as a hit.\n ``|est_t - ref_t| <= tol * ref_t``\n (Default value = 0.08)\n\n Returns\n -------\n p_score : float in [0, 1]\n Weighted average of recalls:\n ``reference_weight * hits[0] + (1 - reference_weight) * hits[1]``\n\n one_correct : bool\n True if at least one reference tempo was correctly estimated\n\n both_correct : bool\n True if both reference tempi were correctly estimated\n\n Raises\n ------\n ValueError\n If the input tempi are ill-formed\n\n If the reference weight is not in the range [0, 1]\n\n If ``tol < 0`` or ``tol > 1``.\n "
] |
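A toy example of the tempo hit test (assuming the function is exposed as mir_eval.tempo.detection); the tempi and the perceptual weight are made up:

import numpy as np
import mir_eval

reference_tempi = np.array([60., 120.])
reference_weight = 0.7                    # slower tempo perceptually dominant
estimated_tempi = np.array([62., 180.])

p, one_ok, both_ok = mir_eval.tempo.detection(reference_tempi,
                                              reference_weight,
                                              estimated_tempi)
# |62 - 60| / 60 ≈ 0.033 <= 0.08, so the 60 bpm tempo is hit; neither estimate
# is within 8% of 120 bpm, so p == 0.7, one_ok is True, both_ok is False.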
Please provide a description of the function:def evaluate(reference_tempi, reference_weight, estimated_tempi, **kwargs):
# Compute all metrics
scores = collections.OrderedDict()
(scores['P-score'],
scores['One-correct'],
scores['Both-correct']) = util.filter_kwargs(detection, reference_tempi,
reference_weight,
estimated_tempi,
**kwargs)
return scores | [
"Compute all metrics for the given reference and estimated annotations.\n\n Parameters\n ----------\n reference_tempi : np.ndarray, shape=(2,)\n Two non-negative reference tempi\n\n reference_weight : float > 0\n The relative strength of ``reference_tempi[0]`` vs\n ``reference_tempi[1]``.\n\n estimated_tempi : np.ndarray, shape=(2,)\n Two non-negative estimated tempi.\n\n kwargs\n Additional keyword arguments which will be passed to the\n appropriate metric or preprocessing functions.\n\n Returns\n -------\n scores : dict\n Dictionary of scores, where the key is the metric name (str) and\n the value is the (float) score achieved.\n "
] |
Please provide a description of the function:def validate(ref_time, ref_freqs, est_time, est_freqs):
util.validate_events(ref_time, max_time=MAX_TIME)
util.validate_events(est_time, max_time=MAX_TIME)
if ref_time.size == 0:
warnings.warn("Reference times are empty.")
if ref_time.ndim != 1:
raise ValueError("Reference times have invalid dimension")
if len(ref_freqs) == 0:
warnings.warn("Reference frequencies are empty.")
if est_time.size == 0:
warnings.warn("Estimated times are empty.")
if est_time.ndim != 1:
raise ValueError("Estimated times have invalid dimension")
if len(est_freqs) == 0:
warnings.warn("Estimated frequencies are empty.")
if ref_time.size != len(ref_freqs):
raise ValueError('Reference times and frequencies have unequal '
'lengths.')
if est_time.size != len(est_freqs):
raise ValueError('Estimate times and frequencies have unequal '
'lengths.')
for freq in ref_freqs:
util.validate_frequencies(freq, max_freq=MAX_FREQ, min_freq=MIN_FREQ,
allow_negatives=False)
for freq in est_freqs:
util.validate_frequencies(freq, max_freq=MAX_FREQ, min_freq=MIN_FREQ,
allow_negatives=False) | [
"Checks that the time and frequency inputs are well-formed.\n\n Parameters\n ----------\n ref_time : np.ndarray\n reference time stamps in seconds\n ref_freqs : list of np.ndarray\n reference frequencies in Hz\n est_time : np.ndarray\n estimate time stamps in seconds\n est_freqs : list of np.ndarray\n estimated frequencies in Hz\n\n "
] |
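A small illustrative check of the length contract (inputs are made up): two reference time stamps paired with a single frequency frame trigger the mismatch error.
import numpy as np

ref_time = np.array([0.0, 0.01])
ref_freqs = [np.array([440.0])]     # only one frame of frequencies
est_time = np.array([0.0])
est_freqs = [np.array([440.0])]
validate(ref_time, ref_freqs, est_time, est_freqs)
# -> ValueError: Reference times and frequencies have unequal lengths.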
Please provide a description of the function:def resample_multipitch(times, frequencies, target_times):
if target_times.size == 0:
return []
if times.size == 0:
return [np.array([])]*len(target_times)
n_times = len(frequencies)
# scipy's interpolate doesn't handle ragged arrays. Instead, we interpolate
# the frequency index and then map back to the frequency values.
# This only works because we're using a nearest neighbor interpolator!
frequency_index = np.arange(0, n_times)
# times are already ordered so assume_sorted=True for efficiency
# since we're interpolating the index, fill_value is set to the first index
# that is out of range. We handle this in the next line.
new_frequency_index = scipy.interpolate.interp1d(
times, frequency_index, kind='nearest', bounds_error=False,
assume_sorted=True, fill_value=n_times)(target_times)
# create a list of frequencies plus an additional empty element at the end
# for target time stamps that fall outside the interpolation range
freq_vals = frequencies + [np.array([])]
# map interpolated indices back to frequency values
frequencies_resampled = [
freq_vals[i] for i in new_frequency_index.astype(int)]
return frequencies_resampled | [
"Resamples multipitch time series to a new timescale. Values in\n ``target_times`` outside the range of ``times`` return no pitch estimate.\n\n Parameters\n ----------\n times : np.ndarray\n Array of time stamps\n frequencies : list of np.ndarray\n List of np.ndarrays of frequency values\n target_times : np.ndarray\n Array of target time stamps\n\n Returns\n -------\n frequencies_resampled : list of numpy arrays\n Frequency list of lists resampled to new timebase\n "
] |
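A short sketch of the nearest-neighbor behavior, using made-up times and frequencies:
import numpy as np

times = np.array([0.00, 0.01, 0.02])
frequencies = [np.array([220.0]), np.array([220.0, 440.0]), np.array([])]
target_times = np.array([0.001, 0.012, 0.05])
resample_multipitch(times, frequencies, target_times)
# -> [array([220.]), array([220., 440.]), array([])]
# 0.001 and 0.012 snap to the frames at 0.00 and 0.01; 0.05 lies outside the
# input range, so it maps to the empty padding frame appended at the end.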
Please provide a description of the function:def compute_num_true_positives(ref_freqs, est_freqs, window=0.5, chroma=False):
n_frames = len(ref_freqs)
true_positives = np.zeros((n_frames, ))
for i, (ref_frame, est_frame) in enumerate(zip(ref_freqs, est_freqs)):
if chroma:
# match chroma-wrapped frequency events
matching = util.match_events(
ref_frame, est_frame, window,
distance=util._outer_distance_mod_n)
else:
# match frequency events within tolerance window in semitones
matching = util.match_events(ref_frame, est_frame, window)
true_positives[i] = len(matching)
return true_positives | [
"Compute the number of true positives in an estimate given a reference.\n A frequency is correct if it is within a quartertone of the\n correct frequency.\n\n Parameters\n ----------\n ref_freqs : list of np.ndarray\n reference frequencies (MIDI)\n est_freqs : list of np.ndarray\n estimated frequencies (MIDI)\n window : float\n Window size, in semitones\n chroma : bool\n If True, computes distances modulo n.\n If True, ``ref_freqs`` and ``est_freqs`` should be wrapped modulo n.\n\n Returns\n -------\n true_positives : np.ndarray\n Array the same length as ref_freqs containing the number of true\n positives.\n\n "
] |
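A tiny made-up example with frequencies already converted to MIDI note numbers; one of the two estimates falls inside the half-semitone window:
import numpy as np

ref_freqs = [np.array([60.0, 64.0])]   # one frame: C4 and E4
est_freqs = [np.array([60.3, 67.0])]   # 60.3 matches 60.0; 67.0 matches nothing
compute_num_true_positives(ref_freqs, est_freqs, window=0.5)
# -> array([1.])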
Please provide a description of the function:def compute_accuracy(true_positives, n_ref, n_est):
true_positive_sum = float(true_positives.sum())
n_est_sum = n_est.sum()
if n_est_sum > 0:
precision = true_positive_sum/n_est_sum
else:
warnings.warn("Estimate frequencies are all empty.")
precision = 0.0
n_ref_sum = n_ref.sum()
if n_ref_sum > 0:
recall = true_positive_sum/n_ref_sum
else:
warnings.warn("Reference frequencies are all empty.")
recall = 0.0
acc_denom = (n_est + n_ref - true_positives).sum()
if acc_denom > 0:
acc = true_positive_sum/acc_denom
else:
acc = 0.0
return precision, recall, acc | [
"Compute accuracy metrics.\n\n Parameters\n ----------\n true_positives : np.ndarray\n Array containing the number of true positives at each time point.\n n_ref : np.ndarray\n Array containing the number of reference frequencies at each time\n point.\n n_est : np.ndarray\n Array containing the number of estimate frequencies at each time point.\n\n Returns\n -------\n precision : float\n ``sum(true_positives)/sum(n_est)``\n recall : float\n ``sum(true_positives)/sum(n_ref)``\n acc : float\n ``sum(true_positives)/sum(n_est + n_ref - true_positives)``\n\n "
] |
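A worked example with made-up per-frame counts:
import numpy as np

true_positives = np.array([1., 2., 0.])
n_ref = np.array([2, 2, 1])
n_est = np.array([1, 3, 0])
compute_accuracy(true_positives, n_ref, n_est)
# -> (0.75, 0.6, 0.5)
# precision = 3/4, recall = 3/5, accuracy = 3/(4 + 5 - 3)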
Please provide a description of the function:def compute_err_score(true_positives, n_ref, n_est):
n_ref_sum = float(n_ref.sum())
if n_ref_sum == 0:
warnings.warn("Reference frequencies are all empty.")
return 0., 0., 0., 0.
# Substitution error
e_sub = (np.min([n_ref, n_est], axis=0) - true_positives).sum()/n_ref_sum
# compute the max of (n_ref - n_est) and 0
e_miss_numerator = n_ref - n_est
e_miss_numerator[e_miss_numerator < 0] = 0
# Miss error
e_miss = e_miss_numerator.sum()/n_ref_sum
# compute the max of (n_est - n_ref) and 0
e_fa_numerator = n_est - n_ref
e_fa_numerator[e_fa_numerator < 0] = 0
# False alarm error
e_fa = e_fa_numerator.sum()/n_ref_sum
# total error
e_tot = (np.max([n_ref, n_est], axis=0) - true_positives).sum()/n_ref_sum
return e_sub, e_miss, e_fa, e_tot | [
"Compute error score metrics.\n\n Parameters\n ----------\n true_positives : np.ndarray\n Array containing the number of true positives at each time point.\n n_ref : np.ndarray\n Array containing the number of reference frequencies at each time\n point.\n n_est : np.ndarray\n Array containing the number of estimate frequencies at each time point.\n\n Returns\n -------\n e_sub : float\n Substitution error\n e_miss : float\n Miss error\n e_fa : float\n False alarm error\n e_tot : float\n Total error\n\n "
] |
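A worked sketch with made-up per-frame counts (all four errors are normalized by the 5 reference events):
import numpy as np

true_positives = np.array([1., 1., 0.])
n_ref = np.array([2, 2, 1])
n_est = np.array([1, 3, 0])
compute_err_score(true_positives, n_ref, n_est)
# -> (0.2, 0.4, 0.2, 0.8)
# one substitution, two misses, and one false alarm out of 5 reference
# events; the errors decompose as e_tot == e_sub + e_miss + e_fa.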
Please provide a description of the function:def metrics(ref_time, ref_freqs, est_time, est_freqs, **kwargs):
validate(ref_time, ref_freqs, est_time, est_freqs)
# resample est_freqs if est_time differs from ref_time
if est_time.size != ref_time.size or not np.allclose(est_time, ref_time):
warnings.warn("Estimate times not equal to reference times. "
"Resampling to common time base.")
est_freqs = resample_multipitch(est_time, est_freqs, ref_time)
# convert frequencies from Hz to continuous midi note number
ref_freqs_midi = frequencies_to_midi(ref_freqs)
est_freqs_midi = frequencies_to_midi(est_freqs)
# compute chroma wrapped midi number
ref_freqs_chroma = midi_to_chroma(ref_freqs_midi)
est_freqs_chroma = midi_to_chroma(est_freqs_midi)
# count number of occurrences
n_ref = compute_num_freqs(ref_freqs_midi)
n_est = compute_num_freqs(est_freqs_midi)
# compute the number of true positives
true_positives = util.filter_kwargs(
compute_num_true_positives, ref_freqs_midi, est_freqs_midi, **kwargs)
# compute the number of true positives ignoring octave mistakes
true_positives_chroma = util.filter_kwargs(
compute_num_true_positives, ref_freqs_chroma,
est_freqs_chroma, chroma=True, **kwargs)
# compute accuracy metrics
precision, recall, accuracy = compute_accuracy(
true_positives, n_ref, n_est)
# compute error metrics
e_sub, e_miss, e_fa, e_tot = compute_err_score(
true_positives, n_ref, n_est)
# compute accuracy metrics ignoring octave mistakes
precision_chroma, recall_chroma, accuracy_chroma = compute_accuracy(
true_positives_chroma, n_ref, n_est)
# compute error metrics ignoring octave mistakes
e_sub_chroma, e_miss_chroma, e_fa_chroma, e_tot_chroma = compute_err_score(
true_positives_chroma, n_ref, n_est)
return (precision, recall, accuracy, e_sub, e_miss, e_fa, e_tot,
precision_chroma, recall_chroma, accuracy_chroma, e_sub_chroma,
e_miss_chroma, e_fa_chroma, e_tot_chroma) | [
"Compute multipitch metrics. All metrics are computed at the 'macro' level\n such that the frame true positive/false positive/false negative rates are\n summed across time and the metrics are computed on the combined values.\n\n Examples\n --------\n >>> ref_time, ref_freqs = mir_eval.io.load_ragged_time_series(\n ... 'reference.txt')\n >>> est_time, est_freqs = mir_eval.io.load_ragged_time_series(\n ... 'estimated.txt')\n >>> metris_tuple = mir_eval.multipitch.metrics(\n ... ref_time, ref_freqs, est_time, est_freqs)\n\n Parameters\n ----------\n ref_time : np.ndarray\n Time of each reference frequency value\n ref_freqs : list of np.ndarray\n List of np.ndarrays of reference frequency values\n est_time : np.ndarray\n Time of each estimated frequency value\n est_freqs : list of np.ndarray\n List of np.ndarrays of estimate frequency values\n kwargs\n Additional keyword arguments which will be passed to the\n appropriate metric or preprocessing functions.\n\n Returns\n -------\n precision : float\n Precision (TP/(TP + FP))\n recall : float\n Recall (TP/(TP + FN))\n accuracy : float\n Accuracy (TP/(TP + FP + FN))\n e_sub : float\n Substitution error\n e_miss : float\n Miss error\n e_fa : float\n False alarm error\n e_tot : float\n Total error\n precision_chroma : float\n Chroma precision\n recall_chroma : float\n Chroma recall\n accuracy_chroma : float\n Chroma accuracy\n e_sub_chroma : float\n Chroma substitution error\n e_miss_chroma : float\n Chroma miss error\n e_fa_chroma : float\n Chroma false alarm error\n e_tot_chroma : float\n Chroma total error\n\n "
] |
Please provide a description of the function:def evaluate(ref_time, ref_freqs, est_time, est_freqs, **kwargs):
scores = collections.OrderedDict()
(scores['Precision'],
scores['Recall'],
scores['Accuracy'],
scores['Substitution Error'],
scores['Miss Error'],
scores['False Alarm Error'],
scores['Total Error'],
scores['Chroma Precision'],
scores['Chroma Recall'],
scores['Chroma Accuracy'],
scores['Chroma Substitution Error'],
scores['Chroma Miss Error'],
scores['Chroma False Alarm Error'],
scores['Chroma Total Error']) = util.filter_kwargs(
metrics, ref_time, ref_freqs, est_time, est_freqs, **kwargs)
return scores | [
"Evaluate two multipitch (multi-f0) transcriptions, where the first is\n treated as the reference (ground truth) and the second as the estimate to\n be evaluated (prediction).\n\n Examples\n --------\n >>> ref_time, ref_freq = mir_eval.io.load_ragged_time_series('ref.txt')\n >>> est_time, est_freq = mir_eval.io.load_ragged_time_series('est.txt')\n >>> scores = mir_eval.multipitch.evaluate(ref_time, ref_freq,\n ... est_time, est_freq)\n\n Parameters\n ----------\n ref_time : np.ndarray\n Time of each reference frequency value\n ref_freqs : list of np.ndarray\n List of np.ndarrays of reference frequency values\n est_time : np.ndarray\n Time of each estimated frequency value\n est_freqs : list of np.ndarray\n List of np.ndarrays of estimate frequency values\n kwargs\n Additional keyword arguments which will be passed to the\n appropriate metric or preprocessing functions.\n\n Returns\n -------\n scores : dict\n Dictionary of scores, where the key is the metric name (str) and\n the value is the (float) score achieved.\n\n "
] |