code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def visualize(self, inputs: InputsType, preds: PredType,
**kwargs) -> List[np.ndarray]:
"""Visualize predictions.
Args:
inputs (List[Union[str, np.ndarray]]): Inputs for the inferencer.
preds (List[Dict]): Predictions of the model.
show (bool): Whether to display the image in a popup window.
Defaults to False.
wait_time (float): The interval of show (s). Defaults to 0.
draw_pred (bool): Whether to draw predicted bounding boxes.
Defaults to True.
fps (int): Frames per second for saving video. Defaults to 4.
out_type (str): Output format type, choose from 'img', 'gif',
'video'. Defaults to ``'img'``.
target_resolution (Tuple[int], optional): Set to
                (desired_width, desired_height) to have resized frames. If
either dimension is None, the frames are resized by keeping
the existing aspect ratio. Defaults to None.
vid_out_dir (str): Output directory of visualization results.
If left as empty, no file will be saved. Defaults to ''.
"""
if 'rec' in self.mode:
return self.actionrecog_inferencer.visualize(
                inputs, preds['rec'][0], **kwargs) | Visualize predictions. | visualize | python | open-mmlab/mmaction2 | mmaction/apis/inferencers/mmaction2_inferencer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/mmaction2_inferencer.py | Apache-2.0 |
def __call__(
self,
inputs: InputsType,
batch_size: int = 1,
**kwargs,
) -> dict:
"""Call the inferencer.
Args:
inputs (InputsType): Inputs for the inferencer. It can be a path
to image / image directory, or an array, or a list of these.
return_datasamples (bool): Whether to return results as
:obj:`BaseDataElement`. Defaults to False.
batch_size (int): Batch size. Defaults to 1.
**kwargs: Key words arguments passed to :meth:`preprocess`,
:meth:`forward`, :meth:`visualize` and :meth:`postprocess`.
Each key in kwargs should be in the corresponding set of
``preprocess_kwargs``, ``forward_kwargs``, ``visualize_kwargs``
and ``postprocess_kwargs``.
Returns:
dict: Inference and visualization results.
"""
(
preprocess_kwargs,
forward_kwargs,
visualize_kwargs,
postprocess_kwargs,
) = self._dispatch_kwargs(**kwargs)
ori_inputs = self._inputs_to_list(inputs)
preds = self.forward(ori_inputs, batch_size, **forward_kwargs)
visualization = self.visualize(
ori_inputs, preds,
**visualize_kwargs) # type: ignore # noqa: E501
results = self.postprocess(preds, visualization, **postprocess_kwargs)
        return results | Call the inferencer. | __call__ | python | open-mmlab/mmaction2 | mmaction/apis/inferencers/mmaction2_inferencer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/mmaction2_inferencer.py | Apache-2.0 |
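The `__call__` method above chains preprocessing, forward, visualization and postprocessing. Below is a hypothetical usage sketch; the constructor argument, model alias and demo path are assumptions for illustration, not taken from this section.

```python
# Hypothetical usage sketch -- the model alias, input path and kwargs are assumed.
from mmaction.apis.inferencers import MMAction2Inferencer

inferencer = MMAction2Inferencer(rec='tsn')              # assumed recognizer alias
results = inferencer('demo/demo.mp4', batch_size=1,      # assumed input video
                     vid_out_dir='outputs/')
print(results['predictions'][0]['rec_labels'])
```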
def _inputs_to_list(self, inputs: InputsType) -> list:
"""Preprocess the inputs to a list. The main difference from mmengine
version is that we don't list a directory cause input could be a frame
folder.
Preprocess inputs to a list according to its type:
- list or tuple: return inputs
- str: return a list containing the string. The string
could be a path to file, a url or other types of string according
to the task.
Args:
inputs (InputsType): Inputs for the inferencer.
Returns:
list: List of input for the :meth:`preprocess`.
"""
if not isinstance(inputs, (list, tuple)):
inputs = [inputs]
        return list(inputs) | Preprocess the inputs to a list. | _inputs_to_list | python | open-mmlab/mmaction2 | mmaction/apis/inferencers/mmaction2_inferencer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/mmaction2_inferencer.py | Apache-2.0 |
def postprocess(self,
preds: PredType,
visualization: Optional[List[np.ndarray]] = None,
print_result: bool = False,
pred_out_file: str = ''
) -> Union[ResType, Tuple[ResType, np.ndarray]]:
"""Postprocess predictions.
Args:
preds (Dict): Predictions of the model.
visualization (Optional[np.ndarray]): Visualized predictions.
print_result (bool): Whether to print the result.
Defaults to False.
pred_out_file (str): Output file name to store predictions
without images. Supported file formats are “json”, “yaml/yml”
and “pickle/pkl”. Defaults to ''.
Returns:
Dict or List[Dict]: Each dict contains the inference result of
each image. Possible keys are "rec_labels", "rec_scores"
"""
result_dict = {}
pred_results = [{} for _ in range(len(next(iter(preds.values()))))]
if 'rec' in self.mode:
for i, rec_pred in enumerate(preds['rec']):
result = dict(rec_labels=[], rec_scores=[])
for rec_pred_instance in rec_pred:
rec_dict_res = self.actionrecog_inferencer.pred2dict(
rec_pred_instance)
result['rec_labels'].append(rec_dict_res['pred_labels'])
result['rec_scores'].append(rec_dict_res['pred_scores'])
pred_results[i].update(result)
result_dict['predictions'] = pred_results
if print_result:
print(result_dict)
if pred_out_file != '':
mmengine.dump(result_dict, pred_out_file)
result_dict['visualization'] = visualization
        return result_dict | Postprocess predictions. | postprocess | python | open-mmlab/mmaction2 | mmaction/apis/inferencers/mmaction2_inferencer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/mmaction2_inferencer.py | Apache-2.0 |
def confusion_matrix(y_pred, y_real, normalize=None):
"""Compute confusion matrix.
Args:
y_pred (list[int] | np.ndarray[int]): Prediction labels.
y_real (list[int] | np.ndarray[int]): Ground truth labels.
normalize (str | None): Normalizes confusion matrix over the true
(rows), predicted (columns) conditions or all the population.
If None, confusion matrix will not be normalized. Options are
"true", "pred", "all", None. Default: None.
Returns:
np.ndarray: Confusion matrix.
"""
if normalize not in ['true', 'pred', 'all', None]:
raise ValueError("normalize must be one of {'true', 'pred', "
"'all', None}")
if isinstance(y_pred, list):
y_pred = np.array(y_pred)
if y_pred.dtype == np.int32:
y_pred = y_pred.astype(np.int64)
if not isinstance(y_pred, np.ndarray):
raise TypeError(
f'y_pred must be list or np.ndarray, but got {type(y_pred)}')
if not y_pred.dtype == np.int64:
raise TypeError(
f'y_pred dtype must be np.int64, but got {y_pred.dtype}')
if isinstance(y_real, list):
y_real = np.array(y_real)
if y_real.dtype == np.int32:
y_real = y_real.astype(np.int64)
if not isinstance(y_real, np.ndarray):
raise TypeError(
f'y_real must be list or np.ndarray, but got {type(y_real)}')
if not y_real.dtype == np.int64:
raise TypeError(
f'y_real dtype must be np.int64, but got {y_real.dtype}')
label_set = np.unique(np.concatenate((y_pred, y_real)))
num_labels = len(label_set)
max_label = label_set[-1]
label_map = np.zeros(max_label + 1, dtype=np.int64)
for i, label in enumerate(label_set):
label_map[label] = i
y_pred_mapped = label_map[y_pred]
y_real_mapped = label_map[y_real]
confusion_mat = np.bincount(
num_labels * y_real_mapped + y_pred_mapped,
minlength=num_labels**2).reshape(num_labels, num_labels)
with np.errstate(all='ignore'):
if normalize == 'true':
confusion_mat = (
confusion_mat / confusion_mat.sum(axis=1, keepdims=True))
elif normalize == 'pred':
confusion_mat = (
confusion_mat / confusion_mat.sum(axis=0, keepdims=True))
elif normalize == 'all':
confusion_mat = (confusion_mat / confusion_mat.sum())
confusion_mat = np.nan_to_num(confusion_mat)
    return confusion_mat | Compute confusion matrix. | confusion_matrix | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/accuracy.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py | Apache-2.0 |
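A small worked example of the label mapping and counting above, a sketch assuming `confusion_matrix` is in scope (e.g. imported from the `accuracy` module listed in this row):

```python
import numpy as np

y_pred = [0, 1, 1, 2]
y_real = [0, 1, 2, 2]
cm = confusion_matrix(y_pred, y_real)
# Rows index ground-truth labels, columns index predictions:
# [[1 0 0]
#  [0 1 0]
#  [0 1 1]]
print(cm)
```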
def mean_class_accuracy(scores, labels):
"""Calculate mean class accuracy.
Args:
scores (list[np.ndarray]): Prediction scores for each class.
labels (list[int]): Ground truth labels.
Returns:
np.ndarray: Mean class accuracy.
"""
pred = np.argmax(scores, axis=1)
cf_mat = confusion_matrix(pred, labels).astype(float)
cls_cnt = cf_mat.sum(axis=1)
cls_hit = np.diag(cf_mat)
mean_class_acc = np.mean(
[hit / cnt if cnt else 0.0 for cnt, hit in zip(cls_cnt, cls_hit)])
    return mean_class_acc | Calculate mean class accuracy. | mean_class_accuracy | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/accuracy.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py | Apache-2.0 |
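A minimal sketch of per-class accuracy averaging, assuming `mean_class_accuracy` (and its `confusion_matrix` dependency above) is in scope:

```python
import numpy as np

scores = [np.array([0.8, 0.2]), np.array([0.3, 0.7]), np.array([0.6, 0.4])]
labels = [0, 1, 1]
# Class 0 is 1/1 correct, class 1 is 1/2 correct -> (1.0 + 0.5) / 2 = 0.75.
print(mean_class_accuracy(scores, labels))  # 0.75
```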
def top_k_classes(scores, labels, k=10, mode='accurate'):
"""Calculate the most K accurate (inaccurate) classes.
Given the prediction scores, ground truth label and top-k value,
compute the top K accurate (inaccurate) classes.
Args:
scores (list[np.ndarray]): Prediction scores for each class.
labels (list[int] | np.ndarray): Ground truth labels.
k (int): Top-k values. Default: 10.
mode (str): Comparison mode for Top-k. Options are 'accurate'
and 'inaccurate'. Default: 'accurate'.
Return:
list: List of sorted (from high accuracy to low accuracy for
'accurate' mode, and from low accuracy to high accuracy for
inaccurate mode) top K classes in format of (label_id,
acc_ratio).
"""
assert mode in ['accurate', 'inaccurate']
pred = np.argmax(scores, axis=1)
cf_mat = confusion_matrix(pred, labels).astype(float)
cls_cnt = cf_mat.sum(axis=1)
cls_hit = np.diag(cf_mat)
hit_ratio = np.array(
[hit / cnt if cnt else 0.0 for cnt, hit in zip(cls_cnt, cls_hit)])
if mode == 'accurate':
max_index = np.argsort(hit_ratio)[-k:][::-1]
max_value = hit_ratio[max_index]
results = list(zip(max_index, max_value))
else:
min_index = np.argsort(hit_ratio)[:k]
min_value = hit_ratio[min_index]
results = list(zip(min_index, min_value))
    return results | Calculate the most K accurate (inaccurate) classes. | top_k_classes | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/accuracy.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py | Apache-2.0 |
def top_k_accuracy(scores, labels, topk=(1, )):
"""Calculate top k accuracy score.
Args:
scores (list[np.ndarray]): Prediction scores for each class.
labels (list[int]): Ground truth labels.
topk (tuple[int]): K value for top_k_accuracy. Default: (1, ).
Returns:
list[float]: Top k accuracy score for each k.
"""
res = []
labels = np.array(labels)[:, np.newaxis]
for k in topk:
max_k_preds = np.argsort(scores, axis=1)[:, -k:][:, ::-1]
match_array = np.logical_or.reduce(max_k_preds == labels, axis=1)
topk_acc_score = match_array.sum() / match_array.shape[0]
res.append(topk_acc_score)
    return res | Calculate top k accuracy score. | top_k_accuracy | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/accuracy.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py | Apache-2.0 |
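A short usage sketch of the top-k computation above, assuming `top_k_accuracy` is in scope:

```python
import numpy as np

scores = [np.array([0.1, 0.2, 0.7]), np.array([0.5, 0.4, 0.1])]
labels = [2, 0]
top1, top2 = top_k_accuracy(scores, labels, topk=(1, 2))
print(top1, top2)  # 1.0 1.0 -- both samples are ranked correctly at k=1
```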
def mmit_mean_average_precision(scores, labels):
"""Mean average precision for multi-label recognition. Used for reporting
MMIT style mAP on Multi-Moments in Times. The difference is that this
method calculates average-precision for each sample and averages them among
samples.
Args:
scores (list[np.ndarray]): Prediction scores of different classes for
each sample.
labels (list[np.ndarray]): Ground truth many-hot vector for each
sample.
Returns:
np.float64: The MMIT style mean average precision.
"""
results = []
for score, label in zip(scores, labels):
precision, recall, _ = binary_precision_recall_curve(score, label)
ap = -np.sum(np.diff(recall) * np.array(precision)[:-1])
results.append(ap)
    return np.mean(results) | Mean average precision for multi-label recognition (MMIT style). | mmit_mean_average_precision | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/accuracy.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py | Apache-2.0 |
def mean_average_precision(scores, labels):
"""Mean average precision for multi-label recognition.
Args:
scores (list[np.ndarray]): Prediction scores of different classes for
each sample.
labels (list[np.ndarray]): Ground truth many-hot vector for each
sample.
Returns:
np.float64: The mean average precision.
"""
results = []
scores = np.stack(scores).T
labels = np.stack(labels).T
for score, label in zip(scores, labels):
precision, recall, _ = binary_precision_recall_curve(score, label)
ap = -np.sum(np.diff(recall) * np.array(precision)[:-1])
results.append(ap)
results = [x for x in results if not np.isnan(x)]
if results == []:
return np.nan
    return np.mean(results) | Mean average precision for multi-label recognition. | mean_average_precision | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/accuracy.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py | Apache-2.0 |
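A minimal sketch of the per-class mAP computation above, assuming `mean_average_precision` and its `binary_precision_recall_curve` helper are in scope:

```python
import numpy as np

scores = [np.array([0.9, 0.2]), np.array([0.3, 0.8])]   # per-sample class scores
labels = [np.array([1, 0]), np.array([0, 1])]           # many-hot ground truth
print(mean_average_precision(scores, labels))  # 1.0 -- each class is ranked perfectly
```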
def binary_precision_recall_curve(y_score, y_true):
"""Calculate the binary precision recall curve at step thresholds.
Args:
y_score (np.ndarray): Prediction scores for each class.
Shape should be (num_classes, ).
y_true (np.ndarray): Ground truth many-hot vector.
Shape should be (num_classes, ).
Returns:
precision (np.ndarray): The precision of different thresholds.
recall (np.ndarray): The recall of different thresholds.
thresholds (np.ndarray): Different thresholds at which precision and
recall are tested.
"""
assert isinstance(y_score, np.ndarray)
assert isinstance(y_true, np.ndarray)
assert y_score.shape == y_true.shape
# make y_true a boolean vector
y_true = (y_true == 1)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind='mergesort')[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
# There may be ties in values, therefore find the `distinct_value_inds`
distinct_value_inds = np.where(np.diff(y_score))[0]
threshold_inds = np.r_[distinct_value_inds, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = np.cumsum(y_true)[threshold_inds]
fps = 1 + threshold_inds - tps
thresholds = y_score[threshold_inds]
precision = tps / (tps + fps)
precision[np.isnan(precision)] = 0
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
    return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl] | Calculate the binary precision recall curve at step thresholds. | binary_precision_recall_curve | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/accuracy.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py | Apache-2.0 |
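A small sketch of the curve construction above, assuming `binary_precision_recall_curve` is in scope:

```python
import numpy as np

y_score = np.array([0.9, 0.4, 0.6, 0.2])
y_true = np.array([1, 0, 1, 0])
precision, recall, thresholds = binary_precision_recall_curve(y_score, y_true)
# Outputs are reversed so recall decreases, with a final (precision=1, recall=0) point.
print(precision, recall, thresholds)
```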
def pairwise_temporal_iou(candidate_segments,
target_segments,
calculate_overlap_self=False):
"""Compute intersection over union between segments.
Args:
candidate_segments (np.ndarray): 1-dim/2-dim array in format
``[init, end]/[m x 2:=[init, end]]``.
target_segments (np.ndarray): 2-dim array in format
``[n x 2:=[init, end]]``.
calculate_overlap_self (bool): Whether to calculate overlap_self
(union / candidate_length) or not. Default: False.
Returns:
t_iou (np.ndarray): 1-dim array [n] /
2-dim array [n x m] with IoU ratio.
t_overlap_self (np.ndarray, optional): 1-dim array [n] /
2-dim array [n x m] with overlap_self, returns when
calculate_overlap_self is True.
"""
candidate_segments_ndim = candidate_segments.ndim
if target_segments.ndim != 2 or candidate_segments_ndim not in [1, 2]:
raise ValueError('Dimension of arguments is incorrect')
if candidate_segments_ndim == 1:
candidate_segments = candidate_segments[np.newaxis, :]
n, m = target_segments.shape[0], candidate_segments.shape[0]
t_iou = np.empty((n, m), dtype=np.float32)
if calculate_overlap_self:
t_overlap_self = np.empty((n, m), dtype=np.float32)
for i in range(m):
candidate_segment = candidate_segments[i, :]
tt1 = np.maximum(candidate_segment[0], target_segments[:, 0])
tt2 = np.minimum(candidate_segment[1], target_segments[:, 1])
# Intersection including Non-negative overlap score.
segments_intersection = (tt2 - tt1).clip(0)
# Segment union.
segments_union = ((target_segments[:, 1] - target_segments[:, 0]) +
(candidate_segment[1] - candidate_segment[0]) -
segments_intersection)
# Compute overlap as the ratio of the intersection
# over union of two segments.
t_iou[:, i] = (segments_intersection.astype(float) / segments_union)
if calculate_overlap_self:
candidate_length = candidate_segment[1] - candidate_segment[0]
t_overlap_self[:, i] = (
segments_intersection.astype(float) / candidate_length)
if candidate_segments_ndim == 1:
t_iou = np.squeeze(t_iou, axis=1)
if calculate_overlap_self:
if candidate_segments_ndim == 1:
t_overlap_self = np.squeeze(t_overlap_self, axis=1)
return t_iou, t_overlap_self
    return t_iou | Compute intersection over union between segments. | pairwise_temporal_iou | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/accuracy.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py | Apache-2.0 |
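A brief sketch of the temporal IoU matrix above, assuming `pairwise_temporal_iou` is in scope:

```python
import numpy as np

candidates = np.array([[0., 5.], [5., 10.]])   # m = 2 proposals
targets = np.array([[0., 10.]])                # n = 1 ground-truth segment
# Each proposal covers half of the target segment, so the (n x m) result is:
print(pairwise_temporal_iou(candidates, targets))  # [[0.5 0.5]]
```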
def average_recall_at_avg_proposals(ground_truth,
proposals,
total_num_proposals,
max_avg_proposals=None,
temporal_iou_thresholds=np.linspace(
0.5, 0.95, 10)):
"""Computes the average recall given an average number (percentile) of
proposals per video.
Args:
ground_truth (dict): Dict containing the ground truth instances.
proposals (dict): Dict containing the proposal instances.
total_num_proposals (int): Total number of proposals in the
proposal dict.
max_avg_proposals (int | None): Max number of proposals for one video.
Default: None.
temporal_iou_thresholds (np.ndarray): 1D array with temporal_iou
thresholds. Default: ``np.linspace(0.5, 0.95, 10)``.
Returns:
tuple([np.ndarray, np.ndarray, np.ndarray, float]):
(recall, average_recall, proposals_per_video, auc)
        In recall, ``recall[i,j]`` is the recall at the i-th temporal_iou
        threshold for the j-th average number (percentile) of proposals
        per video. The average_recall is recall averaged
        over the list of temporal_iou thresholds (1D array). This is
equivalent to ``recall.mean(axis=0)``. The ``proposals_per_video``
is the average number of proposals per video. The auc is the area
under ``AR@AN`` curve.
"""
total_num_videos = len(ground_truth)
if not max_avg_proposals:
max_avg_proposals = float(total_num_proposals) / total_num_videos
ratio = (max_avg_proposals * float(total_num_videos) / total_num_proposals)
# For each video, compute temporal_iou scores among the retrieved proposals
score_list = []
total_num_retrieved_proposals = 0
for video_id in ground_truth:
# Get proposals for this video.
proposals_video_id = proposals[video_id]
this_video_proposals = proposals_video_id[:, :2]
# Sort proposals by score.
sort_idx = proposals_video_id[:, 2].argsort()[::-1]
this_video_proposals = this_video_proposals[sort_idx, :].astype(
np.float32)
# Get ground-truth instances associated to this video.
ground_truth_video_id = ground_truth[video_id]
this_video_ground_truth = ground_truth_video_id[:, :2].astype(
np.float32)
if this_video_proposals.shape[0] == 0:
n = this_video_ground_truth.shape[0]
score_list.append(np.zeros((n, 1)))
continue
if this_video_proposals.ndim != 2:
this_video_proposals = np.expand_dims(this_video_proposals, axis=0)
if this_video_ground_truth.ndim != 2:
this_video_ground_truth = np.expand_dims(
this_video_ground_truth, axis=0)
num_retrieved_proposals = np.minimum(
int(this_video_proposals.shape[0] * ratio),
this_video_proposals.shape[0])
total_num_retrieved_proposals += num_retrieved_proposals
this_video_proposals = this_video_proposals[:
num_retrieved_proposals, :]
# Compute temporal_iou scores.
t_iou = pairwise_temporal_iou(this_video_proposals,
this_video_ground_truth)
score_list.append(t_iou)
# Given that the length of the videos is really varied, we
# compute the number of proposals in terms of a ratio of the total
# proposals retrieved, i.e. average recall at a percentage of proposals
# retrieved per video.
# Computes average recall.
pcn_list = np.arange(1, 101) / 100.0 * (
max_avg_proposals * float(total_num_videos) /
total_num_retrieved_proposals)
matches = np.empty((total_num_videos, pcn_list.shape[0]))
positives = np.empty(total_num_videos)
recall = np.empty((temporal_iou_thresholds.shape[0], pcn_list.shape[0]))
# Iterates over each temporal_iou threshold.
for ridx, temporal_iou in enumerate(temporal_iou_thresholds):
# Inspect positives retrieved per video at different
# number of proposals (percentage of the total retrieved).
for i, score in enumerate(score_list):
# Total positives per video.
positives[i] = score.shape[0]
# Find proposals that satisfies minimum temporal_iou threshold.
true_positives_temporal_iou = score >= temporal_iou
# Get number of proposals as a percentage of total retrieved.
pcn_proposals = np.minimum(
(score.shape[1] * pcn_list).astype(np.int32), score.shape[1])
for j, num_retrieved_proposals in enumerate(pcn_proposals):
# Compute the number of matches
# for each percentage of the proposals
matches[i, j] = np.count_nonzero(
(true_positives_temporal_iou[:, :num_retrieved_proposals]
).sum(axis=1))
# Computes recall given the set of matches per video.
recall[ridx, :] = matches.sum(axis=0) / positives.sum()
# Recall is averaged.
avg_recall = recall.mean(axis=0)
# Get the average number of proposals per video.
proposals_per_video = pcn_list * (
float(total_num_retrieved_proposals) / total_num_videos)
# Get AUC
area_under_curve = np.trapz(avg_recall, proposals_per_video)
auc = 100. * float(area_under_curve) / proposals_per_video[-1]
    return recall, avg_recall, proposals_per_video, auc | Computes the average recall given an average number (percentile) of proposals per video. | average_recall_at_avg_proposals | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/accuracy.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py | Apache-2.0 |
def get_weighted_score(score_list, coeff_list):
"""Get weighted score with given scores and coefficients.
    Given n predictions by different classifiers: [score_1, score_2, ...,
score_n] (score_list) and their coefficients: [coeff_1, coeff_2, ...,
coeff_n] (coeff_list), return weighted score: weighted_score =
score_1 * coeff_1 + score_2 * coeff_2 + ... + score_n * coeff_n
Args:
        score_list (list[list[np.ndarray]]): List of list of scores, with shape
            n (number of predictions) x num_samples x num_classes.
coeff_list (list[float]): List of coefficients, with shape n.
Returns:
list[np.ndarray]: List of weighted scores.
"""
assert len(score_list) == len(coeff_list)
num_samples = len(score_list[0])
for i in range(1, len(score_list)):
assert len(score_list[i]) == num_samples
scores = np.array(score_list) # (num_coeff, num_samples, num_classes)
coeff = np.array(coeff_list) # (num_coeff, )
weighted_scores = list(np.dot(scores.T, coeff).T)
    return weighted_scores | Get weighted score with given scores and coefficients. | get_weighted_score | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/accuracy.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py | Apache-2.0 |
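A minimal late-fusion sketch of the weighted-sum above, assuming `get_weighted_score` is in scope:

```python
import numpy as np

score_a = [np.array([0.6, 0.4]), np.array([0.2, 0.8])]  # classifier A
score_b = [np.array([0.5, 0.5]), np.array([0.9, 0.1])]  # classifier B
fused = get_weighted_score([score_a, score_b], [1.0, 0.5])
print(fused[0])  # [0.85 0.65] == 1.0 * score_a[0] + 0.5 * score_b[0]
```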
def softmax(x, dim=1):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x, axis=dim, keepdims=True))
    return e_x / e_x.sum(axis=dim, keepdims=True) | Compute softmax values for each set of scores in x. | softmax | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/accuracy.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py | Apache-2.0 |
def interpolated_precision_recall(precision, recall):
"""Interpolated AP - VOCdevkit from VOC 2011.
Args:
precision (np.ndarray): The precision of different thresholds.
recall (np.ndarray): The recall of different thresholds.
Returns:
float: Average precision score.
"""
mprecision = np.hstack([[0], precision, [0]])
mrecall = np.hstack([[0], recall, [1]])
for i in range(len(mprecision) - 1)[::-1]:
mprecision[i] = max(mprecision[i], mprecision[i + 1])
idx = np.where(mrecall[1::] != mrecall[0:-1])[0] + 1
ap = np.sum((mrecall[idx] - mrecall[idx - 1]) * mprecision[idx])
    return ap | Interpolated AP - VOCdevkit from VOC 2011. | interpolated_precision_recall | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/accuracy.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py | Apache-2.0 |
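A toy sketch of the interpolation above, assuming `interpolated_precision_recall` is in scope:

```python
import numpy as np

precision = np.array([1.0, 0.5, 0.67])
recall = np.array([0.5, 0.5, 1.0])
# The backward pass makes precision monotonically non-increasing before summing.
print(interpolated_precision_recall(precision, recall))  # 0.835 for this toy curve
```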
def average_precision_at_temporal_iou(ground_truth,
prediction,
temporal_iou_thresholds=(np.linspace(
0.5, 0.95, 10))):
"""Compute average precision (in detection task) between ground truth and
    predicted data frames. If multiple predictions match the same predicted
    segment, only the one with the highest score is matched as a true
    positive. This code is greatly inspired by the Pascal VOC devkit.
Args:
ground_truth (dict): Dict containing the ground truth instances.
Key: 'video_id'
Value (np.ndarray): 1D array of 't-start' and 't-end'.
prediction (np.ndarray): 2D array containing the information of
proposal instances, including 'video_id', 'class_id', 't-start',
't-end' and 'score'.
temporal_iou_thresholds (np.ndarray): 1D array with temporal_iou
thresholds. Default: ``np.linspace(0.5, 0.95, 10)``.
Returns:
np.ndarray: 1D array of average precision score.
"""
ap = np.zeros(len(temporal_iou_thresholds), dtype=np.float32)
if len(prediction) < 1:
return ap
num_gts = 0.
lock_gt = dict()
for key in ground_truth:
lock_gt[key] = np.ones(
(len(temporal_iou_thresholds), len(ground_truth[key]))) * -1
num_gts += len(ground_truth[key])
# Sort predictions by decreasing score order.
prediction = np.array(prediction)
scores = prediction[:, 4].astype(float)
sort_idx = np.argsort(scores)[::-1]
prediction = prediction[sort_idx]
# Initialize true positive and false positive vectors.
tp = np.zeros((len(temporal_iou_thresholds), len(prediction)),
dtype=np.int32)
fp = np.zeros((len(temporal_iou_thresholds), len(prediction)),
dtype=np.int32)
    # Assign true positives to ground truth instances.
for idx, this_pred in enumerate(prediction):
# Check if there is at least one ground truth in the video.
if this_pred[0] in ground_truth:
this_gt = np.array(ground_truth[this_pred[0]], dtype=float)
else:
fp[:, idx] = 1
continue
t_iou = pairwise_temporal_iou(this_pred[2:4].astype(float), this_gt)
# We would like to retrieve the predictions with highest t_iou score.
t_iou_sorted_idx = t_iou.argsort()[::-1]
for t_idx, t_iou_threshold in enumerate(temporal_iou_thresholds):
for jdx in t_iou_sorted_idx:
if t_iou[jdx] < t_iou_threshold:
fp[t_idx, idx] = 1
break
if lock_gt[this_pred[0]][t_idx, jdx] >= 0:
continue
# Assign as true positive after the filters above.
tp[t_idx, idx] = 1
lock_gt[this_pred[0]][t_idx, jdx] = idx
break
if fp[t_idx, idx] == 0 and tp[t_idx, idx] == 0:
fp[t_idx, idx] = 1
tp_cumsum = np.cumsum(tp, axis=1).astype(np.float32)
fp_cumsum = np.cumsum(fp, axis=1).astype(np.float32)
recall_cumsum = tp_cumsum / num_gts
precision_cumsum = tp_cumsum / (tp_cumsum + fp_cumsum)
for t_idx in range(len(temporal_iou_thresholds)):
ap[t_idx] = interpolated_precision_recall(precision_cumsum[t_idx, :],
recall_cumsum[t_idx, :])
    return ap | Compute average precision (in detection task) between ground truth and predicted data frames. | average_precision_at_temporal_iou | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/accuracy.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py | Apache-2.0 |
def area2d_voc(b):
"""Compute the areas for a set of 2D boxes."""
return (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1]) | Compute the areas for a set of 2D boxes. | area2d_voc | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/multisports_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/multisports_utils.py | Apache-2.0 |
def overlap2d_voc(b1, b2):
"""Compute the overlaps between a set of boxes b1 and one box b2."""
xmin = np.maximum(b1[:, 0], b2[:, 0])
ymin = np.maximum(b1[:, 1], b2[:, 1])
xmax = np.minimum(b1[:, 2], b2[:, 2])
ymax = np.minimum(b1[:, 3], b2[:, 3])
width = np.maximum(0, xmax - xmin)
height = np.maximum(0, ymax - ymin)
return width * height | Compute the overlaps between a set of boxes b1 and one box b2. | overlap2d_voc | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/multisports_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/multisports_utils.py | Apache-2.0 |
def iou2d_voc(b1, b2):
"""Compute the IoU between a set of boxes b1 and 1 box b2."""
if b1.ndim == 1:
b1 = b1[None, :]
if b2.ndim == 1:
b2 = b2[None, :]
assert b2.shape[0] == 1
ov = overlap2d_voc(b1, b2)
return ov / (area2d_voc(b1) + area2d_voc(b2) - ov) | Compute the IoU between a set of boxes b1 and 1 box b2. | iou2d_voc | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/multisports_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/multisports_utils.py | Apache-2.0 |
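A quick sketch of the one-to-many box IoU above, assuming `iou2d_voc` (and its helpers) are in scope:

```python
import numpy as np

b1 = np.array([[0., 0., 10., 10.], [5., 5., 15., 15.]])
b2 = np.array([0., 0., 10., 10.])
print(iou2d_voc(b1, b2))  # [1.0, 25/175] -- full overlap and a 5x5 corner overlap
```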
def iou3d_voc(b1, b2):
"""Compute the IoU between two tubes with same temporal extent."""
assert b1.shape[0] == b2.shape[0]
assert np.all(b1[:, 0] == b2[:, 0])
ov = overlap2d_voc(b1[:, 1:5], b2[:, 1:5])
return np.mean(ov / (area2d_voc(b1[:, 1:5]) + area2d_voc(b2[:, 1:5]) - ov)) | Compute the IoU between two tubes with same temporal extent. | iou3d_voc | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/multisports_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/multisports_utils.py | Apache-2.0 |
def iou3dt_voc(b1, b2, spatialonly=False, temporalonly=False):
"""Compute the spatio-temporal IoU between two tubes."""
tmin = max(b1[0, 0], b2[0, 0])
tmax = min(b1[-1, 0], b2[-1, 0])
if tmax < tmin:
return 0.0
temporal_inter = tmax - tmin
temporal_union = max(b1[-1, 0], b2[-1, 0]) - min(b1[0, 0], b2[0, 0])
tube1 = b1[int(np.where(
b1[:, 0] == tmin)[0]):int(np.where(b1[:, 0] == tmax)[0]) + 1, :]
tube2 = b2[int(np.where(
b2[:, 0] == tmin)[0]):int(np.where(b2[:, 0] == tmax)[0]) + 1, :]
if temporalonly:
return temporal_inter / temporal_union
return iou3d_voc(tube1, tube2) * (1. if spatialonly else temporal_inter /
temporal_union) | Compute the spatio-temporal IoU between two tubes. | iou3dt_voc | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/multisports_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/multisports_utils.py | Apache-2.0 |
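A sketch of the spatio-temporal tube IoU above, assuming `iou3dt_voc` is in scope; tubes are arrays of `[frame_idx, x1, y1, x2, y2]` rows, as implied by `iou3d_voc` reading `b[:, 0]` as frames and `b[:, 1:5]` as boxes.

```python
import numpy as np

box = np.array([0., 0., 10., 10.])
tube_a = np.column_stack([np.arange(0, 4), np.tile(box, (4, 1))])  # frames 0-3
tube_b = np.column_stack([np.arange(2, 6), np.tile(box, (4, 1))])  # frames 2-5
# Spatial IoU is 1.0 on the shared frames; temporal ratio is (3 - 2) / (5 - 0).
print(iou3dt_voc(tube_a, tube_b))  # 0.2
```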
def nms_tubelets(dets, overlapThresh=0.3, top_k=None):
"""Compute the NMS for a set of scored tubelets scored tubelets are numpy
array with 4K+1 columns, last one being the score return the indices of the
tubelets to keep."""
# If there are no detections, return an empty list
if len(dets) == 0:
return dets
if top_k is None:
top_k = len(dets)
K = int((dets.shape[1] - 1) / 4)
# Coordinates of bounding boxes
x1 = [dets[:, 4 * k] for k in range(K)]
y1 = [dets[:, 4 * k + 1] for k in range(K)]
x2 = [dets[:, 4 * k + 2] for k in range(K)]
y2 = [dets[:, 4 * k + 3] for k in range(K)]
# Compute the area of the bounding boxes and sort the bounding
# boxes by the bottom-right y-coordinate of the bounding box
# area = (x2 - x1 + 1) * (y2 - y1 + 1)
scores = dets[:, -1]
area = [(x2[k] - x1[k] + 1) * (y2[k] - y1[k] + 1) for k in range(K)]
order = np.argsort(scores)[::-1]
weight = np.zeros_like(scores) + 1
counter = 0
while order.size > 0:
i = order[0]
counter += 1
# Compute overlap
xx1 = [np.maximum(x1[k][i], x1[k][order[1:]]) for k in range(K)]
yy1 = [np.maximum(y1[k][i], y1[k][order[1:]]) for k in range(K)]
xx2 = [np.minimum(x2[k][i], x2[k][order[1:]]) for k in range(K)]
yy2 = [np.minimum(y2[k][i], y2[k][order[1:]]) for k in range(K)]
w = [np.maximum(0, xx2[k] - xx1[k] + 1) for k in range(K)]
h = [np.maximum(0, yy2[k] - yy1[k] + 1) for k in range(K)]
inter_area = [w[k] * h[k] for k in range(K)]
ious = sum([
inter_area[k] / (area[k][order[1:]] + area[k][i] - inter_area[k])
for k in range(K)
])
index = np.where(ious > overlapThresh * K)[0]
weight[order[index + 1]] = 1 - ious[index]
index2 = np.where(ious <= overlapThresh * K)[0]
order = order[index2 + 1]
dets[:, -1] = dets[:, -1] * weight
new_scores = dets[:, -1]
new_order = np.argsort(new_scores)[::-1]
dets = dets[new_order, :]
    return dets[:top_k, :] | Compute NMS for a set of scored tubelets. | nms_tubelets | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/multisports_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/multisports_utils.py | Apache-2.0 |
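A minimal sketch of the tubelet NMS above with K = 1 box per tubelet, assuming `nms_tubelets` is in scope:

```python
import numpy as np

# K = 1 box per tubelet: columns are [x1, y1, x2, y2, score].
dets = np.array([[0., 0., 10., 10., 0.9],
                 [1., 1., 11., 11., 0.8],     # heavily overlaps the first tubelet
                 [50., 50., 60., 60., 0.7]], dtype=np.float32)
kept = nms_tubelets(dets.copy(), overlapThresh=0.3, top_k=2)  # copy: dets is modified in place
print(kept[:, -1])  # the overlapping tubelet is down-weighted, so the far box survives
```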
def _import_ground_truth(ground_truth_filename):
"""Read ground truth file and return the ground truth instances and the
activity classes.
Args:
ground_truth_filename (str): Full path to the ground truth json
file.
Returns:
tuple[list, dict]: (ground_truth, activity_index).
ground_truth contains the ground truth instances, which is in a
dict format.
activity_index contains classes index.
"""
with open(ground_truth_filename, 'r') as f:
data = json.load(f)
# Checking format
activity_index, class_idx = {}, 0
ground_truth = []
for video_id, video_info in data.items():
for anno in video_info['annotations']:
if anno['label'] not in activity_index:
activity_index[anno['label']] = class_idx
class_idx += 1
# old video_anno
ground_truth_item = {}
ground_truth_item['video-id'] = video_id[2:]
ground_truth_item['t-start'] = float(anno['segment'][0])
ground_truth_item['t-end'] = float(anno['segment'][1])
ground_truth_item['label'] = activity_index[anno['label']]
ground_truth.append(ground_truth_item)
    return ground_truth, activity_index | Read ground truth file and return the ground truth instances and the activity classes. | _import_ground_truth | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/eval_detection.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/eval_detection.py | Apache-2.0 |
def _import_prediction(self, prediction_filename):
"""Read prediction file and return the prediction instances.
Args:
prediction_filename (str): Full path to the prediction json file.
Returns:
List: List containing the prediction instances (dictionaries).
"""
with open(prediction_filename, 'r') as f:
data = json.load(f)
# Read predictions.
prediction = []
for video_id, video_info in data['results'].items():
for result in video_info:
prediction_item = dict()
prediction_item['video-id'] = video_id
prediction_item['label'] = self.activity_index[result['label']]
prediction_item['t-start'] = float(result['segment'][0])
prediction_item['t-end'] = float(result['segment'][1])
prediction_item['score'] = result['score']
prediction.append(prediction_item)
        return prediction | Read prediction file and return the prediction instances. | _import_prediction | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/eval_detection.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/eval_detection.py | Apache-2.0 |
def wrapper_compute_average_precision(self):
"""Computes average precision for each class."""
ap = np.zeros((len(self.tiou_thresholds), len(self.activity_index)))
# Adaptation to query faster
ground_truth_by_label = []
prediction_by_label = []
for i in range(len(self.activity_index)):
ground_truth_by_label.append([])
prediction_by_label.append([])
for gt in self.ground_truth:
ground_truth_by_label[gt['label']].append(gt)
for pred in self.prediction:
prediction_by_label[pred['label']].append(pred)
for i in range(len(self.activity_index)):
ap_result = compute_average_precision_detection(
ground_truth_by_label[i], prediction_by_label[i],
self.tiou_thresholds)
ap[:, i] = ap_result
return ap | Computes average precision for each class. | wrapper_compute_average_precision | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/eval_detection.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/eval_detection.py | Apache-2.0 |
def evaluate(self):
"""Evaluates a prediction file.
For the detection task we measure the interpolated mean average
precision to measure the performance of a method.
"""
self.ap = self.wrapper_compute_average_precision()
self.mAP = self.ap.mean(axis=1)
self.average_mAP = self.mAP.mean()
        return self.mAP, self.average_mAP | Evaluates a prediction file. | evaluate | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/eval_detection.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/eval_detection.py | Apache-2.0 |
def compute_average_precision_detection(ground_truth,
prediction,
tiou_thresholds=np.linspace(
0.5, 0.95, 10)):
"""Compute average precision (detection task) between ground truth and
    prediction data frames. If multiple predictions occur for the same
    predicted segment, only the one with the highest score is matched as a
    true positive. This code is greatly inspired by the Pascal VOC devkit.
Args:
ground_truth (list[dict]): List containing the ground truth instances
(dictionaries). Required keys are 'video-id', 't-start' and
't-end'.
prediction (list[dict]): List containing the prediction instances
(dictionaries). Required keys are: 'video-id', 't-start', 't-end'
and 'score'.
tiou_thresholds (np.ndarray): A 1darray indicates the temporal
intersection over union threshold, which is optional.
Default: ``np.linspace(0.5, 0.95, 10)``.
Returns:
Float: ap, Average precision score.
"""
num_thresholds = len(tiou_thresholds)
num_gts = len(ground_truth)
num_preds = len(prediction)
ap = np.zeros(num_thresholds)
if len(prediction) == 0:
return ap
num_positive = float(num_gts)
lock_gt = np.ones((num_thresholds, num_gts)) * -1
# Sort predictions by decreasing score order.
prediction.sort(key=lambda x: -x['score'])
# Initialize true positive and false positive vectors.
tp = np.zeros((num_thresholds, num_preds))
fp = np.zeros((num_thresholds, num_preds))
# Adaptation to query faster
ground_truth_by_videoid = {}
for i, item in enumerate(ground_truth):
item['index'] = i
ground_truth_by_videoid.setdefault(item['video-id'], []).append(item)
    # Assign true positives to ground truth instances.
for idx, pred in enumerate(prediction):
if pred['video-id'] in ground_truth_by_videoid:
gts = ground_truth_by_videoid[pred['video-id']]
else:
fp[:, idx] = 1
continue
tiou_arr = pairwise_temporal_iou(
np.array([pred['t-start'], pred['t-end']]),
np.array([np.array([gt['t-start'], gt['t-end']]) for gt in gts]))
tiou_arr = tiou_arr.reshape(-1)
# We would like to retrieve the predictions with highest tiou score.
tiou_sorted_idx = tiou_arr.argsort()[::-1]
for t_idx, tiou_threshold in enumerate(tiou_thresholds):
for j_idx in tiou_sorted_idx:
if tiou_arr[j_idx] < tiou_threshold:
fp[t_idx, idx] = 1
break
if lock_gt[t_idx, gts[j_idx]['index']] >= 0:
continue
# Assign as true positive after the filters above.
tp[t_idx, idx] = 1
lock_gt[t_idx, gts[j_idx]['index']] = idx
break
if fp[t_idx, idx] == 0 and tp[t_idx, idx] == 0:
fp[t_idx, idx] = 1
tp_cumsum = np.cumsum(tp, axis=1).astype(np.float64)
fp_cumsum = np.cumsum(fp, axis=1).astype(np.float64)
recall_cumsum = tp_cumsum / num_positive
precision_cumsum = tp_cumsum / (tp_cumsum + fp_cumsum)
for t_idx in range(len(tiou_thresholds)):
ap[t_idx] = interpolated_precision_recall(precision_cumsum[t_idx, :],
recall_cumsum[t_idx, :])
    return ap | Compute average precision (detection task) between ground truth and prediction data frames. | compute_average_precision_detection | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/eval_detection.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/eval_detection.py | Apache-2.0 |
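A small sketch of the detection AP computation above, assuming `compute_average_precision_detection` and `pairwise_temporal_iou` are in scope:

```python
import numpy as np

ground_truth = [{'video-id': 'v1', 't-start': 0.0, 't-end': 10.0}]
prediction = [{'video-id': 'v1', 't-start': 0.0, 't-end': 9.0, 'score': 0.9}]
ap = compute_average_precision_detection(
    ground_truth, prediction, tiou_thresholds=np.array([0.5, 0.75]))
print(ap)  # [1. 1.] -- the single detection matches the ground truth at both tIoUs
```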
def det2csv(results, custom_classes):
"""Convert detection results to csv file."""
csv_results = []
for idx in range(len(results)):
video_id = results[idx]['video_id']
timestamp = results[idx]['timestamp']
result = results[idx]['outputs']
for label, _ in enumerate(result):
for bbox in result[label]:
bbox_ = tuple(bbox.tolist())
if custom_classes is not None:
actual_label = custom_classes[label + 1]
else:
actual_label = label + 1
csv_results.append((
video_id,
timestamp,
) + bbox_[:4] + (actual_label, ) + bbox_[4:])
return csv_results | Convert detection results to csv file. | det2csv | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/ava_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_utils.py | Apache-2.0 |
def results2csv(results, out_file, custom_classes=None):
"""Convert detection results to csv file."""
csv_results = det2csv(results, custom_classes)
# save space for float
def to_str(item):
if isinstance(item, float):
return f'{item:.4f}'
return str(item)
with open(out_file, 'w') as f:
for csv_result in csv_results:
f.write(','.join(map(to_str, csv_result)))
f.write('\n') | Convert detection results to csv file. | results2csv | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/ava_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_utils.py | Apache-2.0 |
def print_time(message, start):
"""Print processing time."""
print('==> %g seconds to %s' % (time.time() - start, message), flush=True) | Print processing time. | print_time | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/ava_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_utils.py | Apache-2.0 |
def make_image_key(video_id, timestamp):
"""Returns a unique identifier for a video id & timestamp."""
return f'{video_id},{int(timestamp):04d}' | Returns a unique identifier for a video id & timestamp. | make_image_key | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/ava_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_utils.py | Apache-2.0 |
def read_csv(csv_file, class_whitelist=None):
"""Loads boxes and class labels from a CSV file in the AVA format.
CSV file format described at https://research.google.com/ava/download.html.
Args:
csv_file: A file object.
class_whitelist: If provided, boxes corresponding to (integer) class
labels not in this set are skipped.
Returns:
boxes: A dictionary mapping each unique image key (string) to a list of
boxes, given as coordinates [y1, x1, y2, x2].
labels: A dictionary mapping each unique image key (string) to a list
of integer class labels, matching the corresponding box in `boxes`.
scores: A dictionary mapping each unique image key (string) to a list
of score values labels, matching the corresponding label in `labels`.
If scores are not provided in the csv, then they will default to 1.0.
"""
entries = defaultdict(list)
boxes = defaultdict(list)
labels = defaultdict(list)
scores = defaultdict(list)
reader = csv.reader(csv_file)
for row in reader:
        assert len(row) in [7, 8], f'Wrong number of columns: {row}'
image_key = make_image_key(row[0], row[1])
x1, y1, x2, y2 = [float(n) for n in row[2:6]]
action_id = int(row[6])
if class_whitelist and action_id not in class_whitelist:
continue
score = 1.0
if len(row) == 8:
score = float(row[7])
entries[image_key].append((score, action_id, y1, x1, y2, x2))
for image_key in entries:
# Evaluation API assumes boxes with descending scores
entry = sorted(entries[image_key], key=lambda tup: -tup[0])
boxes[image_key] = [x[2:] for x in entry]
labels[image_key] = [x[1] for x in entry]
scores[image_key] = [x[0] for x in entry]
    return boxes, labels, scores | Loads boxes and class labels from a CSV file in the AVA format. | read_csv | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/ava_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_utils.py | Apache-2.0 |
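A short sketch of the AVA-format parsing above using an in-memory file, assuming `read_csv` and `make_image_key` are in scope:

```python
import io

csv_text = 'vid001,0902,0.1,0.2,0.8,0.9,12,0.97\n'
boxes, labels, scores = read_csv(io.StringIO(csv_text))
print(boxes['vid001,0902'])   # [(0.2, 0.1, 0.9, 0.8)] -> [y1, x1, y2, x2]
print(labels['vid001,0902'])  # [12]
print(scores['vid001,0902'])  # [0.97]
```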
def read_exclusions(exclusions_file):
"""Reads a CSV file of excluded timestamps.
Args:
exclusions_file: A file object containing a csv of video-id,timestamp.
Returns:
A set of strings containing excluded image keys, e.g.
"aaaaaaaaaaa,0904",
or an empty set if exclusions file is None.
"""
excluded = set()
if exclusions_file:
reader = csv.reader(exclusions_file)
for row in reader:
assert len(row) == 2, f'Expected only 2 columns, got: {row}'
excluded.add(make_image_key(row[0], row[1]))
    return excluded | Reads a CSV file of excluded timestamps. | read_exclusions | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/ava_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_utils.py | Apache-2.0 |
def read_labelmap(labelmap_file):
"""Reads a labelmap without the dependency on protocol buffers.
Args:
labelmap_file: A file object containing a label map protocol buffer.
Returns:
labelmap: The label map in the form used by the
object_detection_evaluation
module - a list of {"id": integer, "name": classname } dicts.
class_ids: A set containing all of the valid class id integers.
"""
labelmap = []
class_ids = set()
name = ''
class_id = ''
for line in labelmap_file:
if line.startswith(' name:'):
name = line.split('"')[1]
elif line.startswith(' id:') or line.startswith(' label_id:'):
class_id = int(line.strip().split(' ')[-1])
labelmap.append({'id': class_id, 'name': name})
class_ids.add(class_id)
    return labelmap, class_ids | Reads a labelmap without the dependency on protocol buffers. | read_labelmap | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/ava_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_utils.py | Apache-2.0 |
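A brief sketch of the pbtxt-style parsing above on an in-memory snippet, assuming `read_labelmap` is in scope:

```python
import io

labelmap_text = io.StringIO('item {\n'
                            '  name: "bend/bow (at the waist)"\n'
                            '  id: 1\n'
                            '}\n')
labelmap, class_ids = read_labelmap(labelmap_text)
print(labelmap)   # [{'id': 1, 'name': 'bend/bow (at the waist)'}]
print(class_ids)  # {1}
```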
def ava_eval(result_file,
result_type,
label_file,
ann_file,
exclude_file,
verbose=True,
ignore_empty_frames=True,
custom_classes=None):
"""Perform ava evaluation."""
assert result_type in ['mAP']
start = time.time()
categories, class_whitelist = read_labelmap(open(label_file))
if custom_classes is not None:
custom_classes = custom_classes[1:]
assert set(custom_classes).issubset(set(class_whitelist))
class_whitelist = custom_classes
categories = [cat for cat in categories if cat['id'] in custom_classes]
# loading gt, do not need gt score
gt_bboxes, gt_labels, _ = read_csv(open(ann_file), class_whitelist)
if verbose:
print_time('Reading GT results', start)
if exclude_file is not None:
excluded_keys = read_exclusions(open(exclude_file))
else:
excluded_keys = list()
start = time.time()
boxes, labels, scores = read_csv(open(result_file), class_whitelist)
if verbose:
print_time('Reading Detection results', start)
start = time.time()
all_gt_labels = np.concatenate(list(gt_labels.values()))
gt_count = {k: np.sum(all_gt_labels == k) for k in class_whitelist}
pool = multiprocessing.Pool(32)
if ignore_empty_frames:
tups = [(gt_bboxes[k], gt_labels[k], boxes[k], labels[k], scores[k])
for k in gt_bboxes if k not in excluded_keys]
else:
tups = [(gt_bboxes.get(k, np.zeros((0, 4), dtype=np.float32)),
gt_labels.get(k, []), boxes[k], labels[k], scores[k])
for k in boxes if k not in excluded_keys]
rets = pool.map(tpfp_single, tups)
if verbose:
print_time('Calculating TP/FP', start)
start = time.time()
scores, tpfps = defaultdict(list), defaultdict(list)
for score, tpfp in rets:
for k in score:
scores[k].append(score[k])
tpfps[k].append(tpfp[k])
cls_AP = []
for k in scores:
scores[k] = np.concatenate(scores[k])
tpfps[k] = np.concatenate(tpfps[k])
precision, recall = metrics.compute_precision_recall(
scores[k], tpfps[k], gt_count[k])
ap = metrics.compute_average_precision(precision, recall)
class_name = [x['name'] for x in categories if x['id'] == k]
assert len(class_name) == 1
class_name = class_name[0]
cls_AP.append((k, class_name, ap))
if verbose:
print_time('Run Evaluator', start)
print('Per-class results: ', flush=True)
for k, class_name, ap in cls_AP:
print(f'Index: {k}, Action: {class_name}: AP: {ap:.4f};', flush=True)
overall = np.nanmean([x[2] for x in cls_AP])
person_movement = np.nanmean([x[2] for x in cls_AP if x[0] <= 14])
object_manipulation = np.nanmean([x[2] for x in cls_AP if 14 < x[0] < 64])
person_interaction = np.nanmean([x[2] for x in cls_AP if 64 <= x[0]])
print('Overall Results: ', flush=True)
print(f'Overall mAP: {overall:.4f}', flush=True)
print(f'Person Movement mAP: {person_movement:.4f}', flush=True)
print(f'Object Manipulation mAP: {object_manipulation:.4f}', flush=True)
print(f'Person Interaction mAP: {person_interaction:.4f}', flush=True)
results = {}
results['overall'] = overall
results['person_movement'] = person_movement
results['object_manipulation'] = object_manipulation
results['person_interaction'] = person_interaction
if verbose:
for k, class_name, ap in cls_AP:
print(f'Class {class_name} AP: {ap:.4f}', flush=True)
return results | Perform ava evaluation. | ava_eval | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/ava_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_utils.py | Apache-2.0 |
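A hedged usage sketch for ava_eval; the file names below are placeholders for the official AVA annotation files plus a detection result CSV in the standard AVA format, and the call prints the per-class APs before returning the grouped mAPs:

from mmaction.evaluation.functional.ava_utils import ava_eval

# Placeholder paths; real runs point at the official AVA annotation files.
results = ava_eval(
    result_file='my_ava_detections.csv',
    result_type='mAP',
    label_file='ava_action_list.pbtxt',
    ann_file='ava_val_groundtruth.csv',
    exclude_file='ava_val_excluded_timestamps.csv')
# results -> {'overall': ..., 'person_movement': ...,
#             'object_manipulation': ..., 'person_interaction': ...}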
def __init__(self, data):
"""Constructs box collection.
Args:
data: a numpy array of shape [N, 4] representing box coordinates
Raises:
ValueError: if bbox data is not a numpy array
ValueError: if invalid dimensions for bbox data
"""
if not isinstance(data, np.ndarray):
raise ValueError('data must be a numpy array.')
if len(data.shape) != 2 or data.shape[1] != 4:
raise ValueError('Invalid dimensions for box data.')
if data.dtype != np.float32 and data.dtype != np.float64:
raise ValueError(
'Invalid data type for box data: float is required.')
if not self._is_valid_boxes(data):
raise ValueError('Invalid box data. data must be a numpy array of '
'N*[y_min, x_min, y_max, x_max]')
self.data = {'boxes': data} | Constructs box collection.
Args:
data: a numpy array of shape [N, 4] representing box coordinates
Raises:
ValueError: if bbox data is not a numpy array
ValueError: if invalid dimensions for bbox data | __init__ | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/ava_evaluation/np_box_list.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_list.py | Apache-2.0 |
def num_boxes(self):
"""Return number of boxes held in collections."""
return self.data['boxes'].shape[0] | Return number of boxes held in collections. | num_boxes | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/ava_evaluation/np_box_list.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_list.py | Apache-2.0 |
def get_extra_fields(self):
"""Return all non-box fields."""
return [k for k in self.data if k != 'boxes'] | Return all non-box fields. | get_extra_fields | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/ava_evaluation/np_box_list.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_list.py | Apache-2.0 |
def add_field(self, field, field_data):
"""Add data to a specified field.
Args:
field: a string parameter used to specify a related field to be
accessed.
field_data: a numpy array of [N, ...] representing the data
associated with the field.
Raises:
            ValueError: if the field already exists or the dimension of the
                field data does not match the number of boxes.
"""
if self.has_field(field):
            raise ValueError('Field ' + field + ' already exists')
if len(field_data.shape) < 1 or field_data.shape[0] != self.num_boxes(
):
raise ValueError('Invalid dimensions for field data')
self.data[field] = field_data | Add data to a specified field.
Args:
field: a string parameter used to specify a related field to be
accessed.
field_data: a numpy array of [N, ...] representing the data
associated with the field.
Raises:
            ValueError: if the field already exists or the dimension of the
                field data does not match the number of boxes. | add_field | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/ava_evaluation/np_box_list.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_list.py | Apache-2.0
def get(self):
"""Convenience function for accesssing box coordinates.
Returns:
a numpy array of shape [N, 4] representing box corners
"""
        return self.get_field('boxes') | Convenience function for accessing box coordinates.
Returns:
a numpy array of shape [N, 4] representing box corners | get | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/ava_evaluation/np_box_list.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_list.py | Apache-2.0 |
def get_field(self, field):
"""Accesses data associated with the specified field in the box
collection.
Args:
field: a string parameter used to specify a related field to be
accessed.
Returns:
a numpy 1-d array representing data of an associated field
Raises:
ValueError: if invalid field
"""
if not self.has_field(field):
raise ValueError(f'field {field} does not exist')
return self.data[field] | Accesses data associated with the specified field in the box
collection.
Args:
field: a string parameter used to specify a related field to be
accessed.
Returns:
a numpy 1-d array representing data of an associated field
Raises:
ValueError: if invalid field | get_field | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/ava_evaluation/np_box_list.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_list.py | Apache-2.0 |
def get_coordinates(self):
"""Get corner coordinates of boxes.
Returns:
a list of 4 1-d numpy arrays [y_min, x_min, y_max, x_max]
"""
box_coordinates = self.get()
y_min = box_coordinates[:, 0]
x_min = box_coordinates[:, 1]
y_max = box_coordinates[:, 2]
x_max = box_coordinates[:, 3]
return [y_min, x_min, y_max, x_max] | Get corner coordinates of boxes.
Returns:
a list of 4 1-d numpy arrays [y_min, x_min, y_max, x_max] | get_coordinates | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/ava_evaluation/np_box_list.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_list.py | Apache-2.0 |
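A small usage sketch of the box collection above, assuming the class in np_box_list.py is exported as BoxList (as in the original AVA evaluation code); boxes are float arrays in [y_min, x_min, y_max, x_max] order:

import numpy as np
from mmaction.evaluation.functional.ava_evaluation.np_box_list import BoxList

boxes = np.array([[0., 0., 10., 10.],
                  [2., 2., 8., 12.]], dtype=np.float32)
box_list = BoxList(boxes)
box_list.add_field('scores', np.array([0.9, 0.75], dtype=np.float32))
print(box_list.num_boxes())           # 2
print(box_list.get_extra_fields())    # ['scores']
y_min, x_min, y_max, x_max = box_list.get_coordinates()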
def _is_valid_boxes(data):
"""Check whether data fulfills the format of N*[ymin, xmin, ymax,
        xmax].
Args:
data: a numpy array of shape [N, 4] representing box coordinates
Returns:
a boolean indicating whether all ymax of boxes are equal or greater
than ymin, and all xmax of boxes are equal or greater than xmin.
"""
if len(data) != 0:
for v in data:
if v[0] > v[2] or v[1] > v[3]:
return False
return True | Check whether data fulfills the format of N*[ymin, xmin, ymax,
        xmax].
Args:
data: a numpy array of shape [N, 4] representing box coordinates
Returns:
a boolean indicating whether all ymax of boxes are equal or greater
than ymin, and all xmax of boxes are equal or greater than xmin. | _is_valid_boxes | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/ava_evaluation/np_box_list.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_list.py | Apache-2.0 |
def area(boxes):
"""Computes area of boxes.
Args:
boxes: Numpy array with shape [N, 4] holding N boxes
Returns:
a numpy array with shape [N*1] representing box areas
"""
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) | Computes area of boxes.
Args:
boxes: Numpy array with shape [N, 4] holding N boxes
Returns:
a numpy array with shape [N*1] representing box areas | area | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/ava_evaluation/np_box_ops.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_ops.py | Apache-2.0 |
def intersection(boxes1, boxes2):
"""Compute pairwise intersection areas between boxes.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes
boxes2: a numpy array with shape [M, 4] holding M boxes
Returns:
a numpy array with shape [N*M] representing pairwise intersection area
"""
[y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
[y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)
all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
intersect_heights = np.maximum(
np.zeros(all_pairs_max_ymin.shape),
all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
intersect_widths = np.maximum(
np.zeros(all_pairs_max_xmin.shape),
all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths | Compute pairwise intersection areas between boxes.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes
boxes2: a numpy array with shape [M, 4] holding M boxes
Returns:
a numpy array with shape [N*M] representing pairwise intersection area | intersection | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/ava_evaluation/np_box_ops.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_ops.py | Apache-2.0 |
def iou(boxes1, boxes2):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
        boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
"""
intersect = intersection(boxes1, boxes2)
area1 = area(boxes1)
area2 = area(boxes2)
union = (
np.expand_dims(area1, axis=1) + np.expand_dims(area2, axis=0) -
intersect)
return intersect / union | Computes pairwise intersection-over-union between box collections.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
        boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores. | iou | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/ava_evaluation/np_box_ops.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_ops.py | Apache-2.0 |
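A worked IoU example, assuming np_box_ops.py is importable from the path shown above; boxes are [y_min, x_min, y_max, x_max], each has area 4, and they overlap in a 1x1 square:

import numpy as np
from mmaction.evaluation.functional.ava_evaluation.np_box_ops import iou

boxes1 = np.array([[0., 0., 2., 2.]])   # area 4
boxes2 = np.array([[1., 1., 3., 3.]])   # area 4, 1x1 overlap with boxes1
print(iou(boxes1, boxes2))              # [[0.14285714]], i.e. 1 / (4 + 4 - 1)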
def ioa(boxes1, boxes2):
"""Computes pairwise intersection-over-area between box collections.
Intersection-over-area (ioa) between two boxes box1 and box2 is defined as
their intersection area over box2's area. Note that ioa is not symmetric,
that is, IOA(box1, box2) != IOA(box2, box1).
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
        boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise ioa scores.
"""
intersect = intersection(boxes1, boxes2)
areas = np.expand_dims(area(boxes2), axis=0)
return intersect / areas | Computes pairwise intersection-over-area between box collections.
Intersection-over-area (ioa) between two boxes box1 and box2 is defined as
their intersection area over box2's area. Note that ioa is not symmetric,
that is, IOA(box1, box2) != IOA(box2, box1).
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
        boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise ioa scores. | ioa | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/ava_evaluation/np_box_ops.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_ops.py | Apache-2.0 |
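A short sketch of the asymmetry of ioa (same import assumption as in the IoU example): the shared area is divided by the area of the second box collection only:

import numpy as np
from mmaction.evaluation.functional.ava_evaluation.np_box_ops import ioa

big = np.array([[0., 0., 2., 2.]])     # area 4
small = np.array([[0., 0., 1., 1.]])   # area 1, fully inside `big`
print(ioa(big, small))                 # [[1.0]]  -> intersection / area(small)
print(ioa(small, big))                 # [[0.25]] -> intersection / area(big)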
def compute_precision_recall(scores, labels, num_gt):
"""Compute precision and recall.
Args:
scores: A float numpy array representing detection score
labels: A boolean numpy array representing true/false positive labels
num_gt: Number of ground truth instances
Raises:
ValueError: if the input is not of the correct format
Returns:
precision: Fraction of positive instances over detected ones. This
value is None if no ground truth labels are present.
recall: Fraction of detected positive instance over all positive
instances. This value is None if no ground truth labels are
present.
"""
if (not isinstance(labels, np.ndarray) or labels.dtype != bool
or len(labels.shape) != 1):
raise ValueError('labels must be single dimension bool numpy array')
if not isinstance(scores, np.ndarray) or len(scores.shape) != 1:
raise ValueError('scores must be single dimension numpy array')
if num_gt < np.sum(labels):
raise ValueError(
            'Number of true positives must not exceed num_gt.')
if len(scores) != len(labels):
raise ValueError('scores and labels must be of the same size.')
if num_gt == 0:
return None, None
sorted_indices = np.argsort(scores)
sorted_indices = sorted_indices[::-1]
labels = labels.astype(int)
true_positive_labels = labels[sorted_indices]
false_positive_labels = 1 - true_positive_labels
cum_true_positives = np.cumsum(true_positive_labels)
cum_false_positives = np.cumsum(false_positive_labels)
precision = cum_true_positives.astype(float) / (
cum_true_positives + cum_false_positives)
recall = cum_true_positives.astype(float) / num_gt
return precision, recall | Compute precision and recall.
Args:
scores: A float numpy array representing detection score
labels: A boolean numpy array representing true/false positive labels
num_gt: Number of ground truth instances
Raises:
ValueError: if the input is not of the correct format
Returns:
precision: Fraction of positive instances over detected ones. This
value is None if no ground truth labels are present.
recall: Fraction of detected positive instance over all positive
instances. This value is None if no ground truth labels are
present. | compute_precision_recall | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/ava_evaluation/metrics.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/metrics.py | Apache-2.0 |
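A worked example, assuming metrics.py is importable from the path shown above; with four detections (two true positives) and three ground-truth instances, compute_precision_recall ranks by score and accumulates:

import numpy as np
from mmaction.evaluation.functional.ava_evaluation.metrics import (
    compute_precision_recall)

scores = np.array([0.9, 0.8, 0.7, 0.6])
labels = np.array([True, False, True, False])  # TP, FP, TP, FP in score order
precision, recall = compute_precision_recall(scores, labels, num_gt=3)
# precision -> [1.0, 0.5, 0.6667, 0.5]
# recall    -> [0.3333, 0.3333, 0.6667, 0.6667]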
def compute_average_precision(precision, recall):
"""Compute Average Precision according to the definition in VOCdevkit.
Precision is modified to ensure that it does not decrease as recall
    decreases.
Args:
precision: A float [N, 1] numpy array of precisions
recall: A float [N, 1] numpy array of recalls
Raises:
ValueError: if the input is not of the correct format
Returns:
        average_precision: The area under the precision recall curve. NaN if
precision and recall are None.
"""
if precision is None:
if recall is not None:
raise ValueError('If precision is None, recall must also be None')
        return np.nan
if not isinstance(precision, np.ndarray) or not isinstance(
recall, np.ndarray):
raise ValueError('precision and recall must be numpy array')
if precision.dtype != np.float64 or recall.dtype != np.float64:
raise ValueError('input must be float numpy array.')
if len(precision) != len(recall):
raise ValueError('precision and recall must be of the same size.')
if not precision.size:
return 0.0
if np.amin(precision) < 0 or np.amax(precision) > 1:
raise ValueError('Precision must be in the range of [0, 1].')
if np.amin(recall) < 0 or np.amax(recall) > 1:
raise ValueError('recall must be in the range of [0, 1].')
if not all(recall[i] <= recall[i + 1] for i in range(len(recall) - 1)):
raise ValueError('recall must be a non-decreasing array')
recall = np.concatenate([[0], recall, [1]])
precision = np.concatenate([[0], precision, [0]])
# Preprocess precision to be a non-decreasing array
for i in range(len(precision) - 2, -1, -1):
precision[i] = np.maximum(precision[i], precision[i + 1])
indices = np.where(recall[1:] != recall[:-1])[0] + 1
average_precision = np.sum(
(recall[indices] - recall[indices - 1]) * precision[indices])
return average_precision | Compute Average Precision according to the definition in VOCdevkit.
Precision is modified to ensure that it does not decrease as recall
    decreases.
Args:
precision: A float [N, 1] numpy array of precisions
recall: A float [N, 1] numpy array of recalls
Raises:
ValueError: if the input is not of the correct format
Returns:
        average_precision: The area under the precision recall curve. NaN if
precision and recall are None. | compute_average_precision | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/ava_evaluation/metrics.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/metrics.py | Apache-2.0 |
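Continuing the previous sketch, compute_average_precision turns those precision/recall arrays into the VOC-style interpolated area under the curve:

import numpy as np
from mmaction.evaluation.functional.ava_evaluation.metrics import (
    compute_average_precision, compute_precision_recall)

scores = np.array([0.9, 0.8, 0.7, 0.6])
labels = np.array([True, False, True, False])
precision, recall = compute_precision_recall(scores, labels, num_gt=3)
ap = compute_average_precision(precision, recall)
# After padding the endpoints and making precision non-increasing, the summed
# area is (1/3) * 1 + (1/3) * (2/3) + (1/3) * 0 = 5/9, so ap is about 0.556.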
def compute_cor_loc(num_gt_imgs_per_class,
num_images_correctly_detected_per_class):
"""Compute CorLoc according to the definition in the following paper.
https://www.robots.ox.ac.uk/~vgg/rg/papers/deselaers-eccv10.pdf
Returns nans if there are no ground truth images for a class.
Args:
num_gt_imgs_per_class: 1D array, representing number of images
containing at least one object instance of a particular class
num_images_correctly_detected_per_class: 1D array, representing number
of images that are correctly detected at least one object instance
of a particular class
Returns:
corloc_per_class: A float numpy array represents the corloc score of
each class
"""
# Divide by zero expected for classes with no gt examples.
with np.errstate(divide='ignore', invalid='ignore'):
return np.where(
num_gt_imgs_per_class == 0, np.nan,
num_images_correctly_detected_per_class / num_gt_imgs_per_class) | Compute CorLoc according to the definition in the following paper.
https://www.robots.ox.ac.uk/~vgg/rg/papers/deselaers-eccv10.pdf
Returns nans if there are no ground truth images for a class.
Args:
num_gt_imgs_per_class: 1D array, representing number of images
containing at least one object instance of a particular class
num_images_correctly_detected_per_class: 1D array, representing number
of images that are correctly detected at least one object instance
of a particular class
Returns:
corloc_per_class: A float numpy array represents the corloc score of
each class | compute_cor_loc | python | open-mmlab/mmaction2 | mmaction/evaluation/functional/ava_evaluation/metrics.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/metrics.py | Apache-2.0 |
def process(self, data_batch: Optional[Dict],
data_samples: Sequence[Dict]) -> None:
"""Process one batch of data samples and data_samples. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (dict, optional): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of outputs from the model.
"""
data_samples = copy.deepcopy(data_samples)
for data_sample in data_samples:
results = dict()
features = data_sample['features']
video_feature = features['video_feature'].cpu().numpy()
text_feature = features['text_feature'].cpu().numpy()
results['video_feature'] = video_feature
results['text_feature'] = text_feature
self.results.append(results) | Process one batch of data samples and data_samples. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (dict, optional): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of outputs from the model. | process | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/retrieval_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/retrieval_metric.py | Apache-2.0 |
def compute_metrics(self, results: List) -> Dict:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
video_features = np.stack([res['video_feature'] for res in results])
text_features = np.stack([res['text_feature'] for res in results])
video_features = video_features / np.linalg.norm(
video_features, axis=-1, keepdims=True)
text_features = text_features / np.linalg.norm(
text_features, axis=-1, keepdims=True)
similarity = text_features @ video_features.T
sx = np.sort(-similarity)
d = np.diag(-similarity)
ind = np.where((sx - d[:, None]) == 0)[1]
metrics = OrderedDict()
for metric in self.metric_list:
if metric == 'R1':
metrics['R1'] = float(np.sum(ind == 0)) * 100 / len(ind)
elif metric == 'R5':
metrics['R5'] = float(np.sum(ind < 5)) * 100 / len(ind)
elif metric == 'R10':
metrics['R10'] = float(np.sum(ind < 10)) * 100 / len(ind)
elif metric == 'MdR':
metrics['MdR'] = np.median(ind) + 1
elif metric == 'MnR':
metrics['MnR'] = np.mean(ind) + 1
return metrics | Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results. | compute_metrics | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/retrieval_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/retrieval_metric.py | Apache-2.0 |
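A small numpy illustration of the rank trick above: row i of the similarity matrix scores text query i against every video, the diagonal holds the ground-truth pair, and ind[i] is the 0-based rank of that pair after sorting each row by descending similarity:

import numpy as np

similarity = np.array([[0.9, 0.2, 0.1],
                       [0.3, 0.4, 0.8],
                       [0.2, 0.6, 0.5]])
sx = np.sort(-similarity)                      # each row: descending similarity
d = np.diag(-similarity)                       # scores of the matching pairs
ind = np.where((sx - d[:, None]) == 0)[1]      # ranks: [0, 1, 1]
r1 = float(np.sum(ind == 0)) * 100 / len(ind)  # R1  = 33.33...
mdr = np.median(ind) + 1                       # MdR = 2.0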
def process(self, data_batch, data_samples):
"""Process one batch of data samples.
The processed results should be stored in ``self.results``, which will
        be used to compute the metrics when all batches have been processed.
Args:
data_batch: A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of outputs from the model.
"""
for sample in data_samples:
gt_answer = sample.get('gt_answer')
gt_answer_weight = sample.get('gt_answer_weight')
if isinstance(gt_answer, str):
gt_answer = [gt_answer]
if gt_answer_weight is None:
gt_answer_weight = [1. / (len(gt_answer))] * len(gt_answer)
result = {
'pred_answer': sample.get('pred_answer'),
'gt_answer': gt_answer,
'gt_answer_weight': gt_answer_weight,
}
self.results.append(result) | Process one batch of data samples.
The processed results should be stored in ``self.results``, which will
        be used to compute the metrics when all batches have been processed.
Args:
data_batch: A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of outputs from the model. | process | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/multimodal_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/multimodal_metric.py | Apache-2.0 |
def compute_metrics(self, results: List):
"""Compute the metrics from processed results.
Args:
results (dict): The processed results of each batch.
Returns:
Dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
acc = []
for result in results:
pred_answer = self._process_answer(result['pred_answer'])
gt_answer = [
self._process_answer(answer) for answer in result['gt_answer']
]
answer_weight = result['gt_answer_weight']
weight_sum = 0
for i, gt in enumerate(gt_answer):
if gt == pred_answer:
weight_sum += answer_weight[i]
vqa_acc = min(1.0, weight_sum / self.full_score_weight)
acc.append(vqa_acc)
accuracy = sum(acc) / len(acc) * 100
metrics = {'acc': accuracy}
return metrics | Compute the metrics from processed results.
Args:
results (dict): The processed results of each batch.
Returns:
Dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results. | compute_metrics | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/multimodal_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/multimodal_metric.py | Apache-2.0 |
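A plain-number illustration of the soft VQA accuracy above. The numbers assume ten annotator answers weighted 1/10 each and a full_score_weight of 0.3 (the usual VQA convention; the actual value is taken from the metric instance):

full_score_weight = 0.3          # assumed value, see note above
per_answer_weight = 1 / 10       # ten annotator answers, equal weights
print(min(1.0, 3 * per_answer_weight / full_score_weight))  # 1.0 for 3 matches
print(min(1.0, 1 * per_answer_weight / full_score_weight))  # ~0.33 for 1 match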
def process(self, data_batch, data_samples) -> None:
"""transfer tensors in predictions to CPU."""
for sample in data_samples:
question_id = sample['question_id']
pred_answer = sample['pred_answer']
result = {
'question_id': int(question_id),
'answer': pred_answer,
}
self.results.append(result) | transfer tensors in predictions to CPU. | process | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/multimodal_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/multimodal_metric.py | Apache-2.0 |
def compute_metrics(self, results: List):
"""Dump the result to json file."""
mmengine.dump(results, self.file_path)
logger = MMLogger.get_current_instance()
logger.info(f'Results has been saved to {self.file_path}.')
return {} | Dump the result to json file. | compute_metrics | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/multimodal_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/multimodal_metric.py | Apache-2.0 |
def process(self, data_batch, data_samples):
"""Process one batch of data samples.
The processed results should be stored in ``self.results``, which will
        be used to compute the metrics when all batches have been processed.
Args:
data_batch: A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of outputs from the model.
"""
for sample in data_samples:
# gt_labels in datasample is a LabelData
label = sample['gt_label'].item()
result = {
'pred_label': sample.get('pred_label'),
'gt_label': label,
}
self.results.append(result) | Process one batch of data samples.
The processed results should be stored in ``self.results``, which will
        be used to compute the metrics when all batches have been processed.
Args:
data_batch: A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of outputs from the model. | process | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/multimodal_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/multimodal_metric.py | Apache-2.0 |
def compute_metrics(self, results: List):
"""Compute the metrics from processed results.
Args:
results (dict): The processed results of each batch.
Returns:
Dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
preds = np.array([x['pred_label'] for x in results])
labels = np.array([x['gt_label'] for x in results])
accuracy = np.sum(preds == labels) / len(preds) * 100
metrics = {'acc': accuracy}
return metrics | Compute the metrics from processed results.
Args:
results (dict): The processed results of each batch.
Returns:
Dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results. | compute_metrics | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/multimodal_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/multimodal_metric.py | Apache-2.0 |
def process(self, data_batch: Sequence[dict],
data_samples: Sequence[dict]):
"""Process one batch of data and predictions.
The processed results should be stored in ``self.results``, which will
        be used to compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[dict]): A batch of data from the dataloader.
predictions (Sequence[dict]): A batch of outputs from the model.
"""
for data_sample in data_samples:
pred_score = data_sample['pred_score'].cpu()
gt_label = format_label(data_sample['gt_label'])
if 'gt_score' in data_sample:
target = data_sample.get('gt_score').clone()
else:
num_classes = pred_score.size()[-1]
target = F.one_hot(gt_label, num_classes)
# Because the retrieval output logit vector will be much larger
# compared to the normal classification, to save resources, the
# evaluation results are computed each batch here and then reduce
# all results at the end.
result = RetrievalRecall.calculate(
pred_score.unsqueeze(0), target.unsqueeze(0), topk=self.topk)
self.results.append(result) | Process one batch of data and predictions.
The processed results should be stored in ``self.results``, which will
        be used to compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[dict]): A batch of data from the dataloader.
predictions (Sequence[dict]): A batch of outputs from the model. | process | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/multimodal_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/multimodal_metric.py | Apache-2.0 |
def compute_metrics(self, results: List):
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
Dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
result_metrics = dict()
for i, k in enumerate(self.topk):
recall_at_k = sum([r[i].item() for r in results]) / len(results)
result_metrics[f'Recall@{k}'] = recall_at_k
return result_metrics | Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
Dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results. | compute_metrics | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/multimodal_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/multimodal_metric.py | Apache-2.0 |
def _format_pred(label, topk=None, is_indices=False):
"""format various label to List[indices]."""
if is_indices:
assert isinstance(label, Sequence), \
'`pred` must be Sequence of indices when' \
f' `pred_indices` set to True, but get {type(label)}'
for i, sample_pred in enumerate(label):
assert is_seq_of(sample_pred, int) or isinstance(
sample_pred, (np.ndarray, torch.Tensor)), \
'`pred` should be Sequence of indices when `pred_indices`' \
f'set to True. but pred[{i}] is {sample_pred}'
if topk:
label[i] = sample_pred[:min(topk, len(sample_pred))]
return label
if isinstance(label, np.ndarray):
label = torch.from_numpy(label)
elif not isinstance(label, torch.Tensor):
        raise TypeError(f'The pred must be of type torch.Tensor, '
                        f'np.ndarray or Sequence, but got {type(label)}.')
topk = topk if topk else label.size()[-1]
_, indices = label.topk(topk)
return indices | format various label to List[indices]. | _format_pred | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/multimodal_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/multimodal_metric.py | Apache-2.0 |
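A short example of _format_pred (a private helper, imported here from the module path shown above): a (N, C) score tensor is reduced to per-sample top-k class indices, while is_indices=True only truncates already-given index lists to length k:

import torch
from mmaction.evaluation.metrics.multimodal_metric import _format_pred

scores = torch.tensor([[0.1, 0.7, 0.2],
                       [0.6, 0.1, 0.3]])
print(_format_pred(scores, topk=2))
# tensor([[1, 2],
#         [0, 2]])
print(_format_pred([[2, 0, 1], [1, 2, 0]], topk=2, is_indices=True))
# [[2, 0], [1, 2]]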
def _format_target(label, is_indices=False):
"""format various label to List[indices]."""
if is_indices:
assert isinstance(label, Sequence), \
'`target` must be Sequence of indices when' \
f' `target_indices` set to True, but get {type(label)}'
for i, sample_gt in enumerate(label):
assert is_seq_of(sample_gt, int) or isinstance(
sample_gt, (np.ndarray, torch.Tensor)), \
'`target` should be Sequence of indices when ' \
f'`target_indices` set to True. but target[{i}] is {sample_gt}'
return label
if isinstance(label, np.ndarray):
label = torch.from_numpy(label)
elif isinstance(label, Sequence) and not mmengine.is_str(label):
label = torch.tensor(label)
elif not isinstance(label, torch.Tensor):
        raise TypeError(f'The target must be of type torch.Tensor, '
                        f'np.ndarray or Sequence, but got {type(label)}.')
indices = [sample_gt.nonzero().squeeze(-1) for sample_gt in label]
return indices | format various label to List[indices]. | _format_target | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/multimodal_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/multimodal_metric.py | Apache-2.0 |
def process(self, data_batch: Sequence[Tuple[Any, dict]],
predictions: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[Tuple[Any, dict]]): A batch of data
from the dataloader.
predictions (Sequence[dict]): A batch of outputs from
the model.
"""
for pred in predictions:
self.results.append(pred)
if self.metric_type == 'AR@AN':
data_batch = data_batch['data_samples']
for data_sample in data_batch:
video_info = data_sample.metainfo
video_id = video_info['video_name'][2:]
this_video_gt = []
for ann in video_info['annotations']:
t_start, t_end = ann['segment']
label = ann['label']
this_video_gt.append([t_start, t_end, label])
self.ground_truth[video_id] = np.array(this_video_gt) | Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[Tuple[Any, dict]]): A batch of data
from the dataloader.
predictions (Sequence[dict]): A batch of outputs from
the model. | process | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/anet_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/anet_metric.py | Apache-2.0 |
def compute_metrics(self, results: list) -> dict:
"""Compute the metrics from processed results.
If `metric_type` is 'TEM', only dump middle results and do not compute
any metrics.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
self.dump_results(results)
if self.metric_type == 'AR@AN':
return self.compute_ARAN(results)
return OrderedDict() | Compute the metrics from processed results.
If `metric_type` is 'TEM', only dump middle results and do not compute
any metrics.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results. | compute_metrics | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/anet_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/anet_metric.py | Apache-2.0 |
def compute_ARAN(self, results: list) -> dict:
"""AR@AN evaluation metric."""
temporal_iou_thresholds = self.metric_options.setdefault(
'AR@AN', {}).setdefault('temporal_iou_thresholds',
np.linspace(0.5, 0.95, 10))
max_avg_proposals = self.metric_options.setdefault(
'AR@AN', {}).setdefault('max_avg_proposals', 100)
if isinstance(temporal_iou_thresholds, list):
temporal_iou_thresholds = np.array(temporal_iou_thresholds)
eval_results = OrderedDict()
proposal, num_proposals = self._import_proposals(results)
recall, _, _, auc = average_recall_at_avg_proposals(
self.ground_truth,
proposal,
num_proposals,
max_avg_proposals=max_avg_proposals,
temporal_iou_thresholds=temporal_iou_thresholds)
eval_results['auc'] = auc
eval_results['AR@1'] = np.mean(recall[:, 0])
eval_results['AR@5'] = np.mean(recall[:, 4])
eval_results['AR@10'] = np.mean(recall[:, 9])
eval_results['AR@100'] = np.mean(recall[:, 99])
return eval_results | AR@AN evaluation metric. | compute_ARAN | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/anet_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/anet_metric.py | Apache-2.0 |
def dump_results(self, results, version='VERSION 1.3'):
"""Save middle or final results to disk."""
if self.output_format == 'json':
result_dict = self.proposals2json(results)
output_dict = {
'version': version,
'results': result_dict,
'external_data': {}
}
mmengine.dump(output_dict, self.out)
elif self.output_format == 'csv':
os.makedirs(self.out, exist_ok=True)
header = 'action,start,end,tmin,tmax'
for result in results:
video_name, outputs = result
output_path = osp.join(self.out, video_name + '.csv')
np.savetxt(
output_path,
outputs,
header=header,
delimiter=',',
comments='')
else:
raise ValueError(
f'The output format {self.output_format} is not supported.') | Save middle or final results to disk. | dump_results | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/anet_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/anet_metric.py | Apache-2.0 |
def proposals2json(results, show_progress=False):
"""Convert all proposals to a final dict(json) format.
Args:
results (list[dict]): All proposals.
show_progress (bool): Whether to show the progress bar.
            Defaults to False.
Returns:
dict: The final result dict. E.g.
.. code-block:: Python
            dict(video-1=[dict(segment=[1.1, 2.0], score=0.9),
dict(segment=[50.1, 129.3], score=0.6)])
"""
result_dict = {}
print('Convert proposals to json format')
if show_progress:
prog_bar = mmcv.ProgressBar(len(results))
for result in results:
video_name = result['video_name']
result_dict[video_name[2:]] = result['proposal_list']
if show_progress:
prog_bar.update()
return result_dict | Convert all proposals to a final dict(json) format.
Args:
results (list[dict]): All proposals.
show_progress (bool): Whether to show the progress bar.
            Defaults to False.
Returns:
dict: The final result dict. E.g.
.. code-block:: Python
            dict(video-1=[dict(segment=[1.1, 2.0], score=0.9),
dict(segment=[50.1, 129.3], score=0.6)]) | proposals2json | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/anet_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/anet_metric.py | Apache-2.0 |
def _import_proposals(results):
"""Read predictions from results."""
proposals = {}
num_proposals = 0
for result in results:
video_id = result['video_name'][2:]
this_video_proposals = []
for proposal in result['proposal_list']:
t_start, t_end = proposal['segment']
score = proposal['score']
this_video_proposals.append([t_start, t_end, score])
num_proposals += 1
proposals[video_id] = np.array(this_video_proposals)
return proposals, num_proposals | Read predictions from results. | _import_proposals | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/anet_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/anet_metric.py | Apache-2.0 |
def process(self, data_batch: Sequence[Tuple[Any, dict]],
data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[Tuple[Any, dict]]): A batch of data
from the dataloader.
data_samples (Sequence[dict]): A batch of outputs from
the model.
"""
for data_sample in data_samples:
result = dict()
pred = data_sample['pred_instances']
result['video_id'] = data_sample['video_id']
result['timestamp'] = data_sample['timestamp']
outputs = bbox2result(
pred['bboxes'],
pred['scores'],
num_classes=self.num_classes,
thr=self.action_thr)
result['outputs'] = outputs
self.results.append(result) | Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[Tuple[Any, dict]]): A batch of data
from the dataloader.
data_samples (Sequence[dict]): A batch of outputs from
the model. | process | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/ava_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/ava_metric.py | Apache-2.0 |
def compute_metrics(self, results: list) -> dict:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
time_now = datetime.now().strftime('%Y%m%d_%H%M%S')
temp_file = f'AVA_{time_now}_result.csv'
results2csv(results, temp_file, self.custom_classes)
eval_results = ava_eval(
temp_file,
self.options[0],
self.label_file,
self.ann_file,
self.exclude_file,
ignore_empty_frames=True,
custom_classes=self.custom_classes)
os.remove(temp_file)
return eval_results | Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results. | compute_metrics | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/ava_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/ava_metric.py | Apache-2.0 |
def process(self, data_batch: Sequence[Tuple[Any, dict]],
data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[Tuple[Any, dict]]): A batch of data
from the dataloader.
data_samples (Sequence[dict]): A batch of outputs from
the model.
"""
for pred in data_samples:
video_key = pred['video_id'].split('.mp4')[0]
frm_num = pred['timestamp']
bboxes = pred['pred_instances']['bboxes'].cpu().numpy()
cls_scores = pred['pred_instances']['scores'].cpu().numpy()
det_result = [video_key, frm_num, bboxes, cls_scores]
self.results.append(det_result) | Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[Tuple[Any, dict]]): A batch of data
from the dataloader.
data_samples (Sequence[dict]): A batch of outputs from
the model. | process | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/multisports_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/multisports_metric.py | Apache-2.0 |
def compute_metrics(self, results: list) -> dict:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
test_videos = self.annos['test_videos'][0]
resolutions = self.annos['resolution']
detections = []
for result in results:
video_key, frm_num, bboxes, cls_scores = result
for bbox, cls_score in zip(bboxes, cls_scores):
video_idx = test_videos.index(video_key)
pred_label = np.argmax(cls_score)
score = cls_score[pred_label]
h, w = resolutions[video_key]
bbox *= np.array([w, h, w, h])
instance_result = np.array(
[video_idx, frm_num, pred_label, score, *bbox])
detections.append(instance_result)
frm_detections = np.array(detections)
metric_result = dict()
f_map = frameAP(self.annos, frm_detections,
self.metric_options['F_mAP']['thr'], self.verbose)
metric_result.update({'frameAP': round(f_map, 4)})
video_tubes = link_tubes(
self.annos,
frm_detections,
len_thre=self.metric_options['V_mAP']['tube_thr'])
v_map = {}
for thr in self.metric_options['V_mAP']['thr']:
map = videoAP(
self.annos, video_tubes, thr, print_info=self.verbose)
v_map.update({f'v_map@{thr}': round(map, 4)})
metric_result.update(v_map)
if self.metric_options['V_mAP'].get('all'):
all_map = videoAP_all(self.annos, video_tubes)
metric_result.update(all_map)
return metric_result | Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results. | compute_metrics | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/multisports_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/multisports_metric.py | Apache-2.0 |
def process(self, data_batch: Sequence[Tuple[Any, dict]],
predictions: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[Tuple[Any, dict]]): A batch of data
from the dataloader.
predictions (Sequence[dict]): A batch of outputs from
the model.
"""
for pred in predictions:
self.results.append(pred) | Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[Tuple[Any, dict]]): A batch of data
from the dataloader.
predictions (Sequence[dict]): A batch of outputs from
the model. | process | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/video_grounding_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/video_grounding_metric.py | Apache-2.0 |
def compute_metrics(self, results: list) -> dict:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
eval_results = dict()
for topK in self.topK_list:
total = len(results)
correct = 0.0
for result in results:
gt = result['gt']
predictions = result['predictions'][:topK]
for prediction in predictions:
IoU = self.calculate_IoU(gt, prediction)
if IoU > self.threshold:
correct += 1
break
acc = correct / total
eval_results[f'Recall@Top{topK}_IoU={self.threshold}'] = acc
return eval_results | Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results. | compute_metrics | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/video_grounding_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/video_grounding_metric.py | Apache-2.0 |
def to_tensor(value):
"""Convert value to torch.Tensor."""
if isinstance(value, np.ndarray):
value = torch.from_numpy(value)
elif isinstance(value, Sequence) and not mmengine.is_str(value):
value = torch.tensor(value)
elif not isinstance(value, torch.Tensor):
raise TypeError(f'{type(value)} is not an available argument.')
return value | Convert value to torch.Tensor. | to_tensor | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/acc_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/acc_metric.py | Apache-2.0 |
def process(self, data_batch: Sequence[Tuple[Any, Dict]],
data_samples: Sequence[Dict]) -> None:
"""Process one batch of data samples and data_samples. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[dict]): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of outputs from the model.
"""
data_samples = copy.deepcopy(data_samples)
for data_sample in data_samples:
result = dict()
pred = data_sample['pred_score']
label = data_sample['gt_label']
# Ad-hoc for RGBPoseConv3D
if isinstance(pred, dict):
for item_name, score in pred.items():
pred[item_name] = score.cpu().numpy()
else:
pred = pred.cpu().numpy()
result['pred'] = pred
if label.size(0) == 1:
# single-label
result['label'] = label.item()
else:
# multi-label
result['label'] = label.cpu().numpy()
self.results.append(result) | Process one batch of data samples and data_samples. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[dict]): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of outputs from the model. | process | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/acc_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/acc_metric.py | Apache-2.0 |
def compute_metrics(self, results: List) -> Dict:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
labels = [x['label'] for x in results]
eval_results = dict()
# Ad-hoc for RGBPoseConv3D
if isinstance(results[0]['pred'], dict):
for item_name in results[0]['pred'].keys():
preds = [x['pred'][item_name] for x in results]
eval_result = self.calculate(preds, labels)
eval_results.update(
{f'{item_name}_{k}': v
for k, v in eval_result.items()})
if len(results[0]['pred']) == 2 and \
'rgb' in results[0]['pred'] and \
'pose' in results[0]['pred']:
rgb = [x['pred']['rgb'] for x in results]
pose = [x['pred']['pose'] for x in results]
preds = {
'1:1': get_weighted_score([rgb, pose], [1, 1]),
'2:1': get_weighted_score([rgb, pose], [2, 1]),
'1:2': get_weighted_score([rgb, pose], [1, 2])
}
for k in preds:
eval_result = self.calculate(preds[k], labels)
eval_results.update({
f'RGBPose_{k}_{key}': v
for key, v in eval_result.items()
})
return eval_results
# Simple Acc Calculation
else:
preds = [x['pred'] for x in results]
return self.calculate(preds, labels) | Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results. | compute_metrics | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/acc_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/acc_metric.py | Apache-2.0 |
def calculate(self, preds: List[np.ndarray],
labels: List[Union[int, np.ndarray]]) -> Dict:
"""Compute the metrics from processed results.
Args:
preds (list[np.ndarray]): List of the prediction scores.
labels (list[int | np.ndarray]): List of the labels.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
eval_results = OrderedDict()
metric_options = copy.deepcopy(self.metric_options)
for metric in self.metrics:
if metric == 'top_k_accuracy':
topk = metric_options.setdefault('top_k_accuracy',
{}).setdefault(
'topk', (1, 5))
if not isinstance(topk, (int, tuple)):
raise TypeError('topk must be int or tuple of int, '
f'but got {type(topk)}')
if isinstance(topk, int):
topk = (topk, )
top_k_acc = top_k_accuracy(preds, labels, topk)
for k, acc in zip(topk, top_k_acc):
eval_results[f'top{k}'] = acc
if metric == 'mean_class_accuracy':
mean1 = mean_class_accuracy(preds, labels)
eval_results['mean1'] = mean1
if metric in [
'mean_average_precision',
'mmit_mean_average_precision',
]:
if metric == 'mean_average_precision':
mAP = mean_average_precision(preds, labels)
eval_results['mean_average_precision'] = mAP
elif metric == 'mmit_mean_average_precision':
mAP = mmit_mean_average_precision(preds, labels)
eval_results['mmit_mean_average_precision'] = mAP
return eval_results | Compute the metrics from processed results.
Args:
preds (list[np.ndarray]): List of the prediction scores.
labels (list[int | np.ndarray]): List of the labels.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results. | calculate | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/acc_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/acc_metric.py | Apache-2.0 |
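A minimal numpy sketch of what the top_k_accuracy branch above computes (mirroring the helper rather than calling it): a sample counts as correct at k if its ground-truth label is among its k highest scores:

import numpy as np

preds = [np.array([0.2, 0.5, 0.3]),   # argmax is class 1
         np.array([0.7, 0.2, 0.1])]   # argmax is class 0
labels = [1, 2]
for k in (1, 2, 3):
    hits = [label in np.argsort(pred)[::-1][:k]
            for pred, label in zip(preds, labels)]
    print(k, np.mean(hits))           # 1 -> 0.5, 2 -> 0.5, 3 -> 1.0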
def calculate(pred, target, num_classes=None) -> dict:
"""Calculate the confusion matrix for single-label task.
Args:
pred (torch.Tensor | np.ndarray | Sequence): The prediction
results. It can be labels (N, ), or scores of every
class (N, C).
target (torch.Tensor | np.ndarray | Sequence): The target of
each prediction with shape (N, ).
num_classes (Optional, int): The number of classes. If the ``pred``
is label instead of scores, this argument is required.
Defaults to None.
Returns:
torch.Tensor: The confusion matrix.
"""
pred = to_tensor(pred)
target_label = to_tensor(target).int()
assert pred.size(0) == target_label.size(0), \
f"The size of pred ({pred.size(0)}) doesn't match "\
f'the target ({target_label.size(0)}).'
assert target_label.ndim == 1
if pred.ndim == 1:
assert num_classes is not None, \
'Please specify the `num_classes` if the `pred` is labels ' \
            'instead of scores.'
pred_label = pred
else:
num_classes = num_classes or pred.size(1)
pred_label = torch.argmax(pred, dim=1).flatten()
with torch.no_grad():
indices = num_classes * target_label + pred_label
matrix = torch.bincount(indices, minlength=num_classes**2)
matrix = matrix.reshape(num_classes, num_classes)
return matrix | Calculate the confusion matrix for single-label task.
Args:
pred (torch.Tensor | np.ndarray | Sequence): The prediction
results. It can be labels (N, ), or scores of every
class (N, C).
target (torch.Tensor | np.ndarray | Sequence): The target of
each prediction with shape (N, ).
num_classes (Optional, int): The number of classes. If the ``pred``
is label instead of scores, this argument is required.
Defaults to None.
Returns:
torch.Tensor: The confusion matrix. | calculate | python | open-mmlab/mmaction2 | mmaction/evaluation/metrics/acc_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/acc_metric.py | Apache-2.0 |
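A worked example of the bincount trick above, written directly in torch; rows index the ground-truth class and columns the predicted class:

import torch

target = torch.tensor([0, 1, 2, 2])
pred = torch.tensor([0, 1, 1, 2])
num_classes = 3
indices = num_classes * target + pred           # [0, 4, 7, 8]
matrix = torch.bincount(indices, minlength=num_classes**2)
print(matrix.reshape(num_classes, num_classes))
# tensor([[1, 0, 0],
#         [0, 1, 0],
#         [0, 1, 1]])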
def merge_args(cfg, args):
"""Merge CLI arguments to config."""
if args.no_validate:
cfg.val_cfg = None
cfg.val_dataloader = None
cfg.val_evaluator = None
cfg.launcher = args.launcher
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# enable automatic-mixed-precision training
if args.amp is True:
optim_wrapper = cfg.optim_wrapper.get('type', 'OptimWrapper')
assert optim_wrapper in ['OptimWrapper', 'AmpOptimWrapper'], \
            '`--amp` is not supported by custom optimizer wrapper type ' \
            f'`{optim_wrapper}`.'
cfg.optim_wrapper.type = 'AmpOptimWrapper'
cfg.optim_wrapper.setdefault('loss_scale', 'dynamic')
# resume training
if args.resume == 'auto':
cfg.resume = True
cfg.load_from = None
elif args.resume is not None:
cfg.resume = True
cfg.load_from = args.resume
# enable auto scale learning rate
if args.auto_scale_lr:
cfg.auto_scale_lr.enable = True
# set random seeds
if cfg.get('randomness', None) is None:
cfg.randomness = dict(
seed=args.seed,
diff_rank_seed=args.diff_rank_seed,
deterministic=args.deterministic)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
return cfg | Merge CLI arguments to config. | merge_args | python | open-mmlab/mmaction2 | tools/train.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/train.py | Apache-2.0 |
def merge_args(cfg, args):
"""Merge CLI arguments to config."""
# -------------------- visualization --------------------
if args.show or (args.show_dir is not None):
assert 'visualization' in cfg.default_hooks, \
'VisualizationHook is not set in the `default_hooks` field of ' \
'config. Please set `visualization=dict(type="VisualizationHook")`'
cfg.default_hooks.visualization.enable = True
cfg.default_hooks.visualization.show = args.show
cfg.default_hooks.visualization.wait_time = args.wait_time
cfg.default_hooks.visualization.out_dir = args.show_dir
cfg.default_hooks.visualization.interval = args.interval
# -------------------- Dump predictions --------------------
if args.dump is not None:
assert args.dump.endswith(('.pkl', '.pickle')), \
'The dump file must be a pkl file.'
dump_metric = dict(type='DumpResults', out_file_path=args.dump)
if isinstance(cfg.test_evaluator, (list, tuple)):
cfg.test_evaluator = list(cfg.test_evaluator)
cfg.test_evaluator.append(dump_metric)
else:
cfg.test_evaluator = [cfg.test_evaluator, dump_metric]
return cfg | Merge CLI arguments to config. | merge_args | python | open-mmlab/mmaction2 | tools/test.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/test.py | Apache-2.0 |
def plot_curve(lr_list, args, param_name, iters_per_epoch, by_epoch=True):
"""Plot learning rate vs iter graph."""
try:
import seaborn as sns
sns.set_style(args.style)
except ImportError:
pass
wind_w, wind_h = args.window_size.split('*')
wind_w, wind_h = int(wind_w), int(wind_h)
plt.figure(figsize=(wind_w, wind_h))
ax: plt.Axes = plt.subplot()
ax.plot(lr_list, linewidth=1)
if by_epoch:
ax.xaxis.tick_top()
ax.set_xlabel('Iters')
ax.xaxis.set_label_position('top')
sec_ax = ax.secondary_xaxis(
'bottom',
functions=(lambda x: x / iters_per_epoch,
lambda y: y * iters_per_epoch))
sec_ax.set_xlabel('Epochs')
else:
plt.xlabel('Iters')
plt.ylabel(param_name)
if args.title is None:
plt.title(f'{osp.basename(args.config)} {param_name} curve')
else:
plt.title(args.title) | Plot learning rate vs iter graph. | plot_curve | python | open-mmlab/mmaction2 | tools/visualizations/vis_scheduler.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/visualizations/vis_scheduler.py | Apache-2.0 |
def build_inputs(model: nn.Module,
video_path: str,
use_frames: bool = False) -> Dict:
"""build inputs for GradCAM.
Note that, building inputs for GradCAM is exactly the same as building
inputs for Recognizer test stage. Codes from `inference_recognizer`.
Args:
model (nn.Module): Recognizer model.
video_path (str): video file/url or rawframes directory.
use_frames (bool): whether to use rawframes as input.
Defaults to False.
Returns:
dict: Both GradCAM inputs and Recognizer test stage inputs,
including two keys, ``inputs`` and ``data_samples``.
"""
if not (osp.exists(video_path) or video_path.startswith('http')):
raise RuntimeError(f"'{video_path}' is missing")
if osp.isfile(video_path) and use_frames:
raise RuntimeError(
f"'{video_path}' is a video file, not a rawframe directory")
if osp.isdir(video_path) and not use_frames:
raise RuntimeError(
f"'{video_path}' is a rawframe directory, not a video file")
cfg = model.cfg
# build the data pipeline
test_pipeline = cfg.test_pipeline
test_pipeline = Compose(test_pipeline)
# prepare data
if use_frames:
filename_tmpl = cfg.test_dataloader.dataset.get(
'filename_tmpl', 'img_{:05}.jpg')
start_index = cfg.test_dataloader.dataset.get('start_index', 1)
data = dict(
frame_dir=video_path,
total_frames=len(os.listdir(video_path)),
label=-1,
start_index=start_index,
filename_tmpl=filename_tmpl,
modality='RGB')
else:
start_index = cfg.test_dataloader.dataset.get('start_index', 0)
data = dict(
filename=video_path,
label=-1,
start_index=start_index,
modality='RGB')
data = test_pipeline(data)
data = pseudo_collate([data])
return data | Build inputs for GradCAM.
Note that building inputs for GradCAM is exactly the same as building
inputs for the Recognizer test stage. Code is adapted from `inference_recognizer`.
Args:
model (nn.Module): Recognizer model.
video_path (str): video file/url or rawframes directory.
use_frames (bool): whether to use rawframes as input.
Defaults to False.
Returns:
dict: Both GradCAM inputs and Recognizer test stage inputs,
including two keys, ``inputs`` and ``data_samples``. | build_inputs | python | open-mmlab/mmaction2 | tools/visualizations/vis_cam.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/visualizations/vis_cam.py | Apache-2.0 |
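A hedged usage sketch; the config and video paths are placeholders and a working MMAction2 install with a video-based test pipeline is assumed:

from mmaction.apis import init_recognizer

config = 'configs/recognition/tsn/some_tsn_config.py'  # placeholder config path
model = init_recognizer(config, checkpoint=None, device='cpu')  # weights not needed to build inputs
data = build_inputs(model, 'demo/demo.mp4', use_frames=False)  # placeholder video path
print(data.keys())  # expected: dict_keys(['inputs', 'data_samples'])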
def _resize_frames(frame_list: List[np.ndarray],
scale: Optional[Tuple[int]] = None,
keep_ratio: bool = True,
interpolation: str = 'bilinear') -> List[np.ndarray]:
"""Resize frames according to given scale.
Codes are modified from `mmaction/datasets/transforms/processing.py`,
`Resize` class.
Args:
frame_list (list[np.ndarray]): Frames to be resized.
scale (tuple[int]): If keep_ratio is True, it serves as scaling
factor or maximum size: the image will be rescaled as large
as possible within the scale. Otherwise, it serves as (w, h)
of output size.
keep_ratio (bool): If set to True, Images will be resized without
changing the aspect ratio. Otherwise, it will resize images to a
given size. Defaults to True.
interpolation (str): Algorithm used for interpolation:
'nearest' | 'bilinear'. Defaults to ``'bilinear'``.
Returns:
list[np.ndarray]: Resized frames.
"""
if scale is None or (scale[0] == -1 and scale[1] == -1):
return frame_list
scale = tuple(scale)
max_long_edge = max(scale)
max_short_edge = min(scale)
if max_short_edge == -1:
scale = (np.inf, max_long_edge)
img_h, img_w, _ = frame_list[0].shape
if keep_ratio:
new_w, new_h = mmcv.rescale_size((img_w, img_h), scale)
else:
new_w, new_h = scale
frame_list = [
mmcv.imresize(img, (new_w, new_h), interpolation=interpolation)
for img in frame_list
]
return frame_list | Resize frames according to given scale.
Codes are modified from `mmaction/datasets/transforms/processing.py`,
`Resize` class.
Args:
frame_list (list[np.ndarray]): Frames to be resized.
scale (tuple[int]): If keep_ratio is True, it serves as scaling
factor or maximum size: the image will be rescaled as large
as possible within the scale. Otherwise, it serves as (w, h)
of output size.
keep_ratio (bool): If set to True, Images will be resized without
changing the aspect ratio. Otherwise, it will resize images to a
given size. Defaults to True.
interpolation (str): Algorithm used for interpolation:
'nearest' | 'bilinear'. Defaults to ``'bilinear'``.
Returns:
list[np.ndarray]: Resized frames. | _resize_frames | python | open-mmlab/mmaction2 | tools/visualizations/vis_cam.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/visualizations/vis_cam.py | Apache-2.0 |
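A minimal, self-contained sketch with synthetic frames; the helper is a private function of vis_cam.py, so it is assumed to be in scope together with that module's mmcv import:

import numpy as np

frames = [np.zeros((240, 320, 3), dtype=np.uint8) for _ in range(8)]
# Rescale so the short edge becomes 256 while keeping the aspect ratio.
resized = _resize_frames(frames, scale=(-1, 256), keep_ratio=True)
print(resized[0].shape)  # (256, 341, 3)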
def make_grid(videos, names, rescale_factor=None):
"""Concat list of pictures into a single big picture, align height here."""
vis = Visualizer()
ori_shapes = [vid[0].shape[:2] for vid in videos]
if rescale_factor is not None:
videos = [[mmcv.imrescale(img, rescale_factor) for img in video]
for video in videos]
max_height = int(max(vid[0].shape[0] for vid in videos) * 1.4)
min_width = min(vid[0].shape[1] for vid in videos)
horizontal_gap = min_width // 10
img_scale = _get_adaptive_scale((max_height, min_width))
texts = []
text_positions = []
start_x = 0
for i, vid in enumerate(videos):
for j, img in enumerate(vid):
pad_height = (max_height - img.shape[0]) // 2
pad_width = horizontal_gap // 2
# make border
videos[i][j] = cv2.copyMakeBorder(
img,
pad_height,
max_height - img.shape[0] - pad_height +
int(img_scale * 30 * 2),
pad_width,
pad_width,
cv2.BORDER_CONSTANT,
value=(255, 255, 255))
texts.append(f'{names[i]}\n{ori_shapes[i]}')
text_positions.append(
[start_x + img.shape[1] // 2 + pad_width, max_height])
start_x += img.shape[1] + horizontal_gap
out_frames = []
for i in range(len(videos[0])):
imgs = [vid[i] for vid in videos]
display_img = np.concatenate(imgs, axis=1)
vis.set_image(display_img)
img_scale = _get_adaptive_scale(display_img.shape[:2])
vis.draw_texts(
texts,
positions=np.array(text_positions),
font_sizes=img_scale * 7,
colors='black',
horizontal_alignments='center',
font_families='monospace')
out_frames.append(vis.get_image())
return out_frames | Concatenate a list of videos into one big picture per frame, aligning frame heights. | make_grid | python | open-mmlab/mmaction2 | tools/visualizations/browse_dataset.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/visualizations/browse_dataset.py | Apache-2.0 |
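A hedged sketch with two tiny synthetic videos; it assumes make_grid is called inside browse_dataset.py, where the _get_adaptive_scale helper and the mmengine Visualizer import are available:

import numpy as np

video_a = [np.full((64, 64, 3), 200, dtype=np.uint8) for _ in range(4)]
video_b = [np.full((48, 96, 3), 100, dtype=np.uint8) for _ in range(4)]
frames = make_grid([video_a, video_b], names=['original', 'pipeline output'])
print(len(frames), frames[0].shape)  # 4 annotated grid frames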
def cuhk17_top1():
"""Assign label for each proposal with the cuhk17 result, which is the #2
entry in http://activity-net.org/challenges/2017/evaluation.html."""
if not osp.exists('cuhk_anet17_pred.json'):
os.system('wget https://download.openmmlab.com/'
'mmaction/localization/cuhk_anet17_pred.json')
proposal = mmengine.load(args.proposal)
results = proposal['results']
cuhk_pred = mmengine.load('cuhk_anet17_pred.json')['results']
def get_topk(preds, k):
preds.sort(key=lambda x: x['score'])
return preds[-k:]
for k, v in results.items():
action_pred = cuhk_pred[k]
top1 = get_topk(action_pred, 1)
top1_label = top1[0]['label']
new_value = []
for item in v:
x = dict(label=top1_label)
x.update(item)
new_value.append(x)
results[k] = new_value
proposal['results'] = results
mmengine.dump(proposal, args.det_output) | Assign label for each proposal with the cuhk17 result, which is the #2
entry in http://activity-net.org/challenges/2017/evaluation.html. | cuhk17_top1 | python | open-mmlab/mmaction2 | tools/analysis_tools/report_map.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/analysis_tools/report_map.py | Apache-2.0 |
def __call__(self, results):
"""Select frames to verify.
Select the first, the last and three random frames. Required key is
"total_frames"; added or modified key is "frame_inds".
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
assert results['total_frames'] > 0
# first and last frames
results['frame_inds'] = np.array([0, results['total_frames'] - 1])
# choose 3 random frames
if results['total_frames'] > 2:
results['frame_inds'] = np.concatenate([
results['frame_inds'],
np.random.randint(1, results['total_frames'] - 1, 3)
])
return results | Select frames to verify.
Select the first, the last and three random frames. Required key is
"total_frames"; added or modified key is "frame_inds".
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | __call__ | python | open-mmlab/mmaction2 | tools/analysis_tools/check_videos.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/analysis_tools/check_videos.py | Apache-2.0 |
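A hedged illustration of the sampling behaviour; `RandomSampledFrames` is an assumed name for the transform class that owns this __call__, since the class header is not shown in this row:

transform = RandomSampledFrames()  # assumed class name, not shown above
results = transform(dict(total_frames=30))
print(results['frame_inds'])  # index 0, index 29, plus three random indices in between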
def resize_videos(vid_item):
"""Generate resized video cache.
Args:
vid_item (list): Video item containing video full path,
video relative path.
Returns:
bool: Whether the video cache is generated successfully.
"""
full_path, vid_path = vid_item
# Change the output video extension to .mp4 if '--to-mp4' flag is set
if args.to_mp4:
vid_path = vid_path.split('.')
assert len(vid_path) == 2, \
f"Video path '{vid_path}' contain more than one dot"
vid_path = vid_path[0] + '.mp4'
out_full_path = osp.join(args.out_dir, vid_path)
dir_name = osp.dirname(vid_path)
out_dir = osp.join(args.out_dir, dir_name)
if not osp.exists(out_dir):
os.makedirs(out_dir)
result = os.popen(
f'ffprobe -hide_banner -loglevel error -select_streams v:0 -show_entries stream=width,height -of csv=p=0 {full_path}' # noqa:E501
)
w, h = [int(d) for d in result.readline().rstrip().split(',')]
if w > h:
cmd = (f'ffmpeg -hide_banner -loglevel error -i {full_path} '
f'-vf {"mpdecimate," if args.remove_dup else ""}'
f'scale=-2:{args.scale} '
f'{"-vsync vfr" if args.remove_dup else ""} '
f'-c:v libx264 {"-g 16" if args.dense else ""} '
f'-an {out_full_path} -y')
else:
cmd = (f'ffmpeg -hide_banner -loglevel error -i {full_path} '
f'-vf {"mpdecimate," if args.remove_dup else ""}'
f'scale={args.scale}:-2 '
f'{"-vsync vfr" if args.remove_dup else ""} '
f'-c:v libx264 {"-g 16" if args.dense else ""} '
f'-an {out_full_path} -y')
os.popen(cmd)
print(f'{vid_path} done')
sys.stdout.flush()
return True | Generate resized video cache.
Args:
vid_item (list): Video item containing video full path,
video relative path.
Returns:
bool: Whether the video cache is generated successfully. | resize_videos | python | open-mmlab/mmaction2 | tools/data/resize_videos.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/resize_videos.py | Apache-2.0 |
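A hedged sketch; the paths are placeholders, ffmpeg/ffprobe must be on PATH, and in the real script the module-level `args` comes from argparse rather than the stand-in Namespace below:

import argparse

args = argparse.Namespace(to_mp4=True, out_dir='data/resized_videos', scale=256,
                          remove_dup=False, dense=False)  # stand-in for the script's CLI args
resize_videos(('data/raw_videos/class_a/clip001.avi', 'class_a/clip001.avi'))
# launches ffmpeg to write data/resized_videos/class_a/clip001.mp4 with the short edge scaled to 256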
def count_files(directory, prefix_list):
"""Count file number with a given directory and prefix.
Args:
directory (str): Data directory to be search.
prefix_list (list): List or prefix.
Returns:
list (int): Number list of the file with the prefix.
"""
lst = os.listdir(directory)
cnt_list = [len(fnmatch.filter(lst, x + '*')) for x in prefix_list]
return cnt_list | Count the number of files in a directory that match each given prefix.
Args:
directory (str): Data directory to be searched.
prefix_list (list): List of prefixes.
Returns:
list[int]: Number of files matching each prefix. | parse_directory.count_files | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
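A short usage example; the rawframes path is a placeholder for an extracted-frame folder:

counts = count_files('data/ucf101/rawframes/v_ApplyEyeMakeup_g01_c01',
                     ('img_', 'flow_x_', 'flow_y_'))
print(counts)  # e.g. [120, 119, 119] -> RGB, flow-x and flow-y frame counts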
def parse_directory(path,
rgb_prefix='img_',
flow_x_prefix='flow_x_',
flow_y_prefix='flow_y_',
level=1):
"""Parse directories holding extracted frames from standard benchmarks.
Args:
path (str): Directory path to parse frames.
rgb_prefix (str): Prefix of generated rgb frames name.
default: 'img_'.
flow_x_prefix (str): Prefix of generated flow x name.
default: `flow_x_`.
flow_y_prefix (str): Prefix of generated flow y name.
default: `flow_y_`.
level (int): Directory level for glob searching. Options are 1 and 2.
default: 1.
Returns:
dict: frame info dict with video id as key and tuple(path(str),
rgb_num(int), flow_x_num(int)) as value.
"""
print(f'parse frames under directory {path}')
if level == 1:
# Only search for one-level directory
def locate_directory(x):
return osp.basename(x)
frame_dirs = glob.glob(osp.join(path, '*'))
elif level == 2:
# search for two-level directory
def locate_directory(x):
return osp.join(osp.basename(osp.dirname(x)), osp.basename(x))
frame_dirs = glob.glob(osp.join(path, '*', '*'))
else:
raise ValueError('level can be only 1 or 2')
def count_files(directory, prefix_list):
"""Count file number with a given directory and prefix.
Args:
directory (str): Data directory to be search.
prefix_list (list): List or prefix.
Returns:
list (int): Number list of the file with the prefix.
"""
lst = os.listdir(directory)
cnt_list = [len(fnmatch.filter(lst, x + '*')) for x in prefix_list]
return cnt_list
# check RGB
frame_dict = {}
for i, frame_dir in enumerate(frame_dirs):
total_num = count_files(frame_dir,
(rgb_prefix, flow_x_prefix, flow_y_prefix))
dir_name = locate_directory(frame_dir)
num_x = total_num[1]
num_y = total_num[2]
if num_x != num_y:
raise ValueError(f'x and y directions have different numbers '
f'of flow images in video directory: {frame_dir}')
if i % 200 == 0:
print(f'{i} videos parsed')
frame_dict[dir_name] = (frame_dir, total_num[0], num_x)
print('frame directory analysis done')
return frame_dict | Parse directories holding extracted frames from standard benchmarks.
Args:
path (str): Directory path to parse frames.
rgb_prefix (str): Prefix of generated rgb frames name.
default: 'img_'.
flow_x_prefix (str): Prefix of generated flow x name.
default: `flow_x_`.
flow_y_prefix (str): Prefix of generated flow y name.
default: `flow_y_`.
level (int): Directory level for glob searching. Options are 1 and 2.
default: 1.
Returns:
dict: frame info dict with video id as key and tuple(path(str),
rgb_num(int), flow_x_num(int)) as value. | parse_directory | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
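A hedged usage example against a two-level rawframes layout (class/video); the directory is a placeholder and is expected to contain img_/flow_x_/flow_y_ prefixed frames:

frame_info = parse_directory('data/ucf101/rawframes', level=2)
key = next(iter(frame_info))
print(key, frame_info[key])  # (frame_dir, num_rgb_frames, num_flow_frames)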
def line_to_map(line):
"""A function to map line string to video and label.
Args:
line (str): A line from the split annotation file.
Returns:
tuple[str, str]: (video, label), video is the video id,
label is the video label.
"""
items = line.strip().split()
video = osp.splitext(items[0])[0]
if level == 1:
video = osp.basename(video)
label = items[0]
elif level == 2:
video = osp.join(
osp.basename(osp.dirname(video)), osp.basename(video))
label = class_mapping[osp.dirname(items[0])]
return video, label | A function to map line string to video and label.
Args:
line (str): A line from the split annotation file.
Returns:
tuple[str, str]: (video, label), video is the video id,
label is the video label. | parse_ucf101_splits.line_to_map | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
def parse_ucf101_splits(level):
"""Parse UCF-101 dataset into "train", "val", "test" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
Returns:
list: "train", "val", "test" splits of UCF-101.
"""
class_index_file = 'data/ucf101/annotations/classInd.txt'
train_file_template = 'data/ucf101/annotations/trainlist{:02d}.txt'
test_file_template = 'data/ucf101/annotations/testlist{:02d}.txt'
with open(class_index_file, 'r') as fin:
class_index = [x.strip().split() for x in fin]
class_mapping = {x[1]: int(x[0]) - 1 for x in class_index}
def line_to_map(line):
"""A function to map line string to video and label.
Args:
line (str): A line from the split annotation file.
Returns:
tuple[str, str]: (video, label), video is the video id,
label is the video label.
"""
items = line.strip().split()
video = osp.splitext(items[0])[0]
if level == 1:
video = osp.basename(video)
label = items[0]
elif level == 2:
video = osp.join(
osp.basename(osp.dirname(video)), osp.basename(video))
label = class_mapping[osp.dirname(items[0])]
return video, label
splits = []
for i in range(1, 4):
with open(train_file_template.format(i), 'r') as fin:
train_list = [line_to_map(x) for x in fin]
with open(test_file_template.format(i), 'r') as fin:
test_list = [line_to_map(x) for x in fin]
splits.append((train_list, test_list))
return splits | Parse UCF-101 dataset into "train", "val", "test" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
Returns:
list: "train", "val", "test" splits of UCF-101. | parse_ucf101_splits | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
def parse_jester_splits(level):
"""Parse Jester into "train", "val" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
Returns:
list: "train", "val", "test" splits of Jester dataset.
"""
# Read the annotations
class_index_file = 'data/jester/annotations/jester-v1-labels.csv'
train_file = 'data/jester/annotations/jester-v1-train.csv'
val_file = 'data/jester/annotations/jester-v1-validation.csv'
test_file = 'data/jester/annotations/jester-v1-test.csv'
with open(class_index_file, 'r') as fin:
class_index = [x.strip() for x in fin]
class_mapping = {class_index[idx]: idx for idx in range(len(class_index))}
def line_to_map(line, test_mode=False):
items = line.strip().split(';')
video = items[0]
if level == 1:
video = osp.basename(video)
elif level == 2:
video = osp.join(
osp.basename(osp.dirname(video)), osp.basename(video))
if test_mode:
return video
label = class_mapping[items[1]]
return video, label
with open(train_file, 'r') as fin:
train_list = [line_to_map(x) for x in fin]
with open(val_file, 'r') as fin:
val_list = [line_to_map(x) for x in fin]
with open(test_file, 'r') as fin:
test_list = [line_to_map(x, test_mode=True) for x in fin]
splits = ((train_list, val_list, test_list), )
return splits | Parse Jester into "train", "val" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
Returns:
list: "train", "val", "test" splits of Jester dataset. | parse_jester_splits | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
def parse_sthv1_splits(level):
"""Parse Something-Something dataset V1 into "train", "val" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
Returns:
list: "train", "val", "test" splits of Something-Something V1 dataset.
"""
# Read the annotations
# yapf: disable
class_index_file = 'data/sthv1/annotations/something-something-v1-labels.csv' # noqa
# yapf: enable
train_file = 'data/sthv1/annotations/something-something-v1-train.csv'
val_file = 'data/sthv1/annotations/something-something-v1-validation.csv'
test_file = 'data/sthv1/annotations/something-something-v1-test.csv'
with open(class_index_file, 'r') as fin:
class_index = [x.strip() for x in fin]
class_mapping = {class_index[idx]: idx for idx in range(len(class_index))}
def line_to_map(line, test_mode=False):
items = line.strip().split(';')
video = items[0]
if level == 1:
video = osp.basename(video)
elif level == 2:
video = osp.join(
osp.basename(osp.dirname(video)), osp.basename(video))
if test_mode:
return video
label = class_mapping[items[1]]
return video, label
with open(train_file, 'r') as fin:
train_list = [line_to_map(x) for x in fin]
with open(val_file, 'r') as fin:
val_list = [line_to_map(x) for x in fin]
with open(test_file, 'r') as fin:
test_list = [line_to_map(x, test_mode=True) for x in fin]
splits = ((train_list, val_list, test_list), )
return splits | Parse Something-Something dataset V1 into "train", "val" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
Returns:
list: "train", "val", "test" splits of Something-Something V1 dataset. | parse_sthv1_splits | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
def parse_sthv2_splits(level):
"""Parse Something-Something dataset V2 into "train", "val" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
Returns:
list: "train", "val", "test" splits of Something-Something V2 dataset.
"""
# Read the annotations
# yapf: disable
class_index_file = 'data/sthv2/annotations/something-something-v2-labels.json' # noqa
# yapf: enable
train_file = 'data/sthv2/annotations/something-something-v2-train.json'
val_file = 'data/sthv2/annotations/something-something-v2-validation.json'
test_file = 'data/sthv2/annotations/something-something-v2-test.json'
with open(class_index_file, 'r') as fin:
class_mapping = json.loads(fin.read())
def line_to_map(item, test_mode=False):
video = item['id']
if level == 1:
video = osp.basename(video)
elif level == 2:
video = osp.join(
osp.basename(osp.dirname(video)), osp.basename(video))
if test_mode:
return video
template = item['template'].replace('[', '')
template = template.replace(']', '')
label = int(class_mapping[template])
return video, label
with open(train_file, 'r') as fin:
items = json.loads(fin.read())
train_list = [line_to_map(item) for item in items]
with open(val_file, 'r') as fin:
items = json.loads(fin.read())
val_list = [line_to_map(item) for item in items]
with open(test_file, 'r') as fin:
items = json.loads(fin.read())
test_list = [line_to_map(item, test_mode=True) for item in items]
splits = ((train_list, val_list, test_list), )
return splits | Parse Something-Something dataset V2 into "train", "val" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
Returns:
list: "train", "val", "test" splits of Something-Something V2 dataset. | parse_sthv2_splits | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
def parse_mmit_splits():
"""Parse Multi-Moments in Time dataset into "train", "val" splits.
Returns:
list: "train", "val", "test" splits of Multi-Moments in Time.
"""
# Read the annotations
def line_to_map(x):
video = osp.splitext(x[0])[0]
labels = [int(digit) for digit in x[1:]]
return video, labels
csv_reader = csv.reader(open('data/mmit/annotations/trainingSet.csv'))
train_list = [line_to_map(x) for x in csv_reader]
csv_reader = csv.reader(open('data/mmit/annotations/validationSet.csv'))
val_list = [line_to_map(x) for x in csv_reader]
test_list = val_list # MMiT has no separate test annotations; reuse the validation set
splits = ((train_list, val_list, test_list), )
return splits | Parse Multi-Moments in Time dataset into "train", "val" splits.
Returns:
list: "train", "val", "test" splits of Multi-Moments in Time. | parse_mmit_splits | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
def convert_label(s, keep_whitespaces=False):
"""Convert label name to a formal string.
Remove redundant '"' and convert whitespace to '_'.
Args:
s (str): String to be converted.
keep_whitespaces(bool): Whether to keep whitespace. Default: False.
Returns:
str: Converted string.
"""
if not keep_whitespaces:
return s.replace('"', '').replace(' ', '_')
return s.replace('"', '') | Convert label name to a formal string.
Remove redundant '"' and convert whitespace to '_'.
Args:
s (str): String to be converted.
keep_whitespaces(bool): Whether to keep whitespace. Default: False.
Returns:
str: Converted string. | parse_kinetics_splits.convert_label | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
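A quick check of the label normalisation (the label value is chosen for illustration):

print(convert_label('"air drumming"'))                         # air_drumming
print(convert_label('"air drumming"', keep_whitespaces=True))  # air drumming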
def line_to_map(x, test=False):
"""A function to map line string to video and label.
Args:
x (str): A single line from Kinetics csv file.
test (bool): Indicate whether the line comes from test
annotation file.
Returns:
tuple[str, str]: (video, label), video is the video id,
label is the video label.
"""
if test:
# video = f'{x[0]}_{int(x[1]):06d}_{int(x[2]):06d}'
video = f'{x[1]}_{int(float(x[2])):06d}_{int(float(x[3])):06d}'
label = -1 # label unknown
return video, label
video = f'{x[1]}_{int(float(x[2])):06d}_{int(float(x[3])):06d}'
if level == 2:
video = f'{convert_label(x[0])}/{video}'
else:
assert level == 1
label = class_mapping[convert_label(x[0])]
return video, label | A function to map line string to video and label.
Args:
x (str): A single line from Kinetics csv file.
test (bool): Indicate whether the line comes from test
annotation file.
Returns:
tuple[str, str]: (video, label), video is the video id,
label is the video label. | parse_kinetics_splits.line_to_map | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
def parse_kinetics_splits(level, dataset):
"""Parse Kinetics dataset into "train", "val", "test" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
dataset (str): Denotes the version of Kinetics that needs to be parsed,
choices are "kinetics400", "kinetics600" and "kinetics700".
Returns:
list: "train", "val", "test" splits of Kinetics.
"""
def convert_label(s, keep_whitespaces=False):
"""Convert label name to a formal string.
Remove redundant '"' and convert whitespace to '_'.
Args:
s (str): String to be converted.
keep_whitespaces(bool): Whether to keep whitespace. Default: False.
Returns:
str: Converted string.
"""
if not keep_whitespaces:
return s.replace('"', '').replace(' ', '_')
return s.replace('"', '')
def line_to_map(x, test=False):
"""A function to map line string to video and label.
Args:
x (str): A single line from Kinetics csv file.
test (bool): Indicate whether the line comes from test
annotation file.
Returns:
tuple[str, str]: (video, label), video is the video id,
label is the video label.
"""
if test:
# video = f'{x[0]}_{int(x[1]):06d}_{int(x[2]):06d}'
video = f'{x[1]}_{int(float(x[2])):06d}_{int(float(x[3])):06d}'
label = -1 # label unknown
return video, label
video = f'{x[1]}_{int(float(x[2])):06d}_{int(float(x[3])):06d}'
if level == 2:
video = f'{convert_label(x[0])}/{video}'
else:
assert level == 1
label = class_mapping[convert_label(x[0])]
return video, label
train_file = f'data/{dataset}/annotations/kinetics_train.csv'
val_file = f'data/{dataset}/annotations/kinetics_val.csv'
test_file = f'data/{dataset}/annotations/kinetics_test.csv'
csv_reader = csv.reader(open(train_file))
# skip the first line
next(csv_reader)
labels_sorted = sorted({convert_label(row[0]) for row in csv_reader})
class_mapping = {label: i for i, label in enumerate(labels_sorted)}
csv_reader = csv.reader(open(train_file))
next(csv_reader)
train_list = [line_to_map(x) for x in csv_reader]
csv_reader = csv.reader(open(val_file))
next(csv_reader)
val_list = [line_to_map(x) for x in csv_reader]
csv_reader = csv.reader(open(test_file))
next(csv_reader)
test_list = [line_to_map(x, test=True) for x in csv_reader]
splits = ((train_list, val_list, test_list), )
return splits | Parse Kinetics dataset into "train", "val", "test" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
dataset (str): Denotes the version of Kinetics that needs to be parsed,
choices are "kinetics400", "kinetics600" and "kinetics700".
Returns:
list: "train", "val", "test" splits of Kinetics. | parse_kinetics_splits | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |