code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def parse_mit_splits():
"""Parse Moments in Time dataset into "train", "val" splits.
Returns:
list: "train", "val", "test" splits of Moments in Time.
"""
# Read the annotations
class_mapping = {}
with open('data/mit/annotations/moments_categories.txt') as f_cat:
for line in f_cat.readlines():
cat, digit = line.rstrip().split(',')
class_mapping[cat] = int(digit)
def line_to_map(x):
video = osp.splitext(x[0])[0]
label = class_mapping[osp.dirname(x[0])]
return video, label
csv_reader = csv.reader(open('data/mit/annotations/trainingSet.csv'))
train_list = [line_to_map(x) for x in csv_reader]
csv_reader = csv.reader(open('data/mit/annotations/validationSet.csv'))
val_list = [line_to_map(x) for x in csv_reader]
test_list = val_list # no test for mit
splits = ((train_list, val_list, test_list), )
return splits | Parse Moments in Time dataset into "train", "val" splits.
Returns:
list: "train", "val", "test" splits of Moments in Time. | parse_mit_splits | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
def generate_class_index_file():
"""This function will generate a `ClassInd.txt` for HMDB51 in a format
like UCF101, where class id starts with 1."""
video_path = 'data/hmdb51/videos'
annotation_dir = 'data/hmdb51/annotations'
class_list = sorted(os.listdir(video_path))
class_dict = dict()
if not osp.exists(class_index_file):
with open(class_index_file, 'w') as f:
content = []
for class_id, class_name in enumerate(class_list):
# like `ClassInd.txt` in UCF-101,
# the class_id begins with 1
class_dict[class_name] = class_id + 1
cur_line = ' '.join([str(class_id + 1), class_name])
content.append(cur_line)
content = '\n'.join(content)
f.write(content)
else:
print(f'{class_index_file} has been generated before.')
class_dict = {
class_name: class_id + 1
for class_id, class_name in enumerate(class_list)
}
for i in range(1, 4):
train_content = []
test_content = []
for class_name in class_dict:
filename = class_name + f'_test_split{i}.txt'
filename_path = osp.join(annotation_dir, filename)
with open(filename_path, 'r') as fin:
for line in fin:
video_info = line.strip().split()
video_name = video_info[0]
if video_info[1] == '1':
target_line = ' '.join([
osp.join(class_name, video_name),
str(class_dict[class_name])
])
train_content.append(target_line)
elif video_info[1] == '2':
target_line = ' '.join([
osp.join(class_name, video_name),
str(class_dict[class_name])
])
test_content.append(target_line)
train_content = '\n'.join(train_content)
test_content = '\n'.join(test_content)
with open(train_file_template.format(i), 'w') as fout:
fout.write(train_content)
with open(test_file_template.format(i), 'w') as fout:
fout.write(test_content) | This function will generate a `ClassInd.txt` for HMDB51 in a format
like UCF101, where class id starts with 1. | parse_hmdb51_split.generate_class_index_file | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
def parse_hmdb51_split(level):
train_file_template = 'data/hmdb51/annotations/trainlist{:02d}.txt'
test_file_template = 'data/hmdb51/annotations/testlist{:02d}.txt'
class_index_file = 'data/hmdb51/annotations/classInd.txt'
def generate_class_index_file():
"""This function will generate a `ClassInd.txt` for HMDB51 in a format
like UCF101, where class id starts with 1."""
video_path = 'data/hmdb51/videos'
annotation_dir = 'data/hmdb51/annotations'
class_list = sorted(os.listdir(video_path))
class_dict = dict()
if not osp.exists(class_index_file):
with open(class_index_file, 'w') as f:
content = []
for class_id, class_name in enumerate(class_list):
# like `ClassInd.txt` in UCF-101,
# the class_id begins with 1
class_dict[class_name] = class_id + 1
cur_line = ' '.join([str(class_id + 1), class_name])
content.append(cur_line)
content = '\n'.join(content)
f.write(content)
else:
print(f'{class_index_file} has been generated before.')
class_dict = {
class_name: class_id + 1
for class_id, class_name in enumerate(class_list)
}
for i in range(1, 4):
train_content = []
test_content = []
for class_name in class_dict:
filename = class_name + f'_test_split{i}.txt'
filename_path = osp.join(annotation_dir, filename)
with open(filename_path, 'r') as fin:
for line in fin:
video_info = line.strip().split()
video_name = video_info[0]
if video_info[1] == '1':
target_line = ' '.join([
osp.join(class_name, video_name),
str(class_dict[class_name])
])
train_content.append(target_line)
elif video_info[1] == '2':
target_line = ' '.join([
osp.join(class_name, video_name),
str(class_dict[class_name])
])
test_content.append(target_line)
train_content = '\n'.join(train_content)
test_content = '\n'.join(test_content)
with open(train_file_template.format(i), 'w') as fout:
fout.write(train_content)
with open(test_file_template.format(i), 'w') as fout:
fout.write(test_content)
generate_class_index_file()
with open(class_index_file, 'r') as fin:
class_index = [x.strip().split() for x in fin]
class_mapping = {x[1]: int(x[0]) - 1 for x in class_index}
def line_to_map(line):
items = line.strip().split()
video = osp.splitext(items[0])[0]
if level == 1:
video = osp.basename(video)
elif level == 2:
video = osp.join(
osp.basename(osp.dirname(video)), osp.basename(video))
label = class_mapping[osp.dirname(items[0])]
return video, label
splits = []
for i in range(1, 4):
with open(train_file_template.format(i), 'r') as fin:
train_list = [line_to_map(x) for x in fin]
with open(test_file_template.format(i), 'r') as fin:
test_list = [line_to_map(x) for x in fin]
splits.append((train_list, test_list))
return splits | This function will generate a `ClassInd.txt` for HMDB51 in a format
like UCF101, where class id starts with 1. | parse_hmdb51_split | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
def process_norm_proposal_file(norm_proposal_file, frame_dict):
"""Process the normalized proposal file and denormalize it.
Args:
norm_proposal_file (str): Name of normalized proposal file.
frame_dict (dict): Information of frame folders.
"""
proposal_file = norm_proposal_file.replace('normalized_', '')
norm_proposals = load_localize_proposal_file(norm_proposal_file)
processed_proposal_list = []
for idx, norm_proposal in enumerate(norm_proposals):
video_id = norm_proposal[0]
frame_info = frame_dict[video_id]
num_frames = frame_info[1]
frame_path = osp.basename(frame_info[0])
gt = [[
int(x[0]),
int(float(x[1]) * num_frames),
int(float(x[2]) * num_frames)
] for x in norm_proposal[2]]
proposal = [[
int(x[0]),
float(x[1]),
float(x[2]),
int(float(x[3]) * num_frames),
int(float(x[4]) * num_frames)
] for x in norm_proposal[3]]
gt_dump = '\n'.join(['{} {} {}'.format(*x) for x in gt])
gt_dump += '\n' if len(gt) else ''
proposal_dump = '\n'.join(
['{} {:.04f} {:.04f} {} {}'.format(*x) for x in proposal])
proposal_dump += '\n' if len(proposal) else ''
processed_proposal_list.append(
f'# {idx}\n{frame_path}\n{num_frames}\n1'
f'\n{len(gt)}\n{gt_dump}{len(proposal)}\n{proposal_dump}')
with open(proposal_file, 'w') as f:
f.writelines(processed_proposal_list) | Process the normalized proposal file and denormalize it.
Args:
norm_proposal_file (str): Name of normalized proposal file.
frame_dict (dict): Information of frame folders. | process_norm_proposal_file | python | open-mmlab/mmaction2 | tools/data/denormalize_proposal_file.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/denormalize_proposal_file.py | Apache-2.0 |
def lines2dictlist(lines, format):
"""Convert lines in 'txt' format to dictionaries in 'json' format.
Currently supports single-label and multi-label.
Example of a single-label rawframes annotation txt file:
.. code-block:: txt
(frame_dir num_frames label)
some/directory-1 163 1
some/directory-2 122 1
some/directory-3 258 2
Example of a multi-label rawframes annotation txt file:
.. code-block:: txt
(frame_dir num_frames label1 label2 ...)
some/directory-1 163 1 3 5
some/directory-2 122 1 2
some/directory-3 258 2
Example of a single-label videos annotation txt file:
.. code-block:: txt
(filename label)
some/path/000.mp4 1
some/path/001.mp4 1
some/path/002.mp4 2
Example of a multi-label videos annotation txt file:
.. code-block:: txt
(filename label1 label2 ...)
some/path/000.mp4 1 3 5
some/path/001.mp4 1 4 8
some/path/002.mp4 2 4 9
Args:
lines (list): List of lines in 'txt' label format.
format (str): Data format, choices are 'rawframes' and 'videos'.
Returns:
list[dict]: For rawframes format, each dict has keys: frame_dir,
total_frames, label; for videos format, each dict has keys:
filename, label.
"""
lines = [x.split() for x in lines]
if format == 'rawframes':
data = [
dict(
frame_dir=line[0],
total_frames=int(line[1]),
label=[int(x) for x in line[2:]]) for line in lines
]
elif format == 'videos':
data = [
dict(filename=line[0], label=[int(x) for x in line[1:]])
for line in lines
]
return data | Convert lines in 'txt' format to dictionaries in 'json' format.
Currently supports single-label and multi-label.
Example of a single-label rawframes annotation txt file:
.. code-block:: txt
(frame_dir num_frames label)
some/directory-1 163 1
some/directory-2 122 1
some/directory-3 258 2
Example of a multi-label rawframes annotation txt file:
.. code-block:: txt
(frame_dir num_frames label1 label2 ...)
some/directory-1 163 1 3 5
some/directory-2 122 1 2
some/directory-3 258 2
Example of a single-label videos annotation txt file:
.. code-block:: txt
(filename label)
some/path/000.mp4 1
some/path/001.mp4 1
some/path/002.mp4 2
Example of a multi-label videos annotation txt file:
.. code-block:: txt
(filename label1 label2 ...)
some/path/000.mp4 1 3 5
some/path/001.mp4 1 4 8
some/path/002.mp4 2 4 9
Args:
lines (list): List of lines in 'txt' label format.
format (str): Data format, choices are 'rawframes' and 'videos'.
Returns:
list[dict]: For rawframes format, each dict has keys: frame_dir,
total_frames, label; for videos format, each dict has keys:
filename, label. | lines2dictlist | python | open-mmlab/mmaction2 | tools/data/anno_txt2json.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/anno_txt2json.py | Apache-2.0 |
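A brief usage sketch for lines2dictlist, assuming the function defined above (from tools/data/anno_txt2json.py) is available in the current namespace; the example lines reuse the made-up paths from the docstring:
sample_rawframe_lines = [
    'some/directory-1 163 1 3 5',   # frame_dir, num_frames, one or more labels
    'some/directory-2 122 1 2',
]
sample_video_lines = [
    'some/path/000.mp4 1',          # filename, label
    'some/path/001.mp4 2',
]
# Rawframes lines become dicts with frame_dir / total_frames / label keys.
print(lines2dictlist(sample_rawframe_lines, 'rawframes'))
# -> [{'frame_dir': 'some/directory-1', 'total_frames': 163, 'label': [1, 3, 5]}, ...]
# Video lines become dicts with filename / label keys.
print(lines2dictlist(sample_video_lines, 'videos'))
# -> [{'filename': 'some/path/000.mp4', 'label': [1]}, ...]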
def load_wav(self, path):
"""Load an audio file into numpy array."""
return librosa.core.load(path, sr=self.sample_rate)[0] | Load an audio file into numpy array. | load_wav | python | open-mmlab/mmaction2 | tools/data/build_audio_features.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_audio_features.py | Apache-2.0 |
def audio_normalize(samples, desired_rms=0.1, eps=1e-4):
"""RMS normalize the audio data."""
rms = np.maximum(eps, np.sqrt(np.mean(samples**2)))
samples = samples * (desired_rms / rms)
return samples | RMS normalize the audio data. | audio_normalize | python | open-mmlab/mmaction2 | tools/data/build_audio_features.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_audio_features.py | Apache-2.0 |
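A self-contained numeric sketch of the RMS normalization above; the sample values and the target RMS of 0.1 (the function's default) are chosen only for illustration:
import numpy as np

samples = np.array([0.5, -0.25, 0.1, -0.4])
desired_rms, eps = 0.1, 1e-4
rms = np.maximum(eps, np.sqrt(np.mean(samples ** 2)))
normalized = samples * (desired_rms / rms)
# After scaling, the signal's RMS equals desired_rms (up to floating point error).
print(rms, np.sqrt(np.mean(normalized ** 2)))  # -> ~0.347, then ~0.1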
def generate_spectrogram_magphase(self, audio, with_phase=False):
"""Separate a complex-valued spectrogram D into its magnitude (S)
and phase (P) components, so that D = S * P.
Args:
audio (np.ndarray): The input audio signal.
with_phase (bool): Determines whether to output the
phase components. Default: False.
Returns:
np.ndarray: magnitude and phase component of the complex-valued
spectrogram.
"""
spectro = librosa.core.stft(
audio,
hop_length=self.get_hop_size(),
n_fft=self.fft_size,
center=True)
spectro_mag, spectro_phase = librosa.core.magphase(spectro)
spectro_mag = np.expand_dims(spectro_mag, axis=0)
if with_phase:
spectro_phase = np.expand_dims(np.angle(spectro_phase), axis=0)
return spectro_mag, spectro_phase
return spectro_mag | Separate a complex-valued spectrogram D into its magnitude (S)
and phase (P) components, so that D = S * P.
Args:
audio (np.ndarray): The input audio signal.
with_phase (bool): Determines whether to output the
phase components. Default: False.
Returns:
np.ndarray: magnitude and phase component of the complex-valued
spectrogram. | generate_spectrogram_magphase | python | open-mmlab/mmaction2 | tools/data/build_audio_features.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_audio_features.py | Apache-2.0 |
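A standalone sketch of the magnitude/phase factorization the method relies on (D = S * P), using a synthetic tone; the sample rate and FFT parameters below are made-up values, not the class defaults:
import numpy as np
import librosa

sr, n_fft, hop = 16000, 512, 256
audio = np.sin(2 * np.pi * 440 * np.arange(sr) / sr).astype(np.float32)

spectro = librosa.stft(audio, n_fft=n_fft, hop_length=hop, center=True)
mag, phase = librosa.magphase(spectro)
# The complex STFT factors into a real magnitude and a unit-modulus phase term.
print(np.allclose(spectro, mag * phase))  # -> True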
def save_wav(self, wav, path):
"""Save the wav to disk."""
# 32767 = (2 ^ 15 - 1) maximum of int16
wav *= 32767 / max(0.01, np.max(np.abs(wav)))
wavfile.write(path, self.sample_rate, wav.astype(np.int16)) | Save the wav to disk. | save_wav | python | open-mmlab/mmaction2 | tools/data/build_audio_features.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_audio_features.py | Apache-2.0 |
def trim(self, quantized):
"""Trim the audio wavfile."""
start, end = self.start_and_end_indices(quantized,
self.silence_threshold)
return quantized[start:end] | Trim the audio wavfile. | trim | python | open-mmlab/mmaction2 | tools/data/build_audio_features.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_audio_features.py | Apache-2.0 |
def adjust_time_resolution(self, quantized, mel):
"""Adjust time resolution by repeating features.
Args:
quantized (np.ndarray): (T,)
mel (np.ndarray): (N, D)
Returns:
tuple: Tuple of (T,) and (T, D)
"""
assert quantized.ndim == 1
assert mel.ndim == 2
upsample_factor = quantized.size // mel.shape[0]
mel = np.repeat(mel, upsample_factor, axis=0)
n_pad = quantized.size - mel.shape[0]
if n_pad != 0:
assert n_pad > 0
mel = np.pad(
mel, [(0, n_pad), (0, 0)], mode='constant', constant_values=0)
# trim
start, end = self.start_and_end_indices(quantized,
self.silence_threshold)
return quantized[start:end], mel[start:end, :] | Adjust time resolution by repeating features.
Args:
quantized (np.ndarray): (T,)
mel (np.ndarray): (N, D)
Returns:
tuple: Tuple of (T,) and (T, D) | adjust_time_resolution | python | open-mmlab/mmaction2 | tools/data/build_audio_features.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_audio_features.py | Apache-2.0 |
def start_and_end_indices(quantized, silence_threshold=2):
"""Trim the audio file when reaches the silence threshold."""
for start in range(quantized.size):
if abs(quantized[start] - 127) > silence_threshold:
break
for end in range(quantized.size - 1, 1, -1):
if abs(quantized[end] - 127) > silence_threshold:
break
assert abs(quantized[start] - 127) > silence_threshold
assert abs(quantized[end] - 127) > silence_threshold
return start, end | Trim the audio file when reaches the silence threshold. | start_and_end_indices | python | open-mmlab/mmaction2 | tools/data/build_audio_features.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_audio_features.py | Apache-2.0 |
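A small, self-contained illustration of the silence-trimming loop above, on made-up 8-bit quantized samples centered at 127:
import numpy as np

quantized = np.array([127, 128, 126, 140, 90, 127, 126, 128])
silence_threshold = 2

for start in range(quantized.size):
    if abs(quantized[start] - 127) > silence_threshold:
        break
for end in range(quantized.size - 1, 1, -1):
    if abs(quantized[end] - 127) > silence_threshold:
        break

# The near-silent head and tail are skipped; only indices 3..4 remain.
print(start, end)  # -> 3 4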
def melspectrogram(self, y):
"""Generate the melspectrogram."""
D = self._lws_processor().stft(y).T
S = self._amp_to_db(self._linear_to_mel(np.abs(D))) - self.ref_level_db
if not self.allow_clipping_in_normalization:
assert S.max() <= 0 and S.min() - self.min_level_db >= 0
return self._normalize(S) | Generate the melspectrogram. | melspectrogram | python | open-mmlab/mmaction2 | tools/data/build_audio_features.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_audio_features.py | Apache-2.0 |
def get_hop_size(self):
"""Calculate the hop size."""
hop_size = self.hop_size
if hop_size is None:
assert self.frame_shift_ms is not None
hop_size = int(self.frame_shift_ms / 1000 * self.sample_rate)
return hop_size | Calculate the hop size. | get_hop_size | python | open-mmlab/mmaction2 | tools/data/build_audio_features.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_audio_features.py | Apache-2.0 |
def _lws_processor(self):
"""Perform local weighted sum.
Please refer to <https://pypi.org/project/lws/1.2.6/>`_.
"""
return lws.lws(self.fft_size, self.get_hop_size(), mode='speech') | Perform local weighted sum.
Please refer to <https://pypi.org/project/lws/1.2.6/>`_. | _lws_processor | python | open-mmlab/mmaction2 | tools/data/build_audio_features.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_audio_features.py | Apache-2.0 |
def lws_num_frames(length, fsize, fshift):
"""Compute number of time frames of lws spectrogram.
Please refer to <https://pypi.org/project/lws/1.2.6/>`_.
"""
pad = (fsize - fshift)
if length % fshift == 0:
M = (length + pad * 2 - fsize) // fshift + 1
else:
M = (length + pad * 2 - fsize) // fshift + 2
return M | Compute number of time frames of lws spectrogram.
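A quick worked example of the frame-count formula above, with made-up STFT parameters (fft_size=512, hop=128) and a 1000-sample signal:
length, fsize, fshift = 1000, 512, 128
pad = fsize - fshift                      # 384 samples of padding on each side
# 1000 is not a multiple of 128, so the "+ 2" branch of lws_num_frames applies:
M = (length + pad * 2 - fsize) // fshift + 2
print(M)  # -> (1000 + 768 - 512) // 128 + 2 = 9 + 2 = 11 time frames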
Please refer to <https://pypi.org/project/lws/1.2.6/>`_. | lws_num_frames | python | open-mmlab/mmaction2 | tools/data/build_audio_features.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_audio_features.py | Apache-2.0 |
def lws_pad_lr(self, x, fsize, fshift):
"""Compute left and right padding lws internally uses.
Please refer to <https://pypi.org/project/lws/1.2.6/>`_.
"""
M = self.lws_num_frames(len(x), fsize, fshift)
pad = (fsize - fshift)
T = len(x) + 2 * pad
r = (M - 1) * fshift + fsize - T
return pad, pad + r | Compute left and right padding lws internally uses.
Please refer to <https://pypi.org/project/lws/1.2.6/>`_. | lws_pad_lr | python | open-mmlab/mmaction2 | tools/data/build_audio_features.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_audio_features.py | Apache-2.0 |
def _linear_to_mel(self, spectrogram):
"""Warp linear scale spectrograms to the mel scale.
Please refer to <https://github.com/r9y9/deepvoice3_pytorch>`_
"""
global _mel_basis
_mel_basis = self._build_mel_basis()
return np.dot(_mel_basis, spectrogram) | Warp linear scale spectrograms to the mel scale.
Please refer to <https://github.com/r9y9/deepvoice3_pytorch>`_ | _linear_to_mel | python | open-mmlab/mmaction2 | tools/data/build_audio_features.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_audio_features.py | Apache-2.0 |
def _build_mel_basis(self):
"""Build mel filters.
Please refer to <https://github.com/r9y9/deepvoice3_pytorch>`_
"""
assert self.fmax <= self.sample_rate // 2
return librosa.filters.mel(
self.sample_rate,
self.fft_size,
fmin=self.fmin,
fmax=self.fmax,
n_mels=self.num_mels) | Build mel filters.
Please refer to <https://github.com/r9y9/deepvoice3_pytorch>`_ | _build_mel_basis | python | open-mmlab/mmaction2 | tools/data/build_audio_features.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_audio_features.py | Apache-2.0 |
def build_list(split):
"""Build RGB and Flow file list with a given split.
Args:
split (list): Split for which to generate the file list.
Returns:
tuple[list, list]: (rgb_list, flow_list), rgb_list is the
generated file list for rgb, flow_list is the generated
file list for flow.
"""
rgb_list, flow_list = list(), list()
for item in split:
if item[0] not in frame_info:
continue
if frame_info[item[0]][1] > 0:
# rawframes
rgb_cnt = frame_info[item[0]][1]
flow_cnt = frame_info[item[0]][2]
if isinstance(item[1], int):
rgb_list.append(f'{item[0]} {rgb_cnt} {item[1]}\n')
flow_list.append(f'{item[0]} {flow_cnt} {item[1]}\n')
elif isinstance(item[1], list):
# only for multi-label datasets like mmit
rgb_list.append(f'{item[0]} {rgb_cnt} ' +
' '.join([str(digit)
for digit in item[1]]) + '\n')
flow_list.append(f'{item[0]} {flow_cnt} ' +
' '.join([str(digit)
for digit in item[1]]) + '\n')
else:
raise ValueError(
'frame_info should be ' +
'[`video`(str), `label`(int)|`labels(list[int])`')
else:
# videos
if isinstance(item[1], int):
rgb_list.append(f'{frame_info[item[0]][0]} {item[1]}\n')
flow_list.append(f'{frame_info[item[0]][0]} {item[1]}\n')
elif isinstance(item[1], list):
# only for multi-label datasets like mmit
rgb_list.append(f'{frame_info[item[0]][0]} ' +
' '.join([str(digit)
for digit in item[1]]) + '\n')
flow_list.append(
f'{frame_info[item[0]][0]} ' +
' '.join([str(digit) for digit in item[1]]) + '\n')
else:
raise ValueError(
'frame_info should be ' +
'[`video`(str), `label`(int)|`labels(list[int])`')
if shuffle:
random.shuffle(rgb_list)
random.shuffle(flow_list)
return rgb_list, flow_list | Build RGB and Flow file list with a given split.
Args:
split (list): Split for which to generate the file list.
Returns:
tuple[list, list]: (rgb_list, flow_list), rgb_list is the
generated file list for rgb, flow_list is the generated
file list for flow. | build_file_list.build_list | python | open-mmlab/mmaction2 | tools/data/build_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_file_list.py | Apache-2.0 |
def build_file_list(splits, frame_info, shuffle=False):
"""Build file list for a certain data split.
Args:
splits (tuple): Data split to generate file list.
frame_info (dict): Dict mapping from frames to path. e.g.,
'Skiing/v_Skiing_g18_c02': ('data/ucf101/rawframes/Skiing/v_Skiing_g18_c02', 0, 0). # noqa: E501
shuffle (bool): Whether to shuffle the file list.
Returns:
tuple: RGB file list for training and testing, together with
Flow file list for training and testing.
"""
def build_list(split):
"""Build RGB and Flow file list with a given split.
Args:
split (list): Split for which to generate the file list.
Returns:
tuple[list, list]: (rgb_list, flow_list), rgb_list is the
generated file list for rgb, flow_list is the generated
file list for flow.
"""
rgb_list, flow_list = list(), list()
for item in split:
if item[0] not in frame_info:
continue
if frame_info[item[0]][1] > 0:
# rawframes
rgb_cnt = frame_info[item[0]][1]
flow_cnt = frame_info[item[0]][2]
if isinstance(item[1], int):
rgb_list.append(f'{item[0]} {rgb_cnt} {item[1]}\n')
flow_list.append(f'{item[0]} {flow_cnt} {item[1]}\n')
elif isinstance(item[1], list):
# only for multi-label datasets like mmit
rgb_list.append(f'{item[0]} {rgb_cnt} ' +
' '.join([str(digit)
for digit in item[1]]) + '\n')
flow_list.append(f'{item[0]} {flow_cnt} ' +
' '.join([str(digit)
for digit in item[1]]) + '\n')
else:
raise ValueError(
'frame_info should be ' +
'[`video`(str), `label`(int)|`labels(list[int])`')
else:
# videos
if isinstance(item[1], int):
rgb_list.append(f'{frame_info[item[0]][0]} {item[1]}\n')
flow_list.append(f'{frame_info[item[0]][0]} {item[1]}\n')
elif isinstance(item[1], list):
# only for multi-label datasets like mmit
rgb_list.append(f'{frame_info[item[0]][0]} ' +
' '.join([str(digit)
for digit in item[1]]) + '\n')
flow_list.append(
f'{frame_info[item[0]][0]} ' +
' '.join([str(digit) for digit in item[1]]) + '\n')
else:
raise ValueError(
'frame_info should be ' +
'[`video`(str), `label`(int)|`labels(list[int])`')
if shuffle:
random.shuffle(rgb_list)
random.shuffle(flow_list)
return rgb_list, flow_list
train_rgb_list, train_flow_list = build_list(splits[0])
test_rgb_list, test_flow_list = build_list(splits[1])
return (train_rgb_list, test_rgb_list), (train_flow_list, test_flow_list) | Build file list for a certain data split.
Args:
splits (tuple): Data split to generate file list.
frame_info (dict): Dict mapping from frames to path. e.g.,
'Skiing/v_Skiing_g18_c02': ('data/ucf101/rawframes/Skiing/v_Skiing_g18_c02', 0, 0). # noqa: E501
shuffle (bool): Whether to shuffle the file list.
Returns:
tuple: RGB file list for training and testing, together with
Flow file list for training and testing. | build_file_list | python | open-mmlab/mmaction2 | tools/data/build_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_file_list.py | Apache-2.0 |
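A hedged usage sketch for build_file_list, assuming the function defined above is in scope. The frame_info entries and split tuples are made up, mirroring the format given in the docstring (a frame count of 0 marks a video-file entry):
frame_info = {
    # video_id: (path, num_rgb_frames, num_flow_frames)
    'Skiing/v_Skiing_g18_c02': ('data/ucf101/rawframes/Skiing/v_Skiing_g18_c02', 163, 162),
    'Skiing/v_Skiing_g18_c03': ('data/ucf101/videos/Skiing/v_Skiing_g18_c03.avi', 0, 0),
}
train_split = [('Skiing/v_Skiing_g18_c02', 0)]
test_split = [('Skiing/v_Skiing_g18_c03', 0)]

(rgb_train, rgb_test), (flow_train, flow_test) = build_file_list(
    (train_split, test_split), frame_info, shuffle=False)
print(rgb_train)  # -> ['Skiing/v_Skiing_g18_c02 163 0\n']  (rawframes style)
print(rgb_test)   # -> ['data/ucf101/videos/Skiing/v_Skiing_g18_c03.avi 0\n']  (video style)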
def encode_video(frame_dir_item):
"""Encode frames to video using ffmpeg.
Args:
frame_dir_item (list): Rawframe item containing raw frame directory
full path, rawframe directory (short) path, rawframe directory id.
Returns:
bool: Whether synthesize video successfully.
"""
full_path, frame_dir_path, frame_dir_id = frame_dir_item
out_full_path = args.out_dir
img_name_tmpl = args.filename_tmpl + '.' + args.in_format
img_path = osp.join(full_path, img_name_tmpl)
out_vid_name = frame_dir_path + '.' + args.ext
out_vid_path = osp.join(out_full_path, out_vid_name)
cmd = osp.join(
f"ffmpeg -start_number {args.start_idx} -r {args.fps} -i '{img_path}' "
f"-vcodec {args.vcodec} '{out_vid_path}'")
os.system(cmd)
print(f'{frame_dir_id} {frame_dir_path} done')
sys.stdout.flush()
return True | Encode frames to video using ffmpeg.
Args:
frame_dir_item (list): Rawframe item containing raw frame directory
full path, rawframe directory (short) path, rawframe directory id.
Returns:
bool: Whether synthesize video successfully. | encode_video | python | open-mmlab/mmaction2 | tools/data/build_videos.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_videos.py | Apache-2.0 |
def extract_frame(vid_item):
"""Generate optical flow using dense flow.
Args:
vid_item (list): Video item containing video full path,
video (short) path, video id.
Returns:
bool: Whether generate optical flow successfully.
"""
full_path, vid_path, vid_id, method, task, report_file = vid_item
if '/' in vid_path:
act_name = osp.basename(osp.dirname(vid_path))
out_full_path = osp.join(args.out_dir, act_name)
else:
out_full_path = args.out_dir
run_success = -1
if task == 'rgb':
if args.use_opencv:
# Not like using denseflow,
# Use OpenCV will not make a sub directory with the video name
try:
video_name = osp.splitext(osp.basename(vid_path))[0]
out_full_path = osp.join(out_full_path, video_name)
vr = mmcv.VideoReader(full_path)
for i, vr_frame in enumerate(vr):
if vr_frame is not None:
w, h, _ = np.shape(vr_frame)
if args.new_short == 0:
if args.new_width == 0 or args.new_height == 0:
# Keep original shape
out_img = vr_frame
else:
out_img = mmcv.imresize(
vr_frame,
(args.new_width, args.new_height))
else:
if min(h, w) == h:
new_h = args.new_short
new_w = int((new_h / h) * w)
else:
new_w = args.new_short
new_h = int((new_w / w) * h)
out_img = mmcv.imresize(vr_frame, (new_h, new_w))
mmcv.imwrite(out_img,
f'{out_full_path}/img_{i + 1:05d}.jpg')
else:
warnings.warn(
'Length inconsistent!'
f'Early stop with {i + 1} out of {len(vr)} frames.'
)
break
run_success = 0
except Exception:
run_success = -1
else:
if args.new_short == 0:
cmd = osp.join(
f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'"
f' -nw={args.new_width} -nh={args.new_height} -v')
else:
cmd = osp.join(
f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'"
f' -ns={args.new_short} -v')
run_success = os.system(cmd)
elif task == 'flow':
if args.input_frames:
if args.new_short == 0:
cmd = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501
f' -nw={args.new_width} --nh={args.new_height} -v --if')
else:
cmd = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501
f' -ns={args.new_short} -v --if')
else:
if args.new_short == 0:
cmd = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501
f' -nw={args.new_width} --nh={args.new_height} -v')
else:
cmd = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501
f' -ns={args.new_short} -v')
run_success = os.system(cmd)
else:
if args.new_short == 0:
cmd_rgb = osp.join(
f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'"
f' -nw={args.new_width} -nh={args.new_height} -v')
cmd_flow = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501
f' -nw={args.new_width} -nh={args.new_height} -v')
else:
cmd_rgb = osp.join(
f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'"
f' -ns={args.new_short} -v')
cmd_flow = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501
f' -ns={args.new_short} -v')
run_success_rgb = os.system(cmd_rgb)
run_success_flow = os.system(cmd_flow)
if run_success_flow == 0 and run_success_rgb == 0:
run_success = 0
if run_success == 0:
print(f'{task} {vid_id} {vid_path} {method} done')
sys.stdout.flush()
lock.acquire()
with open(report_file, 'a') as f:
line = full_path + '\n'
f.write(line)
lock.release()
else:
print(f'{task} {vid_id} {vid_path} {method} got something wrong')
sys.stdout.flush()
return True | Generate optical flow using dense flow.
Args:
vid_item (list): Video item containing video full path,
video (short) path, video id.
Returns:
bool: Whether generate optical flow successfully. | extract_frame | python | open-mmlab/mmaction2 | tools/data/build_rawframes.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_rawframes.py | Apache-2.0 |
def extract_audio_wav(line):
"""Extract the audio wave from video streams using FFMPEG."""
video_id, _ = osp.splitext(osp.basename(line))
video_dir = osp.dirname(line)
video_rel_dir = osp.relpath(video_dir, args.root)
dst_dir = osp.join(args.dst_root, video_rel_dir)
os.popen(f'mkdir -p {dst_dir}')
try:
if osp.exists(f'{dst_dir}/{video_id}.wav'):
return
cmd = f'ffmpeg -i ./{line} -map 0:a -y {dst_dir}/{video_id}.wav'
os.popen(cmd)
except BaseException:
with open('extract_wav_err_file.txt', 'a+') as f:
f.write(f'{line}\n') | Extract the audio wave from video streams using FFMPEG. | extract_audio_wav | python | open-mmlab/mmaction2 | tools/data/extract_audio.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/extract_audio.py | Apache-2.0 |
def pool_feature(data, num_proposals=100, num_sample_bins=3, pool_type='mean'):
"""Pool features with arbitrary temporal length.
Args:
data (list[np.ndarray] | np.ndarray): Features of an untrimmed video,
with arbitrary temporal length.
num_proposals (int): The temporal dim of pooled feature. Default: 100.
num_sample_bins (int): How many points to sample to get the feature
vector at one timestamp. Default: 3.
pool_type (str): Type of pooling to pool features. Choices are
['mean', 'max']. Default: 'mean'.
Returns:
np.ndarray: The pooled feature with shape num_proposals x feature_dim.
"""
if len(data) == 1:
return np.concatenate([data] * num_proposals)
x_range = list(range(len(data)))
f = scipy.interpolate.interp1d(x_range, data, axis=0)
eps = 1e-4
start, end = eps, len(data) - 1 - eps
anchor_size = (end - start) / num_proposals
ptr = start
feature = []
for _ in range(num_proposals):
x_new = [
ptr + i / num_sample_bins * anchor_size
for i in range(num_sample_bins)
]
y_new = f(x_new)
if pool_type == 'mean':
y_new = np.mean(y_new, axis=0)
elif pool_type == 'max':
y_new = np.max(y_new, axis=0)
else:
raise NotImplementedError('Unsupported pool type')
feature.append(y_new)
ptr += anchor_size
feature = np.stack(feature)
return feature | Pool features with arbitrary temporal length.
Args:
data (list[np.ndarray] | np.ndarray): Features of an untrimmed video,
with arbitrary temporal length.
num_proposals (int): The temporal dim of pooled feature. Default: 100.
num_sample_bins (int): How many points to sample to get the feature
vector at one timestamp. Default: 3.
pool_type (str): Type of pooling to pool features. Choices are
['mean', 'max']. Default: 'mean'.
Returns:
np.ndarray: The pooled feature with shape num_proposals x feature_dim. | pool_feature | python | open-mmlab/mmaction2 | tools/data/activitynet/activitynet_feature_postprocessing.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/activitynet/activitynet_feature_postprocessing.py | Apache-2.0 |
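A short usage sketch for pool_feature, assuming the function above (which relies on scipy) is in scope; the input array is random and only the shapes matter:
import numpy as np

feature = np.random.rand(87, 400)  # arbitrary temporal length, 400-dim features
pooled = pool_feature(feature, num_proposals=100, num_sample_bins=3, pool_type='mean')
# The temporal axis is resampled to num_proposals, whatever the input length was.
print(pooled.shape)  # -> (100, 400)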
def download_clip(video_identifier,
output_filename,
num_attempts=5,
url_base='https://www.youtube.com/watch?v='):
"""Download a video from youtube if exists and is not blocked.
arguments:
---------
video_identifier: str
Unique YouTube video identifier (11 characters)
output_filename: str
File path where the video will be stored.
"""
# Defensive argument checking.
assert isinstance(video_identifier, str), 'video_identifier must be string'
assert isinstance(output_filename, str), 'output_filename must be string'
assert len(video_identifier) == 11, 'video_identifier must have length 11'
status = False
if not os.path.exists(output_filename):
command = [
'youtube-dl', '--quiet', '--no-warnings', '--no-check-certificate',
'-f', 'mp4', '-o',
'"%s"' % output_filename,
'"%s"' % (url_base + video_identifier)
]
command = ' '.join(command)
print(command)
attempts = 0
while True:
try:
subprocess.check_output(
command, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
attempts += 1
if attempts == num_attempts:
return status, 'Fail'
else:
break
# Check if the video was successfully saved.
status = os.path.exists(output_filename)
return status, 'Downloaded' | Download a video from youtube if exists and is not blocked.
arguments:
---------
video_identifier: str
Unique YouTube video identifier (11 characters)
output_filename: str
File path where the video will be stored. | download_clip | python | open-mmlab/mmaction2 | tools/data/activitynet/download.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/activitynet/download.py | Apache-2.0 |
def download_clip_wrapper(youtube_id, output_dir):
"""Wrapper for parallel processing purposes."""
# we do this to align with names in annotations
output_filename = os.path.join(output_dir, 'v_' + youtube_id + '.mp4')
if os.path.exists(output_filename):
status = tuple(['v_' + youtube_id, True, 'Exists'])
return status
downloaded, log = download_clip(youtube_id, output_filename)
status = tuple(['v_' + youtube_id, downloaded, log])
return status | Wrapper for parallel processing purposes. | download_clip_wrapper | python | open-mmlab/mmaction2 | tools/data/activitynet/download.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/activitynet/download.py | Apache-2.0 |
def parse_activitynet_annotations(input_csv, is_bsn_case=False):
"""Returns a list of YoutubeID.
arguments:
---------
input_csv: str
Path to CSV file containing the following columns:
'video,numFrame,seconds,fps,rfps,subset,featureFrame'
returns:
-------
youtube_ids: list
List of all YoutubeIDs in ActivityNet.
"""
if is_bsn_case:
lines = open(input_csv).readlines()
lines = lines[1:]
# YoutubeIDs do not have prefix `v_`
youtube_ids = [x.split(',')[0][2:] for x in lines]
else:
data = mmengine.load(anno_file)['database']
youtube_ids = list(data.keys())
return youtube_ids | Returns a list of YoutubeID.
arguments:
---------
input_csv: str
Path to CSV file containing the following columns:
'video,numFrame,seconds,fps,rfps,subset,featureFrame'
returns:
-------
youtube_ids: list
List of all YoutubeIDs in ActivityNet. | parse_activitynet_annotations | python | open-mmlab/mmaction2 | tools/data/activitynet/download.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/activitynet/download.py | Apache-2.0 |
def load_annotations(ann_file):
"""Load the annotation according to ann_file into video_infos."""
video_infos = []
anno_database = mmengine.load(ann_file)
for video_name in anno_database:
video_info = anno_database[video_name]
video_info['video_name'] = video_name
video_infos.append(video_info)
return video_infos | Load the annotation according to ann_file into video_infos. | load_annotations | python | open-mmlab/mmaction2 | tools/data/activitynet/convert_proposal_format.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/activitynet/convert_proposal_format.py | Apache-2.0 |
def import_ground_truth(video_infos, activity_index):
"""Read ground truth data from video_infos."""
ground_truth = {}
for video_info in video_infos:
video_id = video_info['video_name'][2:]
this_video_ground_truths = []
for ann in video_info['annotations']:
t_start, t_end = ann['segment']
label = activity_index[ann['label']]
this_video_ground_truths.append([t_start, t_end, label])
ground_truth[video_id] = np.array(this_video_ground_truths)
return ground_truth | Read ground truth data from video_infos. | import_ground_truth | python | open-mmlab/mmaction2 | tools/data/activitynet/convert_proposal_format.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/activitynet/convert_proposal_format.py | Apache-2.0 |
def import_proposals(result_dict):
"""Read predictions from result dict."""
proposals = {}
num_proposals = 0
for video_id in result_dict:
result = result_dict[video_id]
this_video_proposals = []
for proposal in result:
t_start, t_end = proposal['segment']
score = proposal['score']
this_video_proposals.append([t_start, t_end, score])
num_proposals += 1
proposals[video_id] = np.array(this_video_proposals)
return proposals, num_proposals | Read predictions from result dict. | import_proposals | python | open-mmlab/mmaction2 | tools/data/activitynet/convert_proposal_format.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/activitynet/convert_proposal_format.py | Apache-2.0 |
def dump_formatted_proposal(video_idx, video_id, num_frames, fps, gts,
proposals, tiou, t_overlap_self,
formatted_proposal_file):
"""dump the formatted proposal file, which is the input proposal file of
action classifier (e.g: SSN).
Args:
video_idx (int): Index of video.
video_id (str): ID of video.
num_frames (int): Total frames of the video.
fps (float): Fps of the video.
gts (np.ndarray[float]): t_start, t_end and label of groundtruths.
proposals (np.ndarray[float]): t_start, t_end and score of proposals.
tiou (np.ndarray[float]): 2-dim array with IoU ratio.
t_overlap_self (np.ndarray[float]): 2-dim array with overlap_self
(union / self_len) ratio.
formatted_proposal_file (open file object): Open file object of
formatted_proposal_file.
"""
formatted_proposal_file.write(
f'#{video_idx}\n{video_id}\n{num_frames}\n{fps}\n{gts.shape[0]}\n')
for gt in gts:
formatted_proposal_file.write(f'{int(gt[2])} {gt[0]} {gt[1]}\n')
formatted_proposal_file.write(f'{proposals.shape[0]}\n')
best_iou = np.amax(tiou, axis=0)
best_iou_index = np.argmax(tiou, axis=0)
best_overlap = np.amax(t_overlap_self, axis=0)
best_overlap_index = np.argmax(t_overlap_self, axis=0)
for i in range(proposals.shape[0]):
index_iou = best_iou_index[i]
index_overlap = best_overlap_index[i]
label_iou = gts[index_iou][2]
label_overlap = gts[index_overlap][2]
if label_iou != label_overlap:
label = label_iou if label_iou != 0 else label_overlap
else:
label = label_iou
if best_iou[i] == 0 and best_overlap[i] == 0:
formatted_proposal_file.write(
f'0 0 0 {proposals[i][0]} {proposals[i][1]}\n')
else:
formatted_proposal_file.write(
f'{int(label)} {best_iou[i]} {best_overlap[i]} '
f'{proposals[i][0]} {proposals[i][1]}\n') | Dump the formatted proposal file, which is the input proposal file of
the action classifier (e.g., SSN).
Args:
video_idx (int): Index of video.
video_id (str): ID of video.
num_frames (int): Total frames of the video.
fps (float): Fps of the video.
gts (np.ndarray[float]): t_start, t_end and label of groundtruths.
proposals (np.ndarray[float]): t_start, t_end and score of proposals.
tiou (np.ndarray[float]): 2-dim array with IoU ratio.
t_overlap_self (np.ndarray[float]): 2-dim array with overlap_self
(union / self_len) ratio.
formatted_proposal_file (open file object): Open file object of
formatted_proposal_file. | dump_formatted_proposal | python | open-mmlab/mmaction2 | tools/data/activitynet/convert_proposal_format.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/activitynet/convert_proposal_format.py | Apache-2.0 |
def create_video_folders(dataset, output_dir, tmp_dir):
"""Creates a directory for each label name in the dataset."""
if 'label-name' not in dataset.columns:
this_dir = os.path.join(output_dir, 'test')
if not os.path.exists(this_dir):
os.makedirs(this_dir)
# I should return a dict but ...
return this_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
label_to_dir = {}
for label_name in dataset['label-name'].unique():
this_dir = os.path.join(output_dir, label_name)
if not os.path.exists(this_dir):
os.makedirs(this_dir)
label_to_dir[label_name] = this_dir
return label_to_dir | Creates a directory for each label name in the dataset. | create_video_folders | python | open-mmlab/mmaction2 | tools/data/kinetics/download.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/kinetics/download.py | Apache-2.0 |
def construct_video_filename(row, label_to_dir, trim_format='%06d'):
"""Given a dataset row, this function constructs the output filename for a
given video."""
basename = '%s_%s_%s.mp4' % (row['video-id'],
trim_format % row['start-time'],
trim_format % row['end-time'])
if not isinstance(label_to_dir, dict):
dirname = label_to_dir
else:
dirname = label_to_dir[row['label-name']]
output_filename = os.path.join(dirname, basename)
return output_filename | Given a dataset row, this function constructs the output filename for a
given video. | construct_video_filename | python | open-mmlab/mmaction2 | tools/data/kinetics/download.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/kinetics/download.py | Apache-2.0 |
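A quick illustration of the trimmed-clip naming scheme, assuming construct_video_filename above is in scope; the row dict and the output directory path are made up:
row = {
    'video-id': 'dTox9KRVZt4',
    'start-time': 30,
    'end-time': 40,
    'label-name': 'riding a bike',
}
label_to_dir = {'riding a bike': 'data/kinetics400/videos_train/riding a bike'}
print(construct_video_filename(row, label_to_dir, trim_format='%06d'))
# -> data/kinetics400/videos_train/riding a bike/dTox9KRVZt4_000030_000040.mp4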
def download_clip(video_identifier,
output_filename,
start_time,
end_time,
tmp_dir='/tmp/kinetics/.tmp_dir',
num_attempts=5,
url_base='https://www.youtube.com/watch?v='):
"""Download a video from youtube if exists and is not blocked.
arguments:
---------
video_identifier: str
Unique YouTube video identifier (11 characters)
output_filename: str
File path where the video will be stored.
start_time: float
Indicates the beginning time in seconds from where the video
will be trimmed.
end_time: float
Indicates the ending time in seconds of the trimmed video.
"""
# Defensive argument checking.
assert isinstance(video_identifier, str), 'video_identifier must be string'
assert isinstance(output_filename, str), 'output_filename must be string'
assert len(video_identifier) == 11, 'video_identifier must have length 11'
status = False
# Construct command line for getting the direct video link.
tmp_filename = os.path.join(tmp_dir, '%s.%%(ext)s' % uuid.uuid4())
if not os.path.exists(output_filename):
if not os.path.exists(tmp_filename):
command = [
'youtube-dl', '--quiet', '--no-warnings',
'--no-check-certificate', '-f', 'mp4', '-o',
'"%s"' % tmp_filename,
'"%s"' % (url_base + video_identifier)
]
command = ' '.join(command)
print(command)
attempts = 0
while True:
try:
subprocess.check_output(
command, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
attempts += 1
if attempts == num_attempts:
return status, err.output
else:
break
tmp_filename = glob.glob('%s*' % tmp_filename.split('.')[0])[0]
# Construct command to trim the videos (ffmpeg required).
command = [
'ffmpeg', '-i',
'"%s"' % tmp_filename, '-ss',
str(start_time), '-t',
str(end_time - start_time), '-c:v', 'libx264', '-c:a', 'copy',
'-threads', '1', '-loglevel', 'panic',
'"%s"' % output_filename
]
command = ' '.join(command)
try:
subprocess.check_output(
command, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
return status, err.output
# Check if the video was successfully saved.
status = os.path.exists(output_filename)
os.remove(tmp_filename)
return status, 'Downloaded' | Download a video from youtube if exists and is not blocked.
arguments:
---------
video_identifier: str
Unique YouTube video identifier (11 characters)
output_filename: str
File path where the video will be stored.
start_time: float
Indicates the beginning time in seconds from where the video
will be trimmed.
end_time: float
Indicates the ending time in seconds of the trimmed video. | download_clip | python | open-mmlab/mmaction2 | tools/data/kinetics/download.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/kinetics/download.py | Apache-2.0 |
def download_clip_wrapper(row, label_to_dir, trim_format, tmp_dir):
"""Wrapper for parallel processing purposes."""
output_filename = construct_video_filename(row, label_to_dir, trim_format)
clip_id = os.path.basename(output_filename).split('.mp4')[0]
if os.path.exists(output_filename):
status = tuple([clip_id, True, 'Exists'])
return status
downloaded, log = download_clip(
row['video-id'],
output_filename,
row['start-time'],
row['end-time'],
tmp_dir=tmp_dir)
status = tuple([clip_id, downloaded, log])
return status | Wrapper for parallel processing purposes. | download_clip_wrapper | python | open-mmlab/mmaction2 | tools/data/kinetics/download.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/kinetics/download.py | Apache-2.0 |
def parse_kinetics_annotations(input_csv, ignore_is_cc=False):
"""Returns a parsed DataFrame.
arguments:
---------
input_csv: str
Path to CSV file containing the following columns:
'YouTube Identifier,Start time,End time,Class label'
returns:
-------
dataset: DataFrame
Pandas with the following columns:
'video-id', 'start-time', 'end-time', 'label-name'
"""
df = pd.read_csv(input_csv)
if 'youtube_id' in df.columns:
columns = OrderedDict([('youtube_id', 'video-id'),
('time_start', 'start-time'),
('time_end', 'end-time'),
('label', 'label-name')])
df.rename(columns=columns, inplace=True)
if ignore_is_cc:
df = df.loc[:, df.columns.tolist()[:-1]]
return df | Returns a parsed DataFrame.
arguments:
---------
input_csv: str
Path to CSV file containing the following columns:
'YouTube Identifier,Start time,End time,Class label'
returns:
-------
dataset: DataFrame
Pandas with the following columns:
'video-id', 'start-time', 'end-time', 'label-name' | parse_kinetics_annotations | python | open-mmlab/mmaction2 | tools/data/kinetics/download.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/kinetics/download.py | Apache-2.0 |
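A self-contained sketch of the column renaming that parse_kinetics_annotations performs; the CSV content below is a made-up two-row example in the Kinetics annotation layout:
import io
from collections import OrderedDict
import pandas as pd

csv_text = ('label,youtube_id,time_start,time_end,split,is_cc\n'
            'riding a bike,dTox9KRVZt4,30,40,train,0\n'
            'abseiling,0wR5jVBWPk1,5,15,train,0\n')
df = pd.read_csv(io.StringIO(csv_text))
columns = OrderedDict([('youtube_id', 'video-id'), ('time_start', 'start-time'),
                       ('time_end', 'end-time'), ('label', 'label-name')])
df.rename(columns=columns, inplace=True)
# The downloader then iterates over these renamed columns.
print(df[['video-id', 'start-time', 'end-time', 'label-name']])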
def construct_video_filename(item, trim_format, output_dir):
"""Given a dataset row, this function constructs the output filename for a
given video."""
youtube_id, start_time, end_time = item
start_time, end_time = int(start_time * 10), int(end_time * 10)
basename = '%s_%s_%s.mp4' % (youtube_id, trim_format % start_time,
trim_format % end_time)
output_filename = os.path.join(output_dir, basename)
return output_filename | Given a dataset row, this function constructs the output filename for a
given video. | construct_video_filename | python | open-mmlab/mmaction2 | tools/data/hvu/download.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/hvu/download.py | Apache-2.0 |
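The HVU variant encodes start/end times in tenths of a second; a tiny check of the resulting name, assuming the function above is in scope (the id and output directory are made up):
item = ('dTox9KRVZt4', 12.5, 22.5)  # (youtube_id, start_time, end_time) in seconds
print(construct_video_filename(item, trim_format='%06d', output_dir='data/hvu/videos'))
# Times are multiplied by 10 before formatting:
# -> data/hvu/videos/dTox9KRVZt4_000125_000225.mp4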
def download_clip(video_identifier,
output_filename,
start_time,
end_time,
tmp_dir='/tmp/hvu/.tmp_dir',
num_attempts=5,
url_base='https://www.youtube.com/watch?v='):
"""Download a video from youtube if exists and is not blocked.
arguments:
---------
video_identifier: str
Unique YouTube video identifier (11 characters)
output_filename: str
File path where the video will be stored.
start_time: float
Indicates the beginning time in seconds from where the video
will be trimmed.
end_time: float
Indicates the ending time in seconds of the trimmed video.
"""
# Defensive argument checking.
assert isinstance(video_identifier, str), 'video_identifier must be string'
assert isinstance(output_filename, str), 'output_filename must be string'
assert len(video_identifier) == 11, 'video_identifier must have length 11'
status = False
tmp_filename = os.path.join(tmp_dir, '%s.%%(ext)s' % uuid.uuid4())
if not os.path.exists(output_filename):
if not os.path.exists(tmp_filename):
command = [
'youtube-dl', '--quiet', '--no-warnings',
'--no-check-certificate', '-f', 'mp4', '-o',
'"%s"' % tmp_filename,
'"%s"' % (url_base + video_identifier)
]
command = ' '.join(command)
print(command)
attempts = 0
while True:
try:
subprocess.check_output(
command, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
attempts += 1
if attempts == num_attempts:
return status, 'Downloading Failed'
else:
break
tmp_filename = glob.glob('%s*' % tmp_filename.split('.')[0])[0]
# Construct command to trim the videos (ffmpeg required).
command = [
'ffmpeg', '-i',
'"%s"' % tmp_filename, '-ss',
str(start_time), '-t',
str(end_time - start_time), '-c:v', 'libx264', '-c:a', 'copy',
'-threads', '1', '-loglevel', 'panic',
'"%s"' % output_filename
]
command = ' '.join(command)
try:
subprocess.check_output(
command, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
return status, 'Trimming Failed'
# Check if the video was successfully saved.
status = os.path.exists(output_filename)
os.remove(tmp_filename)
return status, 'Downloaded' | Download a video from youtube if exists and is not blocked.
arguments:
---------
video_identifier: str
Unique YouTube video identifier (11 characters)
output_filename: str
File path where the video will be stored.
start_time: float
Indicates the beginning time in seconds from where the video
will be trimmed.
end_time: float
Indicates the ending time in seconds of the trimmed video. | download_clip | python | open-mmlab/mmaction2 | tools/data/hvu/download.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/hvu/download.py | Apache-2.0 |
def download_clip_wrapper(item, trim_format, tmp_dir, output_dir):
"""Wrapper for parallel processing purposes."""
output_filename = construct_video_filename(item, trim_format, output_dir)
clip_id = os.path.basename(output_filename).split('.mp4')[0]
if os.path.exists(output_filename):
status = tuple([clip_id, True, 'Exists'])
return status
youtube_id, start_time, end_time = item
downloaded, log = download_clip(
youtube_id, output_filename, start_time, end_time, tmp_dir=tmp_dir)
status = tuple([clip_id, downloaded, log])
return status | Wrapper for parallel processing purposes. | download_clip_wrapper | python | open-mmlab/mmaction2 | tools/data/hvu/download.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/hvu/download.py | Apache-2.0 |
def parse_hvu_annotations(input_csv):
"""Returns a parsed DataFrame.
arguments:
---------
input_csv: str
Path to CSV file containing the following columns:
'Tags, youtube_id, time_start, time_end'
returns:
-------
dataset: List of tuples. Each tuple consists of
(youtube_id, time_start, time_end). The type of time is float.
"""
lines = open(input_csv).readlines()
lines = [x.strip().split(',')[1:] for x in lines[1:]]
lines = [(x[0], float(x[1]), float(x[2])) for x in lines]
return lines | Returns the parsed annotations.
arguments:
---------
input_csv: str
Path to CSV file containing the following columns:
'Tags, youtube_id, time_start, time_end'
returns:
-------
dataset: List of tuples. Each tuple consists of
(youtube_id, time_start, time_end). The type of time is float. | parse_hvu_annotations | python | open-mmlab/mmaction2 | tools/data/hvu/download.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/hvu/download.py | Apache-2.0 |
def count_files(directory, prefix_list):
"""Count file number with a given directory and prefix.
Args:
directory (str): Data directory to be searched.
prefix_list (list): List of prefixes.
Returns:
list (int): Number of files matching each prefix.
"""
lst = os.listdir(directory)
cnt_list = [len(fnmatch.filter(lst, x + '*')) for x in prefix_list]
return cnt_list | Count file number with a given directory and prefix.
Args:
directory (str): Data directory to be searched.
prefix_list (list): List of prefixes.
Returns:
list (int): Number of files matching each prefix. | parse_directory.count_files | python | open-mmlab/mmaction2 | tools/data/hvu/generate_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/hvu/generate_file_list.py | Apache-2.0 |
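A minimal, self-contained check of count_files using a temporary directory, assuming the function above is in scope; the file names follow the img_/flow_x_/flow_y_ convention used by these scripts:
import os
import tempfile

tmp = tempfile.mkdtemp()
for name in ('img_00001.jpg', 'img_00002.jpg', 'flow_x_00001.jpg', 'flow_y_00001.jpg'):
    open(os.path.join(tmp, name), 'w').close()

# One count is returned per prefix, in the order the prefixes are given.
print(count_files(tmp, ('img_', 'flow_x_', 'flow_y_')))  # -> [2, 1, 1]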
def parse_directory(path,
rgb_prefix='img_',
flow_x_prefix='flow_x_',
flow_y_prefix='flow_y_',
level=1):
"""Parse directories holding extracted frames from standard benchmarks.
Args:
path (str): Directory path to parse frames.
rgb_prefix (str): Prefix of generated rgb frames name.
default: 'img_'.
flow_x_prefix (str): Prefix of generated flow x name.
default: `flow_x_`.
flow_y_prefix (str): Prefix of generated flow y name.
default: `flow_y_`.
level (int): Directory level for glob searching. Options are 1 and 2.
default: 1.
Returns:
dict: frame info dict with video id as key and tuple(path(str),
rgb_num(int), flow_x_num(int)) as value.
"""
print(f'parse frames under directory {path}')
if level == 1:
# Only search for one-level directory
def locate_directory(x):
return osp.basename(x)
frame_dirs = glob.glob(osp.join(path, '*'))
elif level == 2:
# search for two-level directory
def locate_directory(x):
return osp.join(osp.basename(osp.dirname(x)), osp.basename(x))
frame_dirs = glob.glob(osp.join(path, '*', '*'))
else:
raise ValueError('level can be only 1 or 2')
def count_files(directory, prefix_list):
"""Count file number with a given directory and prefix.
Args:
directory (str): Data directory to be searched.
prefix_list (list): List of prefixes.
Returns:
list (int): Number of files matching each prefix.
"""
lst = os.listdir(directory)
cnt_list = [len(fnmatch.filter(lst, x + '*')) for x in prefix_list]
return cnt_list
# check RGB
frame_dict = {}
for i, frame_dir in enumerate(frame_dirs):
total_num = count_files(frame_dir,
(rgb_prefix, flow_x_prefix, flow_y_prefix))
dir_name = locate_directory(frame_dir)
num_x = total_num[1]
num_y = total_num[2]
if num_x != num_y:
raise ValueError(f'x and y direction have different number '
f'of flow images in video directory: {frame_dir}')
if i % 200 == 0:
print(f'{i} videos parsed')
frame_dict[dir_name] = (frame_dir, total_num[0], num_x)
print('frame directory analysis done')
return frame_dict | Parse directories holding extracted frames from standard benchmarks.
Args:
path (str): Directory path to parse frames.
rgb_prefix (str): Prefix of generated rgb frames name.
default: 'img_'.
flow_x_prefix (str): Prefix of generated flow x name.
default: `flow_x_`.
flow_y_prefix (str): Prefix of generated flow y name.
default: `flow_y_`.
level (int): Directory level for glob searching. Options are 1 and 2.
default: 1.
Returns:
dict: frame info dict with video id as key and tuple(path(str),
rgb_num(int), flow_x_num(int)) as value. | parse_directory | python | open-mmlab/mmaction2 | tools/data/hvu/generate_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/hvu/generate_file_list.py | Apache-2.0 |
def get_kinetics_frames(kinetics_anotation_file: str) -> dict:
"""Given the AVA-kinetics anotation file, return a lookup to map the video
id and the the set of timestamps involved of this video id.
Args:
kinetics_anotation_file (str): Path to the AVA-like anotation file for
the kinetics subset.
Returns:
dict: the dict keys are the kinetics videos' video id. The values are
the set of timestamps involved.
"""
with open(kinetics_anotation_file) as f:
anotated_frames = [i.split(',') for i in f.readlines()]
anotated_frames = [i for i in anotated_frames if len(i) == 7]
anotated_frames = [(i[0], int(float(i[1]))) for i in anotated_frames]
frame_lookup = defaultdict(set)
for video_id, timestamp in anotated_frames:
frame_lookup[video_id].add(timestamp)
return frame_lookup | Given the AVA-Kinetics annotation file, return a lookup that maps each
video id to the set of timestamps involved for that video id.
Args:
kinetics_anotation_file (str): Path to the AVA-like annotation file for
the kinetics subset.
Returns:
dict: the dict keys are the kinetics videos' video id. The values are
the set of timestamps involved. | get_kinetics_frames | python | open-mmlab/mmaction2 | tools/data/ava_kinetics/cut_kinetics.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/ava_kinetics/cut_kinetics.py | Apache-2.0 |
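A short usage sketch for get_kinetics_frames, assuming the function above is in scope; the two annotation lines are made up but follow the seven-field AVA-style layout (video_id, timestamp, x1, y1, x2, y2, action_label) that the parser expects:
import tempfile

ava_like_csv = ('dTox9KRVZt4,902.0,0.1,0.2,0.5,0.9,12\n'
                'dTox9KRVZt4,903.0,0.1,0.2,0.5,0.9,12\n')
with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as f:
    f.write(ava_like_csv)
    anno_path = f.name

# Each video id maps to the set of annotated (integer) timestamps.
print(get_kinetics_frames(anno_path))  # -> defaultdict mapping 'dTox9KRVZt4' to {902, 903}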
def filter_missing_videos(kinetics_list: str, frame_lookup: dict) -> dict:
"""Given the kinetics700 dataset list, remove the video ids from the lookup
that are missing videos or frames.
Args:
kinetics_list (str): Path to the kinetics700 dataset list.
The content of the list should be:
```
Path_to_video1 label_1\n
Path_to_video2 label_2\n
...
Path_to_videon label_n\n
```
The start and end of the video must be contained in the filename.
For example:
```
class602/o3lCwWyyc_s_000012_000022.mp4\n
```
frame_lookup (dict): the dict from `get_kinetics_frames`.
Returns:
dict: the dict keys are the kinetics videos' video id. The values are
the a list of tuples:
(start_of_the_video, end_of_the_video, video_path)
"""
video_lookup = defaultdict(set)
with open(kinetics_list) as f:
for line in f.readlines():
video_path = line.split(' ')[0] # remove label information
video_name = video_path.split('/')[-1] # get the file name
video_name = video_name.split('.')[0] # remove file extensions
video_name = video_name.split('_')
video_id = '_'.join(video_name[:-2])
if video_id not in frame_lookup:
continue
start, end = int(video_name[-2]), int(video_name[-1])
frames = frame_lookup[video_id]
frames = [frame for frame in frames if start < frame < end]
if len(frames) == 0:
continue
start, end = max(start, min(frames) - 2), min(end, max(frames) + 2)
video_lookup[video_id].add((start, end, video_path))
# Some frame ids exist in multiple videos in the Kinetics dataset.
# This happens because different parts of one video may fall into
# different categories. Remove the duplicated records.
for video in video_lookup:
if len(video_lookup[video]) == 1:
continue
info_list = list(video_lookup[video])
removed_list = []
for i, info_i in enumerate(info_list):
start_i, end_i, _ = info_i
for j in range(i + 1, len(info_list)):
start_j, end_j, _ = info_list[j]
if start_i <= start_j and end_j <= end_i:
removed_list.append(j)
elif start_j <= start_i and end_i <= end_j:
removed_list.append(i)
new_list = []
for i, info in enumerate(info_list):
if i not in removed_list:
new_list.append(info)
video_lookup[video] = set(new_list)
return video_lookup | Given the kinetics700 dataset list, remove the video ids from the lookup
that are missing videos or frames.
Args:
kinetics_list (str): Path to the kinetics700 dataset list.
The content of the list should be:
```
Path_to_video1 label_1\n
Path_to_video2 label_2\n
...
Path_to_videon label_n\n
```
The start and end of the video must be contained in the filename.
For example:
```
class602/o3lCwWyyc_s_000012_000022.mp4\n
```
frame_lookup (dict): the dict from `get_kinetics_frames`.
Returns:
dict: the dict keys are the kinetics videos' video id. The values are
a list of tuples:
(start_of_the_video, end_of_the_video, video_path) | filter_missing_videos | python | open-mmlab/mmaction2 | tools/data/ava_kinetics/cut_kinetics.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/ava_kinetics/cut_kinetics.py | Apache-2.0 |
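A worked example of the filename parsing used above, with a hypothetical entry from the kinetics list:

# Hypothetical list entry; the start/end seconds are encoded in the filename.
line = 'class602/o3lCwWyyc_s_000012_000022.mp4 602\n'
video_path = line.split(' ')[0]              # 'class602/o3lCwWyyc_s_000012_000022.mp4'
parts = video_path.split('/')[-1].split('.')[0].split('_')
video_id = '_'.join(parts[:-2])              # 'o3lCwWyyc_s'
start, end = int(parts[-2]), int(parts[-1])  # 12, 22
print(video_id, start, end)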
def remove_failed_video(video_path: str) -> None:
"""Given the path to the video, delete the video if it cannot be read or if
the actual length of the video is 0.75 seconds shorter than expected."""
try:
v = decord.VideoReader(video_path)
fps = v.get_avg_fps()
num_frames = len(v)
x = video_path.split('.')[0].split('_')
time = int(x[-1]) - int(x[-2])
if num_frames < (time - 3 / 4) * fps:
os.remove(video_path)
except: # noqa: E722
os.remove(video_path)
return | Given the path to the video, delete the video if it cannot be read or if
the actual length of the video is 0.75 seconds shorter than expected. | remove_failed_video | python | open-mmlab/mmaction2 | tools/data/ava_kinetics/cut_kinetics.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/ava_kinetics/cut_kinetics.py | Apache-2.0 |
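The deletion criterion above in numbers, using made-up values:

# A clip named '..._000012_000022.mp4' is expected to span 22 - 12 = 10 seconds.
expected_seconds = 22 - 12
fps = 30.0                                     # assumed average fps reported by decord
min_frames = (expected_seconds - 3 / 4) * fps  # 277.5
# The clip is removed when it holds fewer frames than this, i.e. when it is
# more than 0.75 s shorter than the duration encoded in its filename.
print(min_frames)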
def format_det_result():
"""convert test results to specified format in MultiSports competition."""
test_results = load(args.test_result)
annos = load(args.anno_path)
test_videos = annos['test_videos'][0]
resolutions = annos['resolution']
frm_dets = []
for pred in track(test_results, description='formatting...'):
video_key = pred['video_id'].split('.mp4')[0]
frm_num = pred['timestamp']
bboxes = pred['pred_instances']['bboxes']
cls_scores = pred['pred_instances']['scores']
for bbox, cls_score in zip(bboxes, cls_scores):
video_idx = test_videos.index(video_key)
pred_label = np.argmax(cls_score)
score = cls_score[pred_label]
h, w = resolutions[video_key]
bbox *= np.array([w, h, w, h])
instance_result = np.array(
[video_idx, frm_num, pred_label, score, *bbox])
frm_dets.append(instance_result)
frm_dets = np.array(frm_dets)
video_tubes = link_tubes(annos, frm_dets, K=1)
dump(frm_dets, args.frm_out_path)
dump(video_tubes, args.tube_out_path) | Convert test results to the format specified by the MultiSports competition. | format_det_result | python | open-mmlab/mmaction2 | tools/data/multisports/format_det_result.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/multisports/format_det_result.py | Apache-2.0
def download(video_identifier,
output_filename,
num_attempts=5,
url_base='https://www.youtube.com/watch?v='):
"""Download a video from youtube if exists and is not blocked.
arguments:
---------
video_identifier: str
Unique YouTube video identifier (11 characters)
output_filename: str
File path where the video will be stored.
"""
# Defensive argument checking.
assert isinstance(video_identifier, str), 'video_identifier must be string'
assert isinstance(output_filename, str), 'output_filename must be string'
assert len(video_identifier) == 11, 'video_identifier must have length 11'
status = False
if not os.path.exists(output_filename):
command = [
'youtube-dl', '--quiet', '--no-warnings', '--no-check-certificate',
'-f', 'mp4', '-o',
'"%s"' % output_filename,
'"%s"' % (url_base + video_identifier)
]
command = ' '.join(command)
print(command)
attempts = 0
while True:
try:
subprocess.check_output(
command, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
attempts += 1
if attempts == num_attempts:
return status, 'Fail'
else:
break
# Check if the video was successfully saved.
status = os.path.exists(output_filename)
return status, 'Downloaded' | Download a video from YouTube if it exists and is not blocked.
arguments:
---------
video_identifier: str
Unique YouTube video identifier (11 characters)
output_filename: str
File path where the video will be stored. | download | python | open-mmlab/mmaction2 | tools/data/gym/download.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/gym/download.py | Apache-2.0 |
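A hedged usage sketch of the download helper; the video id and output path are placeholders, and youtube-dl is assumed to be on PATH.

# Placeholder 11-character id and output path; assumes `download` is importable.
ok, log = download('dQw4w9WgXcQ', '/tmp/videos/dQw4w9WgXcQ.mp4', num_attempts=3)
print(ok, log)  # (True, 'Downloaded') on success, (False, 'Fail') after 3 failed attempts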
def download_wrapper(youtube_id, output_dir):
"""Wrapper for parallel processing purposes."""
# we do this to align with names in annotations
output_filename = os.path.join(output_dir, youtube_id + '.mp4')
if os.path.exists(output_filename):
status = tuple([youtube_id, True, 'Exists'])
return status
downloaded, log = download(youtube_id, output_filename)
status = tuple([youtube_id, downloaded, log])
return status | Wrapper for parallel processing purposes. | download_wrapper | python | open-mmlab/mmaction2 | tools/data/gym/download.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/gym/download.py | Apache-2.0 |
def _compress_images(input_output_pair, size=224):
"""Scale and downsample an input image to a given fps and size (shorter
side size).
This also removes the audio from the image.
"""
input_image_path, output_image_path = input_output_pair
try:
resize_image(input_image_path, output_image_path, size)
except Exception as e:
print(f'Caught Exception {e}') | Resize an input image to a given size (shorter side size). | _compress_images | python | open-mmlab/mmaction2 | tools/data/msrvtt/compress.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/msrvtt/compress.py | Apache-2.0
def _compress_videos(input_output_pair, size=224, fps=3):
"""Scale and downsample an input video to a given fps and size (shorter
side size).
This also removes the audio from the video.
"""
input_file_path, output_file_path = input_output_pair
try:
command = [
'ffmpeg',
'-y', # (optional) overwrite output file if it exists
'-i',
input_file_path,
'-filter:v',  # rescale so that the shorter side equals `size`
f"scale='if(gt(a,1),trunc(oh*a/2)*2,{size})':'if(gt(a,1),{size},trunc(ow*a/2)*2)'", # noqa: E501
'-map',
'0:v', # no audio
'-r',
str(fps), # frames per second
# '-g', str(16),
output_file_path,
]
subprocess.run(
command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except Exception as e:
raise e | Scale and downsample an input video to a given fps and size (shorter
side size).
This also removes the audio from the video. | _compress_videos | python | open-mmlab/mmaction2 | tools/data/msrvtt/compress.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/msrvtt/compress.py | Apache-2.0 |
def merge_args(cfg, args):
"""Merge CLI arguments to config."""
test_pipeline = cfg.test_dataloader.dataset.pipeline
# -------------------- Feature Head --------------------
if not args.dump_score:
backbone_type2name = dict(
ResNet3dSlowFast='slowfast',
MobileNetV2TSM='tsm',
ResNetTSM='tsm',
)
if cfg.model.type == 'RecognizerGCN':
backbone_name = 'gcn'
else:
backbone_name = backbone_type2name.get(cfg.model.backbone.type)
num_segments = None
if backbone_name == 'tsm':
for idx, transform in enumerate(test_pipeline):
if transform.type == 'UntrimmedSampleFrames':
clip_len = transform['clip_len']
continue
elif transform.type == 'SampleFrames':
clip_len = transform['num_clips']
num_segments = cfg.model.backbone.get('num_segments', 8)
assert num_segments == clip_len, \
f'num_segments and clip length must be the same for TSM, but got ' \
f'num_segments {num_segments} clip_len {clip_len}'
if cfg.model.test_cfg is not None:
max_testing_views = cfg.model.test_cfg.get(
'max_testing_views', num_segments)
assert max_testing_views % num_segments == 0, \
'TSM needs to infer with a batch size that is a multiple ' \
'of num_segments.'
spatial_type = None if args.spatial_type == 'keep' else \
args.spatial_type
temporal_type = None if args.temporal_type == 'keep' else \
args.temporal_type
feature_head = dict(
type='FeatureHead',
spatial_type=spatial_type,
temporal_type=temporal_type,
backbone_name=backbone_name,
num_segments=num_segments)
cfg.model.cls_head = feature_head
# ---------------------- multiple view ----------------------
if not args.multi_view:
# average features among multiple views
cfg.model.cls_head['average_clips'] = 'score'
if cfg.model.type == 'Recognizer3D':
for idx, transform in enumerate(test_pipeline):
if transform.type == 'SampleFrames':
test_pipeline[idx]['num_clips'] = 1
for idx, transform in enumerate(test_pipeline):
if transform.type == 'SampleFrames':
test_pipeline[idx]['twice_sample'] = False
# if transform.type in ['ThreeCrop', 'TenCrop']:
if transform.type == 'TenCrop':
test_pipeline[idx].type = 'CenterCrop'
# -------------------- pipeline settings --------------------
# assign video list and video root
if args.video_list is not None:
cfg.test_dataloader.dataset.ann_file = args.video_list
if args.video_root is not None:
if cfg.test_dataloader.dataset.type == 'VideoDataset':
cfg.test_dataloader.dataset.data_prefix = dict(
video=args.video_root)
elif cfg.test_dataloader.dataset.type == 'RawframeDataset':
cfg.test_dataloader.dataset.data_prefix = dict(img=args.video_root)
args.video_list = cfg.test_dataloader.dataset.ann_file
args.video_root = cfg.test_dataloader.dataset.data_prefix
# use UntrimmedSampleFrames for long video inference
if args.long_video_mode:
# preserve features of multiple clips
cfg.model.cls_head['average_clips'] = None
cfg.test_dataloader.batch_size = 1
is_recognizer2d = (cfg.model.type == 'Recognizer2D')
frame_interval = args.frame_interval
for idx, transform in enumerate(test_pipeline):
if transform.type == 'UntrimmedSampleFrames':
clip_len = transform['clip_len']
continue
# replace SampleFrame by UntrimmedSampleFrames
elif transform.type in ['SampleFrames', 'UniformSample']:
assert args.clip_interval is not None, \
'please specify clip interval for long video inference'
if is_recognizer2d:
# clip_len of UntrimmedSampleFrames is the same as
# num_clips for 2D Recognizer.
clip_len = transform['num_clips']
else:
clip_len = transform['clip_len']
if frame_interval is None:
# take frame_interval of SampleFrames as default
frame_interval = transform.get('frame_interval')
assert frame_interval is not None, \
'please specify frame interval for long video ' \
'inference when use UniformSample or 2D Recognizer'
sample_cfgs = dict(
type='UntrimmedSampleFrames',
clip_len=clip_len,
clip_interval=args.clip_interval,
frame_interval=frame_interval)
test_pipeline[idx] = sample_cfgs
continue
# flow input will stack all frames
if cfg.test_dataloader.dataset.get('modality') == 'Flow':
clip_len = 1
if is_recognizer2d:
from mmaction.models import ActionDataPreprocessor
from mmaction.registry import MODELS
@MODELS.register_module()
class LongVideoDataPreprocessor(ActionDataPreprocessor):
"""DataPreprocessor for 2D recognizer to infer on long video.
Which would stack the num_clips to batch dimension, to preserve
feature of each clip (no average among clips)
"""
def __init__(self, num_frames=8, **kwargs) -> None:
super().__init__(**kwargs)
self.num_frames = num_frames
def preprocess(self, inputs, data_samples, training=False):
batch_inputs, data_samples = super().preprocess(
inputs, data_samples, training)
# [N*M, T, C, H, W]
nclip_batch_inputs = batch_inputs.view(
(-1, self.num_frames) + batch_inputs.shape[2:])
# data_samples = data_samples * \
# nclip_batch_inputs.shape[0]
return nclip_batch_inputs, data_samples
preprocessor_cfg = cfg.model.data_preprocessor
preprocessor_cfg.type = 'LongVideoDataPreprocessor'
preprocessor_cfg['num_frames'] = clip_len
# -------------------- Dump predictions --------------------
args.dump = osp.join(args.output_prefix, 'total_feats.pkl')
dump_metric = dict(type='DumpResults', out_file_path=args.dump)
cfg.test_evaluator = [dump_metric]
cfg.work_dir = osp.join(args.output_prefix, 'work_dir')
return cfg | Merge CLI arguments to config. | merge_args | python | open-mmlab/mmaction2 | tools/misc/clip_feature_extraction.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/misc/clip_feature_extraction.py | Apache-2.0 |
def flow_to_img(raw_flow, bound=20.):
"""Convert flow to gray image.
Args:
raw_flow (np.ndarray[float]): Estimated flow with the shape (w, h).
bound (float): Bound for the flow-to-image normalization. Default: 20.
Returns:
np.ndarray[uint8]: The result list of np.ndarray[uint8], with shape
(w, h).
"""
flow = np.clip(raw_flow, -bound, bound)
flow += bound
flow *= (255 / float(2 * bound))
flow = flow.astype(np.uint8)
return flow | Convert flow to gray image.
Args:
raw_flow (np.ndarray[float]): Estimated flow with the shape (w, h).
bound (float): Bound for the flow-to-image normalization. Default: 20.
Returns:
np.ndarray[uint8]: The result list of np.ndarray[uint8], with shape
(w, h). | flow_to_img | python | open-mmlab/mmaction2 | tools/misc/flow_extraction.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/misc/flow_extraction.py | Apache-2.0 |
def generate_flow(frames, method='tvl1'):
"""Estimate flow with given frames.
Args:
frames (list[np.ndarray[uint8]]): List of rgb frames, with shape
(w, h, 3).
method (str): Use which method to generate flow. Options are 'tvl1'
and 'farneback'. Default: 'tvl1'.
Returns:
list[np.ndarray[float]]: The result list of np.ndarray[float], with
shape (w, h, 2).
"""
assert method in ['tvl1', 'farneback']
gray_frames = [cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) for frame in frames]
if method == 'tvl1':
tvl1 = cv2.optflow.DualTVL1OpticalFlow_create()
def op(x, y):
return tvl1.calc(x, y, None)
elif method == 'farneback':
def op(x, y):
return cv2.calcOpticalFlowFarneback(x, y, None, 0.5, 3, 15, 3, 5,
1.2, 0)
gray_st = gray_frames[:-1]
gray_ed = gray_frames[1:]
flow = [op(x, y) for x, y in zip(gray_st, gray_ed)]
return flow | Estimate flow with given frames.
Args:
frames (list[np.ndarray[uint8]]): List of rgb frames, with shape
(w, h, 3).
method (str): Use which method to generate flow. Options are 'tvl1'
and 'farneback'. Default: 'tvl1'.
Returns:
list[np.ndarray[float]]: The result list of np.ndarray[float], with
shape (w, h, 2). | generate_flow | python | open-mmlab/mmaction2 | tools/misc/flow_extraction.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/misc/flow_extraction.py | Apache-2.0 |
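A sketch that chains generate_flow and flow_to_img on a short clip; the video path is a placeholder and the 'tvl1' method assumes opencv-contrib is installed.

import cv2

cap = cv2.VideoCapture('/tmp/sample.mp4')  # placeholder clip
frames = []
ok, frame = cap.read()
while ok and len(frames) < 16:             # keep the example short
    frames.append(frame)
    ok, frame = cap.read()
cap.release()

flows = generate_flow(frames, method='tvl1')
for i, flow in enumerate(flows):
    cv2.imwrite(f'/tmp/flow_x_{i:05d}.jpg', flow_to_img(flow[:, :, 0]))
    cv2.imwrite(f'/tmp/flow_y_{i:05d}.jpg', flow_to_img(flow[:, :, 1]))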
def extract_dense_flow(path,
dest,
bound=20.,
save_rgb=False,
start_idx=0,
rgb_tmpl='img_{:05d}.jpg',
flow_tmpl='{}_{:05d}.jpg',
method='tvl1'):
"""Extract dense flow given video or frames, save them as gray-scale
images.
Args:
path (str): Location of the input video.
dest (str): The directory to store the extracted flow images.
bound (float): Bound for the flow-to-image normalization. Default: 20.
save_rgb (bool): Save extracted RGB frames. Default: False.
start_idx (int): The starting frame index if use frames as input, the
first image is path.format(start_idx). Default: 0.
rgb_tmpl (str): The template of RGB frame names, Default:
'img_{:05d}.jpg'.
flow_tmpl (str): The template of Flow frame names, Default:
'{}_{:05d}.jpg'.
method (str): Use which method to generate flow. Options are 'tvl1'
and 'farneback'. Default: 'tvl1'.
"""
frames = []
assert osp.exists(path)
video = cv2.VideoCapture(path)
flag, f = video.read()
while flag:
frames.append(f)
flag, f = video.read()
flow = generate_flow(frames, method=method)
flow_x = [flow_to_img(x[:, :, 0], bound) for x in flow]
flow_y = [flow_to_img(x[:, :, 1], bound) for x in flow]
if not osp.exists(dest):
os.system('mkdir -p ' + dest)
flow_x_names = [
osp.join(dest, flow_tmpl.format('x', ind + start_idx))
for ind in range(len(flow_x))
]
flow_y_names = [
osp.join(dest, flow_tmpl.format('y', ind + start_idx))
for ind in range(len(flow_y))
]
num_frames = len(flow)
for i in range(num_frames):
cv2.imwrite(flow_x_names[i], flow_x[i])
cv2.imwrite(flow_y_names[i], flow_y[i])
if save_rgb:
img_names = [
osp.join(dest, rgb_tmpl.format(ind + start_idx))
for ind in range(len(frames))
]
for frame, name in zip(frames, img_names):
cv2.imwrite(name, frame) | Extract dense flow given video or frames, save them as gray-scale
images.
Args:
path (str): Location of the input video.
dest (str): The directory to store the extracted flow images.
bound (float): Bound for the flow-to-image normalization. Default: 20.
save_rgb (bool): Save extracted RGB frames. Default: False.
start_idx (int): The starting frame index if use frames as input, the
first image is path.format(start_idx). Default: 0.
rgb_tmpl (str): The template of RGB frame names, Default:
'img_{:05d}.jpg'.
flow_tmpl (str): The template of Flow frame names, Default:
'{}_{:05d}.jpg'.
method (str): Use which method to generate flow. Options are 'tvl1'
and 'farneback'. Default: 'tvl1'. | extract_dense_flow | python | open-mmlab/mmaction2 | tools/misc/flow_extraction.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/misc/flow_extraction.py | Apache-2.0 |
def load_video_infos(ann_file):
"""Load the video annotations.
Args:
ann_file (str): A json file path of the annotation file.
Returns:
list[dict]: A list containing annotations for videos.
"""
video_infos = []
anno_database = mmengine.load(ann_file)
for video_name in anno_database:
video_info = anno_database[video_name]
video_info['video_name'] = video_name
video_infos.append(video_info)
return video_infos | Load the video annotations.
Args:
ann_file (str): A json file path of the annotation file.
Returns:
list[dict]: A list containing annotations for videos. | load_video_infos | python | open-mmlab/mmaction2 | tools/misc/bsn_proposal_generation.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/misc/bsn_proposal_generation.py | Apache-2.0 |
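An illustrative ActivityNet-style annotation dict and the list the loader builds from it; the keys shown are examples, not a guaranteed schema.

# Example annotation structure (keys are illustrative).
anno_database = {
    'v_QOlSCBRmfWY': {
        'duration_second': 82.73,
        'annotations': [{'segment': [6.2, 51.9], 'label': 'Ballet'}],
    }
}
video_infos = []
for video_name, video_info in anno_database.items():
    video_info['video_name'] = video_name   # same enrichment as load_video_infos
    video_infos.append(video_info)
print(video_infos)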
def generate_proposals(ann_file, tem_results_dir, pgm_proposals_dir,
pgm_proposals_thread, **kwargs):
"""Generate proposals using multi-process.
Args:
ann_file (str): A json file path of the annotation file for
all videos to be processed.
tem_results_dir (str): Directory to read tem results
pgm_proposals_dir (str): Directory to save generated proposals.
pgm_proposals_thread (int): Total number of threads.
kwargs (dict): Keyword arguments for "generate_candidate_proposals".
"""
video_infos = load_video_infos(ann_file)
num_videos = len(video_infos)
num_videos_per_thread = num_videos // pgm_proposals_thread
processes = []
manager = mp.Manager()
result_dict = manager.dict()
kwargs['result_dict'] = result_dict
for tid in range(pgm_proposals_thread - 1):
tmp_video_list = range(tid * num_videos_per_thread,
(tid + 1) * num_videos_per_thread)
p = mp.Process(
target=generate_candidate_proposals,
args=(
tmp_video_list,
video_infos,
tem_results_dir,
),
kwargs=kwargs)
p.start()
processes.append(p)
tmp_video_list = range((pgm_proposals_thread - 1) * num_videos_per_thread,
num_videos)
p = mp.Process(
target=generate_candidate_proposals,
args=(
tmp_video_list,
video_infos,
tem_results_dir,
),
kwargs=kwargs)
p.start()
processes.append(p)
for p in processes:
p.join()
# save results
os.makedirs(pgm_proposals_dir, exist_ok=True)
prog_bar = mmengine.ProgressBar(num_videos)
header = 'tmin,tmax,tmin_score,tmax_score,score,match_iou,match_ioa'
for video_name in result_dict:
proposals = result_dict[video_name]
proposal_path = osp.join(pgm_proposals_dir, video_name + '.csv')
np.savetxt(
proposal_path,
proposals,
header=header,
delimiter=',',
comments='')
prog_bar.update() | Generate proposals using multi-process.
Args:
ann_file (str): A json file path of the annotation file for
all videos to be processed.
tem_results_dir (str): Directory to read tem results
pgm_proposals_dir (str): Directory to save generated proposals.
pgm_proposals_thread (int): Total number of threads.
kwargs (dict): Keyword arguments for "generate_candidate_proposals". | generate_proposals | python | open-mmlab/mmaction2 | tools/misc/bsn_proposal_generation.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/misc/bsn_proposal_generation.py | Apache-2.0 |
def generate_features(ann_file, tem_results_dir, pgm_proposals_dir,
pgm_features_dir, pgm_features_thread, **kwargs):
"""Generate proposals features using multi-process.
Args:
ann_file (str): A json file path of the annotation file for
all videos to be processed.
tem_results_dir (str): Directory to read tem results.
pgm_proposals_dir (str): Directory to read generated proposals.
pgm_features_dir (str): Directory to save generated features.
pgm_features_thread (int): Total number of threads.
kwargs (dict): Keyword arguments for "generate_bsp_feature".
"""
video_infos = load_video_infos(ann_file)
num_videos = len(video_infos)
num_videos_per_thread = num_videos // pgm_features_thread
processes = []
manager = mp.Manager()
feature_return_dict = manager.dict()
kwargs['result_dict'] = feature_return_dict
for tid in range(pgm_features_thread - 1):
tmp_video_list = range(tid * num_videos_per_thread,
(tid + 1) * num_videos_per_thread)
p = mp.Process(
target=generate_bsp_feature,
args=(
tmp_video_list,
video_infos,
tem_results_dir,
pgm_proposals_dir,
),
kwargs=kwargs)
p.start()
processes.append(p)
tmp_video_list = range((pgm_features_thread - 1) * num_videos_per_thread,
num_videos)
p = mp.Process(
target=generate_bsp_feature,
args=(
tmp_video_list,
video_infos,
tem_results_dir,
pgm_proposals_dir,
),
kwargs=kwargs)
p.start()
processes.append(p)
for p in processes:
p.join()
# save results
os.makedirs(pgm_features_dir, exist_ok=True)
prog_bar = mmengine.ProgressBar(num_videos)
for video_name in feature_return_dict.keys():
bsp_feature = feature_return_dict[video_name]
feature_path = osp.join(pgm_features_dir, video_name + '.npy')
np.save(feature_path, bsp_feature)
prog_bar.update() | Generate proposal features using multi-process.
Args:
ann_file (str): A json file path of the annotation file for
all videos to be processed.
tem_results_dir (str): Directory to read tem results.
pgm_proposals_dir (str): Directory to read generated proposals.
pgm_features_dir (str): Directory to save generated features.
pgm_features_thread (int): Total number of threads.
kwargs (dict): Keyword arguments for "generate_bsp_feature". | generate_features | python | open-mmlab/mmaction2 | tools/misc/bsn_proposal_generation.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/misc/bsn_proposal_generation.py | Apache-2.0 |
def mmaction2torchserve(
config_file: str,
checkpoint_file: str,
output_folder: str,
model_name: str,
label_file: str,
model_version: str = '1.0',
force: bool = False,
):
"""Converts MMAction2 model (config + checkpoint) to TorchServe `.mar`.
Args:
config_file (str): In MMAction2 config format.
checkpoint_file (str): In MMAction2 checkpoint format.
output_folder (str): Folder where `{model_name}.mar` will be created.
The file created will be in TorchServe archive format.
label_file (str): A txt file which contains the action category names.
model_name (str | None): If not None, used for naming the
`{model_name}.mar` file that will be created under `output_folder`.
If None, `{Path(checkpoint_file).stem}` will be used.
model_version (str): Model's version.
force (bool): If True, if there is an existing `{model_name}.mar` file
under `output_folder` it will be overwritten.
"""
mkdir_or_exist(output_folder)
config = Config.fromfile(config_file)
with TemporaryDirectory() as tmpdir:
config.dump(f'{tmpdir}/config.py')
shutil.copy(label_file, f'{tmpdir}/label_map.txt')
args = Namespace(
**{
'model_file': f'{tmpdir}/config.py',
'serialized_file': checkpoint_file,
'handler': f'{Path(__file__).parent}/mmaction_handler.py',
'model_name': model_name or Path(checkpoint_file).stem,
'version': model_version,
'export_path': output_folder,
'force': force,
'requirements_file': None,
'extra_files': f'{tmpdir}/label_map.txt',
'runtime': 'python',
'archive_format': 'default'
})
manifest = ModelExportUtils.generate_manifest_json(args)
package_model(args, manifest) | Converts MMAction2 model (config + checkpoint) to TorchServe `.mar`.
Args:
config_file (str): In MMAction2 config format.
checkpoint_file (str): In MMAction2 checkpoint format.
output_folder (str): Folder where `{model_name}.mar` will be created.
The file created will be in TorchServe archive format.
label_file (str): A txt file which contains the action category names.
model_name (str | None): If not None, used for naming the
`{model_name}.mar` file that will be created under `output_folder`.
If None, `{Path(checkpoint_file).stem}` will be used.
model_version (str): Model's version.
force (bool): If True, if there is an existing `{model_name}.mar` file
under `output_folder` it will be overwritten. | mmaction2torchserve | python | open-mmlab/mmaction2 | tools/deployment/mmaction2torchserve.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/deployment/mmaction2torchserve.py | Apache-2.0 |
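A hypothetical invocation of the converter; every path and name below is a placeholder.

mmaction2torchserve(
    config_file='configs/recognition/tsn/tsn_kinetics400.py',  # placeholder
    checkpoint_file='checkpoints/tsn_kinetics400.pth',         # placeholder
    output_folder='model_store',
    model_name='tsn_kinetics400',
    label_file='label_map_k400.txt',                           # placeholder
    force=True)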
def add_frames(self, idx, frames, processed_frames):
"""Add the clip and corresponding id.
Args:
idx (int): the current index of the clip.
frames (list[ndarray]): list of images in "BGR" format.
processed_frames (list[ndarray]): list of resized and normalized images
in "BGR" format.
"""
self.frames = frames
self.processed_frames = processed_frames
self.id = idx
self.img_shape = processed_frames[0].shape[:2] | Add the clip and corresponding id.
Args:
idx (int): the current index of the clip.
frames (list[ndarray]): list of images in "BGR" format.
processed_frames (list[ndarray]): list of resized and normalized images
in "BGR" format. | add_frames | python | open-mmlab/mmaction2 | demo/webcam_demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py | Apache-2.0 |
def add_bboxes(self, display_bboxes):
"""Add correspondding bounding boxes."""
self.display_bboxes = display_bboxes
self.stdet_bboxes = display_bboxes.clone()
self.stdet_bboxes[:, ::2] = self.stdet_bboxes[:, ::2] * self.ratio[0]
self.stdet_bboxes[:, 1::2] = self.stdet_bboxes[:, 1::2] * self.ratio[1] | Add corresponding bounding boxes. | add_bboxes | python | open-mmlab/mmaction2 | demo/webcam_demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py | Apache-2.0
def add_action_preds(self, preds):
"""Add the corresponding action predictions."""
self.action_preds = preds | Add the corresponding action predictions. | add_action_preds | python | open-mmlab/mmaction2 | demo/webcam_demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py | Apache-2.0 |
def get_model_inputs(self, device):
"""Convert preprocessed images to MMAction2 STDet model inputs."""
cur_frames = [self.processed_frames[idx] for idx in self.frames_inds]
input_array = np.stack(cur_frames).transpose((3, 0, 1, 2))[np.newaxis]
input_tensor = torch.from_numpy(input_array).to(device)
datasample = ActionDataSample()
datasample.proposals = InstanceData(bboxes=self.stdet_bboxes)
datasample.set_metainfo(dict(img_shape=self.img_shape))
return dict(
inputs=input_tensor, data_samples=[datasample], mode='predict') | Convert preprocessed images to MMAction2 STDet model inputs. | get_model_inputs | python | open-mmlab/mmaction2 | demo/webcam_demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py | Apache-2.0 |
def _do_detect(self, image):
"""Get human bboxes with shape [n, 4].
The format of bboxes is (xmin, ymin, xmax, ymax) in pixels.
""" | Get human bboxes with shape [n, 4].
The format of bboxes is (xmin, ymin, xmax, ymax) in pixels. | _do_detect | python | open-mmlab/mmaction2 | demo/webcam_demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py | Apache-2.0 |
def predict(self, task):
"""Add keyframe bboxes to task."""
# keyframe idx == (clip_len * frame_interval) // 2
keyframe = task.frames[len(task.frames) // 2]
# call detector
bboxes = self._do_detect(keyframe)
# convert bboxes to torch.Tensor and move to target device
if isinstance(bboxes, np.ndarray):
bboxes = torch.from_numpy(bboxes).to(self.device)
elif isinstance(bboxes, torch.Tensor) and bboxes.device != self.device:
bboxes = bboxes.to(self.device)
# update task
task.add_bboxes(bboxes)
return task | Add keyframe bboxes to task. | predict | python | open-mmlab/mmaction2 | demo/webcam_demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py | Apache-2.0 |
def _do_detect(self, image):
"""Get bboxes in shape [n, 4] and values in pixels."""
det_data_sample = inference_detector(self.model, image)
pred_instance = det_data_sample.pred_instances.cpu().numpy()
# We only keep human detection bboxes with scores larger
# than `self.score_thr` and category ids equal to `self.person_classid`.
valid_idx = np.logical_and(pred_instance.labels == self.person_classid,
pred_instance.scores > self.score_thr)
bboxes = pred_instance.bboxes[valid_idx]
# result = result[result[:, 4] >= self.score_thr][:, :4]
return bboxes | Get bboxes in shape [n, 4] and values in pixels. | _do_detect | python | open-mmlab/mmaction2 | demo/webcam_demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py | Apache-2.0 |
def predict(self, task):
"""Spatio-temporval Action Detection model inference."""
# No need to do inference if no one in keyframe
if len(task.stdet_bboxes) == 0:
return task
with torch.no_grad():
result = self.model(**task.get_model_inputs(self.device))
scores = result[0].pred_instances.scores
# pack results of human detector and stdet
preds = []
for _ in range(task.stdet_bboxes.shape[0]):
preds.append([])
for class_id in range(scores.shape[1]):
if class_id not in self.label_map:
continue
for bbox_id in range(task.stdet_bboxes.shape[0]):
if scores[bbox_id][class_id] > self.score_thr:
preds[bbox_id].append((self.label_map[class_id],
scores[bbox_id][class_id].item()))
# update task
# `preds` is `list[list[tuple]]`. The outer brackets indicate
# different bboxes and the inner brackets indicate different action
# results for the same bbox. Each tuple contains `class_name` and `score`.
task.add_action_preds(preds)
return task | Spatio-temporal Action Detection model inference. | predict | python | open-mmlab/mmaction2 | demo/webcam_demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py | Apache-2.0
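An illustrative value of the packed predictions for two detected persons; the labels and scores are made up.

preds = [
    [('stand', 0.97), ('talk to ...', 0.61)],  # actions kept for the first bbox
    [('sit', 0.88)],                           # actions kept for the second bbox
]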
def read_fn(self):
"""Main function for read thread.
Contains three steps:
1) Read and preprocess (resize + norm) frames from source.
2) Create task by frames from previous step and buffer.
3) Put task into read queue.
"""
was_read = True
start_time = time.time()
while was_read and not self.stopped:
# init task
task = TaskInfo()
task.clip_vis_length = self.clip_vis_length
task.frames_inds = self.frames_inds
task.ratio = self.ratio
# read buffer
frames = []
processed_frames = []
if len(self.buffer) != 0:
frames = self.buffer
if len(self.processed_buffer) != 0:
processed_frames = self.processed_buffer
# read and preprocess frames from source and update task
with self.read_lock:
before_read = time.time()
read_frame_cnt = self.window_size - len(frames)
while was_read and len(frames) < self.window_size:
was_read, frame = self.cap.read()
if not self.webcam:
# Reading frames too fast may lead to unexpected
# performance degradation. If you have enough
# resource, this line could be commented.
time.sleep(1 / self.output_fps)
if was_read:
frames.append(mmcv.imresize(frame, self.display_size))
processed_frame = mmcv.imresize(
frame, self.stdet_input_size).astype(np.float32)
_ = mmcv.imnormalize_(processed_frame,
**self.img_norm_cfg)
processed_frames.append(processed_frame)
task.add_frames(self.read_id + 1, frames, processed_frames)
# update buffer
if was_read:
self.buffer = frames[-self.buffer_size:]
self.processed_buffer = processed_frames[-self.buffer_size:]
# update read state
with self.read_id_lock:
self.read_id += 1
self.not_end = was_read
self.read_queue.put((was_read, copy.deepcopy(task)))
cur_time = time.time()
logger.debug(
f'Read thread: {1000*(cur_time - start_time):.0f} ms, '
f'{read_frame_cnt / (cur_time - before_read):.0f} fps')
start_time = cur_time | Main function for read thread.
Contains three steps:
1) Read and preprocess (resize + norm) frames from source.
2) Create task by frames from previous step and buffer.
3) Put task into read queue. | read_fn | python | open-mmlab/mmaction2 | demo/webcam_demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py | Apache-2.0 |
def display_fn(self):
"""Main function for display thread.
Read data from display queue and display predictions.
"""
start_time = time.time()
while not self.stopped:
# get the state of the read thread
with self.read_id_lock:
read_id = self.read_id
not_end = self.not_end
with self.display_lock:
# If the video has ended and we have displayed all frames.
if not not_end and self.display_id == read_id:
break
# If the next task are not available, wait.
if (len(self.display_queue) == 0 or
self.display_queue.get(self.display_id + 1) is None):
time.sleep(0.02)
continue
# get display data and update state
self.display_id += 1
was_read, task = self.display_queue[self.display_id]
del self.display_queue[self.display_id]
display_id = self.display_id
# do display predictions
with self.output_lock:
if was_read and task.id == 0:
# the first task
cur_display_inds = range(self.display_inds[-1] + 1)
elif not was_read:
# the last task
cur_display_inds = range(self.display_inds[0],
len(task.frames))
else:
cur_display_inds = self.display_inds
for frame_id in cur_display_inds:
frame = task.frames[frame_id]
if self.show:
cv2.imshow('Demo', frame)
cv2.waitKey(int(1000 / self.output_fps))
if self.video_writer:
self.video_writer.write(frame)
cur_time = time.time()
logger.debug(
f'Display thread: {1000*(cur_time - start_time):.0f} ms, '
f'read id {read_id}, display id {display_id}')
start_time = cur_time | Main function for display thread.
Read data from display queue and display predictions. | display_fn | python | open-mmlab/mmaction2 | demo/webcam_demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py | Apache-2.0 |
def __next__(self):
"""Get data from read queue.
This function is part of the main thread.
"""
if self.read_queue.qsize() == 0:
time.sleep(0.02)
return not self.stopped, None
was_read, task = self.read_queue.get()
if not was_read:
# If we reach the end of the video, there aren't enough frames
# in task.processed_frames, so there is no need to run model inference
# or draw predictions. Put the task into the display queue.
with self.read_id_lock:
read_id = self.read_id
with self.display_lock:
self.display_queue[read_id] = was_read, copy.deepcopy(task)
# main thread doesn't need to handle this task again
task = None
return was_read, task | Get data from read queue.
This function is part of the main thread. | __next__ | python | open-mmlab/mmaction2 | demo/webcam_demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py | Apache-2.0 |
def start(self):
"""Start read thread and display thread."""
self.read_thread = threading.Thread(
target=self.read_fn, args=(), name='VidRead-Thread', daemon=True)
self.read_thread.start()
self.display_thread = threading.Thread(
target=self.display_fn,
args=(),
name='VidDisplay-Thread',
daemon=True)
self.display_thread.start()
return self | Start read thread and display thread. | start | python | open-mmlab/mmaction2 | demo/webcam_demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py | Apache-2.0 |
def clean(self):
"""Close all threads and release all resources."""
self.stopped = True
self.read_lock.acquire()
self.cap.release()
self.read_lock.release()
self.output_lock.acquire()
cv2.destroyAllWindows()
if self.video_writer:
self.video_writer.release()
self.output_lock.release() | Close all threads and release all resources. | clean | python | open-mmlab/mmaction2 | demo/webcam_demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py | Apache-2.0 |
def join(self):
"""Waiting for the finalization of read and display thread."""
self.read_thread.join()
self.display_thread.join() | Wait for the read and display threads to finish. | join | python | open-mmlab/mmaction2 | demo/webcam_demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py | Apache-2.0
def display(self, task):
"""Add the visualized task to the display queue.
Args:
task (TaskInfo object): task object that contains the necessary
information for prediction visualization.
"""
with self.display_lock:
self.display_queue[task.id] = (True, task) | Add the visualized task to the display queue.
Args:
task (TaskInfo object): task object that contains the necessary
information for prediction visualization. | display | python | open-mmlab/mmaction2 | demo/webcam_demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py | Apache-2.0 |
def get_output_video_writer(self, path):
"""Return a video writer object.
Args:
path (str): path to the output video file.
"""
return cv2.VideoWriter(
filename=path,
fourcc=cv2.VideoWriter_fourcc(*'mp4v'),
fps=float(self.output_fps),
frameSize=self.display_size,
isColor=True) | Return a video writer object.
Args:
path (str): path to the output video file. | get_output_video_writer | python | open-mmlab/mmaction2 | demo/webcam_demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py | Apache-2.0 |
def draw_predictions(self, task):
"""Visualize stdet predictions on raw frames."""
# read bboxes from task
bboxes = task.display_bboxes.cpu().numpy()
# draw predictions and update task
keyframe_idx = len(task.frames) // 2
draw_range = [
keyframe_idx - task.clip_vis_length // 2,
keyframe_idx + (task.clip_vis_length - 1) // 2
]
assert draw_range[0] >= 0 and draw_range[1] < len(task.frames)
task.frames = self.draw_clip_range(task.frames, task.action_preds,
bboxes, draw_range)
return task | Visualize stdet predictions on raw frames. | draw_predictions | python | open-mmlab/mmaction2 | demo/webcam_demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py | Apache-2.0 |
def draw_clip_range(self, frames, preds, bboxes, draw_range):
"""Draw a range of frames with the same bboxes and predictions."""
# no predictions to be draw
if bboxes is None or len(bboxes) == 0:
return frames
# draw frames in `draw_range`
left_frames = frames[:draw_range[0]]
right_frames = frames[draw_range[1] + 1:]
draw_frames = frames[draw_range[0]:draw_range[1] + 1]
# get labels(texts) and draw predictions
draw_frames = [
self.draw_one_image(frame, bboxes, preds) for frame in draw_frames
]
return list(left_frames) + draw_frames + list(right_frames) | Draw a range of frames with the same bboxes and predictions. | draw_clip_range | python | open-mmlab/mmaction2 | demo/webcam_demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py | Apache-2.0 |
def draw_one_image(self, frame, bboxes, preds):
"""Draw bboxes and corresponding texts on one frame.""" | Draw bboxes and corresponding texts on one frame. | draw_one_image | python | open-mmlab/mmaction2 | demo/webcam_demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py | Apache-2.0 |
def abbrev(name):
"""Get the abbreviation of label name:
'take (an object) from (a person)' -> 'take ... from ...'
"""
while name.find('(') != -1:
st, ed = name.find('('), name.find(')')
name = name[:st] + '...' + name[ed + 1:]
return name | Get the abbreviation of label name:
'take (an object) from (a person)' -> 'take ... from ...' | abbrev | python | open-mmlab/mmaction2 | demo/webcam_demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py | Apache-2.0 |
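Worked examples of the abbreviation rule described above; the second label is an illustrative AVA-style name.

print(abbrev('take (an object) from (a person)'))  # 'take ... from ...'
print(abbrev('watch (e.g., TV)'))                  # 'watch ...'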
def hex2color(h):
"""Convert the 6-digit hex string to tuple of 3 int value (RGB)"""
return (int(h[:2], 16), int(h[2:4], 16), int(h[4:], 16)) | Convert the 6-digit hex string to tuple of 3 int value (RGB) | hex2color | python | open-mmlab/mmaction2 | demo/webcam_demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py | Apache-2.0 |
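A quick check of the conversion; '03045e' is the kind of hex string used in the demo's color plates.

print(hex2color('03045e'))  # (3, 4, 94)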
def draw_one_image(self, frame, bboxes, preds):
"""Draw predictions on one image."""
for bbox, pred in zip(bboxes, preds):
# draw bbox
box = bbox.astype(np.int64)
st, ed = tuple(box[:2]), tuple(box[2:])
cv2.rectangle(frame, st, ed, (0, 0, 255), 2)
# draw texts
for k, (label, score) in enumerate(pred):
if k >= self.max_labels_per_bbox:
break
text = f'{self.abbrev(label)}: {score:.4f}'
location = (0 + st[0], 18 + k * 18 + st[1])
textsize = cv2.getTextSize(text, self.text_fontface,
self.text_fontscale,
self.text_thickness)[0]
textwidth = textsize[0]
diag0 = (location[0] + textwidth, location[1] - 14)
diag1 = (location[0], location[1] + 2)
cv2.rectangle(frame, diag0, diag1, self.plate[k + 1], -1)
cv2.putText(frame, text, location, self.text_fontface,
self.text_fontscale, self.text_fontcolor,
self.text_thickness, self.text_linetype)
return frame | Draw predictions on one image. | draw_one_image | python | open-mmlab/mmaction2 | demo/webcam_demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py | Apache-2.0 |
def hex2color(h):
"""Convert the 6-digit hex string to tuple of 3 int value (RGB)"""
return (int(h[:2], 16), int(h[2:4], 16), int(h[4:], 16)) | Convert the 6-digit hex string to tuple of 3 int value (RGB) | hex2color | python | open-mmlab/mmaction2 | demo/demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_spatiotemporal_det.py | Apache-2.0 |
def visualize(frames, annotations, plate=plate_blue, max_num=5):
"""Visualize frames with predicted annotations.
Args:
frames (list[np.ndarray]): Frames for visualization, note that
len(frames) % len(annotations) should be 0.
annotations (list[list[tuple]]): The predicted results.
plate (list): The color plate used for visualization. Default: plate_blue.
max_num (int): Max number of labels to visualize for a person box.
Default: 5.
Returns:
list[np.ndarray]: Visualized frames.
"""
assert max_num + 1 <= len(plate)
plate = [x[::-1] for x in plate]
frames_out = cp.deepcopy(frames)
nf, na = len(frames), len(annotations)
assert nf % na == 0
nfpa = len(frames) // len(annotations)
anno = None
h, w, _ = frames[0].shape
scale_ratio = np.array([w, h, w, h])
for i in range(na):
anno = annotations[i]
if anno is None:
continue
for j in range(nfpa):
ind = i * nfpa + j
frame = frames_out[ind]
for ann in anno:
box = ann[0]
label = ann[1]
if not len(label):
continue
score = ann[2]
box = (box * scale_ratio).astype(np.int64)
st, ed = tuple(box[:2]), tuple(box[2:])
cv2.rectangle(frame, st, ed, plate[0], 2)
for k, lb in enumerate(label):
if k >= max_num:
break
text = abbrev(lb)
text = ': '.join([text, f'{score[k]:>.2f}'])
location = (0 + st[0], 18 + k * 18 + st[1])
textsize = cv2.getTextSize(text, FONTFACE, FONTSCALE,
THICKNESS)[0]
textwidth = textsize[0]
diag0 = (location[0] + textwidth, location[1] - 14)
diag1 = (location[0], location[1] + 2)
cv2.rectangle(frame, diag0, diag1, plate[k + 1], -1)
cv2.putText(frame, text, location, FONTFACE, FONTSCALE,
FONTCOLOR, THICKNESS, LINETYPE)
return frames_out | Visualize frames with predicted annotations.
Args:
frames (list[np.ndarray]): Frames for visualization, note that
len(frames) % len(annotations) should be 0.
annotations (list[list[tuple]]): The predicted results.
plate (list): The color plate used for visualization. Default: plate_blue.
max_num (int): Max number of labels to visualize for a person box.
Default: 5.
Returns:
list[np.ndarray]: Visualized frames. | visualize | python | open-mmlab/mmaction2 | demo/demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_spatiotemporal_det.py | Apache-2.0 |
def load_label_map(file_path):
"""Load Label Map.
Args:
file_path (str): The file path of label map.
Returns:
dict: The label map (int -> label name).
"""
lines = open(file_path).readlines()
lines = [x.strip().split(': ') for x in lines]
return {int(x[0]): x[1] for x in lines} | Load Label Map.
Args:
file_path (str): The file path of label map.
Returns:
dict: The label map (int -> label name). | load_label_map | python | open-mmlab/mmaction2 | demo/demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_spatiotemporal_det.py | Apache-2.0 |
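A sketch of the expected label-map format; the ids and names below are illustrative only.

# The label map file is expected to contain lines like '<id>: <name>'.
lines = ['1: bend/bow (at the waist)', '3: crouch/kneel', '80: watch (a person)']
label_map = {int(k): v for k, v in (ln.strip().split(': ') for ln in lines)}
print(label_map)  # {1: 'bend/bow (at the waist)', 3: 'crouch/kneel', 80: 'watch (a person)'}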
def abbrev(name):
"""Get the abbreviation of label name:
'take (an object) from (a person)' -> 'take ... from ...'
"""
while name.find('(') != -1:
st, ed = name.find('('), name.find(')')
name = name[:st] + '...' + name[ed + 1:]
return name | Get the abbreviation of label name:
'take (an object) from (a person)' -> 'take ... from ...' | abbrev | python | open-mmlab/mmaction2 | demo/demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_spatiotemporal_det.py | Apache-2.0 |
def pack_result(human_detection, result, img_h, img_w):
"""Short summary.
Args:
human_detection (np.ndarray): Human detection result.
result (list): The predicted labels of each human proposal.
img_h (int): The image height.
img_w (int): The image width.
Returns:
tuple: Tuple of human proposal, label name and label score.
"""
human_detection[:, 0::2] /= img_w
human_detection[:, 1::2] /= img_h
results = []
if result is None:
return None
for prop, res in zip(human_detection, result):
res.sort(key=lambda x: -x[1])
results.append(
(prop.data.cpu().numpy(), [x[0] for x in res], [x[1]
for x in res]))
return results | Pack each human proposal with its predicted label names and scores.
Args:
human_detection (np.ndarray): Human detection result.
result (list): The predicted labels of each human proposal.
img_h (int): The image height.
img_w (int): The image width.
Returns:
tuple: Tuple of human proposal, label name and label score. | pack_result | python | open-mmlab/mmaction2 | demo/demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_spatiotemporal_det.py | Apache-2.0 |
def dense_timestamps(timestamps, n):
"""Make it nx frames."""
old_frame_interval = (timestamps[1] - timestamps[0])
start = timestamps[0] - old_frame_interval / n * (n - 1) / 2
new_frame_inds = np.arange(
len(timestamps) * n) * old_frame_interval / n + start
return new_frame_inds.astype(np.int64) | Densify the timestamps so that there are n times as many frame indices. | main.dense_timestamps | python | open-mmlab/mmaction2 | demo/demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_spatiotemporal_det.py | Apache-2.0
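A worked example of the densification with made-up timestamps:

import numpy as np

timestamps = np.array([8, 16, 24])  # keyframes every 8 frames
n = 4
old_frame_interval = timestamps[1] - timestamps[0]            # 8
start = timestamps[0] - old_frame_interval / n * (n - 1) / 2  # 8 - 3 = 5
new_frame_inds = np.arange(len(timestamps) * n) * old_frame_interval / n + start
print(new_frame_inds.astype(np.int64))  # [ 5  7  9 11 13 15 17 19 21 23 25 27]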
def main():
args = parse_args()
tmp_dir = tempfile.TemporaryDirectory()
frame_paths, original_frames = frame_extract(
args.video, out_dir=tmp_dir.name)
num_frame = len(frame_paths)
h, w, _ = original_frames[0].shape
# resize frames to shortside
new_w, new_h = mmcv.rescale_size((w, h), (args.short_side, np.Inf))
frames = [mmcv.imresize(img, (new_w, new_h)) for img in original_frames]
w_ratio, h_ratio = new_w / w, new_h / h
# Get clip_len, frame_interval and calculate center index of each clip
config = mmengine.Config.fromfile(args.config)
config.merge_from_dict(args.cfg_options)
val_pipeline = config.val_pipeline
sampler = [
x for x in val_pipeline if get_str_type(x['type']) == 'SampleAVAFrames'
][0]
clip_len, frame_interval = sampler['clip_len'], sampler['frame_interval']
window_size = clip_len * frame_interval
assert clip_len % 2 == 0, 'We would like to have an even clip_len'
# Note that it's 1 based here
timestamps = np.arange(window_size // 2, num_frame + 1 - window_size // 2,
args.predict_stepsize)
# Load label_map
label_map = load_label_map(args.label_map)
try:
if config['data']['train']['custom_classes'] is not None:
label_map = {
id + 1: label_map[cls]
for id, cls in enumerate(config['data']['train']
['custom_classes'])
}
except KeyError:
pass
# Get Human detection results
center_frames = [frame_paths[ind - 1] for ind in timestamps]
human_detections, _ = detection_inference(args.det_config,
args.det_checkpoint,
center_frames,
args.det_score_thr,
args.det_cat_id, args.device)
torch.cuda.empty_cache()
for i in range(len(human_detections)):
det = human_detections[i]
det[:, 0:4:2] *= w_ratio
det[:, 1:4:2] *= h_ratio
human_detections[i] = torch.from_numpy(det[:, :4]).to(args.device)
# Build STDET model
try:
# In our spatiotemporal detection demo, different actions should have
# the same number of bboxes.
config['model']['test_cfg']['rcnn'] = dict(action_thr=0)
except KeyError:
pass
config.model.backbone.pretrained = None
model = MODELS.build(config.model)
load_checkpoint(model, args.checkpoint, map_location='cpu')
model.to(args.device)
model.eval()
predictions = []
img_norm_cfg = dict(
mean=np.array(config.model.data_preprocessor.mean),
std=np.array(config.model.data_preprocessor.std),
to_rgb=False)
print('Performing SpatioTemporal Action Detection for each clip')
assert len(timestamps) == len(human_detections)
prog_bar = mmengine.ProgressBar(len(timestamps))
for timestamp, proposal in zip(timestamps, human_detections):
if proposal.shape[0] == 0:
predictions.append(None)
continue
start_frame = timestamp - (clip_len // 2 - 1) * frame_interval
frame_inds = start_frame + np.arange(0, window_size, frame_interval)
frame_inds = list(frame_inds - 1)
imgs = [frames[ind].astype(np.float32) for ind in frame_inds]
_ = [mmcv.imnormalize_(img, **img_norm_cfg) for img in imgs]
# THWC -> CTHW -> 1CTHW
input_array = np.stack(imgs).transpose((3, 0, 1, 2))[np.newaxis]
input_tensor = torch.from_numpy(input_array).to(args.device)
datasample = ActionDataSample()
datasample.proposals = InstanceData(bboxes=proposal)
datasample.set_metainfo(dict(img_shape=(new_h, new_w)))
with torch.no_grad():
result = model(input_tensor, [datasample], mode='predict')
scores = result[0].pred_instances.scores
prediction = []
# N proposals
for i in range(proposal.shape[0]):
prediction.append([])
# Perform action score thr
for i in range(scores.shape[1]):
if i not in label_map:
continue
for j in range(proposal.shape[0]):
if scores[j, i] > args.action_score_thr:
prediction[j].append((label_map[i], scores[j,
i].item()))
predictions.append(prediction)
prog_bar.update()
results = []
for human_detection, prediction in zip(human_detections, predictions):
results.append(pack_result(human_detection, prediction, new_h, new_w))
def dense_timestamps(timestamps, n):
"""Make it nx frames."""
old_frame_interval = (timestamps[1] - timestamps[0])
start = timestamps[0] - old_frame_interval / n * (n - 1) / 2
new_frame_inds = np.arange(
len(timestamps) * n) * old_frame_interval / n + start
return new_frame_inds.astype(np.int64)
dense_n = int(args.predict_stepsize / args.output_stepsize)
frames = [
cv2.imread(frame_paths[i - 1])
for i in dense_timestamps(timestamps, dense_n)
]
print('Performing visualization')
vis_frames = visualize(frames, results)
vid = mpy.ImageSequenceClip([x[:, :, ::-1] for x in vis_frames],
fps=args.output_fps)
vid.write_videofile(args.out_filename)
tmp_dir.cleanup() | Densify the timestamps so that there are n times as many frame indices. | main | python | open-mmlab/mmaction2 | demo/demo_spatiotemporal_det.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_spatiotemporal_det.py | Apache-2.0
def get_output(
video_path: str,
out_filename: str,
data_sample: str,
labels: list,
fps: int = 30,
font_scale: Optional[str] = None,
font_color: str = 'white',
target_resolution: Optional[Tuple[int]] = None,
) -> None:
"""Get demo output using ``moviepy``.
This function will generate a video file or a gif file from a raw video or
frames by using ``moviepy``. For more information on some parameters,
you can refer to: https://github.com/Zulko/moviepy.
Args:
video_path (str): The video file path.
out_filename (str): Output filename for the generated file.
data_sample (ActionDataSample): The prediction results of the input video.
labels (list): Label list of current dataset.
fps (int): Number of picture frames to read per second. Defaults to 30.
font_scale (float): Font scale of the text. Defaults to None.
font_color (str): Font color of the text. Defaults to ``white``.
target_resolution (Tuple[int], optional): Set to
(desired_width, desired_height) to have resized frames. If
either dimension is None, the frames are resized by keeping
the existing aspect ratio. Defaults to None.
"""
if video_path.startswith(('http://', 'https://')):
raise NotImplementedError
# init visualizer
out_type = 'gif' if osp.splitext(out_filename)[1] == '.gif' else 'video'
visualizer = ActionVisualizer()
visualizer.dataset_meta = dict(classes=labels)
text_cfg = {'colors': font_color}
if font_scale is not None:
text_cfg.update({'font_sizes': font_scale})
visualizer.add_datasample(
out_filename,
video_path,
data_sample,
draw_pred=True,
draw_gt=False,
text_cfg=text_cfg,
fps=fps,
out_type=out_type,
out_path=osp.join('demo', out_filename),
target_resolution=target_resolution) | Get demo output using ``moviepy``.
This function will generate a video file or a gif file from a raw video or
frames by using ``moviepy``. For more information on some parameters,
you can refer to: https://github.com/Zulko/moviepy.
Args:
video_path (str): The video file path.
out_filename (str): Output filename for the generated file.
data_sample (ActionDataSample): The prediction results of the input video.
labels (list): Label list of current dataset.
fps (int): Number of picture frames to read per second. Defaults to 30.
font_scale (float): Font scale of the text. Defaults to None.
font_color (str): Font color of the text. Defaults to ``white``.
target_resolution (Tuple[int], optional): Set to
(desired_width, desired_height) to have resized frames. If
either dimension is None, the frames are resized by keeping
the existing aspect ratio. Defaults to None. | get_output | python | open-mmlab/mmaction2 | demo/demo.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/demo.py | Apache-2.0 |
def hex2color(h):
"""Convert the 6-digit hex string to tuple of 3 int value (RGB)"""
return (int(h[:2], 16), int(h[2:4], 16), int(h[4:], 16)) | Convert the 6-digit hex string to tuple of 3 int value (RGB) | hex2color | python | open-mmlab/mmaction2 | demo/demo_spatiotemporal_det_onnx.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_spatiotemporal_det_onnx.py | Apache-2.0 |
def visualize(frames, annotations, plate=plate_blue, max_num=5):
"""Visualize frames with predicted annotations.
Args:
frames (list[np.ndarray]): Frames for visualization, note that
len(frames) % len(annotations) should be 0.
annotations (list[list[tuple]]): The predicted results.
plate (list): The color plate used for visualization. Default: plate_blue.
max_num (int): Max number of labels to visualize for a person box.
Default: 5.
Returns:
list[np.ndarray]: Visualized frames.
"""
assert max_num + 1 <= len(plate)
plate = [x[::-1] for x in plate]
frames_out = cp.deepcopy(frames)
nf, na = len(frames), len(annotations)
assert nf % na == 0
nfpa = len(frames) // len(annotations)
anno = None
h, w, _ = frames[0].shape
scale_ratio = np.array([w, h, w, h])
for i in range(na):
anno = annotations[i]
if anno is None:
continue
for j in range(nfpa):
ind = i * nfpa + j
frame = frames_out[ind]
for ann in anno:
box = ann[0]
label = ann[1]
if not len(label):
continue
score = ann[2]
box = (box * scale_ratio).astype(np.int64)
st, ed = tuple(box[:2]), tuple(box[2:])
cv2.rectangle(frame, st, ed, plate[0], 2)
for k, lb in enumerate(label):
if k >= max_num:
break
text = abbrev(lb)
text = ': '.join([text, str(score[k])])
location = (0 + st[0], 18 + k * 18 + st[1])
textsize = cv2.getTextSize(text, FONTFACE, FONTSCALE,
THICKNESS)[0]
textwidth = textsize[0]
diag0 = (location[0] + textwidth, location[1] - 14)
diag1 = (location[0], location[1] + 2)
cv2.rectangle(frame, diag0, diag1, plate[k + 1], -1)
cv2.putText(frame, text, location, FONTFACE, FONTSCALE,
FONTCOLOR, THICKNESS, LINETYPE)
return frames_out | Visualize frames with predicted annotations.
Args:
frames (list[np.ndarray]): Frames for visualization, note that
len(frames) % len(annotations) should be 0.
annotations (list[list[tuple]]): The predicted results.
    plate (list[tuple]): The color plate used for visualization. Default: plate_blue.
max_num (int): Max number of labels to visualize for a person box.
Default: 5.
Returns:
list[np.ndarray]: Visualized frames. | visualize | python | open-mmlab/mmaction2 | demo/demo_spatiotemporal_det_onnx.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_spatiotemporal_det_onnx.py | Apache-2.0 |
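A minimal input sketch, assuming the boxes are normalized to [0, 1] (which the multiplication by scale_ratio above implies); all values are placeholders:

import numpy as np

# One timestamp (na = 1) covering two frames (nfpa = 2).
frames = [np.zeros((360, 640, 3), dtype=np.uint8) for _ in range(2)]
anno = [[(np.array([0.1, 0.1, 0.5, 0.9]),           # normalized box
          ['stand', 'talk to (a person)'],          # label names
          [0.92, 0.71])]]                           # scores
vis_frames = visualize(frames, anno)                # len(vis_frames) == 2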
def load_label_map(file_path):
"""Load Label Map.
Args:
file_path (str): The file path of label map.
Returns:
dict: The label map (int -> label name).
"""
lines = open(file_path).readlines()
lines = [x.strip().split(': ') for x in lines]
return {int(x[0]): x[1] for x in lines} | Load Label Map.
Args:
file_path (str): The file path of label map.
Returns:
dict: The label map (int -> label name). | load_label_map | python | open-mmlab/mmaction2 | demo/demo_spatiotemporal_det_onnx.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_spatiotemporal_det_onnx.py | Apache-2.0 |
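The expected file format is one `id: name` pair per line; a small sketch, with an illustrative path and entries in the style of the AVA label map shipped with mmaction2:

# label_map.txt (illustrative excerpt):
# 1: bend/bow (at the waist)
# 2: crawl
# 3: crouch/kneel
label_map = load_label_map('tools/data/ava/label_map.txt')
print(label_map[1])   # -> 'bend/bow (at the waist)'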
def abbrev(name):
"""Get the abbreviation of label name:
'take (an object) from (a person)' -> 'take ... from ...'
"""
while name.find('(') != -1:
st, ed = name.find('('), name.find(')')
name = name[:st] + '...' + name[ed + 1:]
return name | Get the abbreviation of label name:
'take (an object) from (a person)' -> 'take ... from ...' | abbrev | python | open-mmlab/mmaction2 | demo/demo_spatiotemporal_det_onnx.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_spatiotemporal_det_onnx.py | Apache-2.0 |
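Two quick checks of the abbreviation behaviour:

assert abbrev('take (an object) from (a person)') == 'take ... from ...'
assert abbrev('watch (e.g., TV)') == 'watch ...'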
def pack_result(human_detection, result, img_h, img_w):
"""Short summary.
Args:
human_detection (np.ndarray): Human detection result.
        result (list): The predicted action labels and scores for each human proposal.
img_h (int): The image height.
img_w (int): The image width.
Returns:
tuple: Tuple of human proposal, label name and label score.
"""
human_detection[:, 0::2] /= img_w
human_detection[:, 1::2] /= img_h
results = []
if result is None:
return None
for prop, res in zip(human_detection, result):
res.sort(key=lambda x: -x[1])
results.append(
(prop.data.cpu().numpy(), [x[0] for x in res], [x[1]
for x in res]))
    return results | Pack the human detection and action prediction results.
Args:
human_detection (np.ndarray): Human detection result.
result (type): The predicted label of each human proposal.
img_h (int): The image height.
img_w (int): The image width.
Returns:
tuple: Tuple of human proposal, label name and label score. | pack_result | python | open-mmlab/mmaction2 | demo/demo_spatiotemporal_det_onnx.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_spatiotemporal_det_onnx.py | Apache-2.0 |
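A small sketch with a single proposal; the box is in absolute pixel coordinates and `result` holds (label, score) pairs per proposal, as produced by the prediction loop in the demo's main function:

import torch

det = torch.tensor([[64., 36., 320., 324.]])             # one proposal, xyxy
res = [[('stand', 0.91), ('talk to (a person)', 0.42)]]  # its (label, score) pairs
packed = pack_result(det, res, img_h=360, img_w=640)
# packed[0] -> (array([0.1, 0.1, 0.5, 0.9]), ['stand', 'talk to (a person)'], [0.91, 0.42])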
def dense_timestamps(timestamps, n):
"""Make it nx frames."""
old_frame_interval = (timestamps[1] - timestamps[0])
start = timestamps[0] - old_frame_interval / n * (n - 1) / 2
new_frame_inds = np.arange(
len(timestamps) * n) * old_frame_interval / n + start
return new_frame_inds.astype(np.int64) | Make it nx frames. | main.dense_timestamps | python | open-mmlab/mmaction2 | demo/demo_spatiotemporal_det_onnx.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_spatiotemporal_det_onnx.py | Apache-2.0 |
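A worked example of the densification, assuming evenly spaced timestamps:

import numpy as np

ts = np.array([32, 40, 48])        # original predict-step timestamps
print(dense_timestamps(ts, 2))     # -> [30 34 38 42 46 50]
# The 8-frame interval is halved and the window is re-centred, giving
# twice as many frame indices.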
def main():
args = parse_args()
tmp_dir = tempfile.TemporaryDirectory()
frame_paths, original_frames = frame_extract(
args.video, out_dir=tmp_dir.name)
num_frame = len(frame_paths)
h, w, _ = original_frames[0].shape
# resize frames to shortside
    new_w, new_h = mmcv.rescale_size((w, h), (args.short_side, np.inf))
frames = [mmcv.imresize(img, (new_w, new_h)) for img in original_frames]
w_ratio, h_ratio = new_w / w, new_h / h
# Get clip_len, frame_interval and calculate center index of each clip
config = mmengine.Config.fromfile(args.config)
config.merge_from_dict(args.cfg_options)
val_pipeline = config.val_pipeline
sampler = [
x for x in val_pipeline if get_str_type(x['type']) == 'SampleAVAFrames'
][0]
clip_len, frame_interval = sampler['clip_len'], sampler['frame_interval']
window_size = clip_len * frame_interval
assert clip_len % 2 == 0, 'We would like to have an even clip_len'
# Note that it's 1 based here
timestamps = np.arange(window_size // 2, num_frame + 1 - window_size // 2,
args.predict_stepsize)
# Load label_map
label_map = load_label_map(args.label_map)
try:
if config['data']['train']['custom_classes'] is not None:
label_map = {
id + 1: label_map[cls]
for id, cls in enumerate(config['data']['train']
['custom_classes'])
}
except KeyError:
pass
# Get Human detection results
center_frames = [frame_paths[ind - 1] for ind in timestamps]
human_detections, _ = detection_inference(args.det_config,
args.det_checkpoint,
center_frames,
args.det_score_thr,
args.det_cat_id, args.device)
torch.cuda.empty_cache()
for i in range(len(human_detections)):
det = human_detections[i]
det[:, 0:4:2] *= w_ratio
det[:, 1:4:2] *= h_ratio
human_detections[i] = torch.from_numpy(det[:, :4]).to(args.device)
# Build STDET model
session = onnxruntime.InferenceSession(args.onnx_file)
predictions = []
img_norm_cfg = dict(
mean=np.array(config.model.data_preprocessor.mean),
std=np.array(config.model.data_preprocessor.std),
to_rgb=False)
print('Performing SpatioTemporal Action Detection for each clip')
assert len(timestamps) == len(human_detections)
prog_bar = mmengine.ProgressBar(len(timestamps))
for timestamp, proposal in zip(timestamps, human_detections):
if proposal.shape[0] == 0:
predictions.append(None)
continue
start_frame = timestamp - (clip_len // 2 - 1) * frame_interval
frame_inds = start_frame + np.arange(0, window_size, frame_interval)
frame_inds = list(frame_inds - 1)
imgs = [frames[ind].astype(np.float32) for ind in frame_inds]
_ = [mmcv.imnormalize_(img, **img_norm_cfg) for img in imgs]
# THWC -> CTHW -> 1CTHW
input_array = np.stack(imgs).transpose((3, 0, 1, 2))[np.newaxis]
rois = bbox2roi([proposal])
input_feed = {
'input_tensor': input_array,
'rois': rois.cpu().data.numpy()
}
outputs = session.run(['cls_score'], input_feed=input_feed)
logits = outputs[0]
scores = 1 / (1 + np.exp(-logits))
prediction = []
# N proposals
for i in range(proposal.shape[0]):
prediction.append([])
# Perform action score thr
for i in range(scores.shape[1]):
if i not in label_map:
continue
for j in range(proposal.shape[0]):
if scores[j, i] > args.action_score_thr:
prediction[j].append((label_map[i], scores[j, i].item()))
predictions.append(prediction)
prog_bar.update()
results = []
for human_detection, prediction in zip(human_detections, predictions):
results.append(pack_result(human_detection, prediction, new_h, new_w))
def dense_timestamps(timestamps, n):
"""Make it nx frames."""
old_frame_interval = (timestamps[1] - timestamps[0])
start = timestamps[0] - old_frame_interval / n * (n - 1) / 2
new_frame_inds = np.arange(
len(timestamps) * n) * old_frame_interval / n + start
return new_frame_inds.astype(np.int64)
dense_n = int(args.predict_stepsize / args.output_stepsize)
frames = [
cv2.imread(frame_paths[i - 1])
for i in dense_timestamps(timestamps, dense_n)
]
print('Performing visualization')
vis_frames = visualize(frames, results)
vid = mpy.ImageSequenceClip([x[:, :, ::-1] for x in vis_frames],
fps=args.output_fps)
vid.write_videofile(args.out_filename)
tmp_dir.cleanup() | Make it nx frames. | main | python | open-mmlab/mmaction2 | demo/demo_spatiotemporal_det_onnx.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_spatiotemporal_det_onnx.py | Apache-2.0 |
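The classification step above boils down to a sigmoid over the ONNX head's logits followed by a per-class threshold; a stripped-down sketch of that post-processing, assuming label_map and the threshold are loaded as in the script:

import numpy as np

def postprocess(logits, label_map, thr=0.4):
    # logits: (num_proposals, num_classes) array returned by the ONNX session.
    scores = 1 / (1 + np.exp(-logits))            # multi-label sigmoid
    preds = [[] for _ in range(scores.shape[0])]
    for cls in range(scores.shape[1]):
        if cls not in label_map:                  # skip undefined class ids
            continue
        for prop in range(scores.shape[0]):
            if scores[prop, cls] > thr:
                preds[prop].append((label_map[cls], float(scores[prop, cls])))
    return preds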
def hex2color(h):
"""Convert the 6-digit hex string to tuple of 3 int value (RGB)"""
return (int(h[:2], 16), int(h[2:4], 16), int(h[4:], 16)) | Convert the 6-digit hex string to tuple of 3 int value (RGB) | hex2color | python | open-mmlab/mmaction2 | demo/demo_video_structuralize.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_video_structuralize.py | Apache-2.0 |
def visualize(args,
frames,
annotations,
pose_data_samples,
action_result,
plate=PLATEBLUE,
max_num=5):
"""Visualize frames with predicted annotations.
Args:
frames (list[np.ndarray]): Frames for visualization, note that
len(frames) % len(annotations) should be 0.
annotations (list[list[tuple]]): The predicted spatio-temporal
detection results.
        pose_data_samples (list[PoseDataSample]): The pose estimation results.
        action_result (str): The predicted action recognition results.
        plate (list[tuple]): The color plate used for visualization. Default: PLATEBLUE.
max_num (int): Max number of labels to visualize for a person box.
Default: 5.
Returns:
list[np.ndarray]: Visualized frames.
"""
assert max_num + 1 <= len(plate)
frames_ = cp.deepcopy(frames)
frames_ = [mmcv.imconvert(f, 'bgr', 'rgb') for f in frames_]
nf, na = len(frames), len(annotations)
assert nf % na == 0
nfpa = len(frames) // len(annotations)
anno = None
h, w, _ = frames[0].shape
scale_ratio = np.array([w, h, w, h])
# add pose results
if pose_data_samples:
pose_config = mmengine.Config.fromfile(args.pose_config)
visualizer = VISUALIZERS.build(pose_config.visualizer)
visualizer.set_dataset_meta(pose_data_samples[0].dataset_meta)
for i, (d, f) in enumerate(zip(pose_data_samples, frames_)):
visualizer.add_datasample(
'result',
f,
data_sample=d,
draw_gt=False,
draw_heatmap=False,
draw_bbox=True,
show=False,
wait_time=0,
out_file=None,
kpt_thr=0.3)
frames_[i] = visualizer.get_image()
cv2.putText(frames_[i], action_result, (10, 30), FONTFACE,
FONTSCALE, FONTCOLOR, THICKNESS, LINETYPE)
for i in range(na):
anno = annotations[i]
if anno is None:
continue
for j in range(nfpa):
ind = i * nfpa + j
frame = frames_[ind]
# add action result for whole video
cv2.putText(frame, action_result, (10, 30), FONTFACE, FONTSCALE,
FONTCOLOR, THICKNESS, LINETYPE)
# add spatio-temporal action detection results
for ann in anno:
box = ann[0]
label = ann[1]
if not len(label):
continue
score = ann[2]
box = (box * scale_ratio).astype(np.int64)
st, ed = tuple(box[:2]), tuple(box[2:])
if not pose_data_samples:
cv2.rectangle(frame, st, ed, plate[0], 2)
for k, lb in enumerate(label):
if k >= max_num:
break
text = abbrev(lb)
text = ': '.join([text, f'{score[k]:.3f}'])
location = (0 + st[0], 18 + k * 18 + st[1])
textsize = cv2.getTextSize(text, FONTFACE, FONTSCALE,
THICKNESS)[0]
textwidth = textsize[0]
diag0 = (location[0] + textwidth, location[1] - 14)
diag1 = (location[0], location[1] + 2)
cv2.rectangle(frame, diag0, diag1, plate[k + 1], -1)
cv2.putText(frame, text, location, FONTFACE, FONTSCALE,
FONTCOLOR, THICKNESS, LINETYPE)
return frames_ | Visualize frames with predicted annotations.
Args:
frames (list[np.ndarray]): Frames for visualization, note that
len(frames) % len(annotations) should be 0.
annotations (list[list[tuple]]): The predicted spatio-temporal
detection results.
    pose_data_samples (list[PoseDataSample]): The pose estimation results.
    action_result (str): The predicted action recognition results.
    plate (list[tuple]): The color plate used for visualization. Default: PLATEBLUE.
max_num (int): Max number of labels to visualize for a person box.
Default: 5.
Returns:
list[np.ndarray]: Visualized frames. | visualize | python | open-mmlab/mmaction2 | demo/demo_video_structuralize.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_video_structuralize.py | Apache-2.0 |
def load_label_map(file_path):
"""Load Label Map.
Args:
file_path (str): The file path of label map.
Returns:
dict: The label map (int -> label name).
"""
lines = open(file_path).readlines()
lines = [x.strip().split(': ') for x in lines]
return {int(x[0]): x[1] for x in lines} | Load Label Map.
Args:
file_path (str): The file path of label map.
Returns:
dict: The label map (int -> label name). | load_label_map | python | open-mmlab/mmaction2 | demo/demo_video_structuralize.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_video_structuralize.py | Apache-2.0 |
def abbrev(name):
"""Get the abbreviation of label name:
'take (an object) from (a person)' -> 'take ... from ...'
"""
while name.find('(') != -1:
st, ed = name.find('('), name.find(')')
name = name[:st] + '...' + name[ed + 1:]
return name | Get the abbreviation of label name:
'take (an object) from (a person)' -> 'take ... from ...' | abbrev | python | open-mmlab/mmaction2 | demo/demo_video_structuralize.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_video_structuralize.py | Apache-2.0 |