def intersection(masks1, masks2):
"""Compute pairwise intersection areas between masks.
Args:
masks1: a numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
masks2: a numpy array with shape [M, height, width] holding M masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
a numpy array with shape [N, M] representing pairwise intersection areas.
Raises:
ValueError: If masks1 and masks2 are not of type np.uint8.
"""
if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:
raise ValueError("masks1 and masks2 should be of type np.uint8")
n = masks1.shape[0]
m = masks2.shape[0]
answer = np.zeros([n, m], dtype=np.float32)
for i in np.arange(n):
for j in np.arange(m):
answer[i, j] = np.sum(
np.minimum(masks1[i], masks2[j]), dtype=np.float32
)
return answer
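
# Illustrative alternative (not part of the original utility): the same
# pairwise intersection computed with broadcasting instead of the double
# Python loop, assuming the [N, M, height, width] intermediate fits in memory.
def intersection_vectorized(masks1, masks2):
    # The elementwise minimum of two {0,1} masks is their intersection;
    # summing over the spatial axes yields an [N, M] float32 result.
    inter = np.minimum(
        masks1[:, None].astype(np.float32), masks2[None].astype(np.float32)
    )
    return inter.sum(axis=(2, 3))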


def iou(masks1, masks2):
"""Computes pairwise intersection-over-union between mask collections.
Args:
masks1: a numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
masks2: a numpy array with shape [M, height, width] holding M masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
Raises:
ValueError: If masks1 and masks2 are not of type np.uint8.
"""
if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:
raise ValueError("masks1 and masks2 should be of type np.uint8")
intersect = intersection(masks1, masks2)
area1 = area(masks1)
area2 = area(masks2)
union = (
np.expand_dims(area1, axis=1)
+ np.expand_dims(area2, axis=0)
- intersect
)
return intersect / np.maximum(union, EPSILON)


def ioa(masks1, masks2):
"""Computes pairwise intersection-over-area between mask collections.
Intersection-over-area (ioa) between two masks, mask1 and mask2, is defined as
their intersection area over mask2's area. Note that ioa is not symmetric,
that is, IOA(mask1, mask2) != IOA(mask2, mask1).
Args:
masks1: a numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
masks2: a numpy array with shape [M, height, width] holding M masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
a numpy array with shape [N, M] representing pairwise ioa scores.
Raises:
ValueError: If masks1 and masks2 are not of type np.uint8.
"""
if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:
raise ValueError("masks1 and masks2 should be of type np.uint8")
intersect = intersection(masks1, masks2)
areas = np.expand_dims(area(masks2), axis=0)
return intersect / (areas + EPSILON)
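
# A tiny usage sketch for the three metrics above. It assumes `area(masks)`
# returns per-mask pixel counts and EPSILON is a small positive constant,
# as the code above implies.
def _demo_mask_metrics():
    masks1 = np.array([[[1, 1], [0, 0]]], dtype=np.uint8)  # one 2x2 mask
    masks2 = np.array(
        [[[1, 0], [0, 0]], [[1, 1], [1, 1]]], dtype=np.uint8
    )  # two 2x2 masks
    print(intersection(masks1, masks2))  # [[1. 2.]]
    print(iou(masks1, masks2))           # [[0.5 0.5]]
    print(ioa(masks1, masks2))           # [[1.  0.5]]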


def add_confusion_matrix(
writer,
cmtx,
num_classes,
global_step=None,
subset_ids=None,
class_names=None,
tag="Confusion Matrix",
figsize=None,
):
"""
Calculate and plot confusion matrix to a SummaryWriter.
Args:
writer (SummaryWriter): the SummaryWriter to write the matrix to.
cmtx (ndarray): confusion matrix.
num_classes (int): total number of classes.
global_step (Optional[int]): current step.
subset_ids (list of ints): a list of label indices to keep.
class_names (list of strs, optional): a list of all class names.
tag (str or list of strs): name(s) of the confusion matrix image.
figsize (Optional[float, float]): the figure size of the confusion matrix.
If None, default to [6.4, 4.8].
"""
if subset_ids is None or len(subset_ids) != 0:
# If class names are not provided, use class indices as class names.
if class_names is None:
class_names = [str(i) for i in range(num_classes)]
# If subset is not provided, take all classes.
if subset_ids is None:
subset_ids = list(range(num_classes))
sub_cmtx = cmtx[subset_ids, :][:, subset_ids]
sub_names = [class_names[j] for j in subset_ids]
sub_cmtx = vis_utils.plot_confusion_matrix(
sub_cmtx,
num_classes=len(subset_ids),
class_names=sub_names,
figsize=figsize,
)
# Add the confusion matrix image to writer.
writer.add_figure(tag=tag, figure=sub_cmtx, global_step=global_step)
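
# Hypothetical usage sketch (the writer path, preds, labels, and epoch are
# assumptions): log a 3-class confusion matrix for the current evaluation.
#
#   from torch.utils.tensorboard import SummaryWriter
#   writer = SummaryWriter("runs/demo")
#   cmtx = get_confusion_matrix(preds, labels, num_classes=3)  # defined below
#   add_confusion_matrix(writer, cmtx, num_classes=3, global_step=epoch)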


def plot_hist(
writer,
cmtx,
num_classes,
k=10,
global_step=None,
subset_ids=None,
class_names=None,
figsize=None,
):
"""
Given all predictions and all true labels, plot histograms of top-k most
frequently predicted classes for each true class.
Args:
writer (SummaryWriter object): a tensorboard SummaryWriter object.
cmtx (ndarray): confusion matrix.
num_classes (int): total number of classes.
k (int): top k to plot histograms.
global_step (Optional[int]): current step.
subset_ids (list of ints, optional): class indices to plot histograms for.
class_names (list of strs, optional): a list of all class names.
figsize (Optional[float, float]): the figure size of the histograms.
If None, default to [6.4, 4.8].
"""
if subset_ids is None or len(subset_ids) != 0:
if subset_ids is None:
subset_ids = set(range(num_classes))
else:
subset_ids = set(subset_ids)
# If class names are not provided, use their indices as names.
if class_names is None:
class_names = list(range(num_classes))
for i in subset_ids:
pred = cmtx[i]
hist = vis_utils.plot_topk_histogram(
class_names[i],
torch.Tensor(pred),
k,
class_names,
figsize=figsize,
)
writer.add_figure(
tag="Top {} predictions by classes/{}".format(
k, class_names[i]
),
figure=hist,
global_step=global_step,
)


def add_ndim_array(
writer,
array,
name,
nrow=None,
normalize=False,
global_step=None,
heat_map=True,
):
"""
Visualize and add an n-dimensional tensor to a Tensorboard SummaryWriter. The tensor
will be visualized as a 2D grid image.
Args:
writer (SummaryWriter): Tensorboard SummaryWriter.
array (tensor): tensor to visualize.
name (str): name of the tensor.
nrow (Optional[int]): number of 2D filters in each row in the grid image.
normalize (bool): whether to normalize when we have multiple 2D filters.
Default to False.
global_step (Optional[int]): current step.
heat_map (bool): whether to apply a heat map to each 2D filter in the array.
"""
if array is not None and array.ndim != 0:
if array.ndim == 1:
reshaped_array = array.unsqueeze(0)
if nrow is None:
nrow = int(math.sqrt(reshaped_array.size()[1]))
reshaped_array = reshaped_array.view(-1, nrow)
if heat_map:
reshaped_array = add_heatmap(reshaped_array)
writer.add_image(
name,
reshaped_array,
global_step=global_step,
dataformats="CHW",
)
else:
writer.add_image(
name,
reshaped_array,
global_step=global_step,
dataformats="HW",
)
elif array.ndim == 2:
reshaped_array = array
if heat_map:
heatmap = add_heatmap(reshaped_array)
writer.add_image(
name, heatmap, global_step=global_step, dataformats="CHW"
)
else:
writer.add_image(
name,
reshaped_array,
global_step=global_step,
dataformats="HW",
)
else:
last2_dims = array.size()[-2:]
reshaped_array = array.view(-1, *last2_dims)
if heat_map:
reshaped_array = [
add_heatmap(array_2d).unsqueeze(0)
for array_2d in reshaped_array
]
reshaped_array = torch.cat(reshaped_array, dim=0)
else:
reshaped_array = reshaped_array.unsqueeze(1)
if nrow is None:
nrow = int(math.sqrt(reshaped_array.size()[0]))
img_grid = make_grid(
reshaped_array, nrow, padding=1, normalize=normalize
)
writer.add_image(name, img_grid, global_step=global_step)


def add_heatmap(tensor):
"""
Add heatmap to 2D tensor.
Args:
tensor (tensor): a 2D tensor. Tensor value must be in [0..1] range.
Returns:
heatmap (tensor): a 3D tensor. Result of applying heatmap to the 2D tensor.
"""
assert tensor.ndim == 2, "Only support 2D tensors."
# Move tensor to cpu if necessary.
if tensor.device != torch.device("cpu"):
arr = tensor.cpu()
else:
arr = tensor
arr = arr.numpy()
# Get the color map by name.
cm = plt.get_cmap("viridis")
heatmap = cm(arr)
heatmap = heatmap[:, :, :3]
# Convert (H, W, C) to (C, H, W)
heatmap = torch.Tensor(heatmap).permute(2, 0, 1)
return heatmap
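
# Minimal sketch of add_heatmap: values in [0, 1] are mapped through the
# "viridis" colormap to an RGB image in (C, H, W) layout.
def _demo_add_heatmap():
    grid = torch.linspace(0, 1, steps=16).reshape(4, 4)
    heatmap = add_heatmap(grid)
    print(heatmap.shape)  # torch.Size([3, 4, 4])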


def get_confusion_matrix(preds, labels, num_classes, normalize="true"):
"""
Calculate confusion matrix on the provided preds and labels.
Args:
preds (tensor or lists of tensors): predictions. Each tensor is in
the shape of (n_batch, num_classes). Tensor(s) must be on CPU.
labels (tensor or lists of tensors): corresponding labels. Each tensor is
in the shape of either (n_batch,) or (n_batch, num_classes).
num_classes (int): number of classes.
normalize (Optional[str]): {'true', 'pred', 'all'}, default='true'.
Normalizes confusion matrix over the true (rows), predicted (columns)
conditions or all the population. If None, confusion matrix
will not be normalized.
Returns:
cmtx (ndarray): confusion matrix of size (num_classes x num_classes)
"""
if isinstance(preds, list):
preds = torch.cat(preds, dim=0)
if isinstance(labels, list):
labels = torch.cat(labels, dim=0)
# If labels are one-hot encoded, get their indices.
if labels.ndim == preds.ndim:
labels = torch.argmax(labels, dim=-1)
# Get the predicted class indices for examples.
preds = torch.flatten(torch.argmax(preds, dim=-1))
labels = torch.flatten(labels)
cmtx = confusion_matrix(
labels, preds, labels=list(range(num_classes)), normalize=normalize
)
return cmtx
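
# Toy sketch of get_confusion_matrix: three examples over two classes with
# one mistake; rows are normalized over the true labels by default.
def _demo_confusion_matrix():
    preds = torch.tensor([[0.9, 0.1], [0.2, 0.8], [0.7, 0.3]])  # argmax: 0, 1, 0
    labels = torch.tensor([0, 1, 1])
    cmtx = get_confusion_matrix(preds, labels, num_classes=2)
    print(cmtx)
    # [[1.  0. ]
    #  [0.5 0.5]]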


def plot_confusion_matrix(cmtx, num_classes, class_names=None, figsize=None):
"""
A function to create a colored and labeled confusion matrix matplotlib figure
given true labels and preds.
Args:
cmtx (ndarray): confusion matrix.
num_classes (int): total number of classes.
class_names (Optional[list of strs]): a list of class names.
figsize (Optional[float, float]): the figure size of the confusion matrix.
If None, default to [6.4, 4.8].
Returns:
img (figure): matplotlib figure.
"""
if class_names is None or not isinstance(class_names, list):
class_names = [str(i) for i in range(num_classes)]
figure = plt.figure(figsize=figsize)
plt.imshow(cmtx, interpolation="nearest", cmap=plt.cm.Blues)
plt.title("Confusion matrix")
plt.colorbar()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names, rotation=45)
plt.yticks(tick_marks, class_names)
# Use white text if squares are dark; otherwise black.
threshold = cmtx.max() / 2.0
for i, j in itertools.product(range(cmtx.shape[0]), range(cmtx.shape[1])):
color = "white" if cmtx[i, j] > threshold else "black"
plt.text(
j,
i,
format(cmtx[i, j], ".2f") if cmtx[i, j] != 0 else ".",
horizontalalignment="center",
color=color,
)
plt.tight_layout()
plt.ylabel("True label")
plt.xlabel("Predicted label")
return figure


def plot_topk_histogram(tag, array, k=10, class_names=None, figsize=None):
"""
Plot histogram of top-k value from the given array.
Args:
tag (str): histogram title.
array (tensor): a tensor to draw top k value from.
k (int): number of top values to draw from array.
Default to 10.
class_names (list of strings, optional):
a list of names for values in array.
figsize (Optional[float, float]): the figure size of the histogram.
If None, default to [6.4, 4.8].
Returns:
fig (matplotlib figure): a matplotlib figure of the histogram.
"""
val, ind = torch.topk(array, k)
fig = plt.Figure(figsize=figsize, facecolor="w", edgecolor="k")
ax = fig.add_subplot(1, 1, 1)
if class_names is None:
class_names = [str(i) for i in ind]
else:
class_names = [class_names[i] for i in ind]
tick_marks = np.arange(k)
width = 0.75
ax.bar(
tick_marks,
val,
width,
color="orange",
tick_label=class_names,
edgecolor="w",
linewidth=1,
)
ax.set_xlabel("Candidates")
ax.set_xticks(tick_marks)
ax.set_xticklabels(class_names, rotation=-45, ha="center")
ax.xaxis.set_label_position("bottom")
ax.xaxis.tick_bottom()
y_tick = np.linspace(0, 1, num=10)
ax.set_ylabel("Frequency")
ax.set_yticks(y_tick)
y_labels = [format(i, ".1f") for i in y_tick]
ax.set_yticklabels(y_labels, ha="center")
for i, v in enumerate(val.numpy()):
ax.text(
i - 0.1,
v + 0.03,
format(v, ".2f"),
color="orange",
fontweight="bold",
)
ax.set_title(tag)
fig.set_tight_layout(True)
return fig
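
# Small sketch of plot_topk_histogram: top-3 entries of a toy per-class
# frequency vector (values kept in [0, 1] to match the fixed y-axis above).
def _demo_topk_histogram():
    freqs = torch.tensor([0.1, 0.6, 0.05, 0.2, 0.05])
    names = ["cat", "dog", "bird", "fish", "frog"]
    fig = plot_topk_histogram("Toy top-k", freqs, k=3, class_names=names)
    fig.savefig("topk_demo.png")  # hypothetical output path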


def get_indexing(string):
"""
Parse numpy-like fancy indexing from a string.
Args:
string (str): string representing the indices with which to take
a subset of an array. Indices within one dimension
are separated by `,`; indices for different dimensions
are separated by `;`.
e.g.: For a numpy array `arr` of shape (3,3,3), the string "1,2;1,2"
means taking the sub-array `arr[[1,2], [1,2]]`.
Returns:
final_indexing (tuple): the parsed indexing.
"""
index_ls = string.strip().split(";")
final_indexing = []
for index in index_ls:
index_single_dim = index.split(",")
index_single_dim = [int(i) for i in index_single_dim]
final_indexing.append(index_single_dim)
return tuple(final_indexing)
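
# Quick sketch of get_indexing: "1,2;1,2" parses into per-dimension index
# lists that numpy accepts directly as fancy indexing.
def _demo_get_indexing():
    idx = get_indexing("1,2;1,2")
    print(idx)  # ([1, 2], [1, 2])
    arr = np.arange(27).reshape(3, 3, 3)
    print(arr[idx].shape)  # (2, 3) -- the rows arr[1, 1] and arr[2, 2]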


def process_layer_index_data(layer_ls, layer_name_prefix=""):
"""
Extract layer names and numpy-like fancy indexing from a string.
Args:
layer_ls (list of strs): list of strings containing data about layer names
and their indexing. In each string, layer name and indexing are separated by whitespace.
e.g.: [layer1 1,2;2, layer2, layer3 150;3,4]
layer_name_prefix (Optional[str]): prefix to be added to each layer name.
Returns:
layer_name (list of strings): a list of layer names.
indexing_dict (Python dict): a dictionary of the pair
{one_layer_name: indexing_for_that_layer}
"""
layer_name, indexing_dict = [], {}
for layer in layer_ls:
ls = layer.split()
name = layer_name_prefix + ls[0]
layer_name.append(name)
if len(ls) == 2:
indexing_dict[name] = get_indexing(ls[1])
else:
indexing_dict[name] = ()
return layer_name, indexing_dict
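
# Quick sketch of process_layer_index_data on strings shaped like the
# cfg.TENSORBOARD.MODEL_VIS.LAYER_LIST entries described above.
def _demo_layer_index():
    names, indexing = process_layer_index_data(["layer1 1,2;2", "layer2"])
    print(names)     # ['layer1', 'layer2']
    print(indexing)  # {'layer1': ([1, 2], [2]), 'layer2': ()}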


def process_cv2_inputs(frames, cfg):
"""
Normalize and prepare inputs as a list of tensors. Each tensor
corresponds to a unique pathway.
Args:
frames (list of arrays): list of input images (corresponding to one clip) in range [0, 255].
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
"""
inputs = torch.from_numpy(np.array(frames)).float() / 255
inputs = tensor_normalize(inputs, cfg.DATA.MEAN, cfg.DATA.STD)
# T H W C -> C T H W.
inputs = inputs.permute(3, 0, 1, 2)
# Sample frames for num_frames specified.
index = torch.linspace(0, inputs.shape[1] - 1, cfg.DATA.NUM_FRAMES).long()
inputs = torch.index_select(inputs, 1, index)
inputs = pack_pathway_output(cfg, inputs)
inputs = [inp.unsqueeze(0) for inp in inputs]
return inputs


def get_layer(model, layer_name):
"""
Return the targeted layer (nn.Module object) given a hierarchical layer name,
separated by /.
Args:
model (model): model to get layers from.
layer_name (str): name of the layer.
Returns:
prev_module (nn.Module): the layer from the model with `layer_name` name.
"""
layer_ls = layer_name.split("/")
prev_module = model
for layer in layer_ls:
prev_module = prev_module._modules[layer]
return prev_module
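
# Minimal sketch of get_layer: "/" walks the nested _modules hierarchy
# by name, here on a small hand-built model.
def _demo_get_layer():
    import torch.nn as nn

    model = nn.Sequential()
    model.add_module("backbone", nn.Sequential())
    model.backbone.add_module("conv1", nn.Conv2d(3, 8, 3))
    print(get_layer(model, "backbone/conv1"))
    # Conv2d(3, 8, kernel_size=(3, 3), stride=(1, 1))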


def main():
"""
Main function to spawn the train and test process.
"""
args = parse_args()
if args.num_shards > 1:
args.output_dir = str(args.job_dir)
cfg = load_config(args)
train, test = get_func(cfg)
# Perform training.
if cfg.TRAIN.ENABLE:
launch_job(cfg=cfg, init_method=args.init_method, func=train)
# Perform multi-clip testing.
if cfg.TEST.ENABLE:
launch_job(cfg=cfg, init_method=args.init_method, func=test)
# Perform model visualization.
if cfg.TENSORBOARD.ENABLE and (
cfg.TENSORBOARD.MODEL_VIS.ENABLE
or cfg.TENSORBOARD.WRONG_PRED_VIS.ENABLE
):
launch_job(cfg=cfg, init_method=args.init_method, func=visualize)


def perform_test(test_loader, model, test_meter, cfg, writer=None):
"""
For classification:
Perform multi-view testing that uniformly samples N clips from a video along
its temporal axis. For each clip, it takes 3 crops to cover the spatial
dimension, followed by averaging the softmax scores across all Nx3 views to
form a video-level prediction. All video predictions are compared to
ground-truth labels and the final testing performance is logged.
For detection:
Perform fully-convolutional testing on the full frames without crop.
Args:
test_loader (loader): video testing loader.
model (model): the pretrained video model to test.
test_meter (TestMeter): testing meters to log and ensemble the testing
results.
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
writer (TensorboardWriter object, optional): TensorboardWriter object
to write Tensorboard logs.
"""
# Enable eval mode.
model.eval()
test_meter.iter_tic()
for cur_iter, (inputs, labels, video_idx, meta) in enumerate(test_loader):
if cfg.NUM_GPUS:
# Transfer the data to the current GPU device.
if isinstance(inputs, (list,)):
for i in range(len(inputs)):
inputs[i] = inputs[i].cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
# Transfer the data to the current GPU device.
labels = labels.cuda()
video_idx = video_idx.cuda()
for key, val in meta.items():
if isinstance(val, (list,)):
for i in range(len(val)):
val[i] = val[i].cuda(non_blocking=True)
else:
meta[key] = val.cuda(non_blocking=True)
test_meter.data_toc()
if cfg.DETECTION.ENABLE:
# Compute the predictions.
preds = model(inputs, meta["boxes"])
ori_boxes = meta["ori_boxes"]
metadata = meta["metadata"]
preds = preds.detach().cpu() if cfg.NUM_GPUS else preds.detach()
ori_boxes = (
ori_boxes.detach().cpu() if cfg.NUM_GPUS else ori_boxes.detach()
)
metadata = (
metadata.detach().cpu() if cfg.NUM_GPUS else metadata.detach()
)
if cfg.NUM_GPUS > 1:
preds = torch.cat(du.all_gather_unaligned(preds), dim=0)
ori_boxes = torch.cat(du.all_gather_unaligned(ori_boxes), dim=0)
metadata = torch.cat(du.all_gather_unaligned(metadata), dim=0)
test_meter.iter_toc()
# Update and log stats.
test_meter.update_stats(preds, ori_boxes, metadata)
test_meter.log_iter_stats(None, cur_iter)
else:
# Perform the forward pass.
preds = model(inputs)
# Gather all the predictions across all the devices to perform ensemble.
if cfg.NUM_GPUS > 1:
preds, labels, video_idx = du.all_gather(
[preds, labels, video_idx]
)
if cfg.NUM_GPUS:
preds = preds.cpu()
labels = labels.cpu()
video_idx = video_idx.cpu()
test_meter.iter_toc()
# Update and log stats.
test_meter.update_stats(
preds.detach(), labels.detach(), video_idx.detach()
)
test_meter.log_iter_stats(cur_iter)
test_meter.iter_tic()
# Log epoch stats and print the final testing results.
if not cfg.DETECTION.ENABLE:
all_preds = test_meter.video_preds.clone().detach()
all_labels = test_meter.video_labels
if cfg.NUM_GPUS:
all_preds = all_preds.cpu()
all_labels = all_labels.cpu()
if writer is not None:
writer.plot_eval(preds=all_preds, labels=all_labels)
if cfg.TEST.SAVE_RESULTS_PATH != "":
save_path = os.path.join(cfg.OUTPUT_DIR, cfg.TEST.SAVE_RESULTS_PATH)
with PathManager.open(save_path, "wb") as f:
pickle.dump([all_preds, all_labels], f)
logger.info(
"Successfully saved prediction results to {}".format(save_path)
)
test_meter.finalize_metrics()
return test_meter


def test(cfg):
"""
Perform multi-view testing on the pretrained video model.
Args:
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
"""
# Set up environment.
du.init_distributed_training(cfg)
# Set random seed from configs.
np.random.seed(cfg.RNG_SEED)
torch.manual_seed(cfg.RNG_SEED)
# Setup logging format.
logging.setup_logging(cfg.OUTPUT_DIR)
# Print config.
logger.info("Test with config:")
logger.info(cfg)
# Build the video model and print model statistics.
model = build_model(cfg)
if du.is_master_proc() and cfg.LOG_MODEL_INFO:
misc.log_model_info(model, cfg, use_train_input=False)
cu.load_test_checkpoint(cfg, model)
# Create video testing loaders.
test_loader = loader.construct_loader(cfg, "test")
logger.info("Testing model for {} iterations".format(len(test_loader)))
assert (
len(test_loader.dataset)
% (cfg.TEST.NUM_ENSEMBLE_VIEWS * cfg.TEST.NUM_SPATIAL_CROPS)
== 0
)
# Create meters for multi-view testing.
test_meter = TestMeter(
len(test_loader.dataset)
// (cfg.TEST.NUM_ENSEMBLE_VIEWS * cfg.TEST.NUM_SPATIAL_CROPS),
cfg.TEST.NUM_ENSEMBLE_VIEWS * cfg.TEST.NUM_SPATIAL_CROPS,
cfg.MODEL.NUM_CLASSES,
len(test_loader),
cfg.DATA.MULTI_LABEL,
cfg.DATA.ENSEMBLE_METHOD,
)
# Set up writer for logging to Tensorboard format.
if cfg.TENSORBOARD.ENABLE and du.is_master_proc(
cfg.NUM_GPUS * cfg.NUM_SHARDS
):
writer = tb.TensorboardWriter(cfg)
else:
writer = None
# Perform multi-view test on the entire dataset.
test_meter = perform_test(test_loader, model, test_meter, cfg, writer)
if writer is not None:
writer.close()


def train_epoch(
train_loader, model, optimizer, train_meter, cur_epoch, cfg, writer=None
):
"""
Perform the video training for one epoch.
Args:
train_loader (loader): video training loader.
model (model): the video model to train.
optimizer (optim): the optimizer to perform optimization on the model's
parameters.
train_meter (TrainMeter): training meters to log the training performance.
cur_epoch (int): current epoch of training.
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
writer (TensorboardWriter, optional): TensorboardWriter object
to write Tensorboard logs.
"""
# Enable train mode.
model.train()
train_meter.iter_tic()
data_size = len(train_loader)
cur_global_batch_size = cfg.NUM_SHARDS * cfg.TRAIN.BATCH_SIZE
num_iters = cfg.GLOBAL_BATCH_SIZE // cur_global_batch_size
for cur_iter, (inputs, labels, _, meta) in enumerate(train_loader):
# Transfer the data to the current GPU device.
if cfg.NUM_GPUS:
if isinstance(inputs, (list,)):
for i in range(len(inputs)):
inputs[i] = inputs[i].cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
labels = labels.cuda()
for key, val in meta.items():
if isinstance(val, (list,)):
for i in range(len(val)):
val[i] = val[i].cuda(non_blocking=True)
else:
meta[key] = val.cuda(non_blocking=True)
# Update the learning rate.
lr = optim.get_epoch_lr(cur_epoch + float(cur_iter) / data_size, cfg)
optim.set_lr(optimizer, lr)
train_meter.data_toc()
# Explicitly declare reduction to mean.
if not cfg.MIXUP.ENABLED:
loss_fun = losses.get_loss_func(cfg.MODEL.LOSS_FUNC)(reduction="mean")
else:
mixup_fn = Mixup(
mixup_alpha=cfg.MIXUP.ALPHA, cutmix_alpha=cfg.MIXUP.CUTMIX_ALPHA, cutmix_minmax=cfg.MIXUP.CUTMIX_MINMAX, prob=cfg.MIXUP.PROB, switch_prob=cfg.MIXUP.SWITCH_PROB, mode=cfg.MIXUP.MODE,
label_smoothing=0.1, num_classes=cfg.MODEL.NUM_CLASSES)
hard_labels = labels
inputs, labels = mixup_fn(inputs, labels)
loss_fun = SoftTargetCrossEntropy()
if cfg.DETECTION.ENABLE:
preds = model(inputs, meta["boxes"])
else:
preds = model(inputs)
# Compute the loss.
loss = loss_fun(preds, labels)
if cfg.MIXUP.ENABLED:
labels = hard_labels
# check Nan Loss.
misc.check_nan_losses(loss)
if cur_global_batch_size >= cfg.GLOBAL_BATCH_SIZE:
# Perform the backward pass.
optimizer.zero_grad()
loss.backward()
# Update the parameters.
optimizer.step()
else:
if cur_iter == 0:
optimizer.zero_grad()
loss.backward()
if (cur_iter + 1) % num_iters == 0:
for p in model.parameters():
p.grad /= num_iters
optimizer.step()
optimizer.zero_grad()
if cfg.DETECTION.ENABLE:
if cfg.NUM_GPUS > 1:
loss = du.all_reduce([loss])[0]
loss = loss.item()
# Update and log stats.
train_meter.update_stats(None, None, None, loss, lr)
# write to tensorboard format if available.
if writer is not None:
writer.add_scalars(
{"Train/loss": loss, "Train/lr": lr},
global_step=data_size * cur_epoch + cur_iter,
)
else:
top1_err, top5_err = None, None
if cfg.DATA.MULTI_LABEL:
# Gather all the predictions across all the devices.
if cfg.NUM_GPUS > 1:
[loss] = du.all_reduce([loss])
loss = loss.item()
else:
# Compute the errors.
num_topks_correct = metrics.topks_correct(preds, labels, (1, 5))
top1_err, top5_err = [
(1.0 - x / preds.size(0)) * 100.0 for x in num_topks_correct
]
# Gather all the predictions across all the devices.
if cfg.NUM_GPUS > 1:
loss, top1_err, top5_err = du.all_reduce(
[loss, top1_err, top5_err]
)
# Copy the stats from GPU to CPU (sync point).
loss, top1_err, top5_err = (
loss.item(),
top1_err.item(),
top5_err.item(),
)
# Update and log stats.
train_meter.update_stats(
top1_err,
top5_err,
loss,
lr,
inputs[0].size(0)
* max(
cfg.NUM_GPUS, 1
), # If running on CPU (cfg.NUM_GPUS == 0), use 1 to represent 1 CPU.
)
# write to tensorboard format if available.
if writer is not None:
writer.add_scalars(
{
"Train/loss": loss,
"Train/lr": lr,
"Train/Top1_err": top1_err,
"Train/Top5_err": top5_err,
},
global_step=data_size * cur_epoch + cur_iter,
)
train_meter.iter_toc() # measure allreduce for this meter
train_meter.log_iter_stats(cur_epoch, cur_iter)
train_meter.iter_tic()
# Log epoch stats.
train_meter.log_epoch_stats(cur_epoch)
train_meter.reset()


def eval_epoch(val_loader, model, val_meter, cur_epoch, cfg, writer=None):
"""
Evaluate the model on the val set.
Args:
val_loader (loader): data loader to provide validation data.
model (model): model to evaluate the performance.
val_meter (ValMeter): meter instance to record and calculate the metrics.
cur_epoch (int): number of the current epoch of training.
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
writer (TensorboardWriter, optional): TensorboardWriter object
to write Tensorboard logs.
"""
# Evaluation mode enabled. The running stats would not be updated.
model.eval()
val_meter.iter_tic()
for cur_iter, (inputs, labels, _, meta) in enumerate(val_loader):
if cfg.NUM_GPUS:
# Transfer the data to the current GPU device.
if isinstance(inputs, (list,)):
for i in range(len(inputs)):
inputs[i] = inputs[i].cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
labels = labels.cuda()
for key, val in meta.items():
if isinstance(val, (list,)):
for i in range(len(val)):
val[i] = val[i].cuda(non_blocking=True)
else:
meta[key] = val.cuda(non_blocking=True)
val_meter.data_toc()
if cfg.DETECTION.ENABLE:
# Compute the predictions.
preds = model(inputs, meta["boxes"])
ori_boxes = meta["ori_boxes"]
metadata = meta["metadata"]
if cfg.NUM_GPUS:
preds = preds.cpu()
ori_boxes = ori_boxes.cpu()
metadata = metadata.cpu()
if cfg.NUM_GPUS > 1:
preds = torch.cat(du.all_gather_unaligned(preds), dim=0)
ori_boxes = torch.cat(du.all_gather_unaligned(ori_boxes), dim=0)
metadata = torch.cat(du.all_gather_unaligned(metadata), dim=0)
val_meter.iter_toc()
# Update and log stats.
val_meter.update_stats(preds, ori_boxes, metadata)
else:
preds = model(inputs)
if cfg.DATA.MULTI_LABEL:
if cfg.NUM_GPUS > 1:
preds, labels = du.all_gather([preds, labels])
else:
# Compute the errors.
num_topks_correct = metrics.topks_correct(preds, labels, (1, 5))
# Combine the errors across the GPUs.
top1_err, top5_err = [
(1.0 - x / preds.size(0)) * 100.0 for x in num_topks_correct
]
if cfg.NUM_GPUS > 1:
top1_err, top5_err = du.all_reduce([top1_err, top5_err])
# Copy the errors from GPU to CPU (sync point).
top1_err, top5_err = top1_err.item(), top5_err.item()
val_meter.iter_toc()
# Update and log stats.
val_meter.update_stats(
top1_err,
top5_err,
inputs[0].size(0)
* max(
cfg.NUM_GPUS, 1
), # If running on CPU (cfg.NUM_GPUS == 0), use 1 to represent 1 CPU.
)
# write to tensorboard format if available.
if writer is not None:
writer.add_scalars(
{"Val/Top1_err": top1_err, "Val/Top5_err": top5_err},
global_step=len(val_loader) * cur_epoch + cur_iter,
)
val_meter.update_predictions(preds, labels)
val_meter.log_iter_stats(cur_epoch, cur_iter)
val_meter.iter_tic()
# Log epoch stats.
val_meter.log_epoch_stats(cur_epoch)
# write to tensorboard format if available.
if writer is not None:
if cfg.DETECTION.ENABLE:
writer.add_scalars(
{"Val/mAP": val_meter.full_map}, global_step=cur_epoch
)
else:
all_preds = [pred.clone().detach() for pred in val_meter.all_preds]
all_labels = [
label.clone().detach() for label in val_meter.all_labels
]
if cfg.NUM_GPUS:
all_preds = [pred.cpu() for pred in all_preds]
all_labels = [label.cpu() for label in all_labels]
writer.plot_eval(
preds=all_preds, labels=all_labels, global_step=cur_epoch
)
val_meter.reset()


def calculate_and_update_precise_bn(loader, model, num_iters=200, use_gpu=True):
"""
Update the stats in bn layers by calculating the precise stats.
Args:
loader (loader): data loader to provide training data.
model (model): model to update the bn stats.
num_iters (int): number of iterations to compute and update the bn stats.
use_gpu (bool): whether to use GPU or not.
"""
def _gen_loader():
for inputs, *_ in loader:
if use_gpu:
if isinstance(inputs, (list,)):
for i in range(len(inputs)):
inputs[i] = inputs[i].cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
yield inputs
# Update the bn stats.
update_bn_stats(model, _gen_loader(), num_iters)


def build_trainer(cfg):
"""
Build training model and its associated tools, including optimizer,
dataloaders and meters.
Args:
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
Returns:
model (nn.Module): training model.
optimizer (Optimizer): optimizer.
train_loader (DataLoader): training data loader.
val_loader (DataLoader): validation data loader.
precise_bn_loader (DataLoader): training data loader for computing
precise BN.
train_meter (TrainMeter): tool for measuring training stats.
val_meter (ValMeter): tool for measuring validation stats.
"""
# Build the video model and print model statistics.
model = build_model(cfg)
if du.is_master_proc() and cfg.LOG_MODEL_INFO:
misc.log_model_info(model, cfg, use_train_input=True)
# Construct the optimizer.
optimizer = optim.construct_optimizer(model, cfg)
# Create the video train and val loaders.
train_loader = loader.construct_loader(cfg, "train")
val_loader = loader.construct_loader(cfg, "val")
precise_bn_loader = loader.construct_loader(
cfg, "train", is_precise_bn=True
)
# Create meters.
train_meter = TrainMeter(len(train_loader), cfg)
val_meter = ValMeter(len(val_loader), cfg)
return (
model,
optimizer,
train_loader,
val_loader,
precise_bn_loader,
train_meter,
val_meter,
)


def train(cfg):
"""
Train a video model for many epochs on train set and evaluate it on val set.
Args:
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
"""
# Set up environment.
du.init_distributed_training(cfg)
# Set random seed from configs.
np.random.seed(cfg.RNG_SEED)
torch.manual_seed(cfg.RNG_SEED)
# Setup logging format.
logging.setup_logging(cfg.OUTPUT_DIR)
# Init multigrid.
multigrid = None
if cfg.MULTIGRID.LONG_CYCLE or cfg.MULTIGRID.SHORT_CYCLE:
multigrid = MultigridSchedule()
cfg = multigrid.init_multigrid(cfg)
if cfg.MULTIGRID.LONG_CYCLE:
cfg, _ = multigrid.update_long_cycle(cfg, cur_epoch=0)
# Print config.
logger.info("Train with config:")
logger.info(pprint.pformat(cfg))
# Build the video model and print model statistics.
model = build_model(cfg)
if du.is_master_proc() and cfg.LOG_MODEL_INFO:
misc.log_model_info(model, cfg, use_train_input=True)
# Construct the optimizer.
optimizer = optim.construct_optimizer(model, cfg)
# Load a checkpoint to resume training if applicable.
if not cfg.TRAIN.FINETUNE:
start_epoch = cu.load_train_checkpoint(cfg, model, optimizer)
else:
start_epoch = 0
cu.load_checkpoint(cfg.TRAIN.CHECKPOINT_FILE_PATH, model)
# Create the video train and val loaders.
train_loader = loader.construct_loader(cfg, "train")
val_loader = loader.construct_loader(cfg, "val")
precise_bn_loader = (
loader.construct_loader(cfg, "train", is_precise_bn=True)
if cfg.BN.USE_PRECISE_STATS
else None
)
train_meter = TrainMeter(len(train_loader), cfg)
val_meter = ValMeter(len(val_loader), cfg)
# set up writer for logging to Tensorboard format.
if cfg.TENSORBOARD.ENABLE and du.is_master_proc(
cfg.NUM_GPUS * cfg.NUM_SHARDS
):
writer = tb.TensorboardWriter(cfg)
else:
writer = None
# Perform the training loop.
logger.info("Start epoch: {}".format(start_epoch + 1))
for cur_epoch in range(start_epoch, cfg.SOLVER.MAX_EPOCH):
if cfg.MULTIGRID.LONG_CYCLE:
cfg, changed = multigrid.update_long_cycle(cfg, cur_epoch)
if changed:
(
model,
optimizer,
train_loader,
val_loader,
precise_bn_loader,
train_meter,
val_meter,
) = build_trainer(cfg)
# Load checkpoint.
if cu.has_checkpoint(cfg.OUTPUT_DIR):
last_checkpoint = cu.get_last_checkpoint(cfg.OUTPUT_DIR)
assert "{:05d}.pyth".format(cur_epoch) in last_checkpoint
else:
last_checkpoint = cfg.TRAIN.CHECKPOINT_FILE_PATH
logger.info("Load from {}".format(last_checkpoint))
cu.load_checkpoint(
last_checkpoint, model, cfg.NUM_GPUS > 1, optimizer
)
# Shuffle the dataset.
loader.shuffle_dataset(train_loader, cur_epoch)
# Train for one epoch.
train_epoch(
train_loader, model, optimizer, train_meter, cur_epoch, cfg, writer
)
is_checkp_epoch = cu.is_checkpoint_epoch(
cfg,
cur_epoch,
None if multigrid is None else multigrid.schedule,
)
is_eval_epoch = misc.is_eval_epoch(
cfg, cur_epoch, None if multigrid is None else multigrid.schedule
)
# Compute precise BN stats.
if (
(is_checkp_epoch or is_eval_epoch)
and cfg.BN.USE_PRECISE_STATS
and len(get_bn_modules(model)) > 0
):
calculate_and_update_precise_bn(
precise_bn_loader,
model,
min(cfg.BN.NUM_BATCHES_PRECISE, len(precise_bn_loader)),
cfg.NUM_GPUS > 0,
)
_ = misc.aggregate_sub_bn_stats(model)
# Save a checkpoint.
if is_checkp_epoch:
cu.save_checkpoint(cfg.OUTPUT_DIR, model, optimizer, cur_epoch, cfg)
# Evaluate the model on validation set.
if is_eval_epoch:
eval_epoch(val_loader, model, val_meter, cur_epoch, cfg, writer)
if writer is not None:
writer.close()


def run_visualization(vis_loader, model, cfg, writer=None):
"""
Run model visualization (weights, activations and model inputs) and visualize
them on Tensorboard.
Args:
vis_loader (loader): video visualization loader.
model (model): the video model to visualize.
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
writer (TensorboardWriter, optional): TensorboardWriter object
to write Tensorboard logs.
"""
n_devices = cfg.NUM_GPUS * cfg.NUM_SHARDS
prefix = "module/" if n_devices > 1 else ""
# Get a list of selected layer names and indexing.
layer_ls, indexing_dict = process_layer_index_data(
cfg.TENSORBOARD.MODEL_VIS.LAYER_LIST, layer_name_prefix=prefix
)
logger.info("Start Model Visualization.")
# Register hooks for activations.
model_vis = GetWeightAndActivation(model, layer_ls)
if writer is not None and cfg.TENSORBOARD.MODEL_VIS.MODEL_WEIGHTS:
layer_weights = model_vis.get_weights()
writer.plot_weights_and_activations(
layer_weights, tag="Layer Weights/", heat_map=False
)
video_vis = VideoVisualizer(
cfg.MODEL.NUM_CLASSES,
cfg.TENSORBOARD.CLASS_NAMES_PATH,
cfg.TENSORBOARD.MODEL_VIS.TOPK_PREDS,
cfg.TENSORBOARD.MODEL_VIS.COLORMAP,
)
if n_devices > 1:
grad_cam_layer_ls = [
"module/" + layer
for layer in cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.LAYER_LIST
]
else:
grad_cam_layer_ls = cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.LAYER_LIST
if cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.ENABLE:
gradcam = GradCAM(
model,
target_layers=grad_cam_layer_ls,
data_mean=cfg.DATA.MEAN,
data_std=cfg.DATA.STD,
colormap=cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.COLORMAP,
)
logger.info("Finish drawing weights.")
global_idx = -1
for inputs, labels, _, meta in tqdm.tqdm(vis_loader):
if cfg.NUM_GPUS:
# Transfer the data to the current GPU device.
if isinstance(inputs, (list,)):
for i in range(len(inputs)):
inputs[i] = inputs[i].cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
labels = labels.cuda()
for key, val in meta.items():
if isinstance(val, (list,)):
for i in range(len(val)):
val[i] = val[i].cuda(non_blocking=True)
else:
meta[key] = val.cuda(non_blocking=True)
if cfg.DETECTION.ENABLE:
activations, preds = model_vis.get_activations(
inputs, meta["boxes"]
)
else:
activations, preds = model_vis.get_activations(inputs)
if cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.ENABLE:
if cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.USE_TRUE_LABEL:
inputs, preds = gradcam(inputs, labels=labels)
else:
inputs, preds = gradcam(inputs)
if cfg.NUM_GPUS:
inputs = du.all_gather_unaligned(inputs)
activations = du.all_gather_unaligned(activations)
preds = du.all_gather_unaligned(preds)
if isinstance(inputs[0], list):
for i in range(len(inputs)):
for j in range(len(inputs[0])):
inputs[i][j] = inputs[i][j].cpu()
else:
inputs = [inp.cpu() for inp in inputs]
preds = [pred.cpu() for pred in preds]
else:
inputs, activations, preds = [inputs], [activations], [preds]
boxes = [None] * max(n_devices, 1)
if cfg.DETECTION.ENABLE and cfg.NUM_GPUS:
boxes = du.all_gather_unaligned(meta["boxes"])
boxes = [box.cpu() for box in boxes]
if writer is not None:
total_vids = 0
for i in range(max(n_devices, 1)):
cur_input = inputs[i]
cur_activations = activations[i]
cur_batch_size = cur_input[0].shape[0]
cur_preds = preds[i]
cur_boxes = boxes[i]
for cur_batch_idx in range(cur_batch_size):
global_idx += 1
total_vids += 1
if (
cfg.TENSORBOARD.MODEL_VIS.INPUT_VIDEO
or cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.ENABLE
):
for path_idx, input_pathway in enumerate(cur_input):
if cfg.TEST.DATASET == "ava" and cfg.AVA.BGR:
video = input_pathway[
cur_batch_idx, [2, 1, 0], ...
]
else:
video = input_pathway[cur_batch_idx]
if not cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.ENABLE:
# Permute to (T, H, W, C) from (C, T, H, W).
video = video.permute(1, 2, 3, 0)
video = data_utils.revert_tensor_normalize(
video, cfg.DATA.MEAN, cfg.DATA.STD
)
else:
# Permute from (T, C, H, W) to (T, H, W, C)
video = video.permute(0, 2, 3, 1)
bboxes = (
None if cur_boxes is None else cur_boxes[:, 1:]
)
cur_prediction = (
cur_preds
if cfg.DETECTION.ENABLE
else cur_preds[cur_batch_idx]
)
video = video_vis.draw_clip(
video, cur_prediction, bboxes=bboxes
)
video = (
torch.from_numpy(np.array(video))
.permute(0, 3, 1, 2)
.unsqueeze(0)
)
writer.add_video(
video,
tag="Input {}/Pathway {}".format(
global_idx, path_idx + 1
),
)
if cfg.TENSORBOARD.MODEL_VIS.ACTIVATIONS:
writer.plot_weights_and_activations(
cur_activations,
tag="Input {}/Activations: ".format(global_idx),
batch_idx=cur_batch_idx,
indexing_dict=indexing_dict,
)


def perform_wrong_prediction_vis(vis_loader, model, cfg):
"""
Visualize video inputs with wrong predictions on Tensorboard.
Args:
vis_loader (loader): video visualization loader.
model (model): the video model to visualize.
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
"""
wrong_prediction_visualizer = WrongPredictionVis(cfg=cfg)
for batch_idx, (inputs, labels, _, _) in tqdm.tqdm(enumerate(vis_loader)):
if cfg.NUM_GPUS:
# Transfer the data to the current GPU device.
if isinstance(inputs, (list,)):
for i in range(len(inputs)):
inputs[i] = inputs[i].cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
labels = labels.cuda()
# Some models modify the original input.
inputs_clone = [inp.clone() for inp in inputs]
preds = model(inputs)
if cfg.NUM_GPUS > 1:
preds, labels = du.all_gather([preds, labels])
if isinstance(inputs_clone, (list,)):
inputs_clone = du.all_gather(inputs_clone)
else:
inputs_clone = du.all_gather([inputs_clone])[0]
if cfg.NUM_GPUS:
# Transfer the data to the current CPU device.
labels = labels.cpu()
preds = preds.cpu()
if isinstance(inputs_clone, (list,)):
for i in range(len(inputs_clone)):
inputs_clone[i] = inputs_clone[i].cpu()
else:
inputs_clone = inputs_clone.cpu()
# If using CPU (NUM_GPUS = 0), 1 represents 1 CPU.
n_devices = max(cfg.NUM_GPUS, 1)
for device_idx in range(1, n_devices + 1):
wrong_prediction_visualizer.visualize_vid(
video_input=inputs_clone,
labels=labels,
preds=preds.detach().clone(),
batch_idx=device_idx * batch_idx,
)
logger.info(
"Class indices with wrong predictions: {}".format(
sorted(wrong_prediction_visualizer.wrong_class_prediction)
)
)
wrong_prediction_visualizer.clean()


def visualize(cfg):
"""
Perform layer weights and activations visualization on the model.
Args:
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
"""
if cfg.TENSORBOARD.ENABLE and (
cfg.TENSORBOARD.MODEL_VIS.ENABLE
or cfg.TENSORBOARD.WRONG_PRED_VIS.ENABLE
):
# Set up environment.
du.init_distributed_training(cfg)
# Set random seed from configs.
np.random.seed(cfg.RNG_SEED)
torch.manual_seed(cfg.RNG_SEED)
# Setup logging format.
logging.setup_logging(cfg.OUTPUT_DIR)
# Print config.
logger.info("Model Visualization with config:")
logger.info(cfg)
# Build the video model and print model statistics.
model = build_model(cfg)
model.eval()
if du.is_master_proc() and cfg.LOG_MODEL_INFO:
misc.log_model_info(model, cfg, use_train_input=False)
cu.load_test_checkpoint(cfg, model)
# Create video testing loaders.
vis_loader = loader.construct_loader(cfg, "test")
if cfg.DETECTION.ENABLE:
assert cfg.NUM_GPUS == cfg.TEST.BATCH_SIZE or cfg.NUM_GPUS == 0
# Set up writer for logging to Tensorboard format.
if du.is_master_proc(cfg.NUM_GPUS * cfg.NUM_SHARDS):
writer = tb.TensorboardWriter(cfg)
else:
writer = None
if cfg.TENSORBOARD.PREDICTIONS_PATH != "":
assert not cfg.DETECTION.ENABLE, "Detection is not supported."
logger.info(
"Visualizing class-level performance from saved results..."
)
if writer is not None:
with PathManager.open(
cfg.TENSORBOARD.PREDICTIONS_PATH, "rb"
) as f:
preds, labels = pickle.load(f, encoding="latin1")
writer.plot_eval(preds, labels)
if cfg.TENSORBOARD.MODEL_VIS.ENABLE:
if cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.ENABLE:
assert (
not cfg.DETECTION.ENABLE
), "Detection task is currently not supported for Grad-CAM visualization."
if cfg.MODEL.ARCH in cfg.MODEL.SINGLE_PATHWAY_ARCH:
assert (
len(cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.LAYER_LIST) == 1
), "The number of chosen CNN layers must be equal to the number of pathway(s), given {} layer(s).".format(
len(cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.LAYER_LIST)
)
elif cfg.MODEL.ARCH in cfg.MODEL.MULTI_PATHWAY_ARCH:
assert (
len(cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.LAYER_LIST) == 2
), "The number of chosen CNN layers must be equal to the number of pathway(s), given {} layer(s).".format(
len(cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.LAYER_LIST)
)
else:
raise NotImplementedError(
"Model arch {} is not in {}".format(
cfg.MODEL.ARCH,
cfg.MODEL.SINGLE_PATHWAY_ARCH
+ cfg.MODEL.MULTI_PATHWAY_ARCH,
)
)
logger.info(
"Visualize model analysis for {} iterations".format(
len(vis_loader)
)
)
# Run visualization on the model
run_visualization(vis_loader, model, cfg, writer)
if cfg.TENSORBOARD.WRONG_PRED_VIS.ENABLE:
logger.info(
"Visualize Wrong Predictions for {} iterations".format(
len(vis_loader)
)
)
perform_wrong_prediction_vis(vis_loader, model, cfg)
if writer is not None:
writer.close()


def get_args():
"""Get the command-line arguments"""
parser = argparse.ArgumentParser(description='Say hello')
parser.add_argument('-n', '--name', default='World', help='Name to greet')
return parser.parse_args()


def main():
"""Make a jazz noise here"""
args = get_args()
print('Hello, ' + args.name + '!')


def test_exists():
"""exists"""
assert os.path.isfile(prg)


def test_runnable():
"""Runs using python3"""
out = getoutput(f'python3 {prg}')
assert out.strip() == 'Hello, World!'


def test_executable():
"""Says 'Hello, World!' by default"""
out = getoutput(prg)
assert out.strip() == 'Hello, World!'


def test_usage():
"""usage"""
for flag in ['-h', '--help']:
rv, out = getstatusoutput(f'{prg} {flag}')
assert rv == 0
assert out.lower().startswith('usage')


def test_input():
"""test for input"""
for val in ['Universe', 'Multiverse']:
for option in ['-n', '--name']:
rv, out = getstatusoutput(f'{prg} {option} {val}')
assert rv == 0
assert out.strip() == f'Hello, {val}!'


def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description="Crow's Nest -- choose the correct article",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('word', metavar='word', help='A word')
return parser.parse_args()


def main():
"""Make a jazz noise here"""
args = get_args()
word = args.word
article = 'an' if word[0].lower() in 'aeiou' else 'a'
print(f'Ahoy, Captain, {article} {word} off the larboard bow!')


def test_exists():
"""exists"""
assert os.path.isfile(prg)


def test_usage():
"""usage"""
for flag in ['-h', '--help']:
rv, out = getstatusoutput(f'{prg} {flag}')
assert rv == 0
assert out.lower().startswith('usage')


def test_consonant():
"""brigantine -> a brigantine"""
for word in consonant_words:
out = getoutput(f'{prg} {word}')
assert out.strip() == template.format('a', word)


def test_consonant_upper():
"""brigantine -> a Brigantine"""
for word in consonant_words:
out = getoutput(f'{prg} {word.title()}')
assert out.strip() == template.format('a', word.title())


def test_vowel():
"""octopus -> an octopus"""
for word in vowel_words:
out = getoutput(f'{prg} {word}')
assert out.strip() == template.format('an', word)


def test_vowel_upper():
"""octopus -> an OCTOPUS"""
for word in vowel_words:
out = getoutput(f'{prg} {word.upper()}')
assert out.strip() == template.format('an', word.upper())


def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Picnic game',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('item',
metavar='str',
nargs='+',
help='Item(s) to bring')
parser.add_argument('-s',
'--sorted',
action='store_true',
help='Sort the items')
return parser.parse_args()


def main():
"""Make a jazz noise here"""
args = get_args()
items = args.item
num = len(items)
if args.sorted:
items.sort()
bringing = ''
if num == 1:
bringing = items[0]
elif num == 2:
bringing = ' and '.join(items)
else:
items[-1] = 'and ' + items[-1]
bringing = ', '.join(items)
print('You are bringing {}.'.format(bringing))


def test_exists():
"""exists"""
assert os.path.isfile(prg)


def test_usage():
"""usage"""
for flag in ['', '-h', '--help']:
out = getoutput(f'{prg} {flag}')
assert out.lower().startswith('usage')


def test_one():
"""one item"""
out = getoutput(f'{prg} chips')
assert out.strip() == 'You are bringing chips.'


def test_two():
"""two items"""
out = getoutput(f'{prg} soda "french fries"')
assert out.strip() == 'You are bringing soda and french fries.'


def test_more_than_two():
"""more than two items"""
arg = '"potato chips" coleslaw cupcakes "French silk pie"'
out = getoutput(f'{prg} {arg}')
expected = ('You are bringing potato chips, coleslaw, '
'cupcakes, and French silk pie.')
assert out.strip() == expected


def test_two_sorted():
"""two items sorted output"""
out = getoutput(f'{prg} -s soda candy')
assert out.strip() == 'You are bringing candy and soda.'


def test_more_than_two_sorted():
"""more than two items sorted output"""
arg = 'bananas apples dates cherries'
out = getoutput(f'{prg} {arg} --sorted')
expected = ('You are bringing apples, bananas, cherries, and dates.')
assert out.strip() == expected


def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Jump the Five',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('text', metavar='str', help='Input text')
return parser.parse_args() |
Make a jazz noise here | def main():
"""Make a jazz noise here"""
args = get_args()
jumper = {'1': '9', '2': '8', '3': '7', '4': '6', '5': '0',
'6': '4', '7': '3', '8': '2', '9': '1', '0': '5'}
for char in args.text:
print(jumper.get(char, char), end='')
print() |


def get_args():
    """Get command-line arguments"""
    parser = argparse.ArgumentParser(
        description='Jump the Five',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('text', metavar='str', help='Input text')
    return parser.parse_args()


def main():
    """Make a jazz noise here"""
    args = get_args()
    jumper = {'1': '9', '2': '8', '3': '7', '4': '6', '5': '0',
              '6': '4', '7': '3', '8': '2', '9': '1', '0': '5'}

    # Method 2: for loop to build new string
    new_text = ''
    for char in args.text:
        new_text += jumper.get(char, char)
    print(new_text)


def get_args():
    """Get command-line arguments"""
    parser = argparse.ArgumentParser(
        description='Jump the Five',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('text', metavar='str', help='Input text')
    return parser.parse_args()


def main():
    """Make a jazz noise here"""
    args = get_args()
    jumper = {'1': '9', '2': '8', '3': '7', '4': '6', '5': '0',
              '6': '4', '7': '3', '8': '2', '9': '1', '0': '5'}

    # Method 3: for loop to build new list
    new_text = []
    for char in args.text:
        new_text.append(jumper.get(char, char))
    print(''.join(new_text))


def get_args():
    """Get command-line arguments"""
    parser = argparse.ArgumentParser(
        description='Jump the Five',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('text', metavar='str', help='Input text')
    return parser.parse_args()


def main():
    """Make a jazz noise here"""
    args = get_args()
    jumper = {'1': '9', '2': '8', '3': '7', '4': '6', '5': '0',
              '6': '4', '7': '3', '8': '2', '9': '1', '0': '5'}

    # Method 4: list comprehension
    print(''.join([jumper.get(char, char) for char in args.text]))


def get_args():
    """Get command-line arguments"""
    parser = argparse.ArgumentParser(
        description='Jump the Five',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('text', metavar='str', help='Input text')
    return parser.parse_args()


def main():
    """Make a jazz noise here"""
    args = get_args()
    jumper = {'1': '9', '2': '8', '3': '7', '4': '6', '5': '0',
              '6': '4', '7': '3', '8': '2', '9': '1', '0': '5'}

    # Method 5: str.translate
    print(args.text.translate(str.maketrans(jumper)))
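

# Editor's sketch (not from the original): str.maketrans converts the
# str-to-str dict into a table keyed by Unicode code point, which
# str.translate applies in one pass; characters absent from the table
# pass through unchanged.
def demo_translate():
    """Check Method 5's machinery on hypothetical input."""
    jumper = {'1': '9', '2': '8', '3': '7', '4': '6', '5': '0',
              '6': '4', '7': '3', '8': '2', '9': '1', '0': '5'}
    table = str.maketrans(jumper)  # e.g. {49: '9', 50: '8', ...}
    assert '555-1212'.translate(table) == '000-9898'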


def test_exists():
    """exists"""
    assert os.path.isfile(prg)


def test_usage():
    """usage"""
    for flag in ['-h', '--help']:
        rv, out = getstatusoutput(f'{prg} {flag}')
        assert rv == 0
        assert out.lower().startswith('usage')


def test_01():
    """jumps every digit in a bare number"""
    rv, out = getstatusoutput(f'{prg} 123-456-7890')
    assert rv == 0
    assert out == '987-604-3215'


def test_02():
    """jumps digits embedded in text, leaving other characters alone"""
    rv, out = getstatusoutput(f'{prg} "That number to call is 098-765-4321."')
    assert rv == 0
    assert out.rstrip() == 'That number to call is 512-340-6789.'


def get_args():
    """get command-line arguments"""
    parser = argparse.ArgumentParser(
        description='Howler (upper-cases input)',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('text',
                        metavar='text',
                        type=str,
                        help='Input string or file')
    parser.add_argument('-o',
                        '--outfile',
                        help='Output filename',
                        metavar='str',
                        type=str,
                        default='')
    args = parser.parse_args()

    if os.path.isfile(args.text):
        args.text = open(args.text).read().rstrip()

    return args


def main():
    """Make a jazz noise here"""
    args = get_args()
    out_fh = open(args.outfile, 'wt') if args.outfile else sys.stdout
    out_fh.write(args.text.upper() + '\n')
    out_fh.close()  # note: also closes sys.stdout when no --outfile is given


def get_args():
    """get command-line arguments"""
    parser = argparse.ArgumentParser(
        description='Howler (upper-cases input)',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('text',
                        metavar='text',
                        type=str,
                        help='Input string or file')
    parser.add_argument('-o',
                        '--outfile',
                        help='Output filename',
                        metavar='str',
                        type=str,
                        default='')
    args = parser.parse_args()

    if os.path.isfile(args.text):
        args.text = open(args.text)
    else:
        args.text = io.StringIO(args.text + '\n')

    return args


def main():
    """Make a jazz noise here"""
    args = get_args()
    out_fh = open(args.outfile, 'wt') if args.outfile else sys.stdout

    for line in args.text:
        out_fh.write(line.upper())

    out_fh.close()
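

# Editor's sketch (not from the original): io.StringIO is what lets
# main() iterate literal text and real files the same way -- both are
# file-like objects that yield lines.
def demo_stringio():
    """Hypothetical check of the wrapper used in get_args() above."""
    import io
    assert list(io.StringIO('foo\nbar\n')) == ['foo\n', 'bar\n']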


def random_string():
    """generate a random string"""
    k = random.randint(5, 10)
    return ''.join(random.choices(string.ascii_letters + string.digits, k=k))


def out_flag():
    """Either -o or --outfile"""
    return '-o' if random.randint(0, 1) else '--outfile'


def test_exists():
    """exists"""
    assert os.path.isfile(prg)


def test_usage():
    """usage"""
    for flag in ['-h', '--help']:
        rv, out = getstatusoutput(f'{prg} {flag}')
        assert rv == 0
        assert re.match("usage", out, re.IGNORECASE)


def test_text_stdout():
    """Test STDIN/STDOUT"""
    out = getoutput(f'{prg} "foo bar baz"')
    assert out.strip() == 'FOO BAR BAZ'


def test_text_outfile():
    """Test STDIN/outfile"""
    out_file = random_string()
    if os.path.isfile(out_file):
        os.remove(out_file)

    try:
        out = getoutput(f'{prg} {out_flag()} {out_file} "foo bar baz"')
        assert out.strip() == ''
        assert os.path.isfile(out_file)
        text = open(out_file).read().rstrip()
        assert text == 'FOO BAR BAZ'
    finally:
        if os.path.isfile(out_file):
            os.remove(out_file)


def test_file():
    """Test file in/out"""
    for expected_file in os.listdir('test-outs'):
        try:
            out_file = random_string()
            if os.path.isfile(out_file):
                os.remove(out_file)

            basename = os.path.basename(expected_file)
            in_file = os.path.join('../inputs', basename)
            out = getoutput(f'{prg} {out_flag()} {out_file} {in_file}')
            assert out.strip() == ''
            produced = open(out_file).read().rstrip()
            expected = open(os.path.join('test-outs',
                                         expected_file)).read().strip()
            assert expected == produced
        finally:
            if os.path.isfile(out_file):
                os.remove(out_file)


def get_args():
    """Get command-line arguments"""
    parser = argparse.ArgumentParser(
        description='Emulate wc (word count)',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('file',
                        metavar='FILE',
                        nargs='*',
                        default=[sys.stdin],
                        type=argparse.FileType('rt'),
                        help='Input file(s)')
    return parser.parse_args()


def main():
    """Make a jazz noise here"""
    args = get_args()

    total_lines, total_bytes, total_words = 0, 0, 0
    for fh in args.file:
        num_lines, num_words, num_bytes = 0, 0, 0
        for line in fh:
            num_lines += 1
            num_bytes += len(line)
            num_words += len(line.split())
        total_lines += num_lines
        total_bytes += num_bytes
        total_words += num_words

        print(f'{num_lines:8}{num_words:8}{num_bytes:8} {fh.name}')

    if len(args.file) > 1:
        print(f'{total_lines:8}{total_words:8}{total_bytes:8} total')
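

# Editor's note (sketch, not part of the original solution): len(line)
# counts characters, so the third column matches `wc -c` only for
# single-byte encodings such as ASCII. A byte-accurate count would
# encode first:
def byte_len(line, encoding='utf-8'):
    """Length of a line in bytes, e.g. byte_len('café') == 5 in UTF-8."""
    return len(line.encode(encoding))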


def test_exists():
    """exists"""
    assert os.path.isfile(prg)


def test_usage():
    """usage"""
    for flag in ['-h', '--help']:
        rv, out = getstatusoutput(f'{prg} {flag}')
        assert rv == 0
        assert re.match("usage", out, re.IGNORECASE)


def random_string():
    """generate a random string"""
    k = random.randint(5, 10)
    return ''.join(random.choices(string.ascii_letters + string.digits, k=k))


def test_bad_file():
    """bad_file"""
    bad = random_string()
    rv, out = getstatusoutput(f'{prg} {bad}')
    assert rv != 0
    assert re.search(f"No such file or directory: '{bad}'", out)


def test_empty():
    """Test on empty"""
    rv, out = getstatusoutput(f'{prg} {empty}')
    assert rv == 0
    assert out.rstrip() == '       0       0       0 ./inputs/empty.txt'


def test_one():
    """Test on one"""
    rv, out = getstatusoutput(f'{prg} {one_line}')
    assert rv == 0
    assert out.rstrip() == '       1       1       2 ./inputs/one.txt'


def test_two():
    """Test on two"""
    rv, out = getstatusoutput(f'{prg} {two_lines}')
    assert rv == 0
    assert out.rstrip() == '       2       2       4 ./inputs/two.txt'


def test_fox():
    """Test on fox"""
    rv, out = getstatusoutput(f'{prg} {fox}')
    assert rv == 0
    assert out.rstrip() == '       1       9      45 ../inputs/fox.txt'


def test_more():
    """Test on more than one file"""
    rv, out = getstatusoutput(f'{prg} {fox} {sonnet}')
    expected = ('       1       9      45 ../inputs/fox.txt\n'
                '      17     118     661 ../inputs/sonnet-29.txt\n'
                '      18     127     706 total')
    assert rv == 0
    assert out.rstrip() == expected


def test_stdin():
    """Test on stdin"""
    rv, out = getstatusoutput(f'{prg} < {fox}')
    assert rv == 0
    assert out.rstrip() == '       1       9      45 <stdin>'


def get_args():
    """get command-line arguments"""
    parser = argparse.ArgumentParser(
        description='Interactive Gashlycrumb',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-f',
                        '--file',
                        help='Input file',
                        metavar='str',
                        type=argparse.FileType('r'),
                        default='gashlycrumb.txt')
    return parser.parse_args()


def main():
    """Make a jazz noise here"""
    args = get_args()
    lookup = {line[0]: line.rstrip() for line in args.file}

    while True:
        letter = input('Please provide a letter [! to quit]: ')
        if letter == '!':
            print('Bye')
            break
        print(lookup.get(letter.upper(), f'I do not know "{letter}".'))


def get_args():
    """get command-line arguments"""
    parser = argparse.ArgumentParser(
        description='Gashlycrumb',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('letter',
                        help='Letter(s)',
                        metavar='letter',
                        nargs='+',
                        type=str)
    parser.add_argument('-f',
                        '--file',
                        help='Input file',
                        metavar='FILE',
                        type=argparse.FileType('r'),
                        default='gashlycrumb.txt')
    return parser.parse_args()


def main():
    """Make a jazz noise here"""
    args = get_args()

    # Variant 1: build the lookup with an explicit loop, answer with if/else
    lookup = {}
    for line in args.file:
        lookup[line[0].upper()] = line.rstrip()

    for letter in args.letter:
        if letter.upper() in lookup:
            print(lookup[letter.upper()])
        else:
            print(f'I do not know "{letter}".')


def get_args():
    """get command-line arguments"""
    parser = argparse.ArgumentParser(
        description='Gashlycrumb',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('letter',
                        help='Letter(s)',
                        metavar='letter',
                        nargs='+',
                        type=str)
    parser.add_argument('-f',
                        '--file',
                        help='Input file',
                        metavar='FILE',
                        type=argparse.FileType('r'),
                        default='gashlycrumb.txt')
    return parser.parse_args()


def main():
    """Make a jazz noise here"""
    args = get_args()

    # Variant 2: dict comprehension for the lookup; membership test unchanged
    lookup = {line[0].upper(): line.rstrip() for line in args.file}

    for letter in args.letter:
        if letter.upper() in lookup:
            print(lookup[letter.upper()])
        else:
            print(f'I do not know "{letter}".')


def get_args():
    """get command-line arguments"""
    parser = argparse.ArgumentParser(
        description='Gashlycrumb',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('letter',
                        help='Letter(s)',
                        metavar='letter',
                        nargs='+',
                        type=str)
    parser.add_argument('-f',
                        '--file',
                        help='Input file',
                        metavar='FILE',
                        type=argparse.FileType('r'),
                        default='gashlycrumb.txt')
    return parser.parse_args()


def main():
    """Make a jazz noise here"""
    args = get_args()

    # Variant 3: dict comprehension plus dict.get with a default message
    lookup = {line[0].upper(): line.rstrip() for line in args.file}

    for letter in args.letter:
        print(lookup.get(letter.upper(), f'I do not know "{letter}".'))
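

# Editor's note: the three variants above collapse the same lookup step
# by step -- explicit loop with if/else, dict comprehension with a
# membership test, and finally dict.get with a default. A sketch on
# hypothetical data:
def demo_lookup():
    """dict.get's default replaces the if/else of the earlier variants."""
    lookup = {'A': 'A is for Amy who fell down the stairs.'}
    assert lookup.get('A', 'unknown').startswith('A is for Amy')
    assert lookup.get('Z', 'unknown') == 'unknown'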


def file_flag():
    """Either -f or --file"""
    return '-f' if random.randint(0, 1) else '--file'


def test_exists():
    """exists"""
    assert os.path.isfile(prg)


def test_usage():
    """usage"""
    for flag in ['-h', '--help']:
        rv, out = getstatusoutput(f'{prg} {flag}')
        assert rv == 0
        assert re.match("usage", out, re.IGNORECASE)


def test_bad_file():
    """Test for bad --file"""
    bad = random_string()
    letter = random.choice(string.ascii_lowercase)
    rv, out = getstatusoutput(f'{prg} {letter} -f {bad}')
    assert rv != 0
    expected = f"No such file or directory: '{bad}'"
    assert re.search(expected, out)


def test_a():
    """Test for 'a'"""
    rv, out = getstatusoutput(f'{prg} a')
    assert rv == 0
    expected = 'A is for Amy who fell down the stairs.'
    assert out.strip() == expected


def test_b_c():
    """Test for 'b c'"""
    rv, out = getstatusoutput(f'{prg} b c')
    assert rv == 0
    expected = ('B is for Basil assaulted by bears.\n'
                'C is for Clara who wasted away.')
    assert out.strip() == expected