response | instruction |
---|---|
resizing mode:
- default: random resized crop with scale factor (0.7, 1.0) and size 224;
- cen.crop: take the center crop of 224;
- res.|cen.crop: resize the image to 256 and take the center crop of size 224;
- res: resize the image to 224;
- res2x: resize the image to 448;
- res.|crop: resize the image to 256 and take a random crop of size 224;
- res.sma|crop: resize the image keeping its aspect ratio such that the
smaller side is 256, then take a random crop of size 224;
– inc.crop: “inception crop” from (Szegedy et al., 2015);
– cif.crop: resize the image to 224, zero-pad it by 28 on each side, then take a random crop of size 224. | def get_train_transform(resizing='default', random_horizontal_flip=True, random_color_jitter=True,
random_gray_scale=True):
"""
resizing mode:
- default: random resized crop with scale factor (0.7, 1.0) and size 224;
- cen.crop: take the center crop of 224;
- res.|cen.crop: resize the image to 256 and take the center crop of size 224;
- res: resize the image to 224;
- res2x: resize the image to 448;
- res.|crop: resize the image to 256 and take a random crop of size 224;
- res.sma|crop: resize the image keeping its aspect ratio such that the
smaller side is 256, then take a random crop of size 224;
– inc.crop: “inception crop” from (Szegedy et al., 2015);
– cif.crop: resize the image to 224, zero-pad it by 28 on each side, then take a random crop of size 224.
"""
if resizing == 'default':
transform = T.RandomResizedCrop(224, scale=(0.7, 1.0))
elif resizing == 'cen.crop':
transform = T.CenterCrop(224)
elif resizing == 'res.|cen.crop':
transform = T.Compose([
ResizeImage(256),
T.CenterCrop(224)
])
elif resizing == 'res':
transform = ResizeImage(224)
elif resizing == 'res2x':
transform = ResizeImage(448)
elif resizing == 'res.|crop':
transform = T.Compose([
T.Resize((256, 256)),
T.RandomCrop(224)
])
elif resizing == "res.sma|crop":
transform = T.Compose([
T.Resize(256),
T.RandomCrop(224)
])
elif resizing == 'inc.crop':
transform = T.RandomResizedCrop(224)
elif resizing == 'cif.crop':
transform = T.Compose([
T.Resize((224, 224)),
T.Pad(28),
T.RandomCrop(224),
])
else:
raise NotImplementedError(resizing)
transforms = [transform]
if random_horizontal_flip:
transforms.append(T.RandomHorizontalFlip())
if random_color_jitter:
transforms.append(T.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.3))
if random_gray_scale:
transforms.append(T.RandomGrayscale())
transforms.extend([
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
return T.Compose(transforms) |
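A minimal usage sketch (my addition, not part of the original snippet): compose the transform with one of the modes listed above and apply it to a PIL image. It assumes torchvision is installed and that `T` refers to `torchvision.transforms`, as in the snippet.

```python
from PIL import Image

train_transform = get_train_transform(resizing='res.|crop', random_color_jitter=False)
img = Image.new('RGB', (320, 240))   # stand-in for a real training image
tensor = train_transform(img)
print(tensor.shape)                  # torch.Size([3, 224, 224])
```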
resizing mode:
- default: resize the image to 224;
- res2x: resize the image to 448;
- res.|cen.crop: resize the image to 256 and take the center crop of size 224; | def get_val_transform(resizing='default'):
"""
resizing mode:
- default: resize the image to 224;
- res2x: resize the image to 448;
- res.|cen.crop: resize the image to 256 and take the center crop of size 224;
"""
if resizing == 'default':
transform = ResizeImage(224)
elif resizing == 'res2x':
transform = ResizeImage(448)
elif resizing == 'res.|cen.crop':
transform = T.Compose([
ResizeImage(256),
T.CenterCrop(224),
])
else:
raise NotImplementedError(resizing)
return T.Compose([
transform,
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
]) |
Fetch data from `data_loader`, and then use `feature_extractor` to collect features. This function is
specific to domain generalization because each element in data_loader is a tuple
(images, labels, domain_labels).
Args:
data_loader (torch.utils.data.DataLoader): Data loader.
feature_extractor (torch.nn.Module): A feature extractor.
device (torch.device)
max_num_features (int): The max number of features to return
Returns:
Features in shape (min(len(data_loader), max_num_features) * mini-batch size, :math:`|\mathcal{F}|`). | def collect_feature(data_loader, feature_extractor: nn.Module, device: torch.device,
max_num_features=None) -> torch.Tensor:
"""
Fetch data from `data_loader`, and then use `feature_extractor` to collect features. This function is
specific to domain generalization because each element in data_loader is a tuple
(images, labels, domain_labels).
Args:
data_loader (torch.utils.data.DataLoader): Data loader.
feature_extractor (torch.nn.Module): A feature extractor.
device (torch.device)
max_num_features (int): The max number of features to return
Returns:
Features in shape (min(len(data_loader), max_num_features) * mini-batch size, :math:`|\mathcal{F}|`).
"""
feature_extractor.eval()
all_features = []
with torch.no_grad():
for i, (images, target, domain_labels) in enumerate(tqdm.tqdm(data_loader)):
if max_num_features is not None and i >= max_num_features:
break
images = images.to(device)
feature = feature_extractor(images).cpu()
all_features.append(feature)
return torch.cat(all_features, dim=0) |
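For illustration (an assumption-laden sketch, not from the source), the function can be exercised with a toy loader that yields the expected (images, labels, domain_labels) triplets and a trivial feature extractor:

```python
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

# Toy loader yielding (images, labels, domain_labels) triplets, as this variant expects.
loader = DataLoader(TensorDataset(torch.randn(16, 3, 32, 32),
                                  torch.randint(0, 5, (16,)),
                                  torch.zeros(16, dtype=torch.long)),
                    batch_size=8)
extractor = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 64))
features = collect_feature(loader, extractor, torch.device('cpu'))
print(features.shape)  # torch.Size([16, 64])
```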
resizing mode:
- default: resize the image to (height, width), zero-pad it by 10 on each side, then take a random crop of
(height, width)
- res: resize the image to (height, width) | def get_train_transform(height, width, resizing='default', random_horizontal_flip=True, random_color_jitter=False,
random_gray_scale=False):
"""
resizing mode:
- default: resize the image to (height, width), zero-pad it by 10 on each side, then take a random crop of
(height, width)
- res: resize the image to (height, width)
"""
if resizing == 'default':
transform = T.Compose([
T.Resize((height, width), interpolation=3),
T.Pad(10),
T.RandomCrop((height, width))
])
elif resizing == 'res':
transform = T.Resize((height, width), interpolation=3)
else:
raise NotImplementedError(resizing)
transforms = [transform]
if random_horizontal_flip:
transforms.append(T.RandomHorizontalFlip())
if random_color_jitter:
transforms.append(T.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.3))
if random_gray_scale:
transforms.append(T.RandomGrayscale())
transforms.extend([
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
return T.Compose(transforms) |
Visualize features from different domains using t-SNE. As we can have very large number of samples in each
domain, only `n_data_points_per_domain` number of samples are randomly selected in each domain. | def visualize_tsne(source_loader, target_loader, model, filename, device, n_data_points_per_domain=3000):
"""Visualize features from different domains using t-SNE. As we can have very large number of samples in each
domain, only `n_data_points_per_domain` number of samples are randomly selected in each domain.
"""
source_feature_dict = extract_reid_feature(source_loader, model, device, normalize=True)
source_feature = torch.stack(list(source_feature_dict.values())).cpu()
source_feature = source_feature[torch.randperm(len(source_feature))]
source_feature = source_feature[:n_data_points_per_domain]
target_feature_dict = extract_reid_feature(target_loader, model, device, normalize=True)
target_feature = torch.stack(list(target_feature_dict.values())).cpu()
target_feature = target_feature[torch.randperm(len(target_feature))]
target_feature = target_feature[:n_data_points_per_domain]
tsne.visualize(source_feature, target_feature, filename, source_color='cornflowerblue', target_color='darkorange')
print('T-SNE process is done, figure is saved to {}'.format(filename)) |
A forward pass over the full dataset
:params score_loader: the dataloader for scoring transferability
:params model: the model for scoring transferability
:params layer: before which layer features are extracted, for registering hooks
returns
features: extracted features of model
predictions: probability outputs of model
targets: ground-truth labels of dataset | def forwarding_dataset(score_loader, model, layer, device):
"""
A forward pass over the full dataset
:params score_loader: the dataloader for scoring transferability
:params model: the model for scoring transferability
:params layer: before which layer features are extracted, for registering hooks
returns
features: extracted features of model
predictions: probability outputs of model
targets: ground-truth labels of dataset
"""
features = []
outputs = []
targets = []
def hook_fn_forward(module, input, output):
features.append(input[0].detach().cpu())
outputs.append(output.detach().cpu())
forward_hook = layer.register_forward_hook(hook_fn_forward)
model.eval()
with torch.no_grad():
for _, (data, target) in enumerate(score_loader):
targets.append(target)
data = data.to(device)
_ = model(data)
forward_hook.remove()
features = torch.cat([x for x in features]).numpy()
outputs = torch.cat([x for x in outputs])
predictions = F.softmax(outputs, dim=-1).numpy()
targets = torch.cat([x for x in targets]).numpy()
return features, predictions, targets |
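A hedged usage sketch (my addition): hook the final fully-connected layer of a torchvision ResNet-50 so that `features` holds the 2048-d penultimate activations. In practice a pre-trained checkpoint and a real dataloader would be used; here a toy loader of random images keeps the sketch self-contained.

```python
import torch
import torchvision.models as models
from torch.utils.data import DataLoader, TensorDataset

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
score_loader = DataLoader(TensorDataset(torch.randn(8, 3, 224, 224),
                                        torch.randint(0, 10, (8,))), batch_size=4)
model = models.resnet50(pretrained=False).to(device)  # a pre-trained checkpoint would be used in practice
# Hooking `model.fc` captures its input, i.e. the 2048-d penultimate features.
features, predictions, targets = forwarding_dataset(score_loader, model, layer=model.fc, device=device)
print(features.shape, predictions.shape, targets.shape)  # (8, 2048) (8, 1000) (8,)
```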
When sample_rate < 100, e.g. sample_rate = 50, use 50% of the data to train the model.
Otherwise,
if num_samples_per_classes is not None, e.g. 5, then sample 5 images for each class, and use them to train the model;
otherwise, keep all the data. | def get_dataset(dataset_name, root, transform, sample_rate=100, num_samples_per_classes=None, split='train'):
"""
When sample_rate < 100, e.g. sample_rate = 50, use 50% of the data to train the model.
Otherwise,
if num_samples_per_classes is not None, e.g. 5, then sample 5 images for each class, and use them to train the model;
otherwise, keep all the data.
"""
dataset = datasets.__dict__[dataset_name]
if sample_rate < 100:
score_dataset = dataset(root=root, split=split, sample_rate=sample_rate, download=True, transform=transform)
num_classes = len(score_dataset.classes)
else:
score_dataset = dataset(root=root, split=split, download=True, transform=transform)
num_classes = len(score_dataset.classes)
if num_samples_per_classes is not None:
samples = list(range(len(score_dataset)))
random.shuffle(samples)
samples_len = min(num_samples_per_classes * num_classes, len(score_dataset))
print("Origin dataset:", len(score_dataset), "Sampled dataset:", samples_len, "Ratio:",
float(samples_len) / len(score_dataset))
score_dataset = Subset(score_dataset, samples[:samples_len])
return score_dataset, num_classes |
resizing mode:
- default: resize the image to 256 and take the center crop of size 224;
– res.: resize the image to 224
– res.299: resize the image to 299
– res.|crop: resize the image such that the smaller side is of size 256 and
then take a central crop of size 224. | def get_transform(resizing='res.'):
"""
resizing mode:
- default: resize the image to 256 and take the center crop of size 224;
– res.: resize the image to 224
– res.299: resize the image to 299
– res.|crop: resize the image such that the smaller side is of size 256 and
then take a central crop of size 224.
"""
if resizing == 'default':
transform = T.Compose([
T.Resize(256),
T.CenterCrop(224),
])
elif resizing == 'res.':
transform = T.Resize((224, 224))
elif resizing == 'res.299':
transform = T.Resize((299, 299))
elif resizing == 'res.|crop':
transform = T.Compose([
T.Resize((256, 256)),
T.CenterCrop(224),
])
else:
raise NotImplementedError(resizing)
return T.Compose([
transform,
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
]) |
Compute outputs of the teacher network. Here, we use weak data augmentation and do not introduce an additional
dropout layer according to the Noisy Student paper `Self-Training With Noisy Student Improves ImageNet
Classification <https://openaccess.thecvf.com/content_CVPR_2020/papers/Xie_Self-Training_With_Noisy_Student_Improves
_ImageNet_Classification_CVPR_2020_paper.pdf>`_. | def calc_teacher_output(classifier_teacher: ImageClassifier, weak_augmented_unlabeled_dataset):
"""Compute outputs of the teacher network. Here, we use weak data augmentation and do not introduce an additional
dropout layer according to the Noisy Student paper `Self-Training With Noisy Student Improves ImageNet
Classification <https://openaccess.thecvf.com/content_CVPR_2020/papers/Xie_Self-Training_With_Noisy_Student_Improves
_ImageNet_Classification_CVPR_2020_paper.pdf>`_.
"""
data_loader = DataLoader(weak_augmented_unlabeled_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, drop_last=False)
batch_time = AverageMeter('Time', ':6.3f')
progress = ProgressMeter(
len(data_loader),
[batch_time],
prefix='Computing teacher output: ')
teacher_output = []
with torch.no_grad():
end = time.time()
for i, (images, _) in enumerate(data_loader):
images = images.to(device)
output = classifier_teacher(images)
teacher_output.append(output)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
teacher_output = torch.cat(teacher_output, dim=0)
return teacher_output |
Construct labeled and unlabeled subsets, where the labeled subset is class balanced. Note that the resulting
subsets are **deterministic** with the same random seed. | def x_u_split(num_samples_per_class, num_classes, labels, seed):
"""
Construct labeled and unlabeled subsets, where the labeled subset is class balanced. Note that the resulting
subsets are **deterministic** with the same random seed.
"""
labels = np.array(labels)
assert num_samples_per_class * num_classes <= len(labels)
random_state = np.random.RandomState(seed)
# labeled subset
labeled_idxes = []
for i in range(num_classes):
ith_class_idxes = np.where(labels == i)[0]
ith_class_idxes = random_state.choice(ith_class_idxes, num_samples_per_class, False)
labeled_idxes.extend(ith_class_idxes)
# unlabeled subset
unlabeled_idxes = [i for i in range(len(labels)) if i not in labeled_idxes]
return labeled_idxes, unlabeled_idxes |
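A small usage sketch (my addition): with toy labels, the labeled split is class balanced and reproducible for a fixed seed.

```python
# Toy labels: 10 classes with 100 samples each; the same seed always yields the same split.
labels = [i % 10 for i in range(1000)]
labeled_idxes, unlabeled_idxes = x_u_split(num_samples_per_class=4, num_classes=10, labels=labels, seed=0)
print(len(labeled_idxes), len(unlabeled_idxes))  # 40 960
```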
Converts a dataset which returns (img, label) pairs into one that returns (index, img, label) triplets. | def convert_dataset(dataset):
"""
Converts a dataset which returns (img, label) pairs into one that returns (index, img, label) triplets.
"""
class DatasetWrapper:
def __init__(self):
self.dataset = dataset
def __getitem__(self, index):
return index, self.dataset[index]
def __len__(self):
return len(self.dataset)
return DatasetWrapper() |
Cosine learning rate scheduler from `FixMatch: Simplifying Semi-Supervised Learning with
Consistency and Confidence (NIPS 2020) <https://arxiv.org/abs/2001.07685>`_.
Args:
optimizer (Optimizer): Wrapped optimizer.
T_max (int): Maximum number of iterations.
num_cycles (float): A scalar that controls the shape of cosine function. Default: 7/16.
num_warmup_steps (int): Number of iterations to warm up. Default: 0.
last_epoch (int): The index of last epoch. Default: -1. | def get_cosine_scheduler_with_warmup(optimizer, T_max, num_cycles=7. / 16., num_warmup_steps=0,
last_epoch=-1):
"""
Cosine learning rate scheduler from `FixMatch: Simplifying Semi-Supervised Learning with
Consistency and Confidence (NIPS 2020) <https://arxiv.org/abs/2001.07685>`_.
Args:
optimizer (Optimizer): Wrapped optimizer.
T_max (int): Maximum number of iterations.
num_cycles (float): A scalar that controls the shape of cosine function. Default: 7/16.
num_warmup_steps (int): Number of iterations to warm up. Default: 0.
last_epoch (int): The index of last epoch. Default: -1.
"""
def _lr_lambda(current_step):
if current_step < num_warmup_steps:
_lr = float(current_step) / float(max(1, num_warmup_steps))
else:
num_cos_steps = float(current_step - num_warmup_steps)
num_cos_steps = num_cos_steps / float(max(1, T_max - num_warmup_steps))
_lr = max(0.0, math.cos(math.pi * num_cycles * num_cos_steps))
return _lr
return LambdaLR(optimizer, _lr_lambda, last_epoch) |
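A usage sketch (my addition; the hyper-parameter values are arbitrary): wrap any torch optimizer, warm up for the first 500 steps, then decay with the truncated cosine above.

```python
import torch
from torch.optim import SGD

model = torch.nn.Linear(10, 2)
optimizer = SGD(model.parameters(), lr=0.03, momentum=0.9)
scheduler = get_cosine_scheduler_with_warmup(optimizer, T_max=10000, num_warmup_steps=500)
for step in range(10000):
    # ... forward/backward pass would go here ...
    optimizer.step()
    scheduler.step()
```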
When sample_rate < 100, e.g. sample_rate = 50, use 50% of the data to train the model.
Otherwise,
if num_samples_per_classes is not None, e.g. 5, then sample 5 images for each class, and use them to train the model;
otherwise, keep all the data. | def get_dataset(dataset_name, root, train_transform, val_transform, sample_rate=100, num_samples_per_classes=None):
"""
When sample_rate < 100, e.g. sample_rate = 50, use 50% of the data to train the model.
Otherwise,
if num_samples_per_classes is not None, e.g. 5, then sample 5 images for each class, and use them to train the model;
otherwise, keep all the data.
"""
dataset = datasets.__dict__[dataset_name]
if sample_rate < 100:
train_dataset = dataset(root=root, split='train', sample_rate=sample_rate, download=True, transform=train_transform)
test_dataset = dataset(root=root, split='test', sample_rate=100, download=True, transform=val_transform)
num_classes = train_dataset.num_classes
else:
train_dataset = dataset(root=root, split='train', download=True, transform=train_transform)
test_dataset = dataset(root=root, split='test', download=True, transform=val_transform)
num_classes = train_dataset.num_classes
if num_samples_per_classes is not None:
samples = list(range(len(train_dataset)))
random.shuffle(samples)
samples_len = min(num_samples_per_classes * num_classes, len(train_dataset))
print("Origin dataset:", len(train_dataset), "Sampled dataset:", samples_len, "Ratio:", float(samples_len) / len(train_dataset))
train_dataset = Subset(train_dataset, samples[:samples_len])
return train_dataset, test_dataset, num_classes |
resizing mode:
- default: take a random resized crop of size 224 with scale in [0.2, 1.];
- res: resize the image to 224;
- res.|crop: resize the image to 256 and take a random crop of size 224;
- res.sma|crop: resize the image keeping its aspect ratio such that the
smaller side is 256, then take a random crop of size 224;
– inc.crop: “inception crop” from (Szegedy et al., 2015);
– cif.crop: resize the image to 224, zero-pad it by 28 on each side, then take a random crop of size 224. | def get_train_transform(resizing='default', random_horizontal_flip=True, random_color_jitter=False):
"""
resizing mode:
- default: take a random resized crop of size 224 with scale in [0.2, 1.];
- res: resize the image to 224;
- res.|crop: resize the image to 256 and take a random crop of size 224;
- res.sma|crop: resize the image keeping its aspect ratio such that the
smaller side is 256, then take a random crop of size 224;
– inc.crop: “inception crop” from (Szegedy et al., 2015);
– cif.crop: resize the image to 224, zero-pad it by 28 on each side, then take a random crop of size 224.
"""
if resizing == 'default':
transform = T.RandomResizedCrop(224, scale=(0.2, 1.))
elif resizing == 'res.':
transform = T.Resize((224, 224))
elif resizing == 'res.|crop':
transform = T.Compose([
T.Resize((256, 256)),
T.RandomCrop(224)
])
elif resizing == "res.sma|crop":
transform = T.Compose([
T.Resize(256),
T.RandomCrop(224)
])
elif resizing == 'inc.crop':
transform = T.RandomResizedCrop(224)
elif resizing == 'cif.crop':
transform = T.Compose([
T.Resize((224, 224)),
T.Pad(28),
T.RandomCrop(224),
])
else:
raise NotImplementedError(resizing)
transforms = [transform]
if random_horizontal_flip:
transforms.append(T.RandomHorizontalFlip())
if random_color_jitter:
transforms.append(T.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.5))
transforms.extend([
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
return T.Compose(transforms) |
resizing mode:
- default: resize the image to 256 and take the center crop of size 224;
– res.: resize the image to 224
– res.|crop: resize the image such that the smaller side is of size 256 and
then take a central crop of size 224. | def get_val_transform(resizing='default'):
"""
resizing mode:
- default: resize the image to 256 and take the center crop of size 224;
– res.: resize the image to 224
– res.|crop: resize the image such that the smaller side is of size 256 and
then take a central crop of size 224.
"""
if resizing == 'default':
transform = T.Compose([
T.Resize(256),
T.CenterCrop(224),
])
elif resizing == 'res.':
transform = T.Resize((224, 224))
elif resizing == 'res.|crop':
transform = T.Compose([
T.Resize((256, 256)),
T.CenterCrop(224),
])
else:
raise NotImplementedError(resizing)
return T.Compose([
transform,
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
]) |
Args:
optimizer_name:
- SGD
- Adam
params: iterable of parameters to optimize or dicts defining parameter groups
lr: learning rate
wd: weight decay
momentum: momentum factor for SGD | def get_optimizer(optimizer_name, params, lr, wd, momentum):
'''
Args:
optimizer_name:
- SGD
- Adam
params: iterable of parameters to optimize or dicts defining parameter groups
lr: learning rate
wd: weight decay
momentum: momentum factor for SGD
'''
if optimizer_name == 'SGD':
optimizer = SGD(params=params, lr=lr, momentum=momentum, weight_decay=wd, nesterov=True)
elif optimizer_name == 'Adam':
optimizer = Adam(params=params, lr=lr, weight_decay=wd)
else:
raise NotImplementedError(optimizer_name)
return optimizer |
Args:
image (tensor): 3 x H x W
filename: filename of the saving image | def visualize(image, filename):
"""
Args:
image (tensor): 3 x H x W
filename: filename of the saving image
"""
image = image.detach().cpu()
image = Denormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(image)
image = image.numpy().transpose((1, 2, 0)) * 255
Image.fromarray(np.uint8(image)).save(filename) |
convert probabilistic prediction maps to weighted self-information maps
| def prob_2_entropy(prob):
""" convert probabilistic prediction maps to weighted self-information maps
"""
n, c, h, w = prob.size()
return -torch.mul(prob, torch.log2(prob + 1e-30)) / np.log2(c) |
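A shape sketch (my addition): for a segmentation-style output of shape (N, C, H, W), apply softmax over the class dimension first, then convert to the weighted self-information map. The 19 classes below are just an illustrative choice.

```python
import torch
import torch.nn.functional as F

logits = torch.randn(2, 19, 64, 64)           # batch of 2, 19 classes, 64x64 resolution
ent_map = prob_2_entropy(F.softmax(logits, dim=1))
print(ent_map.shape)                          # torch.Size([2, 19, 64, 64])
```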
Update the `index_matrix` that converts `kernel_matrix` to loss.
If `index_matrix` is a tensor with shape (2 x batch_size, 2 x batch_size), then return `index_matrix`.
Else return a new tensor with shape (2 x batch_size, 2 x batch_size). | def _update_index_matrix(batch_size: int, index_matrix: Optional[torch.Tensor] = None,
linear: Optional[bool] = True) -> torch.Tensor:
r"""
Update the `index_matrix` that converts `kernel_matrix` to loss.
If `index_matrix` is a tensor with shape (2 x batch_size, 2 x batch_size), then return `index_matrix`.
Else return a new tensor with shape (2 x batch_size, 2 x batch_size).
"""
if index_matrix is None or index_matrix.size(0) != batch_size * 2:
index_matrix = torch.zeros(2 * batch_size, 2 * batch_size)
if linear:
for i in range(batch_size):
s1, s2 = i, (i + 1) % batch_size
t1, t2 = s1 + batch_size, s2 + batch_size
index_matrix[s1, s2] = 1. / float(batch_size)
index_matrix[t1, t2] = 1. / float(batch_size)
index_matrix[s1, t2] = -1. / float(batch_size)
index_matrix[s2, t1] = -1. / float(batch_size)
else:
for i in range(batch_size):
for j in range(batch_size):
if i != j:
index_matrix[i][j] = 1. / float(batch_size * (batch_size - 1))
index_matrix[i + batch_size][j + batch_size] = 1. / float(batch_size * (batch_size - 1))
for i in range(batch_size):
for j in range(batch_size):
index_matrix[i][j + batch_size] = -1. / float(batch_size * batch_size)
index_matrix[i + batch_size][j] = -1. / float(batch_size * batch_size)
return index_matrix |
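One plausible way (my own sketch, not necessarily the library's exact loss) to combine this index matrix with a kernel matrix into an MMD-style estimate, assuming the first `batch_size` rows are source samples and the last `batch_size` rows are target samples:

```python
import torch

batch_size, dim = 4, 16
z_s = torch.randn(batch_size, dim)            # source-domain features
z_t = torch.randn(batch_size, dim)            # target-domain features
features = torch.cat([z_s, z_t], dim=0)       # first half source, second half target

# A Gaussian kernel matrix over all 2 * batch_size samples (bandwidth set from the data).
sq_dist = torch.cdist(features, features) ** 2
kernel_matrix = torch.exp(-sq_dist / (2 * sq_dist.mean()))

index_matrix = _update_index_matrix(batch_size, None, linear=True)
mmd_estimate = (kernel_matrix * index_matrix).sum()
print(mmd_estimate.item())
```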
The `Classifier Discrepancy` in
`Maximum Classifier Discrepancy for Unsupervised Domain Adaptation (CVPR 2018) <https://arxiv.org/abs/1712.02560>`_.
The classifier discrepancy between predictions :math:`p_1` and :math:`p_2` can be described as:
.. math::
d(p_1, p_2) = \dfrac{1}{K} \sum_{k=1}^K | p_{1k} - p_{2k} |,
where K is number of classes.
Args:
predictions1 (torch.Tensor): Classifier predictions :math:`p_1`. Expected to contain raw, normalized scores for each class
predictions2 (torch.Tensor): Classifier predictions :math:`p_2` | def classifier_discrepancy(predictions1: torch.Tensor, predictions2: torch.Tensor) -> torch.Tensor:
r"""The `Classifier Discrepancy` in
`Maximum Classifier Discrepancy for Unsupervised Domain Adaptation (CVPR 2018) <https://arxiv.org/abs/1712.02560>`_.
The classifier discrepancy between predictions :math:`p_1` and :math:`p_2` can be described as:
.. math::
d(p_1, p_2) = \dfrac{1}{K} \sum_{k=1}^K | p_{1k} - p_{2k} |,
where K is number of classes.
Args:
predictions1 (torch.Tensor): Classifier predictions :math:`p_1`. Expected to contain raw, normalized scores for each class
predictions2 (torch.Tensor): Classifier predictions :math:`p_2`
"""
return torch.mean(torch.abs(predictions1 - predictions2)) |
Entropy of N predictions :math:`(p_1, p_2, ..., p_N)`.
The definition is:
.. math::
d(p_1, p_2, ..., p_N) = -\dfrac{1}{K} \sum_{k=1}^K \log \left( \dfrac{1}{N} \sum_{i=1}^N p_{ik} \right)
where K is number of classes.
.. note::
This entropy function is specifically used in MCD and different from the usual :meth:`~tllib.modules.entropy.entropy` function.
Args:
predictions (torch.Tensor): Classifier predictions. Expected to contain raw, normalized scores for each class | def entropy(predictions: torch.Tensor) -> torch.Tensor:
r"""Entropy of N predictions :math:`(p_1, p_2, ..., p_N)`.
The definition is:
.. math::
d(p_1, p_2, ..., p_N) = -\dfrac{1}{K} \sum_{k=1}^K \log \left( \dfrac{1}{N} \sum_{i=1}^N p_{ik} \right)
where K is number of classes.
.. note::
This entropy function is specifically used in MCD and different from the usual :meth:`~tllib.modules.entropy.entropy` function.
Args:
predictions (torch.Tensor): Classifier predictions. Expected to contain raw, normalized scores for each class
"""
return -torch.mean(torch.log(torch.mean(predictions, 0) + 1e-6)) |
First shift, then calculate log, which can be described as:
.. math::
y = \log(\min(x+\text{offset}, 1))
Used to avoid the gradient explosion problem in log(x) function when x=0.
Args:
x (torch.Tensor): input tensor
offset (float, optional): offset size. Default: 1e-6
.. note::
Input tensor falls in [0., 1.] and the output tensor falls in [log(offset), 0] | def shift_log(x: torch.Tensor, offset: Optional[float] = 1e-6) -> torch.Tensor:
r"""
First shift, then calculate log, which can be described as:
.. math::
y = \log(\min(x+\text{offset}, 1))
Used to avoid the gradient explosion problem in log(x) function when x=0.
Args:
x (torch.Tensor): input tensor
offset (float, optional): offset size. Default: 1e-6
.. note::
Input tensor falls in [0., 1.] and the output tensor falls in [log(offset), 0]
"""
return torch.log(torch.clamp(x + offset, max=1.)) |
Load precomputed object feedbacks into the dataset.
Args:
dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
proposals_list (list[Proposal]): list of Proposal.
Returns:
list[dict]: the same format as dataset_dicts, but added feedback field. | def load_feedbacks_into_dataset(dataset_dicts, proposals_list: List[Proposal]):
"""
Load precomputed object feedbacks into the dataset.
Args:
dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
proposals_list (list[Proposal]): list of Proposal.
Returns:
list[dict]: the same format as dataset_dicts, but with added feedback fields.
"""
feedbacks = {}
for record in dataset_dicts:
image_id = str(record["image_id"])
feedbacks[image_id] = {
'pred_boxes': [],
'pred_classes': [],
}
for proposals in proposals_list:
image_id = str(proposals.image_id)
feedbacks[image_id]['pred_boxes'] += proposals.pred_boxes.tolist()
feedbacks[image_id]['pred_classes'] += proposals.pred_classes.tolist()
# Assuming default bbox_mode of precomputed feedbacks are 'XYXY_ABS'
bbox_mode = BoxMode.XYXY_ABS
dataset_dicts_with_feedbacks = []
for record in dataset_dicts:
# Get the index of the feedback
image_id = str(record["image_id"])
record["feedback_proposal_boxes"] = feedbacks[image_id]["pred_boxes"]
record["feedback_gt_classes"] = feedbacks[image_id]["pred_classes"]
record["feedback_gt_boxes"] = feedbacks[image_id]["pred_boxes"]
record["feedback_bbox_mode"] = bbox_mode
if sum(map(lambda x: x >= 0, feedbacks[image_id]["pred_classes"])) > 0: # remove images without feedbacks
dataset_dicts_with_feedbacks.append(record)
return dataset_dicts_with_feedbacks |
Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation.
Args:
names (str or list[str]): a dataset name or a list of dataset names
filter_empty (bool): whether to filter out images without instance annotations
min_keypoints (int): filter out images with fewer keypoints than
`min_keypoints`. Set to 0 to do nothing.
proposals_list (optional, list[Proposal]): list of Proposal.
Returns:
list[dict]: a list of dicts following the standard dataset dict format. | def get_detection_dataset_dicts(names, filter_empty=True, min_keypoints=0, proposals_list=None):
"""
Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation.
Args:
names (str or list[str]): a dataset name or a list of dataset names
filter_empty (bool): whether to filter out images without instance annotations
min_keypoints (int): filter out images with fewer keypoints than
`min_keypoints`. Set to 0 to do nothing.
proposals_list (optional, list[Proposal]): list of Proposal.
Returns:
list[dict]: a list of dicts following the standard dataset dict format.
"""
if isinstance(names, str):
names = [names]
assert len(names), names
dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names]
for dataset_name, dicts in zip(names, dataset_dicts):
assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
if proposals_list is not None:
# load precomputed feedbacks for each proposals
dataset_dicts = load_feedbacks_into_dataset(dataset_dicts, proposals_list)
has_instances = "annotations" in dataset_dicts[0]
if filter_empty and has_instances:
dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts)
if min_keypoints > 0 and has_instances:
dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints)
if has_instances:
try:
class_names = MetadataCatalog.get(names[0]).thing_classes
check_metadata_consistency("thing_classes", names)
print_instances_class_histogram(dataset_dicts, class_names)
except AttributeError: # class names are not available for this dataset
pass
assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names))
return dataset_dicts |
Apply transformations to the feedbacks in dataset_dict, if any.
Args:
dataset_dict (dict): a dict read from the dataset, possibly
contains fields "proposal_boxes", "proposal_objectness_logits", "proposal_bbox_mode"
image_shape (tuple): height, width
transforms (TransformList):
min_box_size (int): proposals with either side smaller than this
threshold are removed
The input dict is modified in-place, with abovementioned keys removed. A new
key "proposals" will be added. Its value is an `Instances`
object which contains the transformed proposals in its field
"proposal_boxes" and "objectness_logits". | def transform_feedbacks(dataset_dict, image_shape, transforms, *, min_box_size=0):
"""
Apply transformations to the feedbacks in dataset_dict, if any.
Args:
dataset_dict (dict): a dict read from the dataset, possibly
contains fields "proposal_boxes", "proposal_objectness_logits", "proposal_bbox_mode"
image_shape (tuple): height, width
transforms (TransformList):
min_box_size (int): proposals with either side smaller than this
threshold are removed
The input dict is modified in-place, with abovementioned keys removed. A new
key "proposals" will be added. Its value is an `Instances`
object which contains the transformed proposals in its field
"proposal_boxes" and "objectness_logits".
"""
if "feedback_proposal_boxes" in dataset_dict:
# Transform proposal boxes
proposal_boxes = transforms.apply_box(
BoxMode.convert(
dataset_dict.pop("feedback_proposal_boxes"),
dataset_dict.get("feedback_bbox_mode"),
BoxMode.XYXY_ABS,
)
)
proposal_boxes = Boxes(proposal_boxes)
gt_boxes = transforms.apply_box(
BoxMode.convert(
dataset_dict.pop("feedback_gt_boxes"),
dataset_dict.get("feedback_bbox_mode"),
BoxMode.XYXY_ABS,
)
)
gt_boxes = Boxes(gt_boxes)
gt_classes = torch.as_tensor(
dataset_dict.pop("feedback_gt_classes")
)
proposal_boxes.clip(image_shape)
gt_boxes.clip(image_shape)
keep = proposal_boxes.nonempty(threshold=min_box_size) & (gt_classes >= 0)
# keep = boxes.nonempty(threshold=min_box_size)
proposal_boxes = proposal_boxes[keep]
gt_boxes = gt_boxes[keep]
gt_classes = gt_classes[keep]
feedbacks = Instances(image_shape)
feedbacks.proposal_boxes = proposal_boxes
feedbacks.gt_boxes = gt_boxes
feedbacks.gt_classes = gt_classes
dataset_dict["feedbacks"] = feedbacks |
Flatten a list of proposals
Args:
proposal_list (list): a list of proposals grouped by images
max_number (int): maximum number of kept proposals for each image | def flatten(proposal_list, max_number=10000):
"""
Flatten a list of proposals
Args:
proposal_list (list): a list of proposals grouped by images
max_number (int): maximum number of kept proposals for each image
"""
flattened_list = []
for proposals in proposal_list:
for i in range(min(len(proposals), max_number)):
flattened_list.append(proposals[i:i+1])
return flattened_list |
Same as `tllib.modules.loss.LabelSmoothSoftmaxCEV1`, but returns 0 (instead of nan)
for empty inputs. | def label_smoothing_cross_entropy(input, target, *, reduction="mean", **kwargs):
"""
Same as `tllib.modules.loss.LabelSmoothSoftmaxCEV1`, but returns 0 (instead of nan)
for empty inputs.
"""
if target.numel() == 0 and reduction == "mean":
return input.sum() * 0.0 # connect the gradient
return LabelSmoothSoftmaxCEV1(reduction=reduction, **kwargs)(input, target) |
Call `fast_rcnn_sample_background_single_image` for all images.
Args:
boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic
boxes for each image. Element i has shape (Ri, K * 4) if doing
class-specific regression, or (Ri, 4) if doing class-agnostic
regression, where Ri is the number of predicted objects for image i.
This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`.
scores (list[Tensor]): A list of Tensors of predicted class scores for each image.
Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`.
image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch.
score_thresh (float): Only return detections with a confidence score exceeding this
threshold.
nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].
topk_per_image (int): The number of top scoring detections to return. Set < 0 to return
all detections.
Returns:
instances: (list[Instances]): A list of N instances, one for each image in the batch,
that stores the background proposals.
kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates
the corresponding boxes/scores index in [0, Ri) from the input, for image i. | def fast_rcnn_sample_background(
boxes: List[torch.Tensor],
scores: List[torch.Tensor],
image_shapes: List[Tuple[int, int]],
score_thresh: float,
nms_thresh: float,
topk_per_image: int,
):
"""
Call `fast_rcnn_sample_background_single_image` for all images.
Args:
boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic
boxes for each image. Element i has shape (Ri, K * 4) if doing
class-specific regression, or (Ri, 4) if doing class-agnostic
regression, where Ri is the number of predicted objects for image i.
This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`.
scores (list[Tensor]): A list of Tensors of predicted class scores for each image.
Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`.
image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch.
score_thresh (float): Only return detections with a confidence score exceeding this
threshold.
nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].
topk_per_image (int): The number of top scoring detections to return. Set < 0 to return
all detections.
Returns:
instances: (list[Instances]): A list of N instances, one for each image in the batch,
that stores the background proposals.
kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates
the corresponding boxes/scores index in [0, Ri) from the input, for image i.
"""
result_per_image = [
fast_rcnn_sample_background_single_image(
boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image
)
for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes)
]
return [x[0] for x in result_per_image], [x[1] for x in result_per_image] |
Single-image background sampling.
Args:
Same as `fast_rcnn_sample_background`, but with boxes, scores, and image shapes
per image.
Returns:
Same as `fast_rcnn_sample_background`, but for only one image. | def fast_rcnn_sample_background_single_image(
boxes,
scores,
image_shape: Tuple[int, int],
score_thresh: float,
nms_thresh: float,
topk_per_image: int,
):
"""
Single-image background sampling.
Args:
Same as `fast_rcnn_sample_background`, but with boxes, scores, and image shapes
per image.
Returns:
Same as `fast_rcnn_sample_background`, but for only one image.
"""
valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1)
if not valid_mask.all():
boxes = boxes[valid_mask]
scores = scores[valid_mask]
num_classes = scores.shape[1]
# Only keep background proposals
scores = scores[:, -1:]
# Convert to Boxes to use the `clip` function ...
boxes = Boxes(boxes.reshape(-1, 4))
boxes.clip(image_shape)
boxes = boxes.tensor.view(-1, 1, 4) # R x C x 4
# 1. Filter results based on detection scores. It can make NMS more efficient
# by filtering out low-confidence detections.
filter_mask = scores > score_thresh # R
# R' x 2. First column contains indices of the R predictions;
# Second column contains indices of classes.
filter_inds = filter_mask.nonzero()
boxes = boxes[filter_mask]
scores = scores[filter_mask]
# 2. Apply NMS only for background class
keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh)
if 0 <= topk_per_image < len(keep):
idx = list(range(len(keep)))
idx = random.sample(idx, k=topk_per_image)
idx = sorted(idx)
keep = keep[idx]
boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]
result = Instances(image_shape)
result.pred_boxes = Boxes(boxes)
result.scores = scores
result.pred_classes = filter_inds[:, 1] + num_classes - 1
return result, filter_inds[:, 0] |
Entropy of prediction.
The definition is:
.. math::
entropy(p) = - \sum_{c=1}^C p_c \log p_c
where C is number of classes.
Args:
predictions (tensor): Classifier predictions. Expected to contain raw, normalized scores for each class
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output. Default: ``'none'``
Shape:
- predictions: :math:`(minibatch, C)` where C means the number of classes.
- Output: :math:`(minibatch, )` by default. If :attr:`reduction` is ``'mean'``, then scalar. | def entropy(predictions: torch.Tensor, reduction='none') -> torch.Tensor:
r"""Entropy of prediction.
The definition is:
.. math::
entropy(p) = - \sum_{c=1}^C p_c \log p_c
where C is number of classes.
Args:
predictions (tensor): Classifier predictions. Expected to contain raw, normalized scores for each class
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output. Default: ``'none'``
Shape:
- predictions: :math:`(minibatch, C)` where C means the number of classes.
- Output: :math:`(minibatch, )` by default. If :attr:`reduction` is ``'mean'``, then scalar.
"""
epsilon = 1e-5
H = -predictions * torch.log(predictions + epsilon)
H = H.sum(dim=1)
if reduction == 'mean':
return H.mean()
else:
return H |
Constructs a ResNet-18-IBN-a model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | def resnet18_ibn_a(pretrained=False):
"""Constructs a ResNet-18-IBN-a model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = IBNNet(block=BasicBlock,
layers=[2, 2, 2, 2],
ibn_cfg=('a', 'a', 'a', None))
if pretrained:
model.load_state_dict(torch.hub.load_state_dict_from_url(model_urls['resnet18_ibn_a']), strict=False)
return model |
Constructs a ResNet-34-IBN-a model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | def resnet34_ibn_a(pretrained=False):
"""Constructs a ResNet-34-IBN-a model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = IBNNet(block=BasicBlock,
layers=[3, 4, 6, 3],
ibn_cfg=('a', 'a', 'a', None))
if pretrained:
model.load_state_dict(torch.hub.load_state_dict_from_url(model_urls['resnet34_ibn_a']), strict=False)
return model |
Constructs a ResNet-50-IBN-a model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | def resnet50_ibn_a(pretrained=False):
"""Constructs a ResNet-50-IBN-a model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = IBNNet(block=Bottleneck,
layers=[3, 4, 6, 3],
ibn_cfg=('a', 'a', 'a', None))
if pretrained:
model.load_state_dict(torch.hub.load_state_dict_from_url(model_urls['resnet50_ibn_a']), strict=False)
return model |
Constructs a ResNet-101-IBN-a model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | def resnet101_ibn_a(pretrained=False):
"""Constructs a ResNet-101-IBN-a model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = IBNNet(block=Bottleneck,
layers=[3, 4, 23, 3],
ibn_cfg=('a', 'a', 'a', None))
if pretrained:
model.load_state_dict(torch.hub.load_state_dict_from_url(model_urls['resnet101_ibn_a']), strict=False)
return model |
Constructs a ResNet-18-IBN-b model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | def resnet18_ibn_b(pretrained=False):
"""Constructs a ResNet-18-IBN-b model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = IBNNet(block=BasicBlock,
layers=[2, 2, 2, 2],
ibn_cfg=('b', 'b', None, None))
if pretrained:
model.load_state_dict(torch.hub.load_state_dict_from_url(model_urls['resnet18_ibn_b']), strict=False)
return model |
Constructs a ResNet-34-IBN-b model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | def resnet34_ibn_b(pretrained=False):
"""Constructs a ResNet-34-IBN-b model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = IBNNet(block=BasicBlock,
layers=[3, 4, 6, 3],
ibn_cfg=('b', 'b', None, None))
if pretrained:
model.load_state_dict(torch.hub.load_state_dict_from_url(model_urls['resnet34_ibn_b']), strict=False)
return model |
Constructs a ResNet-50-IBN-b model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | def resnet50_ibn_b(pretrained=False):
"""Constructs a ResNet-50-IBN-b model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = IBNNet(block=Bottleneck,
layers=[3, 4, 6, 3],
ibn_cfg=('b', 'b', None, None))
if pretrained:
model.load_state_dict(torch.hub.load_state_dict_from_url(model_urls['resnet50_ibn_b']), strict=False)
return model |
Constructs a ResNet-101-IBN-b model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | def resnet101_ibn_b(pretrained=False):
"""Constructs a ResNet-101-IBN-b model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = IBNNet(block=Bottleneck,
layers=[3, 4, 23, 3],
ibn_cfg=('b', 'b', None, None))
if pretrained:
model.load_state_dict(torch.hub.load_state_dict_from_url(model_urls['resnet101_ibn_b']), strict=False)
return model |
Traverses the input module and its children recursively and replaces all
instances of BatchNorm with StochNorm.
Args:
module (torch.nn.Module): The input module to be converted to its StochNorm version.
p (float): The hyper-parameter for the StochNorm layers.
Returns:
The module converted to its StochNorm version. | def convert_model(module, p):
"""
Traverses the input module and its children recursively and replaces all
instances of BatchNorm with StochNorm.
Args:
module (torch.nn.Module): The input module to be converted to its StochNorm version.
p (float): The hyper-parameter for the StochNorm layers.
Returns:
The module converted to its StochNorm version.
"""
mod = module
for pth_module, stoch_module in zip([torch.nn.modules.batchnorm.BatchNorm1d,
torch.nn.modules.batchnorm.BatchNorm2d,
torch.nn.modules.batchnorm.BatchNorm3d],
[StochNorm1d,
StochNorm2d,
StochNorm3d]):
if isinstance(module, pth_module):
mod = stoch_module(module.num_features, module.eps, module.momentum, module.affine, p)
mod.running_mean = module.running_mean
mod.running_var = module.running_var
if module.affine:
mod.weight.data = module.weight.data.clone().detach()
mod.bias.data = module.bias.data.clone().detach()
for name, child in module.named_children():
mod.add_module(name, convert_model(child, p))
return mod |
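Usage sketch (my addition; `p=0.5` is an arbitrary choice): convert a torchvision ResNet-50 so that every BatchNorm layer becomes its StochNorm counterpart while keeping the running statistics and affine parameters.

```python
import torchvision.models as models

backbone = models.resnet50(pretrained=False)
backbone = convert_model(backbone, p=0.5)   # all BatchNorm1d/2d/3d layers are now StochNorm layers
```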
Construct `ResNet` with MixStyle modules. Given any resnet architecture **resnet_class** that contains conv1,
bn1, relu, maxpool, layer1-4, this function defines a new class that inherits from **resnet_class** and inserts
MixStyle modules during the forward pass. Although a MixStyle module can be inserted anywhere, the original paper finds it
better to place MixStyle after layer1-3. Our implementation follows this idea, but you are free to modify this
function to try other possibilities.
Args:
arch (str): resnet architecture (resnet50 for example)
block (class): class of resnet block
layers (list): depth list of each block
pretrained (bool): if True, load imagenet pre-trained model parameters
progress (bool): whether or not to display a progress bar to stderr
mix_layers (list): layers to insert MixStyle module after
mix_p (float): probability to activate MixStyle during forward pass
mix_alpha (float): parameter alpha for beta distribution
resnet_class (class): base resnet class to inherit from | def _resnet_with_mix_style(arch, block, layers, pretrained, progress, mix_layers=None, mix_p=0.5, mix_alpha=0.1,
resnet_class=ResNet, **kwargs):
"""Construct `ResNet` with MixStyle modules. Given any resnet architecture **resnet_class** that contains conv1,
bn1, relu, maxpool, layer1-4, this function define a new class that inherits from **resnet_class** and inserts
MixStyle module during forward pass. Although MixStyle Module can be inserted anywhere, original paper finds it
better to place MixStyle after layer1-3. Our implementation follows this idea, but you are free to modify this
function to try other possibilities.
Args:
arch (str): resnet architecture (resnet50 for example)
block (class): class of resnet block
layers (list): depth list of each block
pretrained (bool): if True, load imagenet pre-trained model parameters
progress (bool): whether or not to display a progress bar to stderr
mix_layers (list): layers to insert MixStyle module after
mix_p (float): probability to activate MixStyle during forward pass
mix_alpha (float): parameter alpha for beta distribution
resnet_class (class): base resnet class to inherit from
"""
if mix_layers is None:
mix_layers = []
available_resnet_class = [ResNet, ReidResNet]
assert resnet_class in available_resnet_class
class ResNetWithMixStyleModule(resnet_class):
def __init__(self, mix_layers, mix_p=0.5, mix_alpha=0.1, *args, **kwargs):
super(ResNetWithMixStyleModule, self).__init__(*args, **kwargs)
self.mixStyleModule = MixStyle(p=mix_p, alpha=mix_alpha)
for layer in mix_layers:
assert layer in ['layer1', 'layer2', 'layer3']
self.apply_layers = mix_layers
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
# turn on relu activation here **except for** reid tasks
if resnet_class != ReidResNet:
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
if 'layer1' in self.apply_layers:
x = self.mixStyleModule(x)
x = self.layer2(x)
if 'layer2' in self.apply_layers:
x = self.mixStyleModule(x)
x = self.layer3(x)
if 'layer3' in self.apply_layers:
x = self.mixStyleModule(x)
x = self.layer4(x)
return x
model = ResNetWithMixStyleModule(mix_layers=mix_layers, mix_p=mix_p, mix_alpha=mix_alpha, block=block,
layers=layers, **kwargs)
if pretrained:
model_dict = model.state_dict()
pretrained_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
# remove keys from the pretrained dict that don't appear in the model dict
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model.load_state_dict(pretrained_dict, strict=False)
return model |
Constructs a ResNet-18 model with MixStyle.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | def resnet18(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-18 model with MixStyle.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet_with_mix_style('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs) |
Constructs a ResNet-34 model with MixStyle.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | def resnet34(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-34 model with MixStyle.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet_with_mix_style('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs) |
Constructs a ResNet-50 model with MixStyle.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | def resnet50(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-50 model with MixStyle.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet_with_mix_style('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs) |
Constructs a ResNet-101 model with MixStyle.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | def resnet101(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-101 model with MixStyle.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet_with_mix_style('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs) |
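Usage sketch for the constructors above (my addition; the hyper-parameter values are illustrative, not prescribed by the snippet):

```python
# MixStyle after layer1 and layer2, activated with probability 0.5 during training.
model = resnet50(pretrained=False, mix_layers=['layer1', 'layer2'], mix_p=0.5, mix_alpha=0.1)
model.train()   # MixStyle is typically only active in training mode
```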
H-score in `An Information-theoretic Approach to Transferability in Task Transfer Learning (ICIP 2019)
<http://yangli-feasibility.com/home/media/icip-19.pdf>`_.
The H-Score :math:`\mathcal{H}` can be described as:
.. math::
\mathcal{H}=\operatorname{tr}\left(\operatorname{cov}(f)^{-1} \operatorname{cov}\left(\mathbb{E}[f \mid y]\right)\right)
where :math:`f` is the features extracted by the model to be ranked, :math:`y` is the ground-truth label vector
Args:
features (np.ndarray): features extracted by pre-trained model.
labels (np.ndarray): ground-truth labels.
Shape:
- features: (N, F), with number of samples N and feature dimension F.
- labels: (N, ) elements in [0, :math:`C_t`), with target class number :math:`C_t`.
- score: scalar. | def h_score(features: np.ndarray, labels: np.ndarray):
r"""
H-score in `An Information-theoretic Approach to Transferability in Task Transfer Learning (ICIP 2019)
<http://yangli-feasibility.com/home/media/icip-19.pdf>`_.
The H-Score :math:`\mathcal{H}` can be described as:
.. math::
\mathcal{H}=\operatorname{tr}\left(\operatorname{cov}(f)^{-1} \operatorname{cov}\left(\mathbb{E}[f \mid y]\right)\right)
where :math:`f` is the features extracted by the model to be ranked, :math:`y` is the ground-truth label vector
Args:
features (np.ndarray): features extracted by pre-trained model.
labels (np.ndarray): ground-truth labels.
Shape:
- features: (N, F), with number of samples N and feature dimension F.
- labels: (N, ) elements in [0, :math:`C_t`), with target class number :math:`C_t`.
- score: scalar.
"""
f = features
y = labels
covf = np.cov(f, rowvar=False)
C = int(y.max() + 1)
g = np.zeros_like(f)
for i in range(C):
Ef_i = np.mean(f[y == i, :], axis=0)
g[y == i] = Ef_i
covg = np.cov(g, rowvar=False)
score = np.trace(np.dot(np.linalg.pinv(covf, rcond=1e-15), covg))
return score |
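A tiny sanity-check sketch on random data (my addition, not from the paper); the result is a single scalar, and better-separated class-conditional feature means relative to the overall covariance yield a higher score.

```python
import numpy as np

rng = np.random.RandomState(0)
features = rng.randn(200, 32)            # (N, F)
labels = rng.randint(0, 5, size=200)     # (N, ) with 5 target classes
print(h_score(features, labels))
```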
Regularized H-score in `Newer is not always better: Rethinking transferability metrics, their peculiarities, stability and performance (NeurIPS 2021)
<https://openreview.net/pdf?id=iz_Wwmfquno>`_.
The regularized H-Score :math:`\mathcal{H}_{\alpha}` can be described as:
.. math::
\mathcal{H}_{\alpha}=\operatorname{tr}\left(\operatorname{cov}_{\alpha}(f)^{-1}\left(1-\alpha \right)\operatorname{cov}\left(\mathbb{E}[f \mid y]\right)\right)
where :math:`f` is the features extracted by the model to be ranked, :math:`y` is the ground-truth label vector and :math:`\operatorname{cov}_{\alpha}` is the Ledoit-Wolf
covariance estimator with shrinkage parameter :math:`\alpha`
Args:
features (np.ndarray): features extracted by pre-trained model.
labels (np.ndarray): ground-truth labels.
Shape:
- features: (N, F), with number of samples N and feature dimension F.
- labels: (N, ) elements in [0, :math:`C_t`), with target class number :math:`C_t`.
- score: scalar. | def regularized_h_score(features: np.ndarray, labels: np.ndarray):
r"""
Regularized H-score in `Newer is not always better: Rethinking transferability metrics, their peculiarities, stability and performance (NeurIPS 2021)
<https://openreview.net/pdf?id=iz_Wwmfquno>`_.
The regularized H-Score :math:`\mathcal{H}_{\alpha}` can be described as:
.. math::
\mathcal{H}_{\alpha}=\operatorname{tr}\left(\operatorname{cov}_{\alpha}(f)^{-1}\left(1-\alpha \right)\operatorname{cov}\left(\mathbb{E}[f \mid y]\right)\right)
where :math:`f` is the features extracted by the model to be ranked, :math:`y` is the ground-truth label vector and :math:`\operatorname{cov}_{\alpha}` is the Ledoit-Wolf
covariance estimator with shrinkage parameter :math:`\alpha`
Args:
features (np.ndarray): features extracted by pre-trained model.
labels (np.ndarray): ground-truth labels.
Shape:
- features: (N, F), with number of samples N and feature dimension F.
- labels: (N, ) elements in [0, :math:`C_t`), with target class number :math:`C_t`.
- score: scalar.
"""
f = features.astype('float64')
f = f - np.mean(f, axis=0, keepdims=True) # Center the features for correct Ledoit-Wolf Estimation
y = labels
C = int(y.max() + 1)
g = np.zeros_like(f)
cov = LedoitWolf(assume_centered=False).fit(f)
alpha = cov.shrinkage_
covf_alpha = cov.covariance_
for i in range(C):
Ef_i = np.mean(f[y == i, :], axis=0)
g[y == i] = Ef_i
covg = np.cov(g, rowvar=False)
score = np.trace(np.dot(np.linalg.pinv(covf_alpha, rcond=1e-15), (1 - alpha) * covg))
return score |
Log Expected Empirical Prediction in `LEEP: A New Measure to
Evaluate Transferability of Learned Representations (ICML 2020)
<http://proceedings.mlr.press/v119/nguyen20b/nguyen20b.pdf>`_.
The LEEP :math:`\mathcal{T}` can be described as:
.. math::
\mathcal{T}=\mathbb{E}\log \left(\sum_{z \in \mathcal{C}_s} \hat{P}\left(y \mid z\right) \theta\left(y \right)_{z}\right)
where :math:`\theta\left(y\right)_{z}` is the predictions of pre-trained model on source category, :math:`\hat{P}\left(y \mid z\right)` is the empirical conditional distribution estimated by prediction and ground-truth label.
Args:
predictions (np.ndarray): predictions of pre-trained model.
labels (np.ndarray): ground-truth labels.
Shape:
- predictions: (N, :math:`C_s`), with number of samples N and source class number :math:`C_s`.
- labels: (N, ) elements in [0, :math:`C_t`), with target class number :math:`C_t`.
- score: scalar | def log_expected_empirical_prediction(predictions: np.ndarray, labels: np.ndarray):
r"""
Log Expected Empirical Prediction in `LEEP: A New Measure to
Evaluate Transferability of Learned Representations (ICML 2020)
<http://proceedings.mlr.press/v119/nguyen20b/nguyen20b.pdf>`_.
The LEEP :math:`\mathcal{T}` can be described as:
.. math::
\mathcal{T}=\mathbb{E}\log \left(\sum_{z \in \mathcal{C}_s} \hat{P}\left(y \mid z\right) \theta\left(y \right)_{z}\right)
where :math:`\theta\left(y\right)_{z}` is the predictions of pre-trained model on source category, :math:`\hat{P}\left(y \mid z\right)` is the empirical conditional distribution estimated by prediction and ground-truth label.
Args:
predictions (np.ndarray): predictions of pre-trained model.
labels (np.ndarray): ground-truth labels.
Shape:
- predictions: (N, :math:`C_s`), with number of samples N and source class number :math:`C_s`.
- labels: (N, ) elements in [0, :math:`C_t`), with target class number :math:`C_t`.
- score: scalar
"""
N, C_s = predictions.shape
labels = labels.reshape(-1)
C_t = int(np.max(labels) + 1)
normalized_prob = predictions / float(N)
joint = np.zeros((C_t, C_s), dtype=float) # placeholder for joint distribution over (y, z)
for i in range(C_t):
this_class = normalized_prob[labels == i]
row = np.sum(this_class, axis=0)
joint[i] = row
p_target_given_source = (joint / joint.sum(axis=0, keepdims=True)).T # P(y | z)
empirical_prediction = predictions @ p_target_given_source
empirical_prob = np.array([predict[label] for predict, label in zip(empirical_prediction, labels)])
score = np.mean(np.log(empirical_prob))
return score |
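A minimal usage sketch (an illustration, not library code); the rows of `predictions` should be probabilities over the source classes, so a softmax is applied to random logits here:
import numpy as np

rng = np.random.RandomState(0)
logits = rng.randn(500, 100)                                   # N=500 target samples, C_s=100 source classes
predictions = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)
labels = rng.randint(0, 10, size=500)                          # C_t=10 target classes
print("LEEP:", log_expected_empirical_prediction(predictions, labels))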
Log Maximum Evidence in `LogME: Practical Assessment of Pre-trained Models
for Transfer Learning (ICML 2021) <https://arxiv.org/pdf/2102.11005.pdf>`_.
Args:
features (np.ndarray): feature matrix from pre-trained model.
targets (np.ndarray): targets labels/values.
regression (bool, optional): whether to apply in regression setting. (Default: False)
return_weights (bool, optional): whether to return bayesian weight. (Default: False)
Shape:
- features: (N, F), with number of samples N and feature dimension F.
- targets: (N, ) with elements in [0, :math:`C_t`) for classification, where :math:`C_t` denotes the number of target classes, or (N, C) with C regression targets.
- weights: (:math:`C_t`, F).
- score: scalar. | def log_maximum_evidence(features: np.ndarray, targets: np.ndarray, regression=False, return_weights=False):
r"""
Log Maximum Evidence in `LogME: Practical Assessment of Pre-trained Models
for Transfer Learning (ICML 2021) <https://arxiv.org/pdf/2102.11005.pdf>`_.
Args:
features (np.ndarray): feature matrix from pre-trained model.
targets (np.ndarray): targets labels/values.
regression (bool, optional): whether to apply in regression setting. (Default: False)
return_weights (bool, optional): whether to return bayesian weight. (Default: False)
Shape:
- features: (N, F), with number of samples N and feature dimension F.
- targets: (N, ) with elements in [0, :math:`C_t`) for classification, where :math:`C_t` denotes the number of target classes, or (N, C) with C regression targets.
- weights: (:math:`C_t`, F).
- score: scalar.
"""
f = features.astype(np.float64)
y = targets
if regression:
y = targets.astype(np.float64)
fh = f
f = f.transpose()
D, N = f.shape
v, s, vh = np.linalg.svd(f @ fh, full_matrices=True)
evidences = []
weights = []
if regression:
C = y.shape[1]
for i in range(C):
y_ = y[:, i]
evidence, weight = each_evidence(y_, f, fh, v, s, vh, N, D)
evidences.append(evidence)
weights.append(weight)
else:
C = int(y.max() + 1)
for i in range(C):
y_ = (y == i).astype(np.float64)
evidence, weight = each_evidence(y_, f, fh, v, s, vh, N, D)
evidences.append(evidence)
weights.append(weight)
score = np.mean(evidences)
weights = np.vstack(weights)
if return_weights:
return score, weights
else:
return score |
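A minimal usage sketch for the classification setting (placeholders only); set `regression=True` and pass (N, C) targets for regression tasks:
import numpy as np

rng = np.random.RandomState(0)
features = rng.randn(300, 128)                 # N=300, F=128
targets = rng.randint(0, 5, size=300)          # 5 target classes
score, weights = log_maximum_evidence(features, targets, return_weights=True)
print(score, weights.shape)                    # one row of Bayesian weights per class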
compute the maximum evidence for each class | def each_evidence(y_, f, fh, v, s, vh, N, D):
"""
compute the maximum evidence for each class
"""
alpha = 1.0
beta = 1.0
lam = alpha / beta
tmp = (vh @ (f @ y_))
for _ in range(11):
# should converge after at most 10 steps
# typically converge after two or three steps
gamma = (s / (s + lam)).sum()
m = v @ (tmp * beta / (alpha + beta * s))
alpha_de = (m * m).sum()
alpha = gamma / alpha_de
beta_de = ((y_ - fh @ m) ** 2).sum()
beta = (N - gamma) / beta_de
new_lam = alpha / beta
if np.abs(new_lam - lam) / lam < 0.01:
break
lam = new_lam
evidence = D / 2.0 * np.log(alpha) \
+ N / 2.0 * np.log(beta) \
- 0.5 * np.sum(np.log(alpha + beta * s)) \
- beta / 2.0 * beta_de \
- alpha / 2.0 * alpha_de \
- N / 2.0 * np.log(2 * np.pi)
return evidence / N, m |
Negative Conditional Entropy in `Transferability and Hardness of Supervised
Classification Tasks (ICCV 2019) <https://arxiv.org/pdf/1908.08142v1.pdf>`_.
The NCE :math:`\mathcal{H}` can be described as:
.. math::
\mathcal{H}=-\sum_{y \in \mathcal{C}_t} \sum_{z \in \mathcal{C}_s} \hat{P}(y, z) \log \frac{\hat{P}(y, z)}{\hat{P}(z)}
where :math:`\hat{P}(z)` is the empirical marginal distribution of the source label and :math:`\hat{P}(y, z)` is the empirical
joint distribution estimated from the source and target labels.
Args:
source_labels (np.ndarray): predicted source labels.
target_labels (np.ndarray): ground-truth target labels.
Shape:
- source_labels: (N, ) elements in [0, :math:`C_s`), with source class number :math:`C_s`.
- target_labels: (N, ) elements in [0, :math:`C_t`), with target class number :math:`C_t`. | def negative_conditional_entropy(source_labels: np.ndarray, target_labels: np.ndarray):
r"""
Negative Conditional Entropy in `Transferability and Hardness of Supervised
Classification Tasks (ICCV 2019) <https://arxiv.org/pdf/1908.08142v1.pdf>`_.
The NCE :math:`\mathcal{H}` can be described as:
.. math::
\mathcal{H}=-\sum_{y \in \mathcal{C}_t} \sum_{z \in \mathcal{C}_s} \hat{P}(y, z) \log \frac{\hat{P}(y, z)}{\hat{P}(z)}
where :math:`\hat{P}(z)` is the empirical marginal distribution of the source label and :math:`\hat{P}(y, z)` is the empirical
joint distribution estimated from the source and target labels.
Args:
source_labels (np.ndarray): predicted source labels.
target_labels (np.ndarray): ground-truth target labels.
Shape:
- source_labels: (N, ) elements in [0, :math:`C_s`), with source class number :math:`C_s`.
- target_labels: (N, ) elements in [0, :math:`C_t`), with target class number :math:`C_t`.
"""
C_t = int(np.max(target_labels) + 1)
C_s = int(np.max(source_labels) + 1)
N = len(source_labels)
joint = np.zeros((C_t, C_s), dtype=float) # placeholder for the joint distribution, shape [C_t, C_s]
for s, t in zip(source_labels, target_labels):
s = int(s)
t = int(t)
joint[t, s] += 1.0 / N
p_z = joint.sum(axis=0, keepdims=True)
p_target_given_source = (joint / p_z).T # P(y | z), shape [C_s, C_t]
mask = p_z.reshape(-1) != 0 # valid Z, shape [C_s]
p_target_given_source = p_target_given_source[mask] + 1e-20 # remove NaN where p(z) = 0, add 1e-20 to avoid log (0)
entropy_y_given_z = np.sum(- p_target_given_source * np.log(p_target_given_source), axis=1, keepdims=True)
conditional_entropy = np.sum(entropy_y_given_z * p_z.reshape((-1, 1))[mask])
return -conditional_entropy |
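A minimal usage sketch (placeholders only); in practice `source_labels` are the argmax predictions of a source-pretrained model on the target data:
import numpy as np

rng = np.random.RandomState(0)
source_labels = rng.randint(0, 20, size=400)    # pseudo source labels, C_s=20
target_labels = rng.randint(0, 10, size=400)    # ground-truth target labels, C_t=10
print("NCE:", negative_conditional_entropy(source_labels, target_labels))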
TransRate in `Frustratingly easy transferability estimation (ICML 2022)
<https://proceedings.mlr.press/v162/huang22d/huang22d.pdf>`_.
The TransRate :math:`TrR` can be described as:
.. math::
TrR = R\left(f, \epsilon\right) - R\left(f, \epsilon \mid y\right)
where :math:`f` is the features extracted by the model to be ranked, :math:`y` is the ground-truth label vector,
and :math:`R` is the coding rate with distortion rate :math:`\epsilon`.
Args:
features (np.ndarray): features extracted by pre-trained model.
labels (np.ndarray): ground-truth labels.
eps (float, optional): distortion rate (Default: 1e-4)
Shape:
- features: (N, F), with number of samples N and feature dimension F.
- labels: (N, ) elements in [0, :math:`C_t`), with target class number :math:`C_t`.
- score: scalar. | def transrate(features: np.ndarray, labels: np.ndarray, eps=1e-4):
r"""
TransRate in `Frustratingly easy transferability estimation (ICML 2022)
<https://proceedings.mlr.press/v162/huang22d/huang22d.pdf>`_.
The TransRate :math:`TrR` can be described as:
.. math::
TrR = R\left(f, \epsilon\right) - R\left(f, \epsilon \mid y\right)
where :math:`f` is the features extracted by the model to be ranked, :math:`y` is the ground-truth label vector,
and :math:`R` is the coding rate with distortion rate :math:`\epsilon`.
Args:
features (np.ndarray): features extracted by pre-trained model.
labels (np.ndarray): ground-truth labels.
eps (float, optional): distortion rate (Default: 1e-4)
Shape:
- features: (N, F), with number of samples N and feature dimension F.
- labels: (N, ) elements in [0, :math:`C_t`), with target class number :math:`C_t`.
- score: scalar.
"""
f = features
y = labels
f = f - np.mean(f, axis=0, keepdims=True)
Rf = coding_rate(f, eps)
Rfy = 0.0
C = int(y.max() + 1)
for i in range(C):
Rfy += coding_rate(f[(y == i).flatten()], eps)
return Rf - Rfy / C |
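`coding_rate` is called above but not shown in this excerpt. Below is a hedged sketch: the helper name is taken from the call above, and the exact scaling of the quadratic term is an assumption following the rate-distortion definition in the TransRate paper (the library's own helper may scale it differently):
import numpy as np

def coding_rate(features: np.ndarray, eps=1e-4):
    """R(Z, eps) = 1/2 * logdet(I_d + d / (n * eps^2) * Z^T Z) for n samples of dimension d."""
    n, d = features.shape
    _, rate = np.linalg.slogdet(np.eye(d) + d / (n * eps * eps) * features.T @ features)
    return 0.5 * rate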
Fetch data from `data_loader`, and then use `classifier` to collect classification results
Args:
data_loader (torch.utils.data.DataLoader): Data loader.
classifier (torch.nn.Module): A classifier.
device (torch.device)
Returns:
Classification results in shape (len(data_loader), :math:`|\mathcal{C}|`). | def collect_classification_results(data_loader: DataLoader, classifier: nn.Module,
device: torch.device) -> torch.Tensor:
"""
Fetch data from `data_loader`, and then use `classifier` to collect classification results
Args:
data_loader (torch.utils.data.DataLoader): Data loader.
classifier (torch.nn.Module): A classifier.
device (torch.device)
Returns:
Classification results in shape (len(data_loader), :math:`|\mathcal{C}|`).
"""
training = classifier.training
classifier.eval()
all_outputs = []
with torch.no_grad():
for i, (images, target) in enumerate(data_loader):
images = images.to(device)
output = classifier(images)
all_outputs.append(output)
classifier.train(training)
return torch.cat(all_outputs, dim=0) |
First shift, then calculate log for numerical stability. | def shift_log(x, offset=1e-6):
"""
First shift, then calculate log for numerical stability.
"""
return torch.log(torch.clamp(x + offset, max=1.)) |
Set requires_grad=False for all the parameters to avoid unnecessary computations | def set_requires_grad(net, requires_grad=False):
"""
Set requires_grad=False for all the parameters to avoid unnecessary computations
"""
for param in net.parameters():
param.requires_grad = requires_grad |
Replace batch normalization statistics of the teacher model with those of the student model | def update_bn(model, ema_model):
"""
Replace batch normalization statistics of the teacher model with those of the student model
"""
for m2, m1 in zip(ema_model.named_modules(), model.named_modules()):
if ('bn' in m2[0]) and ('bn' in m1[0]):
bn2, bn1 = m2[1].state_dict(), m1[1].state_dict()
bn2['running_mean'].data.copy_(bn1['running_mean'].data)
bn2['running_var'].data.copy_(bn1['running_var'].data)
bn2['num_batches_tracked'].data.copy_(bn1['num_batches_tracked'].data) |
Exponential warm up function from `Temporal Ensembling for Semi-Supervised Learning
(ICLR 2017) <https://arxiv.org/abs/1610.02242>`_. | def sigmoid_warm_up(current_epoch, warm_up_epochs: int):
"""Exponential warm up function from `Temporal Ensembling for Semi-Supervised Learning
(ICLR 2017) <https://arxiv.org/abs/1610.02242>`_.
"""
assert warm_up_epochs >= 0
if warm_up_epochs == 0:
return 1.0
else:
current_epoch = np.clip(current_epoch, 0.0, warm_up_epochs)
process = 1.0 - current_epoch / warm_up_epochs
return float(np.exp(-5.0 * process * process)) |
Args:
amp_src (numpy.ndarray): amplitude component of the Fourier transform of source image
amp_trg (numpy.ndarray): amplitude component of the Fourier transform of target image
beta (int, optional): the size of the center region to be replaced. Default: 1
Returns:
amplitude component of the Fourier transform of source image
whose low-frequency component is replaced by that of the target image. | def low_freq_mutate(amp_src: np.ndarray, amp_trg: np.ndarray, beta: Optional[int] = 1):
"""
Args:
amp_src (numpy.ndarray): amplitude component of the Fourier transform of source image
amp_trg (numpy.ndarray): amplitude component of the Fourier transform of target image
beta (int, optional): the size of the center region to be replaced. Default: 1
Returns:
amplitude component of the Fourier transform of source image
whose low-frequency component is replaced by that of the target image.
"""
# Shift the zero-frequency component to the center of the spectrum.
a_src = np.fft.fftshift(amp_src, axes=(-2, -1))
a_trg = np.fft.fftshift(amp_trg, axes=(-2, -1))
# The low-frequency component includes
# the area where the horizontal and vertical distance from the center does not exceed beta
_, h, w = a_src.shape
c_h = np.floor(h / 2.0).astype(int)
c_w = np.floor(w / 2.0).astype(int)
h1 = c_h - beta
h2 = c_h + beta + 1
w1 = c_w - beta
w2 = c_w + beta + 1
# The low-frequency component of source amplitude is replaced by the target amplitude
a_src[:, h1:h2, w1:w2] = a_trg[:, h1:h2, w1:w2]
a_src = np.fft.ifftshift(a_src, axes=(-2, -1))
return a_src |
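A hedged sketch (not necessarily the library's exact pipeline) of how this amplitude swap is typically used for Fourier-based style transfer: replace the low-frequency amplitude of the source image with that of the target, then recombine with the source phase:
import numpy as np

def fourier_style_transfer(src_img: np.ndarray, trg_img: np.ndarray, beta: int = 1) -> np.ndarray:
    """src_img, trg_img: float arrays of shape (C, H, W); returns the stylized source image."""
    fft_src = np.fft.fft2(src_img, axes=(-2, -1))
    fft_trg = np.fft.fft2(trg_img, axes=(-2, -1))
    amp_src, pha_src = np.abs(fft_src), np.angle(fft_src)
    amp_trg = np.abs(fft_trg)
    amp_src = low_freq_mutate(amp_src, amp_trg, beta=beta)   # swap low-frequency amplitudes
    fft_mix = amp_src * np.exp(1j * pha_src)                 # recombine with the source phase
    return np.real(np.fft.ifft2(fft_mix, axes=(-2, -1)))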
PatchGAN classifier described in the original pix2pix paper.
It can classify whether 70×70 overlapping patches are real or fake.
Such a patch-level discriminator architecture has fewer parameters
than a full-image discriminator and can work on arbitrarily-sized images
in a fully convolutional fashion.
Args:
ndf (int): the number of filters in the first conv layer
input_nc (int): the number of channels in input images. Default: 3
norm (str): the type of normalization layers used in the network. Default: 'batch'
n_layers (int): the number of conv layers in the discriminator. Default: 3
init_type (str): the name of the initialization method. Choices include: ``normal`` |
``xavier`` | ``kaiming`` | ``orthogonal``. Default: 'normal'
init_gain (float): scaling factor for normal, xavier and orthogonal. Default: 0.02 | def patch(ndf, input_nc=3, norm='batch', n_layers=3, init_type='normal', init_gain=0.02):
"""
PatchGAN classifier described in the original pix2pix paper.
It can classify whether 70×70 overlapping patches are real or fake.
Such a patch-level discriminator architecture has fewer parameters
than a full-image discriminator and can work on arbitrarily-sized images
in a fully convolutional fashion.
Args:
ndf (int): the number of filters in the first conv layer
input_nc (int): the number of channels in input images. Default: 3
norm (str): the type of normalization layers used in the network. Default: 'batch'
n_layers (int): the number of conv layers in the discriminator. Default: 3
init_type (str): the name of the initialization method. Choices include: ``normal`` |
``xavier`` | ``kaiming`` | ``orthogonal``. Default: 'normal'
init_gain (float): scaling factor for normal, xavier and orthogonal. Default: 0.02
"""
norm_layer = get_norm_layer(norm_type=norm)
net = NLayerDiscriminator(input_nc, ndf, n_layers=n_layers, norm_layer=norm_layer)
init_weights(net, init_type, init_gain=init_gain)
return net |
1x1 PixelGAN discriminator can classify whether a pixel is real or not.
It encourages greater color diversity but has no effect on spatial statistics.
Args:
ndf (int): the number of filters in the first conv layer
input_nc (int): the number of channels in input images. Default: 3
norm (str): the type of normalization layers used in the network. Default: 'batch'
init_type (str): the name of the initialization method. Choices include: ``normal`` |
``xavier`` | ``kaiming`` | ``orthogonal``. Default: 'normal'
init_gain (float): scaling factor for normal, xavier and orthogonal. Default: 0.02 | def pixel(ndf, input_nc=3, norm='batch', init_type='normal', init_gain=0.02):
"""
1x1 PixelGAN discriminator can classify whether a pixel is real or not.
It encourages greater color diversity but has no effect on spatial statistics.
Args:
ndf (int): the number of filters in the first conv layer
input_nc (int): the number of channels in input images. Default: 3
norm (str): the type of normalization layers used in the network. Default: 'batch'
init_type (str): the name of the initialization method. Choices include: ``normal`` |
``xavier`` | ``kaiming`` | ``orthogonal``. Default: 'normal'
init_gain (float): scaling factor for normal, xavier and orthogonal. Default: 0.02
"""
norm_layer = get_norm_layer(norm_type=norm)
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
init_weights(net, init_type, init_gain=init_gain)
return net |
Resnet-based generator with 9 Resnet blocks.
Args:
ngf (int): the number of filters in the last conv layer
input_nc (int): the number of channels in input images. Default: 3
output_nc (int): the number of channels in output images. Default: 3
norm (str): the type of normalization layers used in the network. Default: 'batch'
use_dropout (bool): whether to use dropout. Default: False
init_type (str): the name of the initialization method. Choices include: ``normal`` |
``xavier`` | ``kaiming`` | ``orthogonal``. Default: 'normal'
init_gain (float): scaling factor for normal, xavier and orthogonal. Default: 0.02 | def resnet_9(ngf, input_nc=3, output_nc=3, norm='batch', use_dropout=False,
init_type='normal', init_gain=0.02):
"""
Resnet-based generator with 9 Resnet blocks.
Args:
ngf (int): the number of filters in the last conv layer
input_nc (int): the number of channels in input images. Default: 3
output_nc (int): the number of channels in output images. Default: 3
norm (str): the type of normalization layers used in the network. Default: 'batch'
use_dropout (bool): whether to use dropout. Default: False
init_type (str): the name of the initialization method. Choices include: ``normal`` |
``xavier`` | ``kaiming`` | ``orthogonal``. Default: 'normal'
init_gain (float): scaling factor for normal, xavier and orthogonal. Default: 0.02
"""
norm_layer = get_norm_layer(norm_type=norm)
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
init_weights(net, init_type, init_gain)
return net |
Resnet-based generator with 6 Resnet blocks.
Args:
ngf (int): the number of filters in the last conv layer
input_nc (int): the number of channels in input images. Default: 3
output_nc (int): the number of channels in output images. Default: 3
norm (str): the type of normalization layers used in the network. Default: 'batch'
use_dropout (bool): whether to use dropout. Default: False
init_type (str): the name of the initialization method. Choices include: ``normal`` |
``xavier`` | ``kaiming`` | ``orthogonal``. Default: 'normal'
init_gain (float): scaling factor for normal, xavier and orthogonal. Default: 0.02 | def resnet_6(ngf, input_nc=3, output_nc=3, norm='batch', use_dropout=False,
init_type='normal', init_gain=0.02):
"""
Resnet-based generator with 6 Resnet blocks.
Args:
ngf (int): the number of filters in the last conv layer
input_nc (int): the number of channels in input images. Default: 3
output_nc (int): the number of channels in output images. Default: 3
norm (str): the type of normalization layers used in the network. Default: 'batch'
use_dropout (bool): whether to use dropout. Default: False
init_type (str): the name of the initialization method. Choices include: ``normal`` |
``xavier`` | ``kaiming`` | ``orthogonal``. Default: 'normal'
init_gain (float): scaling factor for normal, xavier and orthogonal. Default: 0.02
"""
norm_layer = get_norm_layer(norm_type=norm)
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
init_weights(net, init_type, init_gain)
return net |
`U-Net <https://arxiv.org/abs/1505.04597>`_ generator for 256x256 input images.
The size of the input image should be a multiple of 256.
Args:
ngf (int): the number of filters in the last conv layer
input_nc (int): the number of channels in input images. Default: 3
output_nc (int): the number of channels in output images. Default: 3
norm (str): the type of normalization layers used in the network. Default: 'batch'
use_dropout (bool): whether to use dropout. Default: False
init_type (str): the name of the initialization method. Choices include: ``normal`` |
``xavier`` | ``kaiming`` | ``orthogonal``. Default: 'normal'
init_gain (float): scaling factor for normal, xavier and orthogonal. Default: 0.02 | def unet_256(ngf, input_nc=3, output_nc=3, norm='batch', use_dropout=False,
init_type='normal', init_gain=0.02):
"""
`U-Net <https://arxiv.org/abs/1505.04597>`_ generator for 256x256 input images.
The size of the input image should be a multiple of 256.
Args:
ngf (int): the number of filters in the last conv layer
input_nc (int): the number of channels in input images. Default: 3
output_nc (int): the number of channels in output images. Default: 3
norm (str): the type of normalization layers used in the network. Default: 'batch'
use_dropout (bool): whether to use dropout. Default: False
init_type (str): the name of the initialization method. Choices include: ``normal`` |
``xavier`` | ``kaiming`` | ``orthogonal``. Default: 'normal'
init_gain (float): scaling factor for normal, xavier and orthogonal. Default: 0.02
"""
norm_layer = get_norm_layer(norm_type=norm)
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
init_weights(net, init_type, init_gain)
return net |
`U-Net <https://arxiv.org/abs/1505.04597>`_ generator for 128x128 input images.
The size of the input image should be a multiple of 128.
Args:
ngf (int): the number of filters in the last conv layer
input_nc (int): the number of channels in input images. Default: 3
output_nc (int): the number of channels in output images. Default: 3
norm (str): the type of normalization layers used in the network. Default: 'batch'
use_dropout (bool): whether to use dropout. Default: False
init_type (str): the name of the initialization method. Choices include: ``normal`` |
``xavier`` | ``kaiming`` | ``orthogonal``. Default: 'normal'
init_gain (float): scaling factor for normal, xavier and orthogonal. Default: 0.02 | def unet_128(ngf, input_nc=3, output_nc=3, norm='batch', use_dropout=False,
init_type='normal', init_gain=0.02):
"""
`U-Net <https://arxiv.org/abs/1505.04597>`_ generator for 128x128 input images.
The size of the input image should be a multiple of 128.
Args:
ngf (int): the number of filters in the last conv layer
input_nc (int): the number of channels in input images. Default: 3
output_nc (int): the number of channels in output images. Default: 3
norm (str): the type of normalization layers used in the network. Default: 'batch'
use_dropout (bool): whether to use dropout. Default: False
init_type (str): the name of the initialization method. Choices include: ``normal`` |
``xavier`` | ``kaiming`` | ``orthogonal``. Default: 'normal'
init_gain (float): scaling factor for normal, xavier and orthogonal. Default: 0.02
"""
norm_layer = get_norm_layer(norm_type=norm)
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
init_weights(net, init_type, init_gain)
return net |
`U-Net <https://arxiv.org/abs/1505.04597>`_ generator for 32x32 input images
Args:
ngf (int): the number of filters in the last conv layer
input_nc (int): the number of channels in input images. Default: 3
output_nc (int): the number of channels in output images. Default: 3
norm (str): the type of normalization layers used in the network. Default: 'batch'
use_dropout (bool): whether to use dropout. Default: False
init_type (str): the name of the initialization method. Choices include: ``normal`` |
``xavier`` | ``kaiming`` | ``orthogonal``. Default: 'normal'
init_gain (float): scaling factor for normal, xavier and orthogonal. Default: 0.02 | def unet_32(ngf, input_nc=3, output_nc=3, norm='batch', use_dropout=False,
init_type='normal', init_gain=0.02):
"""
`U-Net <https://arxiv.org/abs/1505.04597>`_ generator for 32x32 input images
Args:
ngf (int): the number of filters in the last conv layer
input_nc (int): the number of channels in input images. Default: 3
output_nc (int): the number of channels in output images. Default: 3
norm (str): the type of normalization layers used in the network. Default: 'batch'
use_dropout (bool): whether to use dropout. Default: False
init_type (str): the name of the initialization method. Choices include: ``normal`` |
``xavier`` | ``kaiming`` | ``orthogonal``. Default: 'normal'
init_gain (float): scaling factor for normal, xavier and orthogonal. Default: 0.02
"""
norm_layer = get_norm_layer(norm_type=norm)
net = UnetGenerator(input_nc, output_nc, 5, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
init_weights(net, init_type, init_gain)
return net |
Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics. | def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
def norm_layer(x): return Identity()
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer |
Initialize network weights.
Args:
net (torch.nn.Module): network to be initialized
init_type (str): the name of an initialization method. Choices include: ``normal`` |
``xavier`` | ``kaiming`` | ``orthogonal``
init_gain (float): scaling factor for normal, xavier and orthogonal.
'normal' is used in the original CycleGAN paper. But xavier and kaiming might
work better for some applications. | def init_weights(net, init_type='normal', init_gain=0.02):
"""Initialize network weights.
Args:
net (torch.nn.Module): network to be initialized
init_type (str): the name of an initialization method. Choices include: ``normal`` |
``xavier`` | ``kaiming`` | ``orthogonal``
init_gain (float): scaling factor for normal, xavier and orthogonal.
'normal' is used in the original CycleGAN paper. But xavier and kaiming might
work better for some applications.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func) |
Set requires_grad=False for all the networks to avoid unnecessary computations | def set_requires_grad(net, requires_grad=False):
"""
Set requires_grad=False for all the networks to avoid unnecessary computations
"""
for param in net.parameters():
param.requires_grad = requires_grad |
Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.
Args:
tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):
The data to send to a given device.
device (:obj:`torch.device`):
The device to send the data to
Returns:
The same data structure as :obj:`tensor` with all tensors sent to the proper device. | def send_to_device(tensor, device):
"""
Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.
Args:
tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):
The data to send to a given device.
device (:obj:`torch.device`):
The device to send the data to
Returns:
The same data structure as :obj:`tensor` with all tensors sent to the proper device.
"""
if isinstance(tensor, (list, tuple)):
return type(tensor)(send_to_device(t, device) for t in tensor)
elif isinstance(tensor, dict):
return type(tensor)({k: send_to_device(v, device) for k, v in tensor.items()})
elif not hasattr(tensor, "to"):
return tensor
return tensor.to(device) |
concatenate multiple batches into one batch.
``tensors`` can be :class:`torch.Tensor`, List or Dict, but they must be the same data format. | def concatenate(tensors):
"""concatenate multiple batches into one batch.
``tensors`` can be :class:`torch.Tensor`, List or Dict, but they must be the same data format.
"""
if isinstance(tensors[0], torch.Tensor):
return torch.cat(tensors, dim=0)
elif isinstance(tensors[0], List):
ret = []
for i in range(len(tensors[0])):
ret.append(concatenate([t[i] for t in tensors]))
return ret
elif isinstance(tensors[0], Dict):
ret = dict()
for k in tensors[0].keys():
ret[k] = concatenate([t[k] for t in tensors])
return ret |
Calculate the :math:`\mathcal{A}`-distance, which is a measure for distribution discrepancy.
The definition is :math:`dist_\mathcal{A} = 2 (1-2\epsilon)`, where :math:`\epsilon` is the
test error of a classifier trained to discriminate the source from the target.
Args:
source_feature (tensor): features from source domain in shape :math:`(minibatch, F)`
target_feature (tensor): features from target domain in shape :math:`(minibatch, F)`
device (torch.device)
progress (bool): if True, displays the progress of training A-Net
training_epochs (int): the number of epochs when training the classifier
Returns:
:math:`\mathcal{A}`-distance | def calculate(source_feature: torch.Tensor, target_feature: torch.Tensor,
device, progress=True, training_epochs=10):
"""
Calculate the :math:`\mathcal{A}`-distance, which is a measure for distribution discrepancy.
The definition is :math:`dist_\mathcal{A} = 2 (1-2\epsilon)`, where :math:`\epsilon` is the
test error of a classifier trained to discriminate the source from the target.
Args:
source_feature (tensor): features from source domain in shape :math:`(minibatch, F)`
target_feature (tensor): features from target domain in shape :math:`(minibatch, F)`
device (torch.device)
progress (bool): if True, displays the progress of training A-Net
training_epochs (int): the number of epochs when training the classifier
Returns:
:math:`\mathcal{A}`-distance
"""
source_label = torch.ones((source_feature.shape[0], 1))
target_label = torch.zeros((target_feature.shape[0], 1))
feature = torch.cat([source_feature, target_feature], dim=0)
label = torch.cat([source_label, target_label], dim=0)
dataset = TensorDataset(feature, label)
length = len(dataset)
train_size = int(0.8 * length)
val_size = length - train_size
train_set, val_set = torch.utils.data.random_split(dataset, [train_size, val_size])
train_loader = DataLoader(train_set, batch_size=2, shuffle=True)
val_loader = DataLoader(val_set, batch_size=8, shuffle=False)
anet = ANet(feature.shape[1]).to(device)
optimizer = SGD(anet.parameters(), lr=0.01)
a_distance = 2.0
for epoch in range(training_epochs):
anet.train()
for (x, label) in train_loader:
x = x.to(device)
label = label.to(device)
anet.zero_grad()
y = anet(x)
loss = F.binary_cross_entropy(y, label)
loss.backward()
optimizer.step()
anet.eval()
meter = AverageMeter("accuracy", ":4.2f")
with torch.no_grad():
for (x, label) in val_loader:
x = x.to(device)
label = label.to(device)
y = anet(x)
acc = binary_accuracy(y, label)
meter.update(acc, x.shape[0])
error = 1 - meter.avg / 100
a_distance = 2 * (1 - 2 * error)
if progress:
print("epoch {} accuracy: {} A-dist: {}".format(epoch, meter.avg, a_distance))
return a_distance |
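A minimal usage sketch with placeholder features, assuming the surrounding module's ANet helper; in practice the features come from a shared backbone (e.g. via `collect_feature` below) evaluated on source and target data:
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
source_feature = torch.randn(100, 256)    # placeholder backbone features
target_feature = torch.randn(100, 256)
a_dist = calculate(source_feature, target_feature, device, progress=False, training_epochs=2)
print("A-distance:", a_dist)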
Visualize features from different domains using t-SNE.
Args:
source_feature (tensor): features from source domain in shape :math:`(minibatch, F)`
target_feature (tensor): features from target domain in shape :math:`(minibatch, F)`
filename (str): the file name to save t-SNE
source_color (str): the color of the source features. Default: 'r'
target_color (str): the color of the target features. Default: 'b' | def visualize(source_feature: torch.Tensor, target_feature: torch.Tensor,
filename: str, source_color='r', target_color='b'):
"""
Visualize features from different domains using t-SNE.
Args:
source_feature (tensor): features from source domain in shape :math:`(minibatch, F)`
target_feature (tensor): features from target domain in shape :math:`(minibatch, F)`
filename (str): the file name to save t-SNE
source_color (str): the color of the source features. Default: 'r'
target_color (str): the color of the target features. Default: 'b'
"""
source_feature = source_feature.numpy()
target_feature = target_feature.numpy()
features = np.concatenate([source_feature, target_feature], axis=0)
# map features to 2-d using TSNE
X_tsne = TSNE(n_components=2, random_state=33).fit_transform(features)
# domain labels, 1 represents source while 0 represents target
domains = np.concatenate((np.ones(len(source_feature)), np.zeros(len(target_feature))))
# visualize using matplotlib
fig, ax = plt.subplots(figsize=(10, 10))
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
plt.scatter(X_tsne[:, 0], X_tsne[:, 1], c=domains, cmap=col.ListedColormap([target_color, source_color]), s=20)
plt.xticks([])
plt.yticks([])
plt.savefig(filename) |
Fetch data from `data_loader`, and then use `feature_extractor` to collect features
Args:
data_loader (torch.utils.data.DataLoader): Data loader.
feature_extractor (torch.nn.Module): A feature extractor.
device (torch.device)
max_num_features (int): The max number of features to return
Returns:
Features in shape (min(len(data_loader), max_num_features * mini-batch size), :math:`|\mathcal{F}|`). | def collect_feature(data_loader: DataLoader, feature_extractor: nn.Module,
device: torch.device, max_num_features=None) -> torch.Tensor:
"""
Fetch data from `data_loader`, and then use `feature_extractor` to collect features
Args:
data_loader (torch.utils.data.DataLoader): Data loader.
feature_extractor (torch.nn.Module): A feature extractor.
device (torch.device)
max_num_features (int): The max number of features to return
Returns:
Features in shape (min(len(data_loader), max_num_features * mini-batch size), :math:`|\mathcal{F}|`).
"""
feature_extractor.eval()
all_features = []
with torch.no_grad():
for i, data in enumerate(tqdm.tqdm(data_loader)):
if max_num_features is not None and i >= max_num_features:
break
inputs = data[0].to(device)
feature = feature_extractor(inputs).cpu()
all_features.append(feature)
return torch.cat(all_features, dim=0) |
get predictions from score maps
heatmaps: numpy.ndarray([batch_size, num_joints, height, width]) | def get_max_preds(batch_heatmaps):
'''
get predictions from score maps
heatmaps: numpy.ndarray([batch_size, num_joints, height, width])
'''
assert isinstance(batch_heatmaps, np.ndarray), \
'batch_heatmaps should be numpy.ndarray'
assert batch_heatmaps.ndim == 4, 'batch_images should be 4-ndim'
batch_size = batch_heatmaps.shape[0]
num_joints = batch_heatmaps.shape[1]
width = batch_heatmaps.shape[3]
heatmaps_reshaped = batch_heatmaps.reshape((batch_size, num_joints, -1))
idx = np.argmax(heatmaps_reshaped, 2)
maxvals = np.amax(heatmaps_reshaped, 2)
maxvals = maxvals.reshape((batch_size, num_joints, 1))
idx = idx.reshape((batch_size, num_joints, 1))
preds = np.tile(idx, (1, 1, 2)).astype(np.float32)
preds[:, :, 0] = (preds[:, :, 0]) % width
preds[:, :, 1] = np.floor((preds[:, :, 1]) / width)
pred_mask = np.tile(np.greater(maxvals, 0.0), (1, 1, 2))
pred_mask = pred_mask.astype(np.float32)
preds *= pred_mask
return preds, maxvals |
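A minimal usage sketch: decode pixel coordinates and confidences from a batch of predicted heatmaps (random placeholders here):
import numpy as np

heatmaps = np.random.rand(4, 21, 64, 64).astype(np.float32)   # (batch, joints, H, W)
preds, maxvals = get_max_preds(heatmaps)
print(preds.shape, maxvals.shape)   # (4, 21, 2) x/y locations and (4, 21, 1) peak values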
Return percentage below threshold while ignoring values with a -1 | def dist_acc(dists, thr=0.5):
''' Return percentage below threshold while ignoring values with a -1 '''
dist_cal = np.not_equal(dists, -1)
num_dist_cal = dist_cal.sum()
if num_dist_cal > 0:
return np.less(dists[dist_cal], thr).sum() * 1.0 / num_dist_cal
else:
return -1 |
Calculate accuracy according to PCK,
but uses ground truth heatmap rather than x,y locations
First value to be returned is average accuracy across 'idxs',
followed by individual accuracies | def accuracy(output, target, hm_type='gaussian', thr=0.5):
'''
Calculate accuracy according to PCK,
but uses ground truth heatmap rather than x,y locations
First value to be returned is average accuracy across 'idxs',
followed by individual accuracies
'''
idx = list(range(output.shape[1]))
norm = 1.0
if hm_type == 'gaussian':
pred, _ = get_max_preds(output)
target, _ = get_max_preds(target)
h = output.shape[2]
w = output.shape[3]
norm = np.ones((pred.shape[0], 2)) * np.array([h, w]) / 10
dists = calc_dists(pred, target, norm)
acc = np.zeros(len(idx))
avg_acc = 0
cnt = 0
for i in range(len(idx)):
acc[i] = dist_acc(dists[idx[i]], thr)
if acc[i] >= 0:
avg_acc = avg_acc + acc[i]
cnt += 1
avg_acc = avg_acc / cnt if cnt != 0 else 0
return acc, avg_acc, cnt, pred |
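`calc_dists` is called above but not shown in this excerpt. A hedged sketch follows: the helper name is taken from the call above, and the body is an assumption based on the standard pose-estimation baseline implementation; it returns per-joint normalized distances, with -1 marking joints whose target location is invalid:
import numpy as np

def calc_dists(preds, target, normalize):
    preds = preds.astype(np.float32)
    target = target.astype(np.float32)
    dists = np.zeros((preds.shape[1], preds.shape[0]))       # (num_joints, batch_size)
    for n in range(preds.shape[0]):
        for c in range(preds.shape[1]):
            if target[n, c, 0] > 1 and target[n, c, 1] > 1:  # valid target location
                dists[c, n] = np.linalg.norm(preds[n, c] / normalize[n] - target[n, c] / normalize[n])
            else:
                dists[c, n] = -1
    return dists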
Randomly choose one instance for each person id, these instances will not be selected again | def unique_sample(ids_dict, num):
"""Randomly choose one instance for each person id, these instances will not be selected again"""
mask = np.zeros(num, dtype=bool)  # np.bool is removed in recent NumPy releases; the builtin bool behaves the same
for _, indices in ids_dict.items():
i = np.random.choice(indices)
mask[i] = True
return mask |
Compute Cumulative Matching Characteristics (CMC) | def cmc(dist_mat, query_ids, gallery_ids, query_cams, gallery_cams, topk=100, separate_camera_set=False,
single_gallery_shot=False, first_match_break=False):
"""Compute Cumulative Matching Characteristics (CMC)"""
dist_mat = dist_mat.cpu().numpy()
m, n = dist_mat.shape
query_ids = np.asarray(query_ids)
gallery_ids = np.asarray(gallery_ids)
query_cams = np.asarray(query_cams)
gallery_cams = np.asarray(gallery_cams)
# Sort and find correct matches
indices = np.argsort(dist_mat, axis=1)
matches = (gallery_ids[indices] == query_ids[:, np.newaxis])
# Compute CMC for each query
ret = np.zeros(topk)
num_valid_queries = 0
for i in range(m):
# Filter out the same id and same camera
valid = ((gallery_ids[indices[i]] != query_ids[i]) |
(gallery_cams[indices[i]] != query_cams[i]))
if separate_camera_set:
# Filter out samples from same camera
valid &= (gallery_cams[indices[i]] != query_cams[i])
if not np.any(matches[i, valid]): continue
if single_gallery_shot:
repeat = 10
gids = gallery_ids[indices[i][valid]]
inds = np.where(valid)[0]
ids_dict = defaultdict(list)
for j, x in zip(inds, gids):
ids_dict[x].append(j)
else:
repeat = 1
for _ in range(repeat):
if single_gallery_shot:
# Randomly choose one instance for each id
sampled = (valid & unique_sample(ids_dict, len(valid)))
index = np.nonzero(matches[i, sampled])[0]
else:
index = np.nonzero(matches[i, valid])[0]
delta = 1. / (len(index) * repeat)
for j, k in enumerate(index):
if k - j >= topk: break
if first_match_break:
ret[k - j] += 1
break
ret[k - j] += delta
num_valid_queries += 1
if num_valid_queries == 0:
raise RuntimeError("No valid query")
return ret.cumsum() / num_valid_queries |
Compute mean average precision (mAP) | def mean_ap(dist_mat, query_ids, gallery_ids, query_cams, gallery_cams):
"""Compute mean average precision (mAP)"""
dist_mat = dist_mat.cpu().numpy()
m, n = dist_mat.shape
query_ids = np.asarray(query_ids)
gallery_ids = np.asarray(gallery_ids)
query_cams = np.asarray(query_cams)
gallery_cams = np.asarray(gallery_cams)
# Sort and find correct matches
indices = np.argsort(dist_mat, axis=1)
matches = (gallery_ids[indices] == query_ids[:, np.newaxis])
# Compute AP for each query
aps = []
for i in range(m):
# Filter out the same id and same camera
valid = ((gallery_ids[indices[i]] != query_ids[i]) |
(gallery_cams[indices[i]] != query_cams[i]))
y_true = matches[i, valid]
y_score = -dist_mat[i][indices[i]][valid]
if not np.any(y_true): continue
aps.append(average_precision_score(y_true, y_score))
if len(aps) == 0:
raise RuntimeError("No valid query")
return np.mean(aps) |
Perform re-ranking with distance matrix between query and gallery images `q_g_dist`, distance matrix between
query and query images `q_q_dist` and distance matrix between gallery and gallery images `g_g_dist`. | def re_ranking(q_g_dist, q_q_dist, g_g_dist, k1=20, k2=6, lambda_value=0.3):
"""Perform re-ranking with distance matrix between query and gallery images `q_g_dist`, distance matrix between
query and query images `q_q_dist` and distance matrix between gallery and gallery images `g_g_dist`.
"""
q_g_dist = q_g_dist.cpu().numpy()
q_q_dist = q_q_dist.cpu().numpy()
g_g_dist = g_g_dist.cpu().numpy()
original_dist = np.concatenate(
[np.concatenate([q_q_dist, q_g_dist], axis=1),
np.concatenate([q_g_dist.T, g_g_dist], axis=1)],
axis=0)
original_dist = np.power(original_dist, 2).astype(np.float32)
original_dist = np.transpose(1. * original_dist / np.max(original_dist, axis=0))
V = np.zeros_like(original_dist).astype(np.float32)
initial_rank = np.argsort(original_dist).astype(np.int32)
query_num = q_g_dist.shape[0]
gallery_num = q_g_dist.shape[0] + q_g_dist.shape[1]
all_num = gallery_num
for i in range(all_num):
# k-reciprocal neighbors
forward_k_neigh_index = initial_rank[i, :k1 + 1]
backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]
fi = np.where(backward_k_neigh_index == i)[0]
k_reciprocal_index = forward_k_neigh_index[fi]
k_reciprocal_expansion_index = k_reciprocal_index
for j in range(len(k_reciprocal_index)):
candidate = k_reciprocal_index[j]
candidate_forward_k_neigh_index = initial_rank[candidate, :int(np.around(k1 / 2.)) + 1]
candidate_backward_k_neigh_index = initial_rank[candidate_forward_k_neigh_index,
:int(np.around(k1 / 2.)) + 1]
fi_candidate = np.where(candidate_backward_k_neigh_index == candidate)[0]
candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate]
if len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > 2. / 3 * len(
candidate_k_reciprocal_index):
k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index, candidate_k_reciprocal_index)
k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)
weight = np.exp(-original_dist[i, k_reciprocal_expansion_index])
V[i, k_reciprocal_expansion_index] = 1. * weight / np.sum(weight)
original_dist = original_dist[:query_num, ]
if k2 != 1:
V_qe = np.zeros_like(V, dtype=np.float32)
for i in range(all_num):
V_qe[i, :] = np.mean(V[initial_rank[i, :k2], :], axis=0)
V = V_qe
del V_qe
del initial_rank
invIndex = []
for i in range(gallery_num):
invIndex.append(np.where(V[:, i] != 0)[0])
jaccard_dist = np.zeros_like(original_dist, dtype=np.float32)
for i in range(query_num):
temp_min = np.zeros(shape=[1, gallery_num], dtype=np.float32)
indNonZero = np.where(V[i, :] != 0)[0]
indImages = [invIndex[ind] for ind in indNonZero]
for j in range(len(indNonZero)):
temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(V[i, indNonZero[j]],
V[indImages[j], indNonZero[j]])
jaccard_dist[i] = 1 - temp_min / (2. - temp_min)
final_dist = jaccard_dist * (1 - lambda_value) + original_dist * lambda_value
del original_dist
del V
del jaccard_dist
final_dist = final_dist[:query_num, query_num:]
return final_dist |
Extract feature for person ReID. If `normalize` is True, `cosine` distance will be employed as distance
metric, otherwise `euclidean` distance. | def extract_reid_feature(data_loader, model, device, normalize, print_freq=200):
"""Extract feature for person ReID. If `normalize` is True, `cosine` distance will be employed as distance
metric, otherwise `euclidean` distance.
"""
batch_time = AverageMeter('Time', ':6.3f')
progress = ProgressMeter(
len(data_loader),
[batch_time],
prefix='Collect feature: ')
# switch to eval mode
model.eval()
feature_dict = dict()
with torch.no_grad():
end = time.time()
for i, (images_batch, filenames_batch, _, _) in enumerate(data_loader):
images_batch = images_batch.to(device)
features_batch = model(images_batch)
if normalize:
features_batch = F.normalize(features_batch)
for filename, feature in zip(filenames_batch, features_batch):
feature_dict[filename] = feature
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq == 0:
progress.display(i)
return feature_dict |
Compute pairwise distance between two sets of features | def pairwise_distance(feature_dict, query, gallery):
"""Compute pairwise distance between two sets of features"""
# concat features and convert to pytorch tensor
# we compute the pairwise distance matrix on cpu because it may require a large amount of GPU memory; if you are using
# a gpu with a larger capacity, it's faster to calculate on gpu
x = torch.cat([feature_dict[f].unsqueeze(0) for f, _, _ in query], dim=0).cpu()
y = torch.cat([feature_dict[f].unsqueeze(0) for f, _, _ in gallery], dim=0).cpu()
m, n = x.size(0), y.size(0)
# flatten
x = x.view(m, -1)
y = y.view(n, -1)
# compute dist_mat
dist_mat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(m, n) + \
torch.pow(y, 2).sum(dim=1, keepdim=True).expand(n, m).t() - \
2 * torch.matmul(x, y.t())
return dist_mat |
Compute CMC score, mAP and return | def evaluate_all(dist_mat, query, gallery, cmc_topk=(1, 5, 10), cmc_flag=False):
"""Compute CMC score, mAP and return"""
query_ids = [pid for _, pid, _ in query]
gallery_ids = [pid for _, pid, _ in gallery]
query_cams = [cid for _, _, cid in query]
gallery_cams = [cid for _, _, cid in gallery]
# Compute mean AP
mAP = mean_ap(dist_mat, query_ids, gallery_ids, query_cams, gallery_cams)
print('Mean AP: {:4.1%}'.format(mAP))
if not cmc_flag:
return mAP
cmc_configs = {
'config': dict(separate_camera_set=False, single_gallery_shot=False, first_match_break=True)
}
cmc_scores = {name: cmc(dist_mat, query_ids, gallery_ids, query_cams, gallery_cams, **params) for name, params in
cmc_configs.items()}
print('CMC Scores:')
for k in cmc_topk:
print(' top-{:<4}{:12.1%}'.format(k, cmc_scores['config'][k - 1]))
return cmc_scores['config'][0], mAP |
Visualize ranked results. We first compute the pairwise distance between query images and gallery images. Then for
every query image, the `topk` gallery images with the least distance to the given query image are selected. We plot the
query image and the selected gallery images together. A green border denotes a match, and a red one denotes a mis-match. | def visualize_ranked_results(data_loader, model, query, gallery, device, visualize_dir, criterion='cosine',
rerank=False, width=128, height=256, topk=10):
"""Visualize ranker results. We first compute pair-wise distance between query images and gallery images. Then for
every query image, `topk` gallery images with least distance between given query image are selected. We plot the
query image and selected gallery images together. A green border denotes a match, and a red one denotes a mis-match.
"""
assert criterion in ['cosine', 'euclidean']
normalize = (criterion == 'cosine')
# compute pairwise distance matrix
feature_dict = extract_reid_feature(data_loader, model, device, normalize)
dist_mat = pairwise_distance(feature_dict, query, gallery)
if rerank:
dist_mat_query = pairwise_distance(feature_dict, query, query)
dist_mat_gallery = pairwise_distance(feature_dict, gallery, gallery)
dist_mat = re_ranking(dist_mat, dist_mat_query, dist_mat_gallery)
# make dir if not exists
os.makedirs(visualize_dir, exist_ok=True)
dist_mat = dist_mat.numpy()
num_q, num_g = dist_mat.shape
print('query images: {}'.format(num_q))
print('gallery images: {}'.format(num_g))
assert num_q == len(query)
assert num_g == len(gallery)
# start visualizing
import cv2
sorted_idxes = np.argsort(dist_mat, axis=1)
for q_idx in range(num_q):
q_img_path, q_pid, q_cid = query[q_idx]
q_img = cv2.imread(q_img_path)
q_img = cv2.resize(q_img, (width, height))
# use black border to denote query image
q_img = cv2.copyMakeBorder(
q_img, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=(0, 0, 0)
)
q_img = cv2.resize(q_img, (width, height))
num_cols = topk + 1
grid_img = 255 * np.ones(
(height, num_cols * width + topk * GRID_SPACING + QUERY_EXTRA_SPACING, 3), dtype=np.uint8
)
grid_img[:, :width, :] = q_img
# collect top-k gallery images with smallest distance
rank_idx = 1
for g_idx in sorted_idxes[q_idx, :]:
g_img_path, g_pid, g_cid = gallery[g_idx]
invalid = (q_pid == g_pid) & (q_cid == g_cid)
if not invalid:
matched = (g_pid == q_pid)
border_color = GREEN if matched else RED
g_img = cv2.imread(g_img_path)
g_img = cv2.resize(g_img, (width, height))
g_img = cv2.copyMakeBorder(
g_img, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=border_color
)
g_img = cv2.resize(g_img, (width, height))
start = rank_idx * width + rank_idx * GRID_SPACING + QUERY_EXTRA_SPACING
end = (rank_idx + 1) * width + rank_idx * GRID_SPACING + QUERY_EXTRA_SPACING
grid_img[:, start:end, :] = g_img
rank_idx += 1
if rank_idx > topk:
break
save_path = osp.basename(osp.splitext(q_img_path)[0])
cv2.imwrite(osp.join(visualize_dir, save_path + '.jpg'), grid_img)
if (q_idx + 1) % 100 == 0:
print('Visualize {}/{}'.format(q_idx + 1, num_q))
print('Visualization process is done, ranked results are saved to {}'.format(visualize_dir)) |
Computes the accuracy for binary classification | def binary_accuracy(output: torch.Tensor, target: torch.Tensor) -> float:
"""Computes the accuracy for binary classification"""
with torch.no_grad():
batch_size = target.size(0)
pred = (output >= 0.5).float().t().view(-1)
correct = pred.eq(target.view(-1)).float().sum()
correct.mul_(100. / batch_size)
return correct |
Computes the accuracy over the k top predictions for the specified values of k
Args:
output (tensor): Classification outputs, :math:`(N, C)` where `C = number of classes`
target (tensor): :math:`(N)` where each value is :math:`0 \leq \text{targets}[i] \leq C-1`
topk (sequence[int]): A list of top-N number.
Returns:
Top-N accuracies (N :math:`\in` topK). | def accuracy(output, target, topk=(1,)):
r"""
Computes the accuracy over the k top predictions for the specified values of k
Args:
output (tensor): Classification outputs, :math:`(N, C)` where `C = number of classes`
target (tensor): :math:`(N)` where each value is :math:`0 \leq \text{targets}[i] \leq C-1`
topk (sequence[int]): A list of top-N number.
Returns:
Top-N accuracies (N :math:`\in` topK).
"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target[None])
res = []
for k in topk:
correct_k = correct[:k].flatten().sum(dtype=torch.float32)
res.append(correct_k * (100.0 / batch_size))
return res |
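A minimal usage sketch with random outputs; the returned values are percentages:
import torch

output = torch.randn(8, 100)           # (N, C) classification outputs
target = torch.randint(0, 100, (8,))   # ground-truth labels
top1, top5 = accuracy(output, target, topk=(1, 5))
print(top1.item(), top5.item())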
Download file from internet url link.
Args:
root (str): The directory to put downloaded files.
file_name (str): The name of the unzipped file.
archive_name (str): The name of the archive (zipped file) to download.
url_link (str): The URL link to download data.
.. note::
If `file_name` already exists under path `root`, then it is not downloaded again.
Else `archive_name` will be downloaded from `url_link` and extracted to `file_name`. | def download(root: str, file_name: str, archive_name: str, url_link: str):
"""
Download file from internet url link.
Args:
root (str): The directory to put downloaded files.
file_name (str): The name of the unzipped file.
archive_name (str): The name of the archive (zipped file) to download.
url_link (str): The URL link to download data.
.. note::
If `file_name` already exists under path `root`, then it is not downloaded again.
Else `archive_name` will be downloaded from `url_link` and extracted to `file_name`.
"""
if not os.path.exists(os.path.join(root, file_name)):
print("Downloading {}".format(file_name))
# if os.path.exists(os.path.join(root, archive_name)):
# os.remove(os.path.join(root, archive_name))
try:
download_and_extract_archive(url_link, download_root=root, filename=archive_name, remove_finished=False)
except Exception:
print("Fail to download {} from url link {}".format(archive_name, url_link))
print('Please check your internet connection. '
"Simply trying again may be fine.")
exit(0) |
Check whether `file_name` exists under directory `root`. | def check_exits(root: str, file_name: str):
"""Check whether `file_name` exists under directory `root`. """
if not os.path.exists(os.path.join(root, file_name)):
print("Dataset directory {} not found under {}".format(file_name, root))
exit(-1) |
Read data from file and convert each line into an element in the list | def read_list_from_file(file_name: str) -> List[str]:
"""Read data from file and convert each line into an element in the list"""
result = []
with open(file_name, "r") as f:
for line in f.readlines():
result.append(line.strip())
return result |
Project 3D coordinates into image space. | def projectPoints(xyz, K):
""" Project 3D coordinates into image space. """
xyz = np.array(xyz)
K = np.array(K)
uv = np.matmul(K, xyz.T).T
return uv[:, :2] / uv[:, -1:] |
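A minimal usage sketch with a hypothetical pinhole intrinsic matrix K (the focal lengths and principal point below are made up):
import numpy as np

K = np.array([[500., 0., 160.],
              [0., 500., 120.],
              [0., 0., 1.]])
xyz = np.array([[0.1, -0.05, 0.6]])   # one 3D point in camera coordinates
print(projectPoints(xyz, K))          # its (u, v) pixel location, shape (1, 2)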
Hardcoded size of the datasets. | def db_size(set_name):
""" Hardcoded size of the datasets. """
if set_name == 'training':
return 32560 # number of unique samples (they exist in multiple 'versions')
elif set_name == 'evaluation':
return 3960
else:
assert 0, 'Invalid choice.' |
Generate heatmap for joints.
Args:
joints: (K, 2) keypoint locations in image coordinates
joints_vis: (K, 1) keypoint visibility
heatmap_size: W, H of the output heatmaps
sigma: standard deviation of the Gaussian kernel
image_size: W, H of the input image
Returns: | def generate_target(joints, joints_vis, heatmap_size, sigma, image_size):
"""Generate heatamap for joints.
Args:
joints: (K, 2)
joints_vis: (K, 1)
heatmap_size: W, H
sigma:
image_size:
Returns:
"""
num_joints = joints.shape[0]
target_weight = np.ones((num_joints, 1), dtype=np.float32)
target_weight[:, 0] = joints_vis[:, 0]
target = np.zeros((num_joints,
heatmap_size[1],
heatmap_size[0]),
dtype=np.float32)
tmp_size = sigma * 3
image_size = np.array(image_size)
heatmap_size = np.array(heatmap_size)
for joint_id in range(num_joints):
feat_stride = image_size / heatmap_size
mu_x = int(joints[joint_id][0] / feat_stride[0] + 0.5)
mu_y = int(joints[joint_id][1] / feat_stride[1] + 0.5)
# Check that any part of the gaussian is in-bounds
ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
if mu_x >= heatmap_size[0] or mu_y >= heatmap_size[1] \
or mu_x < 0 or mu_y < 0:
# If not, just return the image as is
target_weight[joint_id] = 0
continue
# Generate gaussian
size = 2 * tmp_size + 1
x = np.arange(0, size, 1, np.float32)
y = x[:, np.newaxis]
x0 = y0 = size // 2
# The gaussian is not normalized, we want the center value to equal 1
g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
# Usable gaussian range
g_x = max(0, -ul[0]), min(br[0], heatmap_size[0]) - ul[0]
g_y = max(0, -ul[1]), min(br[1], heatmap_size[1]) - ul[1]
# Image range
img_x = max(0, ul[0]), min(br[0], heatmap_size[0])
img_y = max(0, ul[1]), min(br[1], heatmap_size[1])
v = target_weight[joint_id]
if v > 0.5:
target[joint_id][img_y[0]:img_y[1], img_x[0]:img_x[1]] = \
g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
return target, target_weight |
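A minimal usage sketch: build Gaussian heatmaps for 21 keypoints of a 256x256 image at 64x64 heatmap resolution (all values below are placeholders):
import numpy as np

joints = np.random.rand(21, 2) * 256             # 2D keypoint locations in the image
joints_vis = np.ones((21, 1), dtype=np.float32)  # all keypoints visible
target, target_weight = generate_target(joints, joints_vis, heatmap_size=(64, 64),
                                        sigma=2, image_size=(256, 256))
print(target.shape, target_weight.shape)         # (21, 64, 64), (21, 1)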
Convert 2D keypoints to 3D keypoints | def keypoint2d_to_3d(keypoint2d: np.ndarray, intrinsic_matrix: np.ndarray, Zc: np.ndarray):
"""Convert 2D keypoints to 3D keypoints"""
uv1 = np.concatenate([np.copy(keypoint2d), np.ones((keypoint2d.shape[0], 1))], axis=1).T * Zc # 3 x NUM_KEYPOINTS
xyz = np.matmul(np.linalg.inv(intrinsic_matrix), uv1).T # NUM_KEYPOINTS x 3
return xyz |
Convert 3D keypoints to 2D keypoints | def keypoint3d_to_2d(keypoint3d: np.ndarray, intrinsic_matrix: np.ndarray):
"""Convert 3D keypoints to 2D keypoints"""
keypoint2d = np.matmul(intrinsic_matrix, keypoint3d.T).T # NUM_KEYPOINTS x 3
keypoint2d = keypoint2d[:, :2] / keypoint2d[:, 2:3] # NUM_KEYPOINTS x 2
return keypoint2d |
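A minimal round-trip sketch (hypothetical intrinsics): lift 2D keypoints to 3D with known per-keypoint depths, then re-project them:
import numpy as np

intrinsic_matrix = np.array([[500., 0., 160.],
                             [0., 500., 120.],
                             [0., 0., 1.]])
keypoint2d = np.array([[100., 80.], [200., 150.]])
Zc = np.array([0.5, 0.7])                              # depth of each keypoint
xyz = keypoint2d_to_3d(keypoint2d, intrinsic_matrix, Zc)
print(keypoint3d_to_2d(xyz, intrinsic_matrix))         # recovers keypoint2d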
Change `box` to a square box.
The side width of the square box will be `scale` * max(w, h),
where w and h are the width and height of the original box | def scale_box(box, image_width, image_height, scale):
"""
Change `box` to a square box.
The side width of the square box will be `scale` * max(w, h),
where w and h are the width and height of the original box.
"""
left, upper, right, lower = box
center_x, center_y = (left + right) / 2, (upper + lower) / 2
w, h = right - left, lower - upper
side_with = min(round(scale * max(w, h)), min(image_width, image_height))
left = round(center_x - side_with / 2)
right = left + side_with - 1
upper = round(center_y - side_with / 2)
lower = upper + side_with - 1
if left < 0:
left = 0
right = side_with - 1
if right >= image_width:
right = image_width - 1
left = image_width - side_with
if upper < 0:
upper = 0
lower = side_with -1
if lower >= image_height:
lower = image_height - 1
upper = image_height - side_with
return left, upper, right, lower |
Get the bounding box for keypoints | def get_bounding_box(keypoint2d: np.array):
"""Get the bounding box for keypoints"""
left = np.min(keypoint2d[:, 0])
right = np.max(keypoint2d[:, 0])
upper = np.min(keypoint2d[:, 1])
lower = np.max(keypoint2d[:, 1])
return left, upper, right, lower |
Load Pascal VOC detection annotations to Detectron2 format.
Args:
dirname: Contain "Annotations", "ImageSets", "JPEGImages"
split (str): one of "train", "test", "val", "trainval"
class_names: list or tuple of class names | def load_voc_instances(dirname: str, split: str, class_names, ext='.jpg', bbox_zero_based=False):
"""
Load Pascal VOC detection annotations to Detectron2 format.
Args:
dirname: Contain "Annotations", "ImageSets", "JPEGImages"
split (str): one of "train", "test", "val", "trainval"
class_names: list or tuple of class names
"""
with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f:
        fileids = np.loadtxt(f, dtype=str)
    # Needs to read many small annotation files; it makes sense to keep them local
annotation_dirname = PathManager.get_local_path(os.path.join(dirname, "Annotations/"))
dicts = []
skip_classes = set()
for fileid in fileids:
anno_file = os.path.join(annotation_dirname, fileid + ".xml")
jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ext)
with PathManager.open(anno_file) as f:
tree = ET.parse(f)
r = {
"file_name": jpeg_file,
"image_id": fileid,
"height": int(tree.findall("./size/height")[0].text),
"width": int(tree.findall("./size/width")[0].text),
}
instances = []
for obj in tree.findall("object"):
cls = obj.find("name").text
if cls not in class_names:
skip_classes.add(cls)
continue
# We include "difficult" samples in training.
# Based on limited experiments, they don't hurt accuracy.
# difficult = int(obj.find("difficult").text)
# if difficult == 1:
# continue
bbox = obj.find("bndbox")
bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]
# Original annotations are integers in the range [1, W or H]
# Assuming they mean 1-based pixel indices (inclusive),
# a box with annotation (xmin=1, xmax=W) covers the whole image.
# In coordinate space this is represented by (xmin=0, xmax=W)
if bbox_zero_based is False:
bbox[0] -= 1.0
bbox[1] -= 1.0
instances.append(
{"category_id": class_names.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS}
)
r["annotations"] = instances
dicts.append(r)
print("Skip classes:", list(skip_classes))
return dicts |
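A hedged sketch of hooking this loader into Detectron2's dataset registry (the dataset name, path and class list are assumptions for illustration):

from detectron2.data import DatasetCatalog, MetadataCatalog

CLASS_NAMES = ("bicycle", "bird", "car", "person")       # illustrative subset
DatasetCatalog.register(
    "voc_2007_trainval_custom",
    lambda: load_voc_instances("datasets/VOC2007", "trainval", CLASS_NAMES),
)
MetadataCatalog.get("voc_2007_trainval_custom").set(thing_classes=list(CLASS_NAMES))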
Convert a dataset into its open-set version.
In other words, samples that belong to `private_classes` will be relabeled as "unknown", while samples outside both `public_classes` and `private_classes` will be discarded.
Be aware that `open_set` will change the label index of each category.
Args:
dataset_class (class): Dataset class. Only subclass of ``ImageList`` can be open-set.
public_classes (sequence[str]): A sequence of which categories need to be kept in the open-set dataset. Each element of `public_classes` must belong to the `classes` list of `dataset_class`.
private_classes (sequence[str], optional): A sequence of which categories need to be marked as "unknown" in the open-set dataset. Each element of `private_classes` must belong to the `classes` list of `dataset_class`. Default: ().
Examples::
>>> public_classes = ['back_pack', 'bike', 'calculator', 'headphones', 'keyboard']
>>> private_classes = ['laptop_computer', 'monitor', 'mouse', 'mug', 'projector']
>>> # create an open-set dataset class which has classes
>>> # 'back_pack', 'bike', 'calculator', 'headphones', 'keyboard' and 'unknown'.
>>> OpenSetOffice31 = open_set(Office31, public_classes, private_classes)
>>> # create an instance of the open-set dataset
>>> dataset = OpenSetOffice31(root="data/office31", task="A")
private_classes: Optional[Sequence[str]] = ()) -> ClassVar:
"""
Convert a dataset into its open-set version.
    In other words, samples that belong to `private_classes` will be relabeled as "unknown", \
    while samples outside both `public_classes` and `private_classes` will be discarded.
    Be aware that `open_set` will change the label index of each category.
Args:
dataset_class (class): Dataset class. Only subclass of ``ImageList`` can be open-set.
public_classes (sequence[str]): A sequence of which categories need to be kept in the open-set dataset.\
Each element of `public_classes` must belong to the `classes` list of `dataset_class`.
private_classes (sequence[str], optional): A sequence of which categories need to be marked as "unknown" \
in the open-set dataset. Each element of `private_classes` must belong to the `classes` list of \
`dataset_class`. Default: ().
Examples::
>>> public_classes = ['back_pack', 'bike', 'calculator', 'headphones', 'keyboard']
>>> private_classes = ['laptop_computer', 'monitor', 'mouse', 'mug', 'projector']
    >>> # create an open-set dataset class which has classes
>>> # 'back_pack', 'bike', 'calculator', 'headphones', 'keyboard' and 'unknown'.
>>> OpenSetOffice31 = open_set(Office31, public_classes, private_classes)
>>> # create an instance of the open-set dataset
    >>> dataset = OpenSetOffice31(root="data/office31", task="A")
"""
if not (issubclass(dataset_class, ImageList)):
raise Exception("Only subclass of ImageList can be openset")
class OpenSetDataset(dataset_class):
def __init__(self, **kwargs):
super(OpenSetDataset, self).__init__(**kwargs)
samples = []
all_classes = list(deepcopy(public_classes)) + ["unknown"]
for (path, label) in self.samples:
class_name = self.classes[label]
if class_name in public_classes:
samples.append((path, all_classes.index(class_name)))
elif class_name in private_classes:
samples.append((path, all_classes.index("unknown")))
self.samples = samples
self.classes = all_classes
self.class_to_idx = {cls: idx
for idx, cls in enumerate(self.classes)}
return OpenSetDataset |
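To make the relabeling concrete, a hedged illustration of the resulting label space (the root/task values are assumptions):

public_classes = ['back_pack', 'bike', 'calculator', 'headphones', 'keyboard']
private_classes = ['laptop_computer', 'monitor', 'mouse', 'mug', 'projector']
OpenSetOffice31 = open_set(Office31, public_classes, private_classes)
dataset = OpenSetOffice31(root="data/office31", task="A")
print(dataset.classes)                    # the five public classes followed by "unknown"
print(dataset.class_to_idx["unknown"])    # 5, i.e. len(public_classes)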
Default open-set used in some paper.
Args:
dataset_class (class): Dataset class. Currently, dataset_class must be one of
:class:`~tllib.vision.datasets.office31.Office31`, :class:`~tllib.vision.datasets.officehome.OfficeHome`,
:class:`~tllib.vision.datasets.visda2017.VisDA2017`,
source (bool): Whether the dataset is used for source domain or not. | def default_open_set(dataset_class: ClassVar, source: bool) -> ClassVar:
"""
Default open-set used in some paper.
Args:
dataset_class (class): Dataset class. Currently, dataset_class must be one of
:class:`~tllib.vision.datasets.office31.Office31`, :class:`~tllib.vision.datasets.officehome.OfficeHome`,
:class:`~tllib.vision.datasets.visda2017.VisDA2017`,
source (bool): Whether the dataset is used for source domain or not.
"""
if dataset_class == Office31:
public_classes = Office31.CLASSES[:20]
if source:
private_classes = ()
else:
private_classes = Office31.CLASSES[20:]
elif dataset_class == OfficeHome:
public_classes = sorted(OfficeHome.CLASSES)[:25]
if source:
private_classes = ()
else:
private_classes = sorted(OfficeHome.CLASSES)[25:]
elif dataset_class == VisDA2017:
public_classes = ('bicycle', 'bus', 'car', 'motorcycle', 'train', 'truck')
if source:
private_classes = ()
else:
private_classes = ('aeroplane', 'horse', 'knife', 'person', 'plant', 'skateboard')
else:
raise NotImplementedError("Unknown openset domain adaptation dataset: {}".format(dataset_class.__name__))
return open_set(dataset_class, public_classes, private_classes) |
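A hedged usage sketch for the standard open-set Office-31 setting (Amazon -> Webcam; paths and task names are assumptions):

OpenSetOffice31Source = default_open_set(Office31, source=True)
OpenSetOffice31Target = default_open_set(Office31, source=False)
source_dataset = OpenSetOffice31Source(root="data/office31", task="A")   # samples from the 20 shared classes only
target_dataset = OpenSetOffice31Target(root="data/office31", task="W")   # shared classes plus samples relabeled as "unknown"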
Convert a dataset into its partial version.
In other words, samples that do not belong to `partial_classes` will be discarded.
Yet `partial` will not change the label space of `dataset_class`.
Args:
dataset_class (class): Dataset class. Only subclass of ``ImageList`` can be partial.
partial_classes (sequence[str]): A sequence of which categories need to be kept in the partial dataset. Each element of `partial_classes` must belong to the `classes` list of `dataset_class`.
Examples::
>>> partial_classes = ['back_pack', 'bike', 'calculator', 'headphones', 'keyboard']
>>> # create a partial dataset class
>>> PartialOffice31 = partial(Office31, partial_classes)
>>> # create an instance of the partial dataset
>>> dataset = PartialOffice31(root="data/office31", task="A")
"""
Convert a dataset into its partial version.
    In other words, samples that do not belong to `partial_classes` will be discarded.
Yet `partial` will not change the label space of `dataset_class`.
Args:
dataset_class (class): Dataset class. Only subclass of ``ImageList`` can be partial.
partial_classes (sequence[str]): A sequence of which categories need to be kept in the partial dataset.\
Each element of `partial_classes` must belong to the `classes` list of `dataset_class`.
Examples::
>>> partial_classes = ['back_pack', 'bike', 'calculator', 'headphones', 'keyboard']
>>> # create a partial dataset class
>>> PartialOffice31 = partial(Office31, partial_classes)
>>> # create an instance of the partial dataset
    >>> dataset = PartialOffice31(root="data/office31", task="A")
"""
if not (issubclass(dataset_class, ImageList)):
raise Exception("Only subclass of ImageList can be partial")
class PartialDataset(dataset_class):
def __init__(self, **kwargs):
super(PartialDataset, self).__init__(**kwargs)
assert all([c in self.classes for c in partial_classes])
samples = []
for (path, label) in self.samples:
class_name = self.classes[label]
if class_name in partial_classes:
samples.append((path, label))
self.samples = samples
self.partial_classes = partial_classes
self.partial_classes_idx = [self.class_to_idx[c] for c in partial_classes]
return PartialDataset |
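Unlike `open_set`, the label indices are left untouched; a hedged illustration (the root/task values are assumptions):

partial_classes = ['back_pack', 'bike', 'calculator', 'headphones', 'keyboard']
PartialOffice31 = partial(Office31, partial_classes)
dataset = PartialOffice31(root="data/office31", task="A")
print(dataset.partial_classes_idx)        # indices into the original Office-31 label space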
Default partial used in some paper.
Args:
dataset_class (class): Dataset class. Currently, dataset_class must be one of
:class:`~tllib.vision.datasets.office31.Office31`, :class:`~tllib.vision.datasets.officehome.OfficeHome`,
:class:`~tllib.vision.datasets.visda2017.VisDA2017`,
:class:`~tllib.vision.datasets.partial.imagenet_caltech.ImageNetCaltech`
and :class:`~tllib.vision.datasets.partial.caltech_imagenet.CaltechImageNet`. | def default_partial(dataset_class: ClassVar) -> ClassVar:
"""
Default partial used in some paper.
Args:
dataset_class (class): Dataset class. Currently, dataset_class must be one of
:class:`~tllib.vision.datasets.office31.Office31`, :class:`~tllib.vision.datasets.officehome.OfficeHome`,
:class:`~tllib.vision.datasets.visda2017.VisDA2017`,
:class:`~tllib.vision.datasets.partial.imagenet_caltech.ImageNetCaltech`
and :class:`~tllib.vision.datasets.partial.caltech_imagenet.CaltechImageNet`.
"""
if dataset_class == Office31:
kept_classes = OfficeCaltech.CLASSES
elif dataset_class == OfficeHome:
kept_classes = sorted(OfficeHome.CLASSES)[:25]
elif dataset_class == VisDA2017:
kept_classes = sorted(VisDA2017.CLASSES)[:6]
elif dataset_class in [ImageNetCaltech, CaltechImageNet]:
kept_classes = dataset_class.CLASSES
else:
raise NotImplementedError("Unknown partial domain adaptation dataset: {}".format(dataset_class.__name__))
return partial(dataset_class, kept_classes) |
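A hedged usage sketch for the default partial Office-Home setting (root and task names are assumptions):

PartialOfficeHome = default_partial(OfficeHome)
# the source domain keeps all 65 classes, while the target domain keeps only the first 25 (sorted)
source_dataset = OfficeHome(root="data/office_home", task="Ar")
target_dataset = PartialOfficeHome(root="data/office_home", task="Cl")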