| python_code (string, 0-4.04M chars) | repo_name (string, 8-58 chars) | file_path (string, 5-147 chars) |
---|---|---|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import torch
from transformers import (
PreTrainedModel,
PretrainedConfig,
AutoConfig,
AutoModel,
)
from transformers.modeling_outputs import BaseModelOutputWithPooling
class SwagConfig(PretrainedConfig):
model_type = "swag"
def __init__(
self,
config_name="vit_b16",
hidden_size=768,
**kwargs
):
super().__init__(**kwargs)
self.config_name = config_name
self.hidden_size = hidden_size
AutoConfig.register("swag", SwagConfig)
class SwagModel(PreTrainedModel):
config_class = SwagConfig
@classmethod
def from_orig_pretrained(cls, config_name):
swag = torch.hub.load("facebookresearch/swag", model=config_name)
config = SwagConfig(config_name=config_name, hidden_size=swag.hidden_dim)
model = SwagModel(config)
model.swag = swag
return model
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
import os
ckpt_path = os.path.join(pretrained_model_name_or_path, "pytorch_model.bin")
state_dict = torch.load(ckpt_path)
config = AutoConfig.from_pretrained(pretrained_model_name_or_path)
model = SwagModel(config)
model.load_state_dict(state_dict, strict=True)
return model
def __init__(self, config):
super().__init__(config)
self.config = config
self.swag = torch.hub.load("facebookresearch/swag", model=config.config_name)
self.post_init()
def _init_weights(self, module):
self.swag.init_weights() # check existence.
def forward(
self,
pixel_values=None,
# attention_mask=None,
# head_mask=None,
output_attentions=None,
output_hidden_states=None,
# interpolate_pos_encoding=None,
return_dict=None
):
# https://github.com/rwightman/pytorch-image-models/blob/e0c4eec4b66dc14ae96097c7b4a7ef2af45ba309/timm/models/vision_transformer.py#L358
# pre_logits is nn.Identity and token means from CLS [:, 0]
sequence_output = self.swag(pixel_values)
pooled_output = sequence_output
if not return_dict:
return (sequence_output, pooled_output)
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=None, # encoder_outputs.hidden_states,
attentions=None, # encoder_outputs.attentions,
)
AutoModel.register(SwagConfig, SwagModel)
if __name__ == '__main__':
# dump this model for AutoModel: `python -m hfmodels.swag`
models = ["vit_b16", "vit_l16", "vit_h14"]
for model in models:
vision_model = SwagModel.from_orig_pretrained(model)
vision_model.save_pretrained(f"pretrained_models/{model}_swag_hf")
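# Illustrative usage sketch, assuming the dump above has completed and that this
# transformers version writes `pytorch_model.bin` (which the from_pretrained override
# above expects); the directory name follows the save_pretrained call:
#   reloaded = SwagModel.from_pretrained("pretrained_models/vit_b16_swag_hf")
#   print(reloaded.config.hidden_size)  # 768 for vit_b16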
| CiT-main | hfmodels/swag.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from .moco import MoCoModel, MoCoConfig
from .augreg import AugRegModel, AugRegConfig
from .swag import SwagModel, SwagConfig
| CiT-main | hfmodels/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import torch
import sys
sys.path.append("moco-v3") # repo path to moco-v3
from transformers import (
PreTrainedModel,
PretrainedConfig,
AutoConfig,
AutoModel,
)
from torch import nn
from transformers.modeling_outputs import BaseModelOutputWithPooling
from vits import vit_base
from functools import partial
from moco.builder import MoCo_ViT
from collections import OrderedDict
class MoCoConfig(PretrainedConfig):
"""
refer to `https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vit/configuration_vit.py#L29`.
`model_type` only has three choices:
https://github.com/huggingface/transformers/blob/05fa1a7ac17bb7aa07b9e0c1e138ecb31a28bbfe/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py#L94
how do we make sure `hidden_size` matches the checkpoint?
"""
model_type = "moco"
def __init__(
self,
config_name="vit_base_patch16",
hidden_size=256,
**kwargs
):
super().__init__(**kwargs)
self.config_name = config_name
self.hidden_size = hidden_size
AutoConfig.register("moco", MoCoConfig)
class MoCoModel(PreTrainedModel):
config_class = MoCoConfig
@classmethod
def from_orig_pretrained(cls, ckpt_dir):
"""load from original checkpoint; used to save a HF checkpoint, see main."""
config = MoCoConfig(hidden_size=256)
model = MoCoModel(config)
print("loading weights from", ckpt_dir)
ckpt = torch.load(ckpt_dir, map_location='cpu')
state_dict = OrderedDict()
for k, v in ckpt['state_dict'].items():
k = k.replace('module.', '')
for prefix in ["momentum_encoder", "predictor"]:
if k.startswith(prefix):
break
else:
state_dict[k.replace("base_encoder.", "")] = v
model.moco.load_state_dict(state_dict, strict=True)
model.eval()
return model
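# Illustrative note on the key remapping above, assuming the standard MoCo v3 checkpoint
# layout: a key such as "module.base_encoder.patch_embed.proj.weight" is kept and renamed
# to "patch_embed.proj.weight", while keys whose (de-prefixed) name starts with
# "momentum_encoder" or "predictor" are skipped entirely.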
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
import os
ckpt_path = os.path.join(pretrained_model_name_or_path, "pytorch_model.bin")
state_dict = torch.load(ckpt_path)
config = AutoConfig.from_pretrained(pretrained_model_name_or_path)
model = MoCoModel(config)
model.load_state_dict(state_dict, strict=True)
return model
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.moco = MoCo_ViT(
partial(vit_base, stop_grad_conv1=True),
256, 4096, 0.2
).base_encoder
self.post_init()
def _init_weights(self, m):
# borrowed from mae
if isinstance(m, nn.Linear):
# we use xavier_uniform following official JAX ViT:
torch.nn.init.xavier_uniform_(m.weight)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def forward(
self,
pixel_values=None,
# attention_mask=None,
# head_mask=None,
output_attentions=None,
output_hidden_states=None,
# interpolate_pos_encoding=None,
return_dict=None
):
encoder_outputs = self.moco(pixel_values)
sequence_output = encoder_outputs.unsqueeze(1)
pooled_output = encoder_outputs
if not return_dict:
return (sequence_output, pooled_output) # + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=encoder_outputs,
hidden_states=None, # encoder_outputs.hidden_states,
attentions=None, # encoder_outputs.attentions,
)
AutoModel.register(MoCoConfig, MoCoModel)
if __name__ == '__main__':
# dump this model for AutoModel: `python -m hfmodels.moco`
vision_model = MoCoModel.from_orig_pretrained("pretrained_models/moco/vit-b-300ep.pth.tar")
vision_model.save_pretrained("pretrained_models/moco_hf")
| CiT-main | hfmodels/moco.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import numpy as np
import pickle
import re
import time
import sqlite3
import webdataset as wds
from urllib.parse import unquote
from tqdm import tqdm
# Borrowed from SLIP, but adds a tag field to be consistent with LiT: https://lbsn.vgiscience.org/yfcc-introduction/
def to_pkl():
cleanhtml = re.compile('<a.*?>|</a>|<b>|</b>|<i>|</i>')
cleanurl = re.compile(r'http\S+|www\S+')
print('=> loading YFCC image ids')
image_ids = np.load('data/yfcc15m/flickr_unique_ids.npy')
image_ids = set(image_ids)
print('=> loading CLIP image ids')
print('=> collecting and cleaning subset captions')
captioned = []
valid_image_ids = []
with open('/datasets01/yfcc100m/090517/yfcc100m_dataset.txt') as f:
for l in tqdm(f):
row = l.strip().split('\t')
if int(row[0]) in image_ids:
title = unquote(row[8]).replace('+', ' ')
title = re.sub(cleanhtml, '', title)
title = re.sub(cleanurl, '', title).strip()
desc = unquote(row[9]).replace('+', ' ')
desc = re.sub(cleanhtml, '', desc)
desc = re.sub(cleanurl, '', desc).strip()
tag = ",".join([row[10].strip(), row[11].strip()])
tag = unquote(tag).replace('+', ' ')
tag = re.sub(cleanhtml, '', tag)
tag = re.sub(cleanurl, '', tag).strip()
if any([len(title) > 0, len(desc) > 0, len(tag) > 0]):
captioned.append((int(row[0]), title, desc, tag))
valid_image_ids.append(int(row[0]))
with open('data/yfcc100m/yfcc100m_captioned_w_tag.pkl', 'wb') as f:
pickle.dump(captioned, f)
with open('data/yfcc100m/yfcc100m_image_ids.pkl', 'wb') as f:
pickle.dump(valid_image_ids, f)
print('Total captioned images:', len(captioned)) # 94514285
def write_json():
with open('data/yfcc100m/yfcc100m_captioned_w_tag.pkl', 'rb') as f:
captioned = pickle.load(f)
from collections import defaultdict
repos = defaultdict(dict)
for idx, (image_id, title, desc, tag) in enumerate(captioned):
index = format(image_id, "0>8d")
repo = index[:2]
z = index[2: 5]
repos[f"{str(repo).zfill(2)}_{str(z).zfill(3)}"][str(image_id).zfill(8)] = {"title": title, "desc": desc, "tag": tag}
import json
from pathlib import Path
for repo in repos:
_repo, z = repo.split("_")
Path(f"data/yfcc100m/yfcc100m_captioned_w_tag/{_repo}").mkdir(parents=True, exist_ok=True)
with open(f"data/yfcc100m/yfcc100m_captioned_w_tag/{_repo}/{z}.json", "w") as fw:
json.dump(repos[repo], fw)
to_pkl()
write_json()
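# Worked example of the shard layout produced by write_json() above (assuming an 8-digit
# zero-padded id): image_id 1234567 -> index "01234567" -> repo "01", z "234", so its
# record is stored under key "01234567" in data/yfcc100m/yfcc100m_captioned_w_tag/01/234.json.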
| CiT-main | scripts/make_yfcc100m_dataset.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import numpy as np
import pickle
import re
from urllib.parse import unquote
from tqdm import tqdm
# Borrowed from SLIP, but adds a tag field to be consistent with LiT: https://lbsn.vgiscience.org/yfcc-introduction/
cleanhtml = re.compile('<a.*?>|</a>|<b>|</b>|<i>|</i>')
cleanurl = re.compile(r'http\S+|www\S+')
print('=> loading YFCC image ids')
image_ids = np.load('data/yfcc15m/flickr_unique_ids.npy')
image_ids = set(image_ids)
print('=> loading CLIP image ids')
clip_ids = set()
with open('data/yfcc15m/yfcc100m_subset_data.tsv') as f:
for l in tqdm(f.readlines()):
row = l.strip().split('\t')
clip_ids.add(int(row[0]))
print('=> collecting and cleaning subset captions')
captioned = []
with open('/datasets01/yfcc100m/090517/yfcc100m_dataset.txt') as f:
for l in tqdm(f):
row = l.strip().split('\t')
if int(row[0]) in image_ids:
if int(row[0]) in clip_ids:
title = unquote(row[8]).replace('+', ' ')
title = re.sub(cleanhtml, '', title)
title = re.sub(cleanurl, '', title)
desc = unquote(row[9]).replace('+', ' ')
desc = re.sub(cleanhtml, '', desc)
desc = re.sub(cleanurl, '', desc)
tag = ",".join([row[10].strip(), row[11].strip()])
tag = unquote(tag).replace('+', ' ')
tag = re.sub(cleanhtml, '', tag)
tag = re.sub(cleanurl, '', tag)
captioned.append((int(row[0]), title, desc, tag))
with open('data/yfcc15m/yfcc15m_w_tag.pkl', 'wb') as f:
pickle.dump(captioned, f)
print('Total captioned images:', len(captioned)) # 14689580
| CiT-main | scripts/make_yfcc15m_dataset.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import json
import os
import pickle
import zipfile
import numpy as np
import torch
import random
from PIL import Image, ImageFile
from torchvision import datasets as t_datasets
ImageFile.LOAD_TRUNCATED_IMAGES = True
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def yfcc_loader(root, index):
index = format(index, "0>8d")
repo = index[:2]
z = index[2: 5]
file_img = index[5:] + '.jpg'
path_zip = os.path.join(root, 'images', repo, z) + '.zip'
with zipfile.ZipFile(path_zip, 'r') as myzip:
img = Image.open(myzip.open(file_img))
return img.convert('RGB')
def aug_tag(tag):
delims = [" ", ",", ";", "/", "\n"]
delim = random.choice(delims)[0]
segs = [seg.strip() for seg in tag.split(",") if len(seg.strip()) > 0]
random.shuffle(segs)
tag = delim.join(segs)
return tag
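# Illustrative example of aug_tag: for tag = "dog, park , sunny" the non-empty segments
# ["dog", "park", "sunny"] are shuffled and rejoined with one randomly chosen delimiter,
# yielding e.g. "sunny park dog" or "park;sunny;dog".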
class ImageCaptionDatasetBase(torch.utils.data.Dataset):
def __init__(self, args, dataset, root, metadata, task_example_ids=None, with_vision=True, with_text=True, max_sample=None):
self.with_vision = with_vision
self.with_text = with_text
self.dataset = dataset
self.root = root
if hasattr(args, "aug_tag"):
self.aug_tag = args.aug_tag
if self.dataset in ["yfcc100m_tag"]:
self.json_root = os.path.join(os.path.dirname(metadata), "yfcc100m_captioned_w_tag")
self.samples = []
if task_example_ids is not None:
if isinstance(task_example_ids, list):
self.samples.extend(task_example_ids)
else:
self.samples.extend(list(task_example_ids))
print(f"apply task filter with {len(self.samples)} examples.")
else:
with open(metadata, 'rb') as f:
samples = pickle.load(f)
self.samples.extend(samples)
if max_sample is not None and len(self.samples) >= max_sample:
self.samples = self.samples[:max_sample]
elif self.dataset in ['yfcc15m_tag', 'yfcc15m']:
with open(metadata, 'rb') as f:
samples = pickle.load(f)
self.samples = []
if task_example_ids is not None:
if isinstance(task_example_ids, list):
# build the index of sample and follow the list order.
image_id_to_sample = {}
for image_id, title, desc, tag in samples:
title, desc, tag = title.strip(), desc.strip(), tag.strip()
if len(title) > 0:
image_id_to_sample["_".join([str(image_id).zfill(8), "title"])] = {"image_id": image_id, "title": title}
if len(desc) > 0:
image_id_to_sample["_".join([str(image_id).zfill(8), "desc"])] = {"image_id": image_id, "desc": desc}
if "tag" in self.dataset and len(tag) > 0:
image_id_to_sample["_".join([str(image_id).zfill(8), "tag"])] = {"image_id": image_id, "tag": tag}
for image_key in task_example_ids:
if max_sample is not None and len(self.samples) >= max_sample:
break
image_id, field = image_key.split("_")
image_id = image_id.zfill(8)
image_key = "_".join([image_id, field])
self.samples.append(image_id_to_sample[image_key])
else:
for image_id, title, desc, tag in samples:
title, desc, tag = title.strip(), desc.strip(), tag.strip()
if str(image_id).zfill(8) + "_title" in task_example_ids and len(title) > 0:
self.samples.append({"image_id": image_id, "title": title})
if str(image_id).zfill(8) + "_desc" in task_example_ids and len(desc) > 0:
self.samples.append({"image_id": image_id, "desc": desc})
if "tag" in self.dataset and str(image_id).zfill(8) + "_tag" in task_example_ids and len(tag) > 0:
self.samples.append({"image_id": image_id, "tag": tag})
if max_sample is not None and len(self.samples) >= max_sample:
break
print(f"apply task filter with {len(self.samples)} examples.")
else:
for image_id, title, desc, tag in samples:
title, desc, tag = title.strip(), desc.strip(), tag.strip()
rec = {}
if len(title) > 0:
rec["title"] = title
if len(desc) > 0:
rec["desc"] = desc
if "tag" in self.dataset and len(tag) > 0:
rec["tag"] = tag
if len(rec) > 0:
rec["image_id"] = image_id
self.samples.append(rec)
if max_sample is not None and len(self.samples) >= max_sample:
break
else:
raise ValueError(f"unknown dataset {self.dataset}")
def get_raw_item(self, i):
if self.dataset in ["yfcc100m_tag"]:
sample = self.samples[i]
if isinstance(sample, str):
index, key = sample.split("_")
else:
index = sample
index = format(int(index), "0>8d")
img = yfcc_loader(self.root, int(index)) if self.with_vision else None
if self.with_text:
repo = index[:2]
z = index[2: 5]
with open(f"{self.json_root}/{repo}/{z}.json") as fr:
repo_z = json.load(fr)
rec = repo_z[str(index).zfill(8)]
if not isinstance(sample, str):
key = random.choice([key for key in rec if len(rec[key]) > 0])
index = "_".join([str(index).zfill(8), key])
if key == "tag" and (hasattr(self, "aug_tag") and self.aug_tag):
caption = aug_tag(rec[key])
else:
caption = rec[key]
elif self.dataset in ['yfcc15m_tag', 'yfcc15m']:
rec = self.samples[i]
index = rec["image_id"]
img = yfcc_loader(self.root, index) if self.with_vision else None
if self.with_text:
key = random.choice([_key for _key in rec if _key != "image_id"])
index = "_".join([str(index).zfill(8), key])
if key == "tag" and hasattr(self, "aug_tag"):
caption = aug_tag(rec[key])
else:
caption = rec[key]
else:
raise ValueError(f"unknown dataset {self.dataset}")
return index, img, caption
def __getitem__(self, i):
raise NotImplementedError
def __len__(self):
return len(self.samples)
class ImageCaptionDatasetCLIP(ImageCaptionDatasetBase):
def __init__(self, args, dataset, root, metadata, task_example_ids, transform=None, tokenizer=None, max_bert_length=77, with_vision=True, with_text=True, max_sample=None):
super().__init__(args, dataset, root, metadata, task_example_ids, with_vision, with_text, max_sample)
self.max_bert_length = max_bert_length
self.transform = transform
self.tokenizer = tokenizer
def __getitem__(self, i):
index, img, caption = self.get_raw_item(i)
result = {"image_ids": index}
# apply transformation
if img is not None and self.transform is not None:
img = self.transform(img)
result["pixel_values"] = img
# tokenize caption
if caption is not None and self.tokenizer is not None:
inputs = self.tokenizer(caption, padding="max_length", truncation=True, max_length=self.max_bert_length, return_tensors="pt")
for key in inputs:
inputs[key] = inputs[key][0]
result.update(**inputs)
result["captions"] = caption
return result
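# Note: each item is a dict containing "image_ids", optionally "pixel_values" (the
# transformed image tensor), the tokenizer outputs (e.g. "input_ids"/"attention_mask",
# squeezed to 1-D above), and the raw "captions" string.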
class FileListDataset(torch.utils.data.Dataset):
def __init__(self, images, labels, transform=None, target_transform=None):
self.transform = transform
self.target_transform = target_transform
self.images = np.load(images)
self.labels = np.load(labels)
def __getitem__(self, index):
img = pil_loader(self.images[index])
target = self.labels[index]
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.images)
def get_downstream_dataset(catalog, name, is_train, transform):
entry = catalog[name]
root = entry['path']
if entry['type'] == 'imagefolder':
dataset = t_datasets.ImageFolder(os.path.join(root, entry['train'] if is_train else entry['test']),
transform=transform)
elif entry['type'] == 'special':
if name == 'cifar10':
dataset = t_datasets.CIFAR10(root, train=is_train,
transform=transform, download=True)
elif name == 'cifar100':
dataset = t_datasets.CIFAR100(root, train=is_train,
transform=transform, download=True)
elif name == 'stl10':
dataset = t_datasets.STL10(root, split='train' if is_train else 'test',
transform=transform, download=True)
elif name == 'mnist':
dataset = t_datasets.MNIST(root, train=is_train,
transform=transform, download=True)
elif entry['type'] == 'filelist':
path = entry['train'] if is_train else entry['test']
val_images = os.path.join(root, path + '_images.npy')
val_labels = os.path.join(root, path + '_labels.npy')
if name == 'clevr_counts':
target_transform = lambda x: ['count_10', 'count_3', 'count_4', 'count_5', 'count_6', 'count_7', 'count_8', 'count_9'].index(x)
else:
target_transform = None
dataset = FileListDataset(val_images, val_labels, transform, target_transform)
else:
raise Exception('Unknown dataset')
return dataset
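# Illustrative catalog entries consumed by get_downstream_dataset above; field names
# follow the code, dataset names and paths are placeholders:
#   "cifar10":      {"type": "special",     "path": "/datasets/cifar10"}
#   "clevr_counts": {"type": "filelist",    "path": "/datasets/clevr", "train": "train", "test": "val"}
#   "food101":      {"type": "imagefolder", "path": "/datasets/food101", "train": "train", "test": "test"}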
| CiT-main | clipeval/datasets.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import torch
import json
import os
from sklearn import metrics
def load_metadata(metadir="clipeval"):
with open(os.path.join(metadir, 'dataset_catalog.json')) as f:
catalog = json.load(f)
with open(os.path.join(metadir, 'templates.json')) as f:
all_templates = json.load(f)
with open(os.path.join(metadir, 'labels.json')) as f:
all_labels = json.load(f)
return catalog, all_templates, all_labels
def evaluate(d, val_loader, templates, labels, model, tokenizer, max_bert_length, classnorm=False):
print('Evaluating {}'.format(d))
is_acc = d not in ['aircraft', 'pets', 'caltech101', 'flowers', 'kinetics700_frames', 'hateful_memes']
acc_or_outputs = validate_zeroshot(val_loader, templates, labels, model, tokenizer, is_acc, max_bert_length, classnorm)
if d in ['aircraft', 'pets', 'caltech101', 'flowers']:
metric = mean_per_class(*acc_or_outputs)
elif d == 'kinetics700_frames':
top1, top5 = accuracy(*acc_or_outputs, topk=(1, 5))
metric = (top1 + top5) / 2
metric = metric.item()
elif d == 'hateful_memes':
metric = roc_auc(*acc_or_outputs)
else:
metric = acc_or_outputs
return metric
@torch.no_grad()
def build_text_features(templates, labels, model, tokenizer, max_bert_length=77, skip_text_projection=False, classnorm=False):
# (huxu) TODO: add device
text_features = []
for label in labels:
if isinstance(label, list):
texts = [t.format(l) for t in templates for l in label]
else:
texts = [t.format(label) for t in templates]
texts = tokenizer(texts, padding=True, truncation=True, max_length=max_bert_length, return_tensors="pt")
for key in texts:
texts[key] = texts[key].to(next(model.parameters()).device, non_blocking=True)
# texts = texts.view(-1, max_bert_length).contiguous()
class_embeddings = model(**texts, skip_text_projection=skip_text_projection)["text_embeds"]
class_embeddings = class_embeddings / class_embeddings.norm(dim=-1, keepdim=True)
class_embeddings = class_embeddings.mean(dim=0)
text_features.append(class_embeddings)
text_features = torch.stack(text_features, dim=0)
mean, std = None, None
if classnorm:
mean, std = text_features.mean(dim=0)[None, :], text_features.std(dim=0)[None, :]
text_features = (text_features - mean) / std
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
return text_features, mean, std
@torch.no_grad()
def validate_zeroshot(val_loader, templates, labels, model, tokenizer, is_acc, max_bert_length, classnorm=False):
# switch to evaluate mode
model.cuda()
model.eval()
total_top1 = 0
total_images = 0
all_outputs = []
all_targets = []
text_features = None
for samples in val_loader:
if text_features is None:
print('=> encoding captions')
text_features, mean, std = build_text_features(templates, labels, model, tokenizer, max_bert_length, classnorm=classnorm)
if isinstance(samples, tuple) or isinstance(samples, list):
images, target = samples[0], samples[1]
elif isinstance(samples, dict):
images, target = samples["pixel_values"], samples["targets"]
else:
raise ValueError("unknown sample type", type(samples))
images = images.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# encode images
image_features = model(pixel_values=images)["image_embeds"]
if classnorm:
image_features = (image_features - mean) / std
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
# cosine similarity as logits
logits_per_image = image_features @ text_features.t()
logits_per_image = logits_per_image.cpu()
target = target.cpu()
if is_acc:
# measure accuracy and record loss
pred = logits_per_image.argmax(dim=1)
correct = pred.eq(target).sum()
total_top1 += correct.item()
total_images += images.size(0)
else:
all_outputs.append(logits_per_image)
all_targets.append(target)
if is_acc:
return 100 * total_top1 / total_images
else:
return torch.cat(all_outputs), torch.cat(all_targets)
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
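# Worked example: with output = torch.tensor([[0.1, 0.9], [0.8, 0.2]]) and
# target = torch.tensor([1, 1]), accuracy(output, target, topk=(1,)) returns
# [tensor([50.])], since only the first row's argmax matches its target.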
def mean_per_class(outputs, targets):
pred = outputs.argmax(1)
confusion_matrix = metrics.confusion_matrix(targets, pred)
per_classes = confusion_matrix.diagonal() / confusion_matrix.sum(axis=1)
return 100 * per_classes.mean()
def roc_auc(outputs, targets):
pos_score = outputs[:, 1] - outputs[:, 0]
metric = metrics.roc_auc_score(targets, pos_score)
return 100 * metric
if __name__ == '__main__':
# quick smoke test of the metric helpers
logits = torch.randn(128, 10)
targets = torch.randint(size=(128,), low=0, high=10)
top1, top5 = accuracy(logits, targets, topk=(1, 5))
print('Acc@1: {:.2f}, Acc@5: {:.2f}'.format(top1.item(), top5.item()))
| CiT-main | clipeval/eval_zeroshot.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import time
import yaml
import torch
import utils.logger
from utils import main_utils, eval_utils
import torch.multiprocessing as mp
parser = argparse.ArgumentParser(description='Evaluation on ESC Sound Classification')
parser.add_argument('cfg', metavar='CFG', help='config file')
parser.add_argument('model_cfg', metavar='CFG', help='config file')
parser.add_argument('--quiet', action='store_true')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--test-only', action='store_true')
parser.add_argument('--resume', action='store_true')
parser.add_argument('--distributed', action='store_true')
parser.add_argument('--port', default='1234')
def main():
ngpus = torch.cuda.device_count()
args = parser.parse_args()
cfg = yaml.safe_load(open(args.cfg))
if args.test_only:
cfg['test_only'] = True
if args.resume:
cfg['resume'] = True
if args.debug:
cfg['num_workers'] = 1
cfg['dataset']['batch_size'] = 4
if args.distributed:
mp.spawn(main_worker, nprocs=ngpus, args=(ngpus, cfg['dataset']['fold'], args, cfg))
else:
main_worker(None, ngpus, cfg['dataset']['fold'], args, cfg)
def main_worker(gpu, ngpus, fold, args, cfg):
args.gpu = gpu
args.world_size = ngpus
# Prepare folder and logger
eval_dir, model_cfg, logger = eval_utils.prepare_environment(args, cfg, fold)
# Model
model, ckp_manager = eval_utils.build_model(model_cfg, cfg, eval_dir, args, logger)
# Optimizer
optimizer, scheduler = main_utils.build_optimizer(model.parameters(), cfg['optimizer'], logger)
# Datasets
train_loader, test_loader, dense_loader = eval_utils.build_dataloaders(
cfg['dataset'], fold, cfg['num_workers'], args.distributed, logger)
################################ Train ################################
start_epoch, end_epoch = 0, cfg['optimizer']['num_epochs']
if cfg['resume'] and ckp_manager.checkpoint_exists(last=True):
start_epoch = ckp_manager.restore(model, optimizer, scheduler, restore_last=True)
logger.add_line("Loaded checkpoint '{}' (epoch {})".format(ckp_manager.last_checkpoint_fn(), start_epoch))
if not cfg['test_only']:
logger.add_line("=" * 30 + " Training " + "=" * 30)
# Warmup. Train classifier for a few epochs.
if start_epoch == 0 and 'warmup_classifier' in cfg['optimizer'] and cfg['optimizer']['warmup_classifier']:
n_wu_epochs = cfg['optimizer']['warmup_epochs'] if 'warmup_epochs' in cfg['optimizer'] else 5
cls_opt, _ = main_utils.build_optimizer(
params=[p for n, p in model.named_parameters() if 'feature_extractor' not in n],
cfg={'lr': {'base_lr': cfg['optimizer']['lr']['base_lr'], 'milestones': [n_wu_epochs,], 'gamma': 1.},
'weight_decay': cfg['optimizer']['weight_decay'],
'name': cfg['optimizer']['name']}
)
for epoch in range(n_wu_epochs):
run_phase('train', train_loader, model, cls_opt, epoch, args, cfg, logger)
top1, _ = run_phase('test', test_loader, model, None, epoch, args, cfg, logger)
# Main training loop
for epoch in range(start_epoch, end_epoch):
scheduler.step(epoch=epoch)
if args.distributed:
train_loader.sampler.set_epoch(epoch)
test_loader.sampler.set_epoch(epoch)
logger.add_line('='*30 + ' Epoch {} '.format(epoch) + '='*30)
logger.add_line('LR: {}'.format(scheduler.get_lr()))
run_phase('train', train_loader, model, optimizer, epoch, args, cfg, logger)
top1, _ = run_phase('test', test_loader, model, None, epoch, args, cfg, logger)
ckp_manager.save(model, optimizer, scheduler, epoch, eval_metric=top1)
################################ Eval ################################
logger.add_line('\n' + '=' * 30 + ' Final evaluation ' + '=' * 30)
cfg['dataset']['test']['clips_per_video'] = 25 # Evaluate clip-level predictions with 25 clips per video for metric stability
train_loader, test_loader, dense_loader = eval_utils.build_dataloaders(cfg['dataset'], fold, cfg['num_workers'], args.distributed, logger)
top1, top5 = run_phase('test', test_loader, model, None, end_epoch, args, cfg, logger)
top1_dense, top5_dense = run_phase('test_dense', dense_loader, model, None, end_epoch, args, cfg, logger)
logger.add_line('\n' + '=' * 30 + ' Evaluation done ' + '=' * 30)
logger.add_line('Clip@1: {:6.2f}'.format(top1))
logger.add_line('Clip@5: {:6.2f}'.format(top5))
logger.add_line('Video@1: {:6.2f}'.format(top1_dense))
logger.add_line('Video@5: {:6.2f}'.format(top5_dense))
def run_phase(phase, loader, model, optimizer, epoch, args, cfg, logger):
from utils import metrics_utils
batch_time = metrics_utils.AverageMeter('Time', ':6.3f', window_size=100)
data_time = metrics_utils.AverageMeter('Data', ':6.3f', window_size=100)
loss_meter = metrics_utils.AverageMeter('Loss', ':.4e')
top1_meter = metrics_utils.AverageMeter('Acc@1', ':6.2f')
top5_meter = metrics_utils.AverageMeter('Acc@5', ':6.2f')
progress = utils.logger.ProgressMeter(len(loader), meters=[batch_time, data_time, loss_meter, top1_meter, top5_meter],
phase=phase, epoch=epoch, logger=logger)
# switch to train/test mode
model.train(phase == 'train')
if phase in {'test_dense', 'test'}:
model = eval_utils.BatchWrapper(model, cfg['dataset']['batch_size'])
criterion = torch.nn.CrossEntropyLoss()
softmax = torch.nn.Softmax(dim=1)
end = time.time()
logger.add_line('\n{}: Epoch {}'.format(phase, epoch))
for it, sample in enumerate(loader):
data_time.update(time.time() - end)
video = sample['frames']
target = sample['label'].cuda()
if args.gpu is not None:
video = video.cuda(args.gpu, non_blocking=True)
if torch.cuda.device_count() == 1 and args.gpu is None:
video = video.cuda()
# compute outputs
if phase == 'test_dense':
batch_size, clips_per_sample = video.shape[0], video.shape[1]
video = video.flatten(0, 1).contiguous()
if phase == 'train':
logits = model(video)
else:
with torch.no_grad():
logits = model(video)
# compute loss and accuracy
if phase == 'test_dense':
confidence = softmax(logits).view(batch_size, clips_per_sample, -1).mean(1)
labels_tiled = target.unsqueeze(1).repeat(1, clips_per_sample).view(-1)
loss = criterion(logits, labels_tiled)
else:
confidence = softmax(logits)
loss = criterion(logits, target)
with torch.no_grad():
acc1, acc5 = metrics_utils.accuracy(confidence, target, topk=(1, 5))
loss_meter.update(loss.item(), target.size(0))
top1_meter.update(acc1[0], target.size(0))
top5_meter.update(acc5[0], target.size(0))
# compute gradient and do SGD step
if phase == 'train':
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if (it + 1) % 100 == 0 or it == 0 or it + 1 == len(loader):
progress.display(it+1)
if args.distributed:
progress.synchronize_meters(args.gpu)
progress.display(len(loader) * args.world_size)
return top1_meter.avg, top5_meter.avg
if __name__ == '__main__':
main()
| AVID-CMA-main | eval-action-recg.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import os
import random
import time
import warnings
import yaml
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.multiprocessing as mp
import utils.logger
from utils import main_utils
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('cfg', help='model directory')
parser.add_argument('--quiet', action='store_true')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://localhost:15475', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
def main():
args = parser.parse_args()
cfg = yaml.safe_load(open(args.cfg))
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args, cfg))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args, cfg)
def main_worker(gpu, ngpus_per_node, args, cfg):
args.gpu = gpu
# Setup environment
args = main_utils.initialize_distributed_backend(args, ngpus_per_node)
logger, tb_writter, model_dir = main_utils.prep_environment(args, cfg)
# Define model
model = main_utils.build_model(cfg['model'], logger)
model, args, cfg['dataset']['batch_size'], cfg['num_workers'] = main_utils.distribute_model_to_cuda(model, args, cfg['dataset']['batch_size'], cfg['num_workers'], ngpus_per_node)
# Define dataloaders
train_loader = main_utils.build_dataloaders(cfg['dataset'], cfg['num_workers'], args.distributed, logger)
# Define criterion
device = args.gpu if args.gpu is not None else 0
cfg['loss']['args']['embedding_dim'] = model.module.out_dim
cfg['loss']['args']['device'] = device
train_criterion = main_utils.build_criterion(cfg['loss'], logger=logger)
# Define optimizer
optimizer, scheduler = main_utils.build_optimizer(
params=list(model.parameters())+list(train_criterion.parameters()),
cfg=cfg['optimizer'],
logger=logger)
ckp_manager = main_utils.CheckpointManager(model_dir, rank=args.rank)
# Optionally resume from a checkpoint
start_epoch, end_epoch = 0, cfg['optimizer']['num_epochs']
if cfg['resume']:
if ckp_manager.checkpoint_exists(last=True):
start_epoch = ckp_manager.restore(restore_last=True, model=model, optimizer=optimizer, train_criterion=train_criterion)
scheduler.step(start_epoch)
logger.add_line("Checkpoint loaded: '{}' (epoch {})".format(ckp_manager.last_checkpoint_fn(), start_epoch))
else:
logger.add_line("No checkpoint found at '{}'".format(ckp_manager.last_checkpoint_fn()))
cudnn.benchmark = True
############################ TRAIN #########################################
test_freq = cfg['test_freq'] if 'test_freq' in cfg else 1
for epoch in range(start_epoch, end_epoch):
if epoch in cfg['optimizer']['lr']['milestones']:
ckp_manager.save(epoch, model=model, train_criterion=train_criterion, optimizer=optimizer, filename='checkpoint-ep{}.pth.tar'.format(epoch))
if args.distributed:
train_loader.sampler.set_epoch(epoch)
scheduler.step(epoch)
train_criterion.set_epoch(epoch)
# Train for one epoch
logger.add_line('='*30 + ' Epoch {} '.format(epoch) + '='*30)
logger.add_line('LR: {}'.format(scheduler.get_lr()))
run_phase('train', train_loader, model, optimizer, train_criterion, epoch, args, cfg, logger, tb_writter)
if epoch % test_freq == 0 or epoch == end_epoch - 1:
ckp_manager.save(epoch+1, model=model, optimizer=optimizer, train_criterion=train_criterion)
def run_phase(phase, loader, model, optimizer, criterion, epoch, args, cfg, logger, tb_writter):
from utils import metrics_utils
logger.add_line('\n{}: Epoch {}'.format(phase, epoch))
batch_time = metrics_utils.AverageMeter('Time', ':6.3f', window_size=100)
data_time = metrics_utils.AverageMeter('Data', ':6.3f', window_size=100)
loss_meter = metrics_utils.AverageMeter('Loss', ':.3e')
progress = utils.logger.ProgressMeter(len(loader), [batch_time, data_time, loss_meter],
phase=phase, epoch=epoch, logger=logger, tb_writter=tb_writter)
# switch to train mode
model.train(phase == 'train')
end = time.time()
device = args.gpu if args.gpu is not None else 0
for i, sample in enumerate(loader):
# measure data loading time
data_time.update(time.time() - end)
# Prepare batch
video, audio, index = sample['frames'], sample['audio'], sample['index']
video = video.cuda(device, non_blocking=True)
audio = audio.cuda(device, non_blocking=True)
index = index.cuda(device, non_blocking=True)
# compute audio and video embeddings
if phase == 'train':
video_emb, audio_emb = model(video, audio)
else:
with torch.no_grad():
video_emb, audio_emb = model(video, audio)
# compute loss
loss, loss_debug = criterion(video_emb, audio_emb, index)
loss_meter.update(loss.item(), video.size(0))
# compute gradient and do SGD step during training
if phase == 'train':
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print to terminal and tensorboard
step = epoch * len(loader) + i
if (i+1) % cfg['print_freq'] == 0 or i == 0 or i+1 == len(loader):
progress.display(i+1)
if tb_writter is not None:
for key in loss_debug:
tb_writter.add_scalar('{}-batch/{}'.format(phase, key), loss_debug[key].item(), step)
# Sync metrics across all GPUs and print final averages
if args.distributed:
progress.synchronize_meters(args.gpu)
progress.display(len(loader)*args.world_size)
if tb_writter is not None:
for meter in progress.meters:
tb_writter.add_scalar('{}-epoch/{}'.format(phase, meter.name), meter.avg, epoch)
if __name__ == '__main__':
main()
| AVID-CMA-main | main-avid.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import time
import yaml
import torch
from utils import main_utils, eval_utils
import utils.logger
import torch.multiprocessing as mp
parser = argparse.ArgumentParser(description='Evaluation on ESC Sound Classification')
parser.add_argument('cfg', metavar='CFG', help='config file')
parser.add_argument('model_cfg', metavar='CFG', help='config file')
parser.add_argument('--quiet', action='store_true')
parser.add_argument('--test-only', action='store_true')
parser.add_argument('--resume', action='store_true')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--distributed', action='store_true')
parser.add_argument('--port', default='1234')
def main():
args = parser.parse_args()
cfg = yaml.safe_load(open(args.cfg))
if args.test_only:
cfg['test_only'] = True
if args.resume:
cfg['resume'] = True
if args.debug:
cfg['num_workers'] = 1
cfg['dataset']['batch_size'] = 4
ngpus = torch.cuda.device_count()
for fold in range(1, cfg['dataset']['num_folds']+1):
if args.distributed:
mp.spawn(main_worker, nprocs=ngpus, args=(ngpus, fold, args, cfg))
else:
main_worker(None, ngpus, fold, args, cfg)
def main_worker(gpu, ngpus, fold, args, cfg):
args.gpu = gpu
args.world_size = ngpus
# Prepare folder and logger
eval_dir, model_cfg, logger = eval_utils.prepare_environment(args, cfg, fold)
# Model
model, ckp_manager = eval_utils.build_model(model_cfg, cfg, eval_dir, args, logger)
# Optimizer
optimizer, scheduler = main_utils.build_optimizer(model.parameters(), cfg['optimizer'], logger)
# Datasets
train_loader, test_loader, dense_loader = eval_utils.build_dataloaders(
cfg['dataset'], fold, cfg['num_workers'], args.distributed, logger)
################################ Train ################################
start_epoch, end_epoch = 0, cfg['optimizer']['num_epochs']
if (cfg['resume'] or args.test_only) and ckp_manager.checkpoint_exists(last=True):
start_epoch = ckp_manager.restore(model, optimizer, scheduler, restore_last=True)
logger.add_line("Loaded checkpoint '{}' (epoch {})".format(ckp_manager.last_checkpoint_fn(), start_epoch))
if not cfg['test_only']:
logger.add_line("=" * 30 + " Training " + "=" * 30)
for epoch in range(start_epoch, end_epoch):
scheduler.step(epoch=epoch)
if args.distributed:
train_loader.sampler.set_epoch(epoch)
test_loader.sampler.set_epoch(epoch)
logger.add_line('='*30 + ' Epoch {} '.format(epoch) + '='*30)
logger.add_line('LR: {}'.format(scheduler.get_lr()))
run_phase('train', train_loader, model, optimizer, epoch, args, cfg, logger)
run_phase('test', test_loader, model, None, epoch, args, cfg, logger)
ckp_manager.save(model, optimizer, scheduler, epoch)
################################ Eval ################################
logger.add_line('\n' + '=' * 30 + ' Final evaluation ' + '=' * 30)
cfg['dataset']['test']['clips_per_video'] = 25
train_loader, test_loader, dense_loader = eval_utils.build_dataloaders(cfg['dataset'], fold, cfg['num_workers'], args.distributed, logger)
top1_dense, top5_dense = run_phase('test_dense', dense_loader, model, None, end_epoch, args, cfg, logger)
top1, top5 = run_phase('test', test_loader, model, None, end_epoch, args, cfg, logger)
logger.add_line('\n' + '=' * 30 + ' Evaluation done ' + '=' * 30)
for ft in top1:
logger.add_line('')
logger.add_line('[{}] Clip@1: {:6.2f}'.format(ft, top1[ft]))
logger.add_line('[{}] Clip@5: {:6.2f}'.format(ft, top5[ft]))
logger.add_line('[{}] Video@1: {:6.2f}'.format(ft, top1_dense[ft]))
logger.add_line('[{}] Video@5: {:6.2f}'.format(ft, top5_dense[ft]))
def run_phase(phase, loader, model, optimizer, epoch, args, cfg, logger):
from utils import metrics_utils
logger.add_line('\n{}: Epoch {}'.format(phase, epoch))
feature_names = cfg['model']['args']['feat_names']
batch_time = metrics_utils.AverageMeter('Time', ':6.3f', 100)
data_time = metrics_utils.AverageMeter('Data', ':6.3f', 100)
loss_meters = {ft: metrics_utils.AverageMeter('Loss', ':.4e', 0) for ft in feature_names}
top1_meters = {ft: metrics_utils.AverageMeter('Acc@1', ':6.2f', 0) for ft in feature_names}
top5_meters = {ft: metrics_utils.AverageMeter('Acc@5', ':6.2f', 0) for ft in feature_names}
progress = {'timers': utils.logger.ProgressMeter(len(loader), meters=[batch_time, data_time], phase=phase, epoch=epoch, logger=logger)}
progress.update({ft: utils.logger.ProgressMeter(len(loader), meters=[loss_meters[ft], top1_meters[ft], top5_meters[ft]], phase=phase, epoch=epoch, logger=logger) for ft in feature_names})
# switch to train/test mode
model.train(phase == 'train')
if phase in {'test_dense', 'test'}:
model = BatchWrapper(model, cfg['dataset']['batch_size'])
end = time.time()
criterion = torch.nn.CrossEntropyLoss()
softmax = torch.nn.Softmax(dim=1)
for it, sample in enumerate(loader):
data_time.update(time.time() - end)
video = sample['frames']
target = sample['label'].cuda()
if args.gpu is not None:
video = video.cuda(args.gpu, non_blocking=True)
if phase == 'test_dense':
batch_size, clips_per_sample = video.shape[0], video.shape[1]
video = video.flatten(0, 1).contiguous()
# compute outputs
if phase == 'train':
logits = model(video)
else:
with torch.no_grad():
logits = model(video)
# compute loss and measure accuracy
total_loss = 0.
for ft in feature_names:
if phase == 'test_dense':
confidence = softmax(logits[ft]).view(batch_size, clips_per_sample, -1).mean(1)
target_tiled = target.unsqueeze(1).repeat(1, clips_per_sample).view(-1)
loss = criterion(logits[ft], target_tiled)
else:
confidence = softmax(logits[ft])
loss = criterion(logits[ft], target)
total_loss += loss
with torch.no_grad():
acc1, acc5 = metrics_utils.accuracy(confidence, target, topk=(1, 5))
loss_meters[ft].update(loss.item(), target.size(0))
top1_meters[ft].update(acc1[0].item(), target.size(0))
top5_meters[ft].update(acc5[0].item(), target.size(0))
# compute gradient and do SGD step
if phase == 'train':
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if (it + 1) % 100 == 0 or it == 0 or it + 1 == len(loader):
for ft in progress:
progress[ft].display(it+1)
if args.distributed:
for ft in progress:
progress[ft].synchronize_meters(args.gpu)
progress[ft].display(len(loader) * args.world_size)
return {ft: top1_meters[ft].avg for ft in feature_names}, {ft: top5_meters[ft].avg for ft in feature_names}
class BatchWrapper:
def __init__(self, model, batch_size):
self.model = model
self.batch_size = batch_size
def __call__(self, x):
from collections import defaultdict
outs = defaultdict(list)
for i in range(0, x.shape[0], self.batch_size):
odict = self.model(x[i:i + self.batch_size])
for k in odict:
outs[k] += [odict[k]]
for k in outs:
outs[k] = torch.cat(outs[k], 0)
return outs
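# Usage note: in the test phases above, BatchWrapper re-chunks the flattened dense batch
# (batch_size * clips_per_sample clips) into slices of cfg['dataset']['batch_size'],
# forwards each slice, and concatenates the per-feature outputs along dim 0, so callers
# still receive a single logits dict covering the whole input.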
if __name__ == '__main__':
main()
| AVID-CMA-main | eval-action-recg-linear.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import csv
import numpy as np
import glob
from datasets.video_db import VideoDataset
DATA_PATH = '/data/datasets/AS240/data/'
CACHE_PATH = 'datasets/cache/audioset'
class AudiosetClasses:
def __init__(self):
ann_list = list(csv.DictReader(open(CACHE_PATH + '/class_labels_indices.csv')))
self.classes = [ann['mid'] for ann in ann_list]
self.class_label = {ann['mid']: int(ann['index']) for ann in ann_list}
self.display_name = {ann['mid']: ann['display_name'] for ann in ann_list}
def __getitem__(self, index):
return self.display_name[self.classes[index]]
def __len__(self):
return len(self.classes)
def class2index(self, class_string):
return self.class_label[class_string]
class AudioSet(VideoDataset):
def __init__(self, subset,
return_video=True,
video_clip_duration=1.,
video_fps=25.,
video_transform=None,
return_audio=False,
audio_clip_duration=1.,
audio_fps=None,
audio_fps_out=64,
audio_transform=None,
return_labels=False,
return_index=False,
max_offsync_augm=0,
mode='clip',
clips_per_video=1,
):
root = f"{DATA_PATH}/{subset.split('-')[0]}_segments/video"
classes = AudiosetClasses()
filenames = [f"{ln.strip().split()[0]}" for ln in open(f"{CACHE_PATH}/{subset}.txt")]
available = set([fn.split('/')[-1].split('.')[0] for fn in glob.glob(f"{root}/*")])
filenames = [fn for fn in filenames if fn.split('.')[0] in available]
assert return_labels is False
labels = None
super(AudioSet, self).__init__(
return_video=return_video,
video_clip_duration=video_clip_duration,
video_root=root,
video_fns=filenames,
video_fps=video_fps,
video_transform=video_transform,
return_audio=return_audio,
audio_clip_duration=audio_clip_duration,
audio_root=root,
audio_fns=filenames,
audio_fps=audio_fps,
audio_fps_out=audio_fps_out,
audio_transform=audio_transform,
return_labels=return_labels,
labels=labels,
return_index=return_index,
max_offsync_augm=max_offsync_augm,
mode=mode,
clips_per_video=clips_per_video,
)
self.name = 'AudioSet dataset'
self.root = root
self.subset = subset
self.num_videos = len(filenames)
self.num_classes = len(classes)
self.sample_id = np.array([fn.split('.')[0].encode('utf-8') for fn in filenames])
| AVID-CMA-main | datasets/audioset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from .audioset import AudioSet
from .kinetics import Kinetics
from .ucf import UCF
from .hmdb import HMDB
| AVID-CMA-main | datasets/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import numpy as np
import random
import librosa
from utils.videotransforms import video_transforms, volume_transforms, tensor_transforms
class VideoPrep_MSC_CJ(object):
def __init__(self,
crop=(224, 224),
color=(0.4, 0.4, 0.4, 0.2),
min_area=0.08,
augment=True,
normalize=True,
totensor=True,
num_frames=8,
pad_missing=False,
):
self.crop = crop
self.augment = augment
self.num_frames = num_frames
self.pad_missing = pad_missing
if normalize:
assert totensor
if augment:
transforms = [
video_transforms.RandomResizedCrop(crop, scale=(min_area, 1.)),
video_transforms.RandomHorizontalFlip(),
video_transforms.ColorJitter(*color),
]
else:
transforms = [
video_transforms.Resize(int(crop[0]/0.875)),
video_transforms.CenterCrop(crop),
]
if totensor:
transforms += [volume_transforms.ClipToTensor()]
if normalize:
transforms += [tensor_transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]
self.transform = video_transforms.Compose(transforms)
def __call__(self, frames):
frames = self.transform(frames)
if self.pad_missing:
while True:
n_missing = self.num_frames - frames.shape[1]
if n_missing > 0:
frames = torch.cat((frames, frames[:, :n_missing]), 1)
else:
break
return frames
class VideoPrep_Crop_CJ(object):
def __init__(self,
resize=(256, 256),
crop=(224, 224),
color=(0.4, 0.4, 0.4, 0.2),
num_frames=8,
pad_missing=False,
augment=True,
normalize=True,
totensor=True,
):
self.resize = resize
self.crop = crop
self.augment = augment
self.num_frames = num_frames
self.pad_missing = pad_missing
if normalize:
assert totensor
if augment:
transforms = [
video_transforms.Resize(resize),
video_transforms.RandomCrop(crop),
video_transforms.RandomHorizontalFlip(),
video_transforms.ColorJitter(*color),
]
else:
transforms = [
video_transforms.Resize(resize),
video_transforms.CenterCrop(crop),
]
if totensor:
transforms += [volume_transforms.ClipToTensor()]
if normalize:
transforms += [tensor_transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]
self.transform = video_transforms.Compose(transforms)
def __call__(self, frames):
if isinstance(frames[0], list):
return torch.stack([self(f) for f in frames])
frames = self.transform(frames)
if self.pad_missing:
while True:
n_missing = self.num_frames - frames.shape[1]
if n_missing > 0:
frames = torch.cat((frames, frames[:, :n_missing]), 1)
else:
break
return frames
class AudioPrep(object):
def __init__(self, trim_pad=True, duration=None, missing_as_zero=False, augment=False, to_tensor=False, volume=0.1):
self.trim_pad = trim_pad
self.missing_as_zero = missing_as_zero
self.augment = augment
self.to_tensor = to_tensor
self.volume = volume
if trim_pad:
assert duration is not None
self.duration = duration
def __call__(self, sig, sr, duration=None):
if duration is None:
duration = self.duration
num_frames = int(duration*sr)
# Check if audio is missing
if self.missing_as_zero and sig is None:
sig = np.zeros((1, num_frames), dtype=np.float32)
# Downmix to mono
sig = sig.mean(0).astype(np.float32)
# Trim or pad to constant shape
if self.trim_pad:
if sig.shape[0] > num_frames:
sig = sig[:num_frames]
elif sig.shape[0] < num_frames:
n_pad = num_frames - sig.shape[0]
sig = np.pad(sig, (0, n_pad), mode='constant', constant_values=(0., 0.))
# Augment by changing volume +/- 10%
if self.augment:
sig *= random.uniform(1.-self.volume, 1.+self.volume)
sig = sig[np.newaxis]
if self.to_tensor:
sig = torch.from_numpy(sig)
return sig, sr
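# Summary of AudioPrep: optionally replaces missing audio with zeros, downmixes to mono,
# trims or zero-pads to int(duration * sr) samples, applies +/- `volume` random gain when
# augmenting, and returns a (1, num_frames) float32 array (or tensor if to_tensor=True).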
class LogSpectrogram(object):
def __init__(self, fps, n_fft=512, hop_size=0.005, normalize=False):
self.inp_fps = fps
self.n_fft = n_fft
self.hop_size = hop_size
self.rate = 1./hop_size
self.normalize = normalize
if self.normalize:
if n_fft == 512 and fps == 24000:
stats = np.load('datasets/assets/audio-spectDB-24k-513-norm-stats.npz')
elif n_fft == 256 and fps == 24000:
stats = np.load('datasets/assets/audio-spectDB-24k-257-norm-stats.npz')
self.mean, self.std = stats['mean'], stats['std']
def __call__(self, sig, sr, duration=None):
hop_length = int(self.hop_size * sr)
spect = np.abs(librosa.stft(sig[0], n_fft=self.n_fft*2, hop_length=hop_length))**2.
spect = np.concatenate([spect[:1], spect[1:].reshape(self.n_fft//2, 2, -1).mean(1)], 0)
if duration is not None:
num_frames = int(duration * self.rate)
spect = spect[:, :num_frames]
spect = librosa.core.power_to_db(spect, top_db=100)
if self.normalize:
spect = (spect - self.mean[:, np.newaxis]) / (self.std[:, np.newaxis] + 1e-5)
spect_tensor = torch.from_numpy(spect)
spect_tensor = torch.transpose(spect_tensor, 0, 1).unsqueeze(0)
return spect_tensor, self.rate
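# Worked example for LogSpectrogram with fps=24000, n_fft=512, hop_size=0.005: at
# sr=24000 the hop is int(0.005 * 24000) = 120 samples, so self.rate = 200 frames/s;
# the 1024-point STFT gives 513 frequency bins, which the concatenate/mean step pools
# down to 1 + 256 = 257 bins before power_to_db.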
| AVID-CMA-main | datasets/preprocessing.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import random
import torch
import numpy as np
import torch.utils.data as data
from utils.ioutils import av_wrappers
from collections import defaultdict
def chararray(fn_list):
charr = np.chararray(len(fn_list), itemsize=max([len(fn) for fn in fn_list]))
for i in range(len(fn_list)):
charr[i] = fn_list[i]
return charr
class VideoDataset(data.Dataset):
def __init__(self,
return_video=True,
video_root=None,
video_fns=None,
video_clip_duration=1.,
video_fps=25,
video_transform=None,
return_audio=True,
audio_root=None,
audio_fns=None,
audio_clip_duration=1.,
audio_fps=None,
audio_fps_out=None,
audio_transform=None,
return_labels=False,
labels=None,
return_index=False,
mode='clip',
clips_per_video=1,
max_offsync_augm=0,
):
super(VideoDataset, self).__init__()
self.num_samples = 0
self.return_video = return_video
self.video_root = video_root
if return_video:
self.video_fns = chararray(video_fns)
self.num_samples = self.video_fns.shape[0]
self.video_fps = video_fps
if video_transform is not None:
if not isinstance(video_transform, list):
video_transform = [video_transform]
self.video_transform = video_transform
self.return_audio = return_audio
self.audio_root = audio_root
if return_audio:
self.audio_fns = chararray(audio_fns)
self.num_samples = self.audio_fns.shape[0]
self.audio_fps = audio_fps
self.audio_fps_out = audio_fps_out
self.audio_transform = audio_transform
self.return_labels = return_labels
if return_labels:
self.labels = np.array(labels)
self.return_index = return_index
self.video_clip_duration = video_clip_duration
self.audio_clip_duration = audio_clip_duration
self.max_offsync_augm = max_offsync_augm
self.clips_per_video = clips_per_video
self.mode = mode
def _load_sample(self, sample_idx):
video_ctr = None
if self.return_video:
video_fn = '{}/{}'.format(self.video_root, self.video_fns[sample_idx].decode())
video_ctr = av_wrappers.av_open(video_fn)
audio_ctr = None
if self.return_audio:
audio_fn = '{}/{}'.format(self.audio_root, self.audio_fns[sample_idx].decode())
if self.return_video and audio_fn == video_fn:
audio_ctr = video_ctr
else:
audio_ctr = av_wrappers.av_open(audio_fn)
return video_ctr, audio_ctr
def __getitem__(self, index):
if self.mode == 'clip':
try:
sample_idx = index % self.num_samples
video_ctr, audio_ctr = self._load_sample(sample_idx)
v_ss, v_dur, a_ss, a_dur = self._sample_snippet(video_ctr, audio_ctr)
sample = self._get_clip(sample_idx, video_ctr, audio_ctr, v_ss, a_ss, video_clip_duration=v_dur, audio_clip_duration=a_dur)
if sample is None:
return self[(index+1) % len(self)]
return sample
except Exception:
return self[(index+1) % len(self)]
else:
video_ctr, audio_ctr = self._load_sample(index)
# Load entire video
vs, vf, ss, sf = self._get_time_lims(video_ctr, audio_ctr)
start_time = vs
final_time = vf
if self.return_audio:
start_time = max(vs, ss) if ss < 0 else vs
final_time = min(vf, sf) if ss < 0 else vf
if final_time <= start_time:
final_time = start_time + max(self.video_clip_duration, self.audio_clip_duration)
video_dur = final_time - start_time
sample = self._get_clip(index, video_ctr, audio_ctr, start_time, start_time, video_clip_duration=video_dur, audio_clip_duration=video_dur)
# Split video into overlapping chunks
chunks = defaultdict(list)
if self.return_video:
nf = sample['frames'].shape[1]
chunk_size = int(self.video_clip_duration * self.video_fps)
if chunk_size >= nf:
chunks['frames'] = torch.stack([sample['frames'] for _ in range(self.clips_per_video)])
else:
timestamps = np.linspace(0, max(nf - chunk_size, 1), self.clips_per_video).astype(int)
chunks['frames'] = torch.stack([sample['frames'][:, ss:ss+chunk_size] for ss in timestamps])
if self.return_audio:
nf = sample['audio'].shape[1]
chunk_size = int(self.audio_clip_duration * self.audio_fps_out)
if chunk_size >= nf:
chunks['audio'] = torch.stack([sample['audio'] for _ in range(self.clips_per_video)])
else:
timestamps = np.linspace(0, max(nf - chunk_size, 1), self.clips_per_video).astype(int)
chunks['audio'] = torch.stack([sample['audio'][:, ss:ss+chunk_size] for ss in timestamps])
if self.return_labels:
chunks['label'] = sample['label']
if self.return_index:
ts = torch.from_numpy(np.linspace(start_time, final_time-self.video_clip_duration, self.clips_per_video))
chunks['index'] = torch.stack([sample['index'][:1].repeat(self.clips_per_video), ts.float()], dim=1)
return chunks
def __len__(self):
if self.mode == 'clip':
return self.num_samples * self.clips_per_video
else:
return self.num_samples
def __repr__(self):
desc = "{}\n - Root: {}\n - Subset: {}\n - Num videos: {}\n - Num samples: {}\n".format(
self.name, self.root, self.subset, self.num_videos, self.num_videos * self.clips_per_video)
if self.return_video:
desc += " - Example video: {}/{}\n".format(self.video_root, self.video_fns[0].decode())
if self.return_audio:
desc += " - Example audio: {}/{}\n".format(self.audio_root, self.audio_fns[0].decode())
return desc
def _get_time_lims(self, video_ctr, audio_ctr):
video_st, video_ft, audio_st, audio_ft = None, None, None, None
if video_ctr is not None:
video_stream = video_ctr.streams.video[0]
tbase = video_stream.time_base
video_st = video_stream.start_time * tbase
video_dur = video_stream.duration * tbase
video_ft = video_st + video_dur
if audio_ctr is not None:
audio_stream = audio_ctr.streams.audio[0]
tbase = audio_stream.time_base
audio_st = audio_stream.start_time * tbase
audio_dur = audio_stream.duration * tbase
audio_ft = audio_st + audio_dur
return video_st, video_ft, audio_st, audio_ft
def _sample_snippet(self, video_ctr, audio_ctr):
video_st, video_ft, audio_st, audio_ft = self._get_time_lims(video_ctr, audio_ctr)
if not self.return_audio:
video_duration = video_ft - video_st
if self.video_clip_duration > video_duration:
return 0., video_duration, 0., video_duration
else:
min_d, max_d = self.video_clip_duration, min(self.video_clip_duration, video_duration)
duration = random.uniform(min_d, max_d)
sample_ss_v = random.uniform(video_st, video_ft - duration)
return sample_ss_v, duration, sample_ss_v, duration
else:
min_ss = max(audio_st, video_st)
max_ss = min(audio_ft - self.audio_clip_duration, video_ft - self.video_clip_duration)
assert max_ss > min_ss
if self.audio_clip_duration > self.video_clip_duration:
sample_ss_a = random.uniform(min_ss, max_ss)
sample_tt_a = sample_ss_a + self.audio_clip_duration
win_min = max(sample_ss_a - self.max_offsync_augm, video_st)
win_max = min(sample_tt_a + self.max_offsync_augm - self.video_clip_duration, video_ft)
sample_ss_v = random.uniform(win_min, win_max)
return sample_ss_v, self.video_clip_duration, sample_ss_a, self.audio_clip_duration
else:
sample_ss_v = random.uniform(min_ss, max_ss)
sample_tt_v = sample_ss_v + self.video_clip_duration
win_min = max(sample_ss_v - self.max_offsync_augm, audio_st)
win_max = min(sample_tt_v + self.max_offsync_augm - self.audio_clip_duration, audio_ft)
sample_ss_a = random.uniform(win_min, win_max)
return sample_ss_v, self.video_clip_duration, sample_ss_a, self.audio_clip_duration
def _get_clip(self, clip_idx, video_ctr, audio_ctr, video_start_time, audio_start_time, video_clip_duration=None, audio_clip_duration=None):
if video_clip_duration is None:
video_clip_duration = self.video_clip_duration
if audio_clip_duration is None:
audio_clip_duration = self.audio_clip_duration
sample = {}
if self.return_video:
frames, fps, start_time = av_wrappers.av_load_video(
video_ctr,
video_fps=self.video_fps,
start_time=video_start_time,
duration=video_clip_duration,
)
if self.video_transform is not None:
for t in self.video_transform:
frames = t(frames)
sample['frames'] = frames
audio_start_time = audio_start_time - (video_start_time - start_time)
if self.return_audio:
samples, rate = av_wrappers.av_laod_audio(
audio_ctr,
audio_fps=self.audio_fps,
start_time=audio_start_time,
duration=audio_clip_duration,
)
if self.audio_transform is not None:
if isinstance(self.audio_transform, list):
for t in self.audio_transform:
samples, rate = t(samples, rate, audio_clip_duration)
else:
samples, rate = self.audio_transform(samples, rate)
sample['audio'] = samples
if self.return_labels:
lbl = self.labels[clip_idx]
if isinstance(lbl, np.ndarray):
sample['label'] = torch.from_numpy(lbl)
else:
sample['label'] = lbl
if self.return_index:
sample['index'] = clip_idx
return sample
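# --- Usage sketch (not part of the original AVID-CMA file). The root and the
# filenames below are hypothetical placeholders: constructing the dataset does
# not touch the disk, but __getitem__ needs decodable videos and at least one
# video_transform, so only the length is printed here.
if __name__ == '__main__':
    demo_db = VideoDataset(
        return_video=True,
        video_root='/path/to/videos',      # hypothetical root
        video_fns=['a.mp4', 'b.mp4'],      # hypothetical filenames
        video_clip_duration=1.,
        video_fps=25,
        video_transform=None,
        return_audio=False,
        return_labels=False,
        return_index=True,
        mode='clip',
        clips_per_video=2,
    )
    # In 'clip' mode, len(dataset) == num_samples * clips_per_video.
    print(len(demo_db))                    # 4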
|
AVID-CMA-main
|
datasets/video_db.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
from datasets.video_db import VideoDataset
DATA_PATH = '/data/datasets/hmdb/videos'
ANNO_PATH = '/data/datasets/hmdb/splits/'
class HMDB(VideoDataset):
def __init__(self, subset,
return_video=True,
video_clip_duration=1.,
video_fps=25.,
video_transform=None,
return_audio=False,
return_labels=False,
max_offsync_augm=0,
mode='clip',
clips_per_video=20,
):
assert return_audio is False
        self.name = 'HMDB-51'
self.root = DATA_PATH
self.subset = subset
# Get filenames
classes = sorted(os.listdir(DATA_PATH))
subset, split = subset.split('-')
subset_id = {'train': '1', 'test': '2'}[subset]
filenames, labels = [], []
for cls in classes:
for ln in open(f'{ANNO_PATH}/{cls}_test_{split}.txt'):
fn, ss = ln.strip().split()
if ss == subset_id:
filenames += [f"{cls}/{fn}"]
labels += [classes.index(cls)]
self.classes = classes
self.num_classes = len(self.classes)
self.num_videos = len(filenames)
super(HMDB, self).__init__(
return_video=return_video,
video_clip_duration=video_clip_duration,
video_root=DATA_PATH,
video_fns=filenames,
video_fps=video_fps,
video_transform=video_transform,
return_audio=False,
return_labels=return_labels,
labels=labels,
max_offsync_augm=max_offsync_augm,
mode=mode,
clips_per_video=clips_per_video,
)
|
AVID-CMA-main
|
datasets/hmdb.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from datasets.video_db import VideoDataset
DATA_PATH = '/data/datasets/UCF101/data'
ANNO_PATH = '/data/datasets/UCF101/ucfTrainTestlist/'
class UCF(VideoDataset):
def __init__(self, subset,
video_clip_duration=0.5,
return_video=True,
video_fps=16.,
video_transform=None,
return_audio=False,
return_labels=False,
max_offsync_augm=0,
mode='clip',
clips_per_video=20,
):
assert return_audio is False
self.name = 'UCF-101'
self.root = DATA_PATH
self.subset = subset
classes_fn = f'{ANNO_PATH}/classInd.txt'
self.classes = [l.strip().split()[1] for l in open(classes_fn)]
filenames = [ln.strip().split()[0] for ln in open(f'{ANNO_PATH}/{subset}.txt')]
labels = [fn.split('/')[0] for fn in filenames]
labels = [self.classes.index(cls) for cls in labels]
self.num_classes = len(self.classes)
self.num_videos = len(filenames)
super(UCF, self).__init__(
return_video=return_video,
video_root=DATA_PATH,
video_clip_duration=video_clip_duration,
video_fns=filenames,
video_fps=video_fps,
video_transform=video_transform,
return_audio=False,
return_labels=return_labels,
labels=labels,
max_offsync_augm=max_offsync_augm,
mode=mode,
clips_per_video=clips_per_video,
)
|
AVID-CMA-main
|
datasets/ucf.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import glob
import numpy as np
DATA_PATH = '/data/datasets/kinetics/'
from datasets.video_db import VideoDataset
class Kinetics(VideoDataset):
def __init__(self, subset,
return_video=True,
video_clip_duration=1.,
video_fps=25.,
video_transform=None,
return_audio=False,
audio_clip_duration=1.,
audio_fps=None,
audio_fps_out=64,
audio_transform=None,
return_labels=False,
return_index=False,
max_offsync_augm=0,
mode='clip',
clips_per_video=1,
):
classes = sorted(os.listdir(f"{DATA_PATH}/{subset}"))
filenames = ['/'.join(fn.split('/')[-2:]) for fn in glob.glob(f"{DATA_PATH}/{subset}/*/*.mp4")]
labels = [classes.index(fn.split('/')[-2]) for fn in filenames]
super(Kinetics, self).__init__(
return_video=return_video,
video_root=f"{DATA_PATH}/{subset}",
video_fns=filenames,
video_clip_duration=video_clip_duration,
video_fps=video_fps,
video_transform=video_transform,
return_audio=return_audio,
audio_root=f"{DATA_PATH}/{subset}",
audio_fns=filenames,
audio_clip_duration=audio_clip_duration,
audio_fps=audio_fps,
audio_fps_out=audio_fps_out,
audio_transform=audio_transform,
return_labels=return_labels,
labels=labels,
return_index=return_index,
mode=mode,
clips_per_video=clips_per_video,
max_offsync_augm=max_offsync_augm,
)
self.name = 'Kinetics dataset'
self.root = f"{DATA_PATH}/{subset}"
self.subset = subset
self.classes = classes
self.num_videos = len(filenames)
self.num_classes = len(classes)
self.sample_id = np.array([fn.split('/')[-1].split('.')[0].encode('utf-8') for fn in filenames])
|
AVID-CMA-main
|
datasets/kinetics.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
|
AVID-CMA-main
|
utils/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import datetime
import sys
import torch
from torch import distributed as dist
class Logger(object):
def __init__(self, quiet=False, log_fn=None, rank=0, prefix=""):
self.rank = rank if rank is not None else 0
self.quiet = quiet
self.log_fn = log_fn
self.prefix = ""
if prefix:
self.prefix = prefix + ' | '
self.file_pointers = []
if self.rank == 0:
if self.quiet:
open(log_fn, 'w').close()
def add_line(self, content):
if self.rank == 0:
msg = self.prefix+content
if self.quiet:
fp = open(self.log_fn, 'a')
fp.write(msg+'\n')
fp.flush()
fp.close()
else:
print(msg)
sys.stdout.flush()
class ProgressMeter(object):
def __init__(self, num_batches, meters, phase, epoch=None, logger=None, tb_writter=None):
self.batches_per_epoch = num_batches
self.batch_fmtstr = self._get_batch_fmtstr(epoch, num_batches)
self.meters = meters
self.phase = phase
self.epoch = epoch
self.logger = logger
self.tb_writter = tb_writter
def display(self, batch):
step = self.epoch * self.batches_per_epoch + batch
date = str(datetime.datetime.now())
entries = ['{} | {} {}'.format(date, self.phase, self.batch_fmtstr.format(batch))]
entries += [str(meter) for meter in self.meters]
if self.logger is None:
print('\t'.join(entries))
else:
self.logger.add_line('\t'.join(entries))
if self.tb_writter is not None:
for meter in self.meters:
self.tb_writter.add_scalar('{}-batch/{}'.format(self.phase, meter.name), meter.val, step)
def _get_batch_fmtstr(self, epoch, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
epoch_str = '[{}]'.format(epoch) if epoch is not None else ''
return epoch_str+'[' + fmt + '/' + fmt.format(num_batches) + ']'
def synchronize_meters(self, cur_gpu):
        metrics = torch.tensor([m.avg for m in self.meters]).cuda(cur_gpu)
metrics_gather = [torch.ones_like(metrics) for _ in range(dist.get_world_size())]
dist.all_gather(metrics_gather, metrics)
metrics = torch.stack(metrics_gather).mean(0).cpu().numpy()
        for meter, m in zip(self.meters, metrics):
meter.avg = m
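# --- Usage sketch (not part of the original file): a rank-0 Logger that prints
# to stdout; pass quiet=True together with a log_fn to write to a file instead.
if __name__ == '__main__':
    log = Logger(quiet=False, log_fn=None, rank=0, prefix="demo")
    log.add_line("hello from rank 0")      # prints "demo | hello from rank 0"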
|
AVID-CMA-main
|
utils/logger.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
from torch import distributed as dist
def _gather_from_all(tensor):
"""
Gather tensors from all gpus
"""
gathered_tensor = [torch.zeros_like(tensor) for _ in range(dist.get_world_size())]
dist.all_gather(gathered_tensor, tensor)
gathered_tensor = torch.cat(gathered_tensor, 0)
return gathered_tensor
|
AVID-CMA-main
|
utils/distributed_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
from collections import deque
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f', window_size=0):
self.name = name
self.fmt = fmt
self.window_size = window_size
self.reset()
def reset(self):
if self.window_size > 0:
self.q = deque(maxlen=self.window_size)
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
if self.window_size > 0:
self.q.append((val, n))
self.count = sum([n for v, n in self.q])
self.sum = sum([v * n for v, n in self.q])
else:
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
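# --- Usage sketch (not part of the original file): top-1/top-5 accuracy on
# random logits, accumulated with an AverageMeter. Shapes are illustrative.
if __name__ == '__main__':
    logits = torch.randn(8, 10)                    # batch of 8, 10 classes
    target = torch.randint(0, 10, (8,))
    top1, top5 = accuracy(logits, target, topk=(1, 5))
    meter = AverageMeter('Acc@1', fmt=':6.2f')
    meter.update(top1.item(), n=logits.size(0))
    print(meter)                                   # e.g. "Acc@1  12.50 ( 12.50)"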
|
AVID-CMA-main
|
utils/metrics_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
class AliasMethod(object):
"""
From: https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
"""
def __init__(self, probs):
if probs.sum() > 1:
probs.div_(probs.sum())
K = len(probs)
self.prob = torch.zeros(K)
self.alias = torch.LongTensor([0]*K)
# Sort the data into the outcomes with probabilities
# that are larger and smaller than 1/K.
smaller = []
larger = []
for kk, prob in enumerate(probs):
self.prob[kk] = K*prob
if self.prob[kk] < 1.0:
smaller.append(kk)
else:
larger.append(kk)
# Loop though and create little binary mixtures that
# appropriately allocate the larger outcomes over the
# overall uniform mixture.
while len(smaller) > 0 and len(larger) > 0:
small = smaller.pop()
large = larger.pop()
self.alias[small] = large
self.prob[large] = (self.prob[large] - 1.0) + self.prob[small]
if self.prob[large] < 1.0:
smaller.append(large)
else:
larger.append(large)
for last_one in smaller+larger:
self.prob[last_one] = 1
def to(self, device):
self.prob = self.prob.to(device)
self.alias = self.alias.to(device)
def draw(self, N):
"""
Draw N samples from multinomial
:param N: number of samples
:return: samples
"""
K = self.alias.size(0)
kk = torch.zeros(N, dtype=torch.long, device=self.prob.device).random_(0, K)
prob = self.prob.index_select(0, kk)
alias = self.alias.index_select(0, kk)
# b is whether a random number is greater than q
b = torch.bernoulli(prob)
oq = kk.mul(b.long())
oj = alias.mul((1-b).long())
return oq + oj
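# --- Usage sketch (not part of the original file): the alias table is built
# once in O(K); after that every draw() costs O(1) per sample, which is what
# makes sampling from a large categorical distribution cheap.
if __name__ == '__main__':
    probs = torch.tensor([0.5, 0.3, 0.2])
    sampler = AliasMethod(probs)
    samples = sampler.draw(10000)                  # LongTensor of indices in [0, 3)
    print(torch.bincount(samples, minlength=3).float() / samples.numel())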
|
AVID-CMA-main
|
utils/alias_method.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import shutil
import torch
import numpy as np
import torch.distributed as dist
import datetime
from utils.logger import Logger
def initialize_distributed_backend(args, ngpus_per_node):
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + args.gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
if args.rank == -1:
args.rank = 0
return args
def prep_environment(args, cfg):
from torch.utils.tensorboard import SummaryWriter
# Prepare loggers (must be configured after initialize_distributed_backend())
model_dir = '{}/{}'.format(cfg['model']['model_dir'], cfg['model']['name'])
if args.rank == 0:
prep_output_folder(model_dir, False)
log_fn = '{}/train.log'.format(model_dir)
logger = Logger(quiet=args.quiet, log_fn=log_fn, rank=args.rank)
logger.add_line(str(datetime.datetime.now()))
if any(['SLURM' in env for env in list(os.environ.keys())]):
logger.add_line("=" * 30 + " SLURM " + "=" * 30)
for env in os.environ.keys():
if 'SLURM' in env:
logger.add_line('{:30}: {}'.format(env, os.environ[env]))
logger.add_line("=" * 30 + " Config " + "=" * 30)
def print_dict(d, ident=''):
for k in d:
if isinstance(d[k], dict):
logger.add_line("{}{}".format(ident, k))
print_dict(d[k], ident=' '+ident)
else:
logger.add_line("{}{}: {}".format(ident, k, str(d[k])))
print_dict(cfg)
logger.add_line("=" * 30 + " Args " + "=" * 30)
for k in args.__dict__:
logger.add_line('{:30} {}'.format(k, args.__dict__[k]))
tb_writter = None
if cfg['log2tb'] and args.rank == 0:
tb_dir = '{}/tensorboard'.format(model_dir)
os.system('mkdir -p {}'.format(tb_dir))
tb_writter = SummaryWriter(tb_dir)
return logger, tb_writter, model_dir
def build_model(cfg, logger=None):
import models
assert cfg['arch'] in models.__dict__, 'Unknown model architecture'
model = models.__dict__[cfg['arch']](**cfg['args'])
if logger is not None:
if isinstance(model, (list, tuple)):
logger.add_line("=" * 30 + " Model " + "=" * 30)
for m in model:
logger.add_line(str(m))
logger.add_line("=" * 30 + " Parameters " + "=" * 30)
for m in model:
logger.add_line(parameter_description(m))
else:
logger.add_line("=" * 30 + " Model " + "=" * 30)
logger.add_line(str(model))
logger.add_line("=" * 30 + " Parameters " + "=" * 30)
logger.add_line(parameter_description(model))
return model
def distribute_model_to_cuda(models, args, batch_size, num_workers, ngpus_per_node):
if ngpus_per_node == 0:
return models, args, batch_size, num_workers
squeeze = False
if not isinstance(models, list):
models = [models]
squeeze = True
for i in range(len(models)):
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
models[i].cuda(args.gpu)
models[i] = torch.nn.parallel.DistributedDataParallel(models[i], device_ids=[args.gpu])
else:
models[i].cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
models[i] = torch.nn.parallel.DistributedDataParallel(models[i])
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
models[i] = models[i].cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
models[i] = torch.nn.DataParallel(models[i]).cuda()
if squeeze:
models = models[0]
if args.distributed and args.gpu is not None:
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
batch_size = int(batch_size / ngpus_per_node)
num_workers = int((num_workers + ngpus_per_node - 1) / ngpus_per_node)
return models, args, batch_size, num_workers
def build_dataloaders(cfg, num_workers, distributed, logger):
train_loader = build_dataloader(cfg, cfg['train'], num_workers, distributed)
logger.add_line("\n"+"="*30+" Train data "+"="*30)
logger.add_line(str(train_loader.dataset))
return train_loader
def build_dataloader(db_cfg, split_cfg, num_workers, distributed):
import torch.utils.data as data
import torch.utils.data.distributed
from datasets import preprocessing
import datasets
# Video transforms
num_frames = int(db_cfg['video_clip_duration'] * db_cfg['video_fps'])
if db_cfg['transforms'] == 'crop+color':
video_transform = preprocessing.VideoPrep_Crop_CJ(
resize=db_cfg['frame_size'],
crop=(db_cfg['crop_size'], db_cfg['crop_size']),
augment=split_cfg['use_augmentation'],
num_frames=num_frames,
pad_missing=True,
)
elif db_cfg['transforms'] == 'msc+color':
video_transform = preprocessing.VideoPrep_MSC_CJ(
crop=(db_cfg['crop_size'], db_cfg['crop_size']),
augment=split_cfg['use_augmentation'],
num_frames=num_frames,
pad_missing=True,
)
else:
raise ValueError('Unknown transform')
# Audio transforms
audio_transforms = [
preprocessing.AudioPrep(
trim_pad=True,
duration=db_cfg['audio_clip_duration'],
augment=split_cfg['use_augmentation'],
missing_as_zero=True),
preprocessing.LogSpectrogram(
db_cfg['audio_fps'],
n_fft=db_cfg['n_fft'],
hop_size=1. / db_cfg['spectrogram_fps'],
normalize=True)
]
audio_fps_out = db_cfg['spectrogram_fps']
if db_cfg['name'] == 'audioset':
dataset = datasets.AudioSet
elif db_cfg['name'] == 'kinetics':
dataset = datasets.Kinetics
else:
raise ValueError('Unknown dataset')
clips_per_video = split_cfg['clips_per_video'] if 'clips_per_video' in split_cfg else 1
db = dataset(
subset=split_cfg['split'],
return_video=True,
video_clip_duration=db_cfg['video_clip_duration'],
video_fps=db_cfg['video_fps'],
video_transform=video_transform,
return_audio=True,
audio_clip_duration=db_cfg['audio_clip_duration'],
audio_fps=db_cfg['audio_fps'],
audio_fps_out=audio_fps_out,
audio_transform=audio_transforms,
max_offsync_augm=0.5 if split_cfg['use_augmentation'] else 0,
return_labels=False,
return_index=True,
mode='clip',
clips_per_video=clips_per_video,
)
if distributed:
sampler = torch.utils.data.distributed.DistributedSampler(db)
else:
sampler = None
loader = torch.utils.data.DataLoader(
db,
batch_size=db_cfg['batch_size'],
shuffle=(sampler is None),
drop_last=split_cfg['drop_last'],
num_workers=num_workers,
pin_memory=True,
sampler=sampler)
return loader
def build_criterion(cfg, logger=None):
import criterions
criterion = criterions.__dict__[cfg['name']](**cfg['args'])
if logger is not None:
logger.add_line(str(criterion))
return criterion
def build_optimizer(params, cfg, logger=None):
if cfg['name'] == 'sgd':
optimizer = torch.optim.SGD(
params=params,
lr=cfg['lr']['base_lr'],
momentum=cfg['momentum'],
weight_decay=cfg['weight_decay'],
nesterov=cfg['nesterov']
)
elif cfg['name'] == 'adam':
optimizer = torch.optim.Adam(
params=params,
lr=cfg['lr']['base_lr'],
weight_decay=cfg['weight_decay'],
betas=cfg['betas'] if 'betas' in cfg else [0.9, 0.999]
)
else:
raise ValueError('Unknown optimizer.')
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=cfg['lr']['milestones'], gamma=cfg['lr']['gamma'])
return optimizer, scheduler
class CheckpointManager(object):
def __init__(self, checkpoint_dir, rank=0):
self.checkpoint_dir = checkpoint_dir
self.rank = rank
self.best_metric = 0.
def save(self, epoch, filename=None, eval_metric=0., **kwargs):
if self.rank != 0:
return
is_best = False
if eval_metric > self.best_metric:
self.best_metric = eval_metric
is_best = True
state = {'epoch': epoch}
for k in kwargs:
state[k] = kwargs[k].state_dict()
if filename is None:
save_checkpoint(state=state, is_best=is_best, model_dir=self.checkpoint_dir)
else:
save_checkpoint(state=state, is_best=False, filename='{}/{}'.format(self.checkpoint_dir, filename))
def last_checkpoint_fn(self):
return '{}/checkpoint.pth.tar'.format(self.checkpoint_dir)
def best_checkpoint_fn(self):
return '{}/model_best.pth.tar'.format(self.checkpoint_dir)
def checkpoint_fn(self, last=False, best=False):
assert best or last
assert not (last and best)
if last:
return self.last_checkpoint_fn()
if best:
return self.best_checkpoint_fn()
def checkpoint_exists(self, last=False, best=False):
return os.path.isfile(self.checkpoint_fn(last, best))
def restore(self, fn=None, restore_last=False, restore_best=False, **kwargs):
checkpoint_fn = fn if fn is not None else self.checkpoint_fn(restore_last, restore_best)
ckp = torch.load(checkpoint_fn, map_location={'cuda:0': 'cpu'})
start_epoch = ckp['epoch']
for k in kwargs:
if k == 'train_criterion':
kwargs[k].load_state_dict(ckp[k], strict=False)
else:
kwargs[k].load_state_dict(ckp[k])
return start_epoch
def save_checkpoint(state, is_best, model_dir='.', filename=None):
if filename is None:
filename = '{}/checkpoint.pth.tar'.format(model_dir)
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, '{}/model_best.pth.tar'.format(model_dir))
def prep_output_folder(model_dir, evaluate):
if evaluate:
assert os.path.isdir(model_dir)
else:
if not os.path.isdir(model_dir):
os.makedirs(model_dir)
def parameter_description(model):
desc = ''
for n, p in model.named_parameters():
desc += "{:70} | {:10} | {:30} | {}\n".format(
n, 'Trainable' if p.requires_grad else 'Frozen',
' x '.join([str(s) for s in p.size()]), str(np.prod(p.size())))
return desc
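# --- Usage sketch (not part of the original file): the config layout expected
# by build_optimizer(); the values below are illustrative, not taken from any
# AVID config file.
if __name__ == '__main__':
    dummy = torch.nn.Linear(4, 2)
    opt_cfg = {
        'name': 'sgd',
        'momentum': 0.9,
        'weight_decay': 1e-5,
        'nesterov': False,
        'lr': {'base_lr': 0.1, 'milestones': [10, 20], 'gamma': 0.1},
    }
    optimizer, scheduler = build_optimizer(dummy.parameters(), opt_cfg)
    print(type(optimizer).__name__, type(scheduler).__name__)   # SGD MultiStepLR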
|
AVID-CMA-main
|
utils/main_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
from torch import nn
import torch.distributed as dist
import utils.logger
from utils import main_utils
import yaml
import os
def prepare_environment(args, cfg, fold):
if args.distributed:
while True:
try:
dist.init_process_group(backend='nccl', init_method='tcp://localhost:{}'.format(args.port), world_size=args.world_size, rank=args.gpu)
break
except RuntimeError:
args.port = str(int(args.port) + 1)
model_cfg = yaml.safe_load(open(args.model_cfg))['model']
eval_dir = '{}/{}/eval-{}/fold-{:02d}'.format(model_cfg['model_dir'], model_cfg['name'], cfg['benchmark']['name'], fold)
os.makedirs(eval_dir, exist_ok=True)
yaml.safe_dump(cfg, open('{}/config.yaml'.format(eval_dir), 'w'))
logger = utils.logger.Logger(quiet=args.quiet, log_fn='{}/eval.log'.format(eval_dir), rank=args.gpu)
if any(['SLURM' in env for env in list(os.environ.keys())]):
logger.add_line("=" * 30 + " SLURM " + "=" * 30)
for env in os.environ.keys():
if 'SLURM' in env:
logger.add_line('{:30}: {}'.format(env, os.environ[env]))
logger.add_line("=" * 30 + " Config " + "=" * 30)
def print_dict(d, ident=''):
for k in d:
if isinstance(d[k], dict):
logger.add_line("{}{}".format(ident, k))
print_dict(d[k], ident=' '+ident)
else:
logger.add_line("{}{}: {}".format(ident, k, str(d[k])))
print_dict(cfg)
logger.add_line("=" * 30 + " Model Config " + "=" * 30)
print_dict(model_cfg)
return eval_dir, model_cfg, logger
def distribute_model_to_cuda(model, args, cfg):
if torch.cuda.device_count() == 1:
model = model.cuda()
elif args.distributed:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
cfg['dataset']['batch_size'] = max(cfg['dataset']['batch_size'] // args.world_size, 1)
cfg['num_workers'] = max(cfg['num_workers'] // args.world_size, 1)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model = torch.nn.DataParallel(model).cuda()
return model
def build_dataloader(db_cfg, split_cfg, fold, num_workers, distributed):
import torch.utils.data as data
from datasets import preprocessing
if db_cfg['transform'] == 'msc+color':
video_transform = preprocessing.VideoPrep_MSC_CJ(
crop=(db_cfg['crop_size'], db_cfg['crop_size']),
num_frames=int(db_cfg['video_fps'] * db_cfg['clip_duration']),
pad_missing=True,
augment=split_cfg['use_augmentation'],
min_area=db_cfg['min_area'],
color=db_cfg['color'],
)
elif db_cfg['transform'] == 'crop+color':
video_transform = preprocessing.VideoPrep_Crop_CJ(
crop=(db_cfg['crop_size'], db_cfg['crop_size']),
num_frames=int(db_cfg['video_fps'] * db_cfg['clip_duration']),
pad_missing=True,
augment=split_cfg['use_augmentation'],
)
else:
raise ValueError
import datasets
if db_cfg['name'] == 'ucf101':
dataset = datasets.UCF
elif db_cfg['name'] == 'hmdb51':
dataset = datasets.HMDB
elif db_cfg['name'] == 'kinetics':
dataset = datasets.Kinetics
else:
raise ValueError('Unknown dataset')
db = dataset(
subset=split_cfg['split'].format(fold=fold),
return_video=True,
video_clip_duration=db_cfg['clip_duration'],
video_fps=db_cfg['video_fps'],
video_transform=video_transform,
return_audio=False,
return_labels=True,
mode=split_cfg['mode'],
clips_per_video=split_cfg['clips_per_video'],
)
if distributed:
sampler = torch.utils.data.distributed.DistributedSampler(db)
else:
sampler = None
drop_last = split_cfg['drop_last'] if 'drop_last' in split_cfg else True
loader = data.DataLoader(
db,
batch_size=db_cfg['batch_size'] if split_cfg['mode'] == 'clip' else max(1, db_cfg['batch_size']//split_cfg['clips_per_video']),
num_workers=num_workers,
pin_memory=True,
shuffle=(sampler is None) and split_cfg['use_shuffle'],
sampler=sampler,
drop_last=drop_last
)
return loader
def build_dataloaders(cfg, fold, num_workers, distributed, logger):
logger.add_line("=" * 30 + " Train DB " + "=" * 30)
train_loader = build_dataloader(cfg, cfg['train'], fold, num_workers, distributed)
logger.add_line(str(train_loader.dataset))
logger.add_line("=" * 30 + " Test DB " + "=" * 30)
test_loader = build_dataloader(cfg, cfg['test'], fold, num_workers, distributed)
logger.add_line(str(test_loader.dataset))
logger.add_line("=" * 30 + " Dense DB " + "=" * 30)
dense_loader = build_dataloader(cfg, cfg['test_dense'], fold, num_workers, distributed)
logger.add_line(str(dense_loader.dataset))
return train_loader, test_loader, dense_loader
class CheckpointManager(object):
def __init__(self, checkpoint_dir, rank=0):
self.checkpoint_dir = checkpoint_dir
self.best_metric = 0.
self.rank = rank
def save(self, model, optimizer, scheduler, epoch, eval_metric=0.):
if self.rank is not None and self.rank != 0:
return
is_best = False
if eval_metric > self.best_metric:
self.best_metric = eval_metric
is_best = True
main_utils.save_checkpoint(state={
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'scheduler': scheduler.state_dict(),
}, is_best=is_best, model_dir=self.checkpoint_dir)
def last_checkpoint_fn(self):
return '{}/checkpoint.pth.tar'.format(self.checkpoint_dir)
def best_checkpoint_fn(self):
return '{}/model_best.pth.tar'.format(self.checkpoint_dir)
def checkpoint_fn(self, last=False, best=False):
assert best or last
assert not (last and best)
if last:
return self.last_checkpoint_fn()
if best:
return self.best_checkpoint_fn()
def checkpoint_exists(self, last=False, best=False):
return os.path.isfile(self.checkpoint_fn(last, best))
def restore(self, model, optimizer, scheduler, restore_last=False, restore_best=False):
checkpoint_fn = self.checkpoint_fn(restore_last, restore_best)
ckp = torch.load(checkpoint_fn, map_location={'cuda:0': 'cpu'})
start_epoch = ckp['epoch']
model.load_state_dict(ckp['state_dict'])
optimizer.load_state_dict(ckp['optimizer'])
scheduler.load_state_dict(ckp['scheduler'])
return start_epoch
class ClassificationWrapper(torch.nn.Module):
def __init__(self, feature_extractor, n_classes, feat_name, feat_dim, pooling_op=None, use_dropout=False, dropout=0.5):
super(ClassificationWrapper, self).__init__()
self.feature_extractor = feature_extractor
self.feat_name = feat_name
self.use_dropout = use_dropout
if pooling_op is not None:
self.pooling = eval('torch.nn.'+pooling_op)
else:
self.pooling = None
if use_dropout:
self.dropout = torch.nn.Dropout(dropout)
self.classifier = torch.nn.Linear(feat_dim, n_classes)
def forward(self, *inputs):
emb = self.feature_extractor(*inputs, return_embs=True)[self.feat_name]
emb_pool = self.pooling(emb) if self.pooling is not None else emb
emb_pool = emb_pool.view(inputs[0].shape[0], -1)
if self.use_dropout:
emb_pool = self.dropout(emb_pool)
logit = self.classifier(emb_pool)
return logit
class Classifier(nn.Module):
def __init__(self, n_classes, feat_name, feat_dim, pooling, l2_norm=False, use_bn=True, use_dropout=False):
super(Classifier, self).__init__()
self.use_bn = use_bn
self.feat_name = feat_name
self.pooling = eval('nn.'+pooling) if pooling is not None else None
self.l2_norm = l2_norm
if use_bn:
self.bn = nn.BatchNorm1d(feat_dim)
self.use_dropout = use_dropout
if use_dropout:
self.dropout = nn.Dropout()
self.classifier = nn.Linear(feat_dim, n_classes)
def forward(self, x):
with torch.no_grad():
if self.use_dropout:
x = self.dropout(x)
if self.l2_norm:
x = nn.functional.normalize(x, p=2, dim=-1)
if self.pooling is not None and len(x.shape) > 2:
x = self.pooling(x)
x = x.view(x.shape[0], -1).contiguous().detach()
if self.use_bn:
x = self.bn(x)
return self.classifier(x)
class MOSTCheckpointManager(object):
def __init__(self, checkpoint_dir, rank=0):
self.rank = rank
self.checkpoint_dir = checkpoint_dir
self.best_metric = 0.
def save(self, model, optimizer, epoch, eval_metric=0.):
if self.rank != 0:
return
is_best = False
if eval_metric > self.best_metric:
self.best_metric = eval_metric
is_best = True
try:
state_dict = model.classifiers.state_dict()
except AttributeError:
state_dict = model.module.classifiers.state_dict()
main_utils.save_checkpoint(state={
'epoch': epoch,
'state_dict': state_dict,
'optimizer': optimizer.state_dict(),
}, is_best=is_best, model_dir=self.checkpoint_dir)
def last_checkpoint_fn(self):
return '{}/checkpoint.pth.tar'.format(self.checkpoint_dir)
def best_checkpoint_fn(self):
return '{}/model_best.pth.tar'.format(self.checkpoint_dir)
def checkpoint_fn(self, last=False, best=False):
assert best or last
# assert not (last and best)
if last:
return self.last_checkpoint_fn()
elif best:
return self.best_checkpoint_fn()
def checkpoint_exists(self, last=False, best=False):
return os.path.isfile(self.checkpoint_fn(last, best))
def restore(self, model, optimizer, restore_last=False, restore_best=False):
checkpoint_fn = self.checkpoint_fn(restore_last, restore_best)
ckp = torch.load(checkpoint_fn, map_location={'cuda:0': 'cpu'})
start_epoch = ckp['epoch']
try:
model.classifiers.load_state_dict(ckp['state_dict'])
except AttributeError:
model.module.classifiers.load_state_dict(ckp['state_dict'])
optimizer.load_state_dict(ckp['optimizer'])
return start_epoch
class MOSTModel(nn.Module):
def __init__(self, feature_extractor, n_classes, feat_names, feat_dims, pooling_ops, l2_norm=None, use_bn=False, use_dropout=False):
super(MOSTModel, self).__init__()
assert len(feat_dims) == len(pooling_ops) == len(feat_names)
n_outputs = len(feat_dims)
self.feat_names = feat_names
self.feat_dims = feat_dims
self.pooling_ops = pooling_ops
if l2_norm is None:
l2_norm = [False] * len(feat_names)
if not isinstance(l2_norm, list):
l2_norm = [l2_norm] * len(feat_names)
self.l2_norm = l2_norm
feature_extractor.train(False)
self.feature_extractor = feature_extractor
self.classifiers = nn.ModuleList([
Classifier(n_classes, feat_name=feat_names[i], feat_dim=feat_dims[i], pooling=pooling_ops[i], l2_norm=l2_norm[i], use_bn=use_bn, use_dropout=use_dropout) for i in range(n_outputs)
])
for p in self.feature_extractor.parameters():
p.requires_grad = False
def forward(self, *x):
with torch.no_grad():
embs = self.feature_extractor(*x, return_embs=self.feat_names)
embs = {ft: embs[ft] for ft in self.feat_names}
for classifier, ft in zip(self.classifiers, self.feat_names):
embs[ft] = classifier(embs[ft])
return embs
def build_model(feat_cfg, eval_cfg, eval_dir, args, logger):
import models
pretrained_net = models.__dict__[feat_cfg['arch']](**feat_cfg['args'])
# Load from checkpoint
checkpoint_fn = '{}/{}/checkpoint.pth.tar'.format(feat_cfg['model_dir'], feat_cfg['name'])
ckp = torch.load(checkpoint_fn, map_location='cpu')
pretrained_net.load_state_dict({k.replace('module.', ''): ckp['model'][k] for k in ckp['model']})
# Wrap with linear-head classifiers
if eval_cfg['model']['name'] == 'ClassificationWrapper':
model = ClassificationWrapper(feature_extractor=pretrained_net.video_model, **eval_cfg['model']['args'])
ckp_manager = CheckpointManager(eval_dir, rank=args.gpu)
elif eval_cfg['model']['name'] == 'MOSTWrapper':
model = MOSTModel(feature_extractor=pretrained_net.video_model, **eval_cfg['model']['args'])
ckp_manager = MOSTCheckpointManager(eval_dir, rank=args.gpu)
else:
raise ValueError
# Log model description
logger.add_line("=" * 30 + " Model " + "=" * 30)
logger.add_line(str(model))
logger.add_line("=" * 30 + " Parameters " + "=" * 30)
logger.add_line(main_utils.parameter_description(model))
logger.add_line("=" * 30 + " Pretrained model " + "=" * 30)
logger.add_line("File: {}\nEpoch: {}".format(checkpoint_fn, ckp['epoch']))
# Distribute
model = distribute_model_to_cuda(model, args, eval_cfg)
return model, ckp_manager
class BatchWrapper:
def __init__(self, model, batch_size):
self.model = model
self.batch_size = batch_size
def __call__(self, x):
outs = []
for i in range(0, x.shape[0], self.batch_size):
outs += [self.model(x[i:i + self.batch_size])]
return torch.cat(outs, 0)
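# --- Usage sketch (not part of the original file): BatchWrapper splits a large
# dense-evaluation batch into fixed-size sub-batches to bound memory use.
if __name__ == '__main__':
    mdl = nn.Linear(16, 4)
    wrapped = BatchWrapper(mdl, batch_size=8)
    out = wrapped(torch.randn(30, 16))             # processed as chunks of 8, 8, 8, 6
    print(out.shape)                               # torch.Size([30, 4])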
|
AVID-CMA-main
|
utils/eval_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
|
AVID-CMA-main
|
utils/ioutils/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import av
import numpy as np
from fractions import Fraction
av.logging.set_level(0)
def av_open(inpt):
return av.open(inpt)
def av_load_video(container, video_fps=None, start_time=0, duration=None):
video_stream = container.streams.video[0]
_ss = video_stream.start_time * video_stream.time_base
_dur = video_stream.duration * video_stream.time_base
_ff = _ss + _dur
_fps = video_stream.average_rate
if video_fps is None:
video_fps = _fps
if duration is None:
duration = _ff - start_time
# Figure out which frames to decode
outp_times = [t for t in np.arange(start_time, min(start_time + duration - 0.5/_fps, _ff), 1./video_fps)][:int(duration*video_fps)]
outp_vframes = [int((t - _ss) * _fps) for t in outp_times]
start_time = outp_vframes[0] / float(_fps)
# Fast forward
container.seek(int(start_time * av.time_base))
# Decode snippet
frames = []
for frame in container.decode(video=0):
if len(frames) == len(outp_vframes):
break # All frames have been decoded
frame_no = frame.pts * frame.time_base * _fps
if frame_no < outp_vframes[len(frames)]:
continue # Not the frame we want
# Decode
pil_img = frame.to_image()
while frame_no >= outp_vframes[len(frames)]:
frames += [pil_img]
if len(frames) == len(outp_vframes):
break # All frames have been decoded
return frames, video_fps, start_time
def av_laod_audio(container, audio_fps=None, start_time=0, duration=None):
audio_stream = container.streams.audio[0]
_ss = audio_stream.start_time * audio_stream.time_base if audio_stream.start_time is not None else 0.
_dur = audio_stream.duration * audio_stream.time_base
_ff = _ss + _dur
_fps = audio_stream.rate
if audio_fps is None:
resample = False
audio_fps = _fps
else:
resample = True
audio_resampler = av.audio.resampler.AudioResampler(format="s16p", layout="mono", rate=audio_fps)
if duration is None:
duration = _ff - start_time
duration = min(duration, _ff - start_time)
end_time = start_time + duration
# Fast forward
container.seek(int(start_time * av.time_base))
# Decode snippet
data, timestamps = [], []
for frame in container.decode(audio=0):
frame_pts = frame.pts * frame.time_base
frame_end_pts = frame_pts + Fraction(frame.samples, frame.rate)
if frame_end_pts < start_time: # Skip until start time
continue
if frame_pts > end_time: # Exit if clip has been extracted
break
try:
frame.pts = None
if resample:
np_snd = audio_resampler.resample(frame).to_ndarray()
else:
np_snd = frame.to_ndarray()
data += [np_snd]
timestamps += [frame_pts]
except AttributeError:
break
data = np.concatenate(data, 1)
# Trim audio
start_decoded_time = timestamps[0]
ss = int((start_time - start_decoded_time) * audio_fps)
t = int(duration * audio_fps)
if ss < 0:
data = np.pad(data, ((0, 0), (-ss, 0)), 'constant', constant_values=0)
ss = 0
if t > data.shape[1]:
data = np.pad(data, ((0, 0), (0, t-data.shape[1])), 'constant', constant_values=0)
data = data[:, ss: ss+t]
data = data / np.iinfo(data.dtype).max
return data, audio_fps
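# --- Usage sketch (not part of the original file). 'sample.mp4' is a
# hypothetical path; decoding only runs if such a file exists, and the printed
# values assume a stream that starts at t=0.
if __name__ == '__main__':
    import os
    if os.path.exists('sample.mp4'):
        ctr = av_open('sample.mp4')
        frames, fps, t0 = av_load_video(ctr, video_fps=8, start_time=0., duration=1.)
        print(len(frames), fps, t0)                # ~8 PIL frames resampled to 8 fps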
|
AVID-CMA-main
|
utils/ioutils/av_wrappers.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import random
import torch
from utils.videotransforms.utils import functional as F
class Normalize(object):
"""Normalize a tensor image with mean and standard deviation
Given mean: m and std: s
will normalize each channel as channel = (channel - mean) / std
Args:
        mean (sequence): per-channel mean values
        std (sequence): per-channel std values
"""
def __init__(self, mean, std):
self.mean = torch.tensor(mean).view(3, 1, 1, 1)
self.std = torch.tensor(std).view(3, 1, 1, 1)
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor of stacked images or image
of size (C, H, W) to be normalized
Returns:
            Tensor: Normalized stack of images
"""
return F.normalize(tensor, self.mean, self.std)
class SpatialRandomCrop(object):
"""Crops a random spatial crop in a spatio-temporal
numpy or tensor input [Channel, Time, Height, Width]
"""
def __init__(self, size):
"""
Args:
size (tuple): in format (height, width)
"""
self.size = size
def __call__(self, tensor):
h, w = self.size
_, _, tensor_h, tensor_w = tensor.shape
if w > tensor_w or h > tensor_h:
error_msg = (
                'Initial tensor spatial size should be larger than '
'cropped size but got cropped sizes : ({w}, {h}) while '
'initial tensor is ({t_w}, {t_h})'.format(
t_w=tensor_w, t_h=tensor_h, w=w, h=h))
raise ValueError(error_msg)
x1 = random.randint(0, tensor_w - w)
y1 = random.randint(0, tensor_h - h)
        cropped = tensor[:, :, y1:y1 + h, x1:x1 + w]
return cropped
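# --- Usage sketch (not part of the original file): a random 8x8 spatial crop
# of a (C, T, H, W) clip tensor; the temporal dimension is left untouched.
if __name__ == '__main__':
    clip = torch.randn(3, 4, 16, 16)
    print(SpatialRandomCrop((8, 8))(clip).shape)   # torch.Size([3, 4, 8, 8])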
|
AVID-CMA-main
|
utils/videotransforms/tensor_transforms.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
from PIL import Image
import torch
from utils.videotransforms.utils import images as imageutils
class ClipToTensor(object):
"""Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255]
to a torch.FloatTensor of shape (C x m x H x W) in the range [0, 1.0]
"""
def __init__(self, channel_nb=3, div_255=True, numpy=False):
self.channel_nb = channel_nb
self.div_255 = div_255
self.numpy = numpy
def __call__(self, clip):
"""
Args: clip (list of numpy.ndarray): clip (list of images)
to be converted to tensor.
"""
# Retrieve shape
if isinstance(clip[0], np.ndarray):
h, w, ch = clip[0].shape
assert ch == self.channel_nb, 'Got {0} instead of 3 channels'.format(
ch)
elif isinstance(clip[0], Image.Image):
w, h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image\
but got list of {0}'.format(type(clip[0])))
np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)])
# Convert
for img_idx, img in enumerate(clip):
if isinstance(img, np.ndarray):
pass
elif isinstance(img, Image.Image):
img = np.array(img, copy=False)
else:
raise TypeError('Expected numpy.ndarray or PIL.Image\
but got list of {0}'.format(type(clip[0])))
img = imageutils.convert_img(img)
np_clip[:, img_idx, :, :] = img
if self.numpy:
if self.div_255:
np_clip = np_clip / 255
return np_clip
else:
tensor_clip = torch.from_numpy(np_clip)
if not isinstance(tensor_clip, torch.FloatTensor):
tensor_clip = tensor_clip.float()
if self.div_255:
tensor_clip = tensor_clip.div(255)
return tensor_clip
class ToTensor(object):
"""Converts numpy array to tensor
"""
def __call__(self, array):
tensor = torch.from_numpy(array)
return tensor
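# --- Usage sketch (not part of the original file): converting a list of
# H x W x C uint8 frames into a (C, T, H, W) float tensor scaled to [0, 1];
# assumes imageutils.convert_img returns channel-first images.
if __name__ == '__main__':
    frames = [np.random.randint(0, 255, (8, 8, 3), dtype=np.uint8) for _ in range(4)]
    tensor_clip = ClipToTensor()(frames)
    print(tensor_clip.shape, float(tensor_clip.max()) <= 1.0)   # torch.Size([3, 4, 8, 8]) True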
|
AVID-CMA-main
|
utils/videotransforms/volume_transforms.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numbers
import numpy as np
import cv2
import PIL
def crop_clip(clip, min_h, min_w, h, w):
if isinstance(clip[0], np.ndarray):
cropped = [img[min_h:min_h + h, min_w:min_w + w, :] for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
cropped = [
img.crop((min_w, min_h, min_w + w, min_h + h)) for img in clip
]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
return cropped
def resize_clip(clip, size, interpolation='bilinear'):
if isinstance(clip[0], np.ndarray):
if isinstance(size, numbers.Number):
im_h, im_w, im_c = clip[0].shape
# Min spatial dim already matches minimal size
if (im_w <= im_h and im_w == size) or (im_h <= im_w
and im_h == size):
return clip
new_h, new_w = get_resize_sizes(im_h, im_w, size)
size = (new_w, new_h)
else:
size = size[1], size[0]
if interpolation == 'bilinear':
np_inter = cv2.INTER_LINEAR
else:
np_inter = cv2.INTER_NEAREST
scaled = [
cv2.resize(img, size, interpolation=np_inter) for img in clip
]
elif isinstance(clip[0], PIL.Image.Image):
if isinstance(size, numbers.Number):
im_w, im_h = clip[0].size
# Min spatial dim already matches minimal size
if (im_w <= im_h and im_w == size) or (im_h <= im_w
and im_h == size):
return clip
new_h, new_w = get_resize_sizes(im_h, im_w, size)
size = (new_w, new_h)
else:
size = size[1], size[0]
        if interpolation == 'bilinear':
            pil_inter = PIL.Image.BILINEAR
        else:
            pil_inter = PIL.Image.NEAREST
scaled = [img.resize(size, pil_inter) for img in clip]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
return scaled
def get_resize_sizes(im_h, im_w, size):
if im_w < im_h:
ow = size
oh = int(size * im_h / im_w)
else:
oh = size
ow = int(size * im_w / im_h)
return oh, ow
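# --- Usage sketch (not part of the original file): shortest-side resize of a
# list of PIL frames to 16 px, keeping aspect ratio via get_resize_sizes().
if __name__ == '__main__':
    from PIL import Image
    clip = [Image.new('RGB', (32, 24)) for _ in range(2)]
    resized = resize_clip(clip, 16, interpolation='bilinear')
    print(resized[0].size)                         # (21, 16)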
|
AVID-CMA-main
|
utils/videotransforms/functional.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import PIL
import torch
from utils.videotransforms.utils import images as imageutils
class ToStackedTensor(object):
"""Converts a list of m (H x W x C) numpy.ndarrays in the range [0, 255]
or PIL Images to a torch.FloatTensor of shape (m*C x H x W)
in the range [0, 1.0]
"""
def __init__(self, channel_nb=3):
self.channel_nb = channel_nb
def __call__(self, clip):
"""
Args:
clip (list of numpy.ndarray or PIL.Image.Image): clip
(list of images) to be converted to tensor.
"""
# Retrieve shape
if isinstance(clip[0], np.ndarray):
h, w, ch = clip[0].shape
assert ch == self.channel_nb, 'got {} channels instead of 3'.format(
ch)
elif isinstance(clip[0], PIL.Image.Image):
w, h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image\
but got list of {0}'.format(type(clip[0])))
np_clip = np.zeros([self.channel_nb * len(clip), int(h), int(w)])
# Convert
for img_idx, img in enumerate(clip):
if isinstance(img, np.ndarray):
pass
elif isinstance(img, PIL.Image.Image):
img = np.array(img, copy=False)
else:
raise TypeError('Expected numpy.ndarray or PIL.Image\
but got list of {0}'.format(type(clip[0])))
img = imageutils.convert_img(img)
np_clip[img_idx * self.channel_nb:(
img_idx + 1) * self.channel_nb, :, :] = img
tensor_clip = torch.from_numpy(np_clip)
return tensor_clip.float().div(255)
|
AVID-CMA-main
|
utils/videotransforms/stack_transforms.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numbers
import random
import numpy as np
import PIL
import torchvision
import warnings
import math
from utils.videotransforms import functional as F
from torchvision.transforms import functional as vF
class Compose(object):
"""Composes several transforms
Args:
transforms (list of ``Transform`` objects): list of transforms
to compose
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, clip):
for t in self.transforms:
clip = t(clip)
return clip
class RandomDrop(object):
"""Randomly drops frames in order to keep a fixed number of frames
"""
def __init__(self, num_frames):
self.num_frames = num_frames
def __call__(self, clip):
assert len(clip) >= self.num_frames
if len(clip) == self.num_frames:
return clip
idx = sorted(random.sample(range(len(clip)), k=self.num_frames))
return [clip[i] for i in idx]
class UniformDrop(object):
"""Randomly drops frames in order to keep a fixed number of frames
"""
def __init__(self, num_frames=None, ss_ratio=None):
self.num_frames = num_frames
self.ss_ratio = ss_ratio
def __call__(self, clip):
if self.num_frames is not None:
if len(clip) <= self.num_frames:
return clip
idx = np.linspace(0, len(clip)-1, self.num_frames, endpoint=True).astype(int)
return [clip[i] for i in idx]
elif self.ss_ratio is not None:
if self.ss_ratio == 1:
return clip
idx = np.arange(0, len(clip), self.ss_ratio).astype(int)
return [clip[i] for i in idx]
class RandomHorizontalFlip(object):
"""Horizontally flip the list of given images randomly
with a probability 0.5
"""
def __call__(self, clip):
"""
Args:
            img (PIL.Image or numpy.ndarray): List of images to be flipped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Randomly flipped clip
"""
if random.random() < 0.5:
if isinstance(clip[0], np.ndarray):
return [np.fliplr(img) for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
return [
img.transpose(PIL.Image.FLIP_LEFT_RIGHT) for img in clip
]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
' but got list of {0}'.format(type(clip[0])))
return clip
class RandomGray(object):
"""Horizontally flip the list of given images randomly
with a probability 0.5
"""
def __init__(self, p):
self.p = p
def __call__(self, clip):
"""
Args:
            clip (list of PIL.Image or numpy.ndarray): frames to be converted
            in format (h, w, c) in numpy.ndarray
        Returns:
            list of PIL.Image or numpy.ndarray: clip with frames randomly converted to grayscale
"""
rand_gray_clip = []
for img in clip:
if random.random() < self.p:
img = vF.to_grayscale(img)
rand_gray_clip.append(img)
return rand_gray_clip
class RandomResize(object):
"""Resizes a list of (H x W x C) numpy.ndarray to the final size
The larger the original image is, the more times it takes to
interpolate
Args:
interpolation (str): Can be one of 'nearest', 'bilinear'
defaults to nearest
size (tuple): (widht, height)
"""
def __init__(self, ratio=(3. / 4., 4. / 3.), interpolation='nearest'):
self.ratio = ratio
self.interpolation = interpolation
def __call__(self, clip):
scaling_factor = random.uniform(self.ratio[0], self.ratio[1])
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
new_w = int(im_w * scaling_factor)
new_h = int(im_h * scaling_factor)
new_size = (new_w, new_h)
resized = F.resize_clip(
clip, new_size, interpolation=self.interpolation)
return resized
class Resize(object):
"""Resizes a list of (H x W x C) numpy.ndarray to the final size
The larger the original image is, the more times it takes to
interpolate
Args:
interpolation (str): Can be one of 'nearest', 'bilinear'
defaults to nearest
size (tuple): (widht, height)
"""
def __init__(self, size, interpolation='nearest'):
self.size = size
self.interpolation = interpolation
def __call__(self, clip):
resized = F.resize_clip(
clip, self.size, interpolation=self.interpolation)
return resized
class RandomCrop(object):
"""Extract random crop at the same location for a list of images
Args:
size (sequence or int): Desired output size for the
crop in format (h, w)
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
size = (size, size)
self.size = size
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
h, w = self.size
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
if w > im_w or h > im_h:
error_msg = (
                'Initial image size should be larger than '
'cropped size but got cropped sizes : ({w}, {h}) while '
'initial image is ({im_w}, {im_h})'.format(
im_w=im_w, im_h=im_h, w=w, h=h))
raise ValueError(error_msg)
x1 = random.randint(0, im_w - w)
y1 = random.randint(0, im_h - h)
cropped = F.crop_clip(clip, y1, x1, h, w)
return cropped
class CenterCrop(object):
"""Extract center crop at the same location for a list of images
Args:
size (sequence or int): Desired output size for the
crop in format (h, w)
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
size = (size, size)
self.size = size
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
h, w = self.size
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
if w > im_w or h > im_h:
error_msg = (
                'Initial image size should be larger than '
'cropped size but got cropped sizes : ({w}, {h}) while '
'initial image is ({im_w}, {im_h})'.format(
im_w=im_w, im_h=im_h, w=w, h=h))
raise ValueError(error_msg)
x1 = int(round((im_w - w) / 2.))
y1 = int(round((im_h - h) / 2.))
cropped = F.crop_clip(clip, y1, x1, h, w)
return cropped
class TenCrop(object):
"""Extract center crop at the same location for a list of images
Args:
size (sequence or int): Desired output size for the
crop in format (h, w)
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
size = (size, size)
self.size = size
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
cropped = []
for frame in clip:
cropped += list(vF.ten_crop(frame, self.size))
return cropped
class RandomResizedCrop(object):
"""Crops a series of PIL Image to random size and aspect ratio.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=PIL.Image.BILINEAR):
if isinstance(size, tuple):
self.size = size
else:
self.size = (size, size)
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("range should be of kind (min, max)")
self.interpolation = interpolation
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
area = img.size[0] * img.size[1]
for attempt in range(10):
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if w <= img.size[0] and h <= img.size[1]:
i = random.randint(0, img.size[1] - h)
j = random.randint(0, img.size[0] - w)
return i, j, h, w
# Fallback to central crop
in_ratio = img.size[0] / img.size[1]
if (in_ratio < min(ratio)):
w = img.size[0]
h = int(round(w / min(ratio)))
elif (in_ratio > max(ratio)):
h = img.size[1]
w = int(round(h * max(ratio)))
else: # whole image
w = img.size[0]
h = img.size[1]
i = (img.size[1] - h) // 2
j = (img.size[0] - w) // 2
return i, j, h, w
def __call__(self, clip):
"""
Args:
img (PIL Image): Image to be cropped and resized.
Returns:
PIL Image: Randomly cropped and resized image.
"""
i, j, h, w = self.get_params(clip[0], self.scale, self.ratio)
return [vF.resized_crop(img, i, j, h, w, self.size, self.interpolation) for img in clip]
def __repr__(self):
interpolate_str = '?'
format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))
format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))
format_string += ', interpolation={0})'.format(interpolate_str)
return format_string
class ColorJitter(object):
"""Randomly change the brightness, contrast and saturation and hue of the clip
Args:
brightness (float): How much to jitter brightness. brightness_factor
is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
contrast (float): How much to jitter contrast. contrast_factor
is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
saturation (float): How much to jitter saturation. saturation_factor
is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
hue(float): How much to jitter hue. hue_factor is chosen uniformly from
[-hue, hue]. Should be >=0 and <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
def get_params(self, brightness, contrast, saturation, hue):
if brightness > 0:
brightness_factor = random.uniform(
max(0, 1 - brightness), 1 + brightness)
else:
brightness_factor = None
if contrast > 0:
contrast_factor = random.uniform(
max(0, 1 - contrast), 1 + contrast)
else:
contrast_factor = None
if saturation > 0:
saturation_factor = random.uniform(
max(0, 1 - saturation), 1 + saturation)
else:
saturation_factor = None
if hue > 0:
hue_factor = random.uniform(-hue, hue)
else:
hue_factor = None
return brightness_factor, contrast_factor, saturation_factor, hue_factor
def __call__(self, clip):
"""
Args:
clip (list): list of PIL.Image
Returns:
list PIL.Image : list of transformed PIL.Image
"""
if isinstance(clip[0], np.ndarray):
raise TypeError(
'Color jitter not yet implemented for numpy arrays')
elif isinstance(clip[0], PIL.Image.Image):
brightness, contrast, saturation, hue = self.get_params(
self.brightness, self.contrast, self.saturation, self.hue)
# Create img transform function sequence
img_transforms = []
if brightness is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness))
if saturation is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation))
if hue is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue))
if contrast is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast))
random.shuffle(img_transforms)
# Apply to all images
jittered_clip = []
for img in clip:
for func in img_transforms:
img = func(img)
jittered_clip.append(img)
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
return jittered_clip
class TemporalJitter(object):
"""Crop video sequence temporally with jitter
Args:
n_frames (int): number of output frames
min_scale (float): minimum subsample rate (default 1.0)
max_scale (float): maximum subsample rate (default 1.0)
"""
def __init__(self, n_frames, time_scale=(1.,1.)):
self.n_frames = n_frames
self.time_scale = time_scale
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
rate = self.time_scale[0] if self.time_scale[0] == self.time_scale[1] else random.uniform(self.time_scale[0], min(self.time_scale[1], float(len(clip))/float(self.n_frames)))
clip_ss = [clip[int(t)] for t in np.arange(0, len(clip), rate)]
if len(clip_ss) == self.n_frames:
clip_out = clip_ss
elif len(clip_ss) < self.n_frames:
# Wrap to fill frames
clip_out = [clip_ss[t%len(clip_ss)] for t in range(self.n_frames)]
else:
# Extract random crop
max_init_t = len(clip_ss) - self.n_frames
init_t = random.randint(0, max_init_t)
clip_out = clip_ss[init_t:init_t + self.n_frames]
return clip_out
class TemporalCenterCrop(object):
"""Crop video sequence temporally with jitter
Args:
n_frames (int): number of output frames
min_scale (float): minimum subsample rate (default 1.0)
max_scale (float): maximum subsample rate (default 1.0)
"""
def __init__(self, n_frames, time_scale=1.):
self.n_frames = n_frames
self.time_scale = time_scale
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
clip_ss = [clip[int(t)] for t in np.arange(0, len(clip), self.time_scale)]
if len(clip_ss) == self.n_frames:
clip_out = clip_ss
elif len(clip_ss) < self.n_frames:
# Wrap to fill frames
clip_out = [clip_ss[t%len(clip_ss)] for t in range(self.n_frames)]
else:
# Extract random crop
init_t = (len(clip_ss) - self.n_frames)//2
clip_out = clip_ss[init_t:init_t + self.n_frames]
return clip_out
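# --- Added usage sketch (not part of the original file); it relies on this module's existing
# imports (PIL, numpy, torchvision and the `F`/`vF` functional helpers) and shows how the
# temporal and spatial transforms above compose on a dummy 8-frame clip. ---
if __name__ == '__main__':
    clip = [PIL.Image.fromarray(np.random.randint(0, 255, (128, 171, 3), dtype=np.uint8)) for _ in range(8)]
    clip = TemporalCenterCrop(n_frames=4)(clip)             # 8 frames -> 4 frames
    clip = RandomResizedCrop(size=112)(clip)                # every frame cropped and resized to 112x112
    clip = ColorJitter(brightness=0.2, contrast=0.2)(clip)  # same photometric jitter applied to the whole clip
    print(len(clip), clip[0].size)                          # -> 4 (112, 112)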
|
AVID-CMA-main
|
utils/videotransforms/video_transforms.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def normalize(tensor, mean, std):
"""
Args:
tensor (Tensor): Tensor to normalize in place
mean (Tensor or float): mean to subtract, broadcastable to `tensor`
std (Tensor or float): standard deviation to divide by, broadcastable to `tensor`
Returns:
Tensor: Normalized tensor
"""
tensor.sub_(mean).div_(std)
return tensor
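# --- Added usage sketch (not part of the original file): channel-wise, in-place
# normalization of a (C, H, W) tensor with broadcastable mean/std tensors. ---
if __name__ == '__main__':
    import torch
    t = torch.full((3, 2, 2), 2.0)
    mean = torch.tensor([0.5, 0.5, 0.5]).view(3, 1, 1)
    std = torch.tensor([0.5, 0.5, 0.5]).view(3, 1, 1)
    print(normalize(t, mean, std))  # every entry becomes (2.0 - 0.5) / 0.5 = 3.0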
|
AVID-CMA-main
|
utils/videotransforms/utils/functional.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
def convert_img(img):
"""Converts (H, W, C) numpy.ndarray to (C, W, H) format
"""
if len(img.shape) == 3:
img = img.transpose(2, 0, 1)
if len(img.shape) == 2:
img = np.expand_dims(img, 0)
return img
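# --- Added usage sketch (not part of the original file): convert_img moves channels first
# for RGB input and adds a singleton channel axis for grayscale input. ---
if __name__ == '__main__':
    print(convert_img(np.zeros((224, 224, 3), dtype=np.uint8)).shape)  # (3, 224, 224)
    print(convert_img(np.zeros((224, 224), dtype=np.uint8)).shape)     # (1, 224, 224)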
|
AVID-CMA-main
|
utils/videotransforms/utils/images.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from .video import *
from .audio import *
from .av_wrapper import *
|
AVID-CMA-main
|
models/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import numpy as np
class Basic2DBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride=(1, 1)):
self.__dict__.update(locals())
super(Basic2DBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=(3, 3), padding=(1, 1), stride=stride, bias=False)
self.bn1 = nn.BatchNorm2d(out_planes)
self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=(3, 3), padding=(1, 1), bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.relu(self.bn1(self.conv1(x)))
x = self.relu(self.bn2(self.conv2(x)))
return x
class BasicR2P1DBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride=(1, 1, 1)):
super(BasicR2P1DBlock, self).__init__()
spt_stride = (1, stride[1], stride[2])
tmp_stride = (stride[0], 1, 1)
self.spt_conv1 = nn.Conv3d(in_planes, out_planes, kernel_size=(1, 3, 3), stride=spt_stride, padding=(0, 1, 1), bias=False)
self.spt_bn1 = nn.BatchNorm3d(out_planes)
self.tmp_conv1 = nn.Conv3d(out_planes, out_planes, kernel_size=(3, 1, 1), stride=tmp_stride, padding=(1, 0, 0), bias=False)
self.tmp_bn1 = nn.BatchNorm3d(out_planes)
self.spt_conv2 = nn.Conv3d(out_planes, out_planes, kernel_size=(1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=False)
self.spt_bn2 = nn.BatchNorm3d(out_planes)
self.tmp_conv2 = nn.Conv3d(out_planes, out_planes, kernel_size=(3, 1, 1), stride=(1, 1, 1), padding=(1, 0, 0), bias=False)
self.out_bn = nn.BatchNorm3d(out_planes)
self.relu = nn.ReLU(inplace=True)
if in_planes != out_planes or any([s!=1 for s in stride]):
self.res = True
self.res_conv = nn.Conv3d(in_planes, out_planes, kernel_size=(1, 1, 1), stride=stride, padding=(0, 0, 0), bias=False)
else:
self.res = False
def forward(self, x):
x_main = self.tmp_conv1(self.relu(self.spt_bn1(self.spt_conv1(x))))
x_main = self.relu(self.tmp_bn1(x_main))
x_main = self.tmp_conv2(self.relu(self.spt_bn2(self.spt_conv2(x_main))))
x_res = self.res_conv(x) if self.res else x
x_out = self.relu(self.out_bn(x_main + x_res))
return x_out
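# --- Added shape sketch (not part of the original file): a strided BasicR2P1DBlock halves
# the temporal and spatial resolution and projects the residual with a 1x1x1 convolution. ---
if __name__ == '__main__':
    block = BasicR2P1DBlock(64, 128, stride=(2, 2, 2))
    x = torch.randn(1, 64, 8, 56, 56)  # (N, C, T, H, W)
    print(block(x).shape)              # torch.Size([1, 128, 4, 28, 28])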
|
AVID-CMA-main
|
models/network_blocks.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
__all__ = [
'av_wrapper'
]
class Head(nn.Module):
def __init__(self, input_dim, proj_dims):
super(Head, self).__init__()
if not isinstance(proj_dims, list):
proj_dims = [proj_dims]
projection = []
for i, d in enumerate(proj_dims):
projection += [nn.Linear(input_dim, d)]
input_dim = d
if i < len(proj_dims)-1:
projection += [nn.ReLU(inplace=True)]
self.projection = nn.Sequential(*projection)
self.out_dim = proj_dims[-1]
def forward(self, x):
return self.projection(x)
class AV_Wrapper(nn.Module):
def __init__(self, video_model, audio_model, proj_dim=128):
super(AV_Wrapper, self).__init__()
self.video_model = video_model
self.audio_model = audio_model
self.use_linear_proj = proj_dim is not None
if proj_dim is not None:
self.video_proj = Head(video_model.out_dim, proj_dim)
self.audio_proj = Head(audio_model.out_dim, proj_dim)
self.out_dim = self.video_proj.out_dim
else:
self.out_dim = video_model.out_dim
def forward(self, video, audio):
video_emb = self.video_model(video)
video_emb = video_emb.view(video_emb.shape[0], video_emb.shape[1])
if self.use_linear_proj:
video_emb = self.video_proj(video_emb)
audio_emb = self.audio_model(audio)
audio_emb = audio_emb.view(audio_emb.shape[0], audio_emb.shape[1])
if self.use_linear_proj:
audio_emb = self.audio_proj(audio_emb)
return video_emb, audio_emb
def av_wrapper(video_backbone, video_backbone_args, audio_backbone, audio_backbone_args, proj_dim=128, checkpoint=None):
import models
assert video_backbone in models.__dict__, 'Unknown model architecture'
assert audio_backbone in models.__dict__, 'Unknown model architecture'
video_model = models.__dict__[video_backbone](**video_backbone_args)
audio_model = models.__dict__[audio_backbone](**audio_backbone_args)
model = AV_Wrapper(video_model, audio_model, proj_dim=proj_dim)
if checkpoint is not None:
ckp = torch.load(checkpoint, map_location='cpu')
nn.DataParallel(model).load_state_dict(ckp['model'])
return model
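# --- Added usage sketch (not part of the original file). The stub backbones below are
# placeholders that only expose `out_dim` and return a pooled (N, C, 1, 1) feature, so the
# wrapper and its projection heads can be exercised without the real video/audio models. ---
if __name__ == '__main__':
    class _StubBackbone(nn.Module):
        def __init__(self, out_dim=512):
            super().__init__()
            self.out_dim = out_dim
            self.fc = nn.Linear(8, out_dim)
        def forward(self, x):
            return self.fc(x).unsqueeze(-1).unsqueeze(-1)
    av = AV_Wrapper(_StubBackbone(), _StubBackbone(), proj_dim=[512, 128])
    v, a = av(torch.randn(2, 8), torch.randn(2, 8))
    print(v.shape, a.shape)  # torch.Size([2, 128]) torch.Size([2, 128])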
|
AVID-CMA-main
|
models/av_wrapper.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.nn as nn
from .network_blocks import Basic2DBlock
__all__ = [
'Conv2D'
]
class Conv2D(nn.Module):
def __init__(self, depth=10):
super(Conv2D, self).__init__()
assert depth==10
self.conv1 = nn.Sequential(
nn.Conv2d(1, 64, kernel_size=7, padding=3, stride=2, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
)
self.block1 = Basic2DBlock(64, 64, stride=(2, 2))
self.block2 = Basic2DBlock(64, 128, stride=(2, 2))
self.block3 = Basic2DBlock(128, 256, stride=(2, 2))
self.block4 = Basic2DBlock(256, 512)
self.pool = nn.AdaptiveMaxPool2d((1, 1))
self.out_dim = 512
def forward(self, x, return_embs=False):
x_c1 = self.conv1(x)
x_b1 = self.block1(x_c1)
x_b2 = self.block2(x_b1)
x_b3 = self.block3(x_b2)
x_b4 = self.block4(x_b3)
x_pool = self.pool(x_b4)
if return_embs:
return {'conv2x': x_b1, 'conv3x': x_b2, 'conv4x': x_b3, 'conv5x': x_b4, 'pool': x_pool}
else:
return x_pool
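# --- Added shape sketch (not part of the original file): the audio network takes a
# single-channel spectrogram (sizes below are illustrative) and pools to a 512-d feature. ---
if __name__ == '__main__':
    import torch
    net = Conv2D(depth=10)
    spec = torch.randn(2, 1, 128, 100)  # (N, 1, frequency, time)
    print(net(spec).shape)              # torch.Size([2, 512, 1, 1])
    print(sorted(net(spec, return_embs=True).keys()))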
|
AVID-CMA-main
|
models/audio.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.nn as nn
from models.network_blocks import BasicR2P1DBlock
class R2Plus1D(nn.Module):
"""
Adapted from https://github.com/facebookresearch/VMZ/blob/4c14ee6f8eae8e2ac97fc4c05713b8a112eb1f28/lib/models/video_model.py
Adaptation has a full Conv3D stem, and does not adjust for the number of dimensions between the spatial and temporal convolution.
"""
def __init__(self, depth=18):
super(R2Plus1D, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv3d(3, 64, kernel_size=(3, 7, 7), padding=(1, 3, 3), stride=(1, 2, 2), bias=False),
nn.BatchNorm3d(64),
nn.ReLU(inplace=True),
nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
)
if depth == 10:
self.conv2x = BasicR2P1DBlock(64, 64)
self.conv3x = BasicR2P1DBlock(64, 128, stride=(2, 2, 2))
self.conv4x = BasicR2P1DBlock(128, 256, stride=(2, 2, 2))
self.conv5x = BasicR2P1DBlock(256, 512, stride=(2, 2, 2))
elif depth == 18:
self.conv2x = nn.Sequential(BasicR2P1DBlock(64, 64), BasicR2P1DBlock(64, 64))
self.conv3x = nn.Sequential(BasicR2P1DBlock(64, 128, stride=(2, 2, 2)), BasicR2P1DBlock(128, 128))
self.conv4x = nn.Sequential(BasicR2P1DBlock(128, 256, stride=(2, 2, 2)), BasicR2P1DBlock(256, 256))
self.conv5x = nn.Sequential(BasicR2P1DBlock(256, 512, stride=(2, 2, 2)), BasicR2P1DBlock(512, 512))
elif depth == 34:
self.conv2x = nn.Sequential(BasicR2P1DBlock(64, 64), BasicR2P1DBlock(64, 64), BasicR2P1DBlock(64, 64))
self.conv3x = nn.Sequential(BasicR2P1DBlock(64, 128, stride=(2, 2, 2)), BasicR2P1DBlock(128, 128), BasicR2P1DBlock(128, 128), BasicR2P1DBlock(128, 128))
self.conv4x = nn.Sequential(BasicR2P1DBlock(128, 256, stride=(2, 2, 2)), BasicR2P1DBlock(256, 256), BasicR2P1DBlock(256, 256), BasicR2P1DBlock(256, 256), BasicR2P1DBlock(256, 256), BasicR2P1DBlock(256, 256))
self.conv5x = nn.Sequential(BasicR2P1DBlock(256, 512, stride=(2, 2, 2)), BasicR2P1DBlock(512, 512), BasicR2P1DBlock(512, 512))
self.pool = nn.AdaptiveMaxPool3d((1, 1, 1))
self.out_dim = 512
def forward(self, x, return_embs=False):
x_c1 = self.conv1(x)
x_b1 = self.conv2x(x_c1)
x_b2 = self.conv3x(x_b1)
x_b3 = self.conv4x(x_b2)
x_b4 = self.conv5x(x_b3)
x_pool = self.pool(x_b4)
if return_embs:
return {'conv1': x_c1, 'conv2x': x_b1, 'conv3x': x_b2, 'conv4x': x_b3, 'conv5x': x_b4, 'pool': x_pool}
else:
return x_pool
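# --- Added shape sketch (not part of the original file): forward pass on a dummy clip;
# the input size is illustrative, the adaptive pool always yields a 512-d feature. ---
if __name__ == '__main__':
    import torch
    net = R2Plus1D(depth=18)
    clip = torch.randn(1, 3, 8, 112, 112)  # (N, C, T, H, W)
    out = net(clip, return_embs=True)
    print(out['pool'].shape)               # torch.Size([1, 512, 1, 1, 1])
    print(sorted(out.keys()))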
|
AVID-CMA-main
|
models/video.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from .avid import *
from .avid_cma import *
|
AVID-CMA-main
|
criterions/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
from torch import nn
from torch.nn import functional as F
import torch.distributed as dist
import pprint
from utils.distributed_utils import _gather_from_all
from utils.alias_method import AliasMethod
from criterions.nce import NCECriterion
__all__ = ['AVID']
class AVIDSimilarityMemoryBank(nn.Module):
def __init__(self,
memory_size,
embedding_dim,
xModal=True,
wModal=False,
num_negatives=1024,
momentum=0.5,
device=0
):
super(AVIDSimilarityMemoryBank, self).__init__()
self.num_negatives = num_negatives
self.temperature = 0.07
if not isinstance(momentum, (list, tuple)):
momentum = [momentum]*2
self.momentum = momentum
self.device = device
self.multinomial = AliasMethod(torch.ones(memory_size-1))
self.xModal = xModal
self.wModal = wModal
self.distributed = dist.is_available() and dist.is_initialized()
self.rank = dist.get_rank() if self.distributed else 0
self.init_memory(memory_size, embedding_dim)
def forward(self, video_emb, audio_emb, y):
K = int(self.num_negatives)
# Normalize embeddings
bs, dim = video_emb.shape
video_emb = F.normalize(video_emb, p=2, dim=1).view(bs, dim, 1)
audio_emb = F.normalize(audio_emb, p=2, dim=1).view(bs, dim, 1)
# Sample memories
with torch.no_grad():
video_pos_mem = self.view1_mem[y].view(bs, 1, dim)
audio_pos_mem = self.view2_mem[y].view(bs, 1, dim)
idx = self.sample_negatives(y, K).to(video_emb.device)
video_neg_mem = self.view1_mem[idx].view(bs, K, dim)
audio_neg_mem = self.view2_mem[idx].view(bs, K, dim)
# Compute scores
def compute_scores(context_emb, target_embs, T):
return [torch.bmm(trg, context_emb).squeeze(-1) / T for trg in target_embs]
scores = {}
if self.xModal:
scores['v2a'] = compute_scores(video_emb, [audio_pos_mem, audio_neg_mem], self.temperature)
scores['a2v'] = compute_scores(audio_emb, [video_pos_mem, video_neg_mem], self.temperature)
if self.wModal:
scores['v2v'] = compute_scores(video_emb, [video_pos_mem, video_neg_mem], self.temperature)
scores['a2a'] = compute_scores(audio_emb, [audio_pos_mem, audio_neg_mem], self.temperature)
# Update memory bank
self.update_memory(video_emb.squeeze(-1), audio_emb.squeeze(-1), y)
return scores
def sample_negatives(self, y, K):
bs = y.shape[0]
idx = self.multinomial.draw(bs * K).view(bs, -1).to(y.device)
idx = idx + (idx >= y.unsqueeze(1)).long() # Avoid self
return idx
def init_memory(self, num_items, embedding_dim):
self.register_buffer('view1_mem', torch.randn(num_items, embedding_dim))
self.register_buffer('view2_mem', torch.randn(num_items, embedding_dim))
self.view1_mem = F.normalize(self.view1_mem, p=2, dim=1)
self.view1_mem = self.view1_mem.cuda(self.device)
self.view2_mem = F.normalize(self.view2_mem, p=2, dim=1)
self.view2_mem = self.view2_mem.cuda(self.device)
if self.distributed:
dist.broadcast(self.view1_mem, 0)
dist.broadcast(self.view2_mem, 0)
dist.barrier()
def update_memory(self, video_emb, audio_emb, y):
video_mom = float(self.momentum[0])
audio_mom = float(self.momentum[1])
# gather embeddings from all gpus
if self.distributed:
video_emb_gathered = _gather_from_all(video_emb)
audio_emb_gathered = _gather_from_all(audio_emb)
y_gathered = _gather_from_all(y)
else:
video_emb_gathered = video_emb
audio_emb_gathered = audio_emb
y_gathered = y
# update audio and video memories
with torch.no_grad():
l1_pos = self.view1_mem.index_select(0, y_gathered.view(-1))
l1_pos.mul_(video_mom)
l1_pos.add_(torch.mul(video_emb_gathered, 1 - video_mom))
updated_l1 = F.normalize(l1_pos, p=2, dim=1)
self.view1_mem.index_copy_(0, y_gathered, updated_l1)
l2_pos = self.view2_mem.index_select(0, y_gathered.view(-1))
l2_pos.mul_(audio_mom)
l2_pos.add_(torch.mul(audio_emb_gathered, 1 - audio_mom))
updated_l2 = F.normalize(l2_pos, p=2, dim=1)
self.view2_mem.index_copy_(0, y_gathered, updated_l2)
def __repr__(self):
num_negatives = int(self.num_negatives)
view1_mom = float(self.momentum[0])
view2_mom = float(self.momentum[1])
repr_dict = {
'name': self._get_name(),
'num_negatives': num_negatives,
'momentum': [view1_mom, view2_mom],
'view1_buffer_size': self.view1_mem.shape,
'view2_buffer_size': self.view2_mem.shape,
}
return pprint.pformat(repr_dict, indent=2)
class AVID(nn.Module):
def __init__(self, num_data, embedding_dim,
num_negatives=4096,
momentum=0.9,
xModal_coeff=1.,
wModal_coeff=0.,
checkpoint=None,
device=0):
super(AVID, self).__init__()
'''
AVID criterion.
This module receives the output embeddings of the video
and audio models, computes their non-linear projections,
manages the memory bank and computes the final loss.
Args:
- num_data: number of instances in the training set.
- embedding_dim: output dimension of the non-linear projection.
- num_negatives: number of negatives to draw from memory bank to compute the NCE loss.
- momentum: memory bank EMA momentum parameter.
- xModal_coeff: coefficient for the cross modal loss. (Cross-AVID: 1.0 | Self-AVID: 0.0 | Joint-AVID: 1.0)
- wModal_coeff: coefficient for the within modal loss. (Cross-AVID: 0.0 | Self-AVID: 1.0 | Joint-AVID: 1.0)
- checkpoint: optionally specify a checkpoint path to restore the memory bank and partition function
'''
self.nce_average = AVIDSimilarityMemoryBank(
memory_size=num_data,
embedding_dim=embedding_dim,
num_negatives=num_negatives,
momentum=momentum,
xModal=xModal_coeff>0.,
wModal=wModal_coeff>0.,
device=device
)
self.nce_average = self.nce_average.cuda(device)
sum_coeff = (xModal_coeff + wModal_coeff)
self.xModal_coeff = xModal_coeff / sum_coeff
self.wModal_coeff = wModal_coeff / sum_coeff
self.criterion = NCECriterion(num_data)
# Restore memory bank and partition function if necessary
if checkpoint is not None:
ckp = torch.load(checkpoint, map_location='cpu')['train_criterion']
state_dict = self.state_dict()
# Restore memory banks
state_dict['nce_average.view1_mem'] = ckp['nce_average.view1_mem']
state_dict['nce_average.view2_mem'] = ckp['nce_average.view2_mem']
# Restore partition function
Z = torch.stack([ckp[k] for k in ckp if 'avg_exp_score' in k]).mean()
for k in state_dict:
if 'avg_exp_score' in k:
state_dict[k] = Z
self.load_state_dict(state_dict)
def forward(self, emb1, emb2, target):
'''
Args
- emb1: Video embeddings `(N, D)`
- emb2: Audio embeddings `(N, D)`
- target: Instance labels `(N)`
'''
tb_log = {}
# Compare output embeddings to memory bank embeddings and get scores
# scores given as: {task: [scores_positives, scores_negatives]}
scores = self.nce_average(emb1, emb2, target)
# Compute loss
xModal_loss, wModal_loss = 0., 0.
for k in scores:
loss = self.criterion(*scores[k])
if k in {'v2a', 'a2v'}:
xModal_loss += loss / 2.
elif k in {'v2v', 'a2a'}:
wModal_loss += loss / 2.
# Tensorboard metrics
tb_log[f'Loss/{k}'] = loss
# Tensorboard metrics
tb_log['Loss/xModal'] = xModal_loss
tb_log['Loss/wModal'] = wModal_loss
# Final loss
total_loss = xModal_loss * self.xModal_coeff + wModal_loss * self.wModal_coeff
return total_loss, tb_log
def set_epoch(self, epoch):
pass
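# --- Added CPU-only sketch (not part of the original file) of the scoring step inside
# AVIDSimilarityMemoryBank: the criterion itself places its memory banks on GPU, so this
# snippet only reproduces the bmm-based positive/negative scoring with random tensors. ---
if __name__ == '__main__':
    bs, dim, K, T = 4, 128, 16, 0.07
    emb = F.normalize(torch.randn(bs, dim), p=2, dim=1).view(bs, dim, 1)
    pos_mem = F.normalize(torch.randn(bs, 1, dim), p=2, dim=2)  # one positive memory entry per instance
    neg_mem = F.normalize(torch.randn(bs, K, dim), p=2, dim=2)  # K negative memory entries per instance
    scores_pos = torch.bmm(pos_mem, emb).squeeze(-1) / T        # (bs, 1)
    scores_neg = torch.bmm(neg_mem, emb).squeeze(-1) / T        # (bs, K)
    print(scores_pos.shape, scores_neg.shape)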
|
AVID-CMA-main
|
criterions/avid.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
from torch import nn
import torch.distributed as dist
from utils.distributed_utils import _gather_from_all
class NCECriterion(nn.Module):
def __init__(self, nLem):
super(NCECriterion, self).__init__()
self.nLem = nLem
self.register_buffer('avg_exp_score', torch.tensor(-1.))
self.distributed = dist.is_available() and dist.is_initialized()
def compute_partition_function(self, out):
if self.avg_exp_score > 0:
# Use precomputed value
return self.avg_exp_score
with torch.no_grad():
batch_mean = out.mean().unsqueeze(0)
if self.distributed:
batch_mean_gathered = _gather_from_all(batch_mean)
all_batch_mean = batch_mean_gathered.mean().squeeze()
else:
all_batch_mean = batch_mean
Z = all_batch_mean
self.avg_exp_score = Z
return self.avg_exp_score
def forward(self, scores_pos, scores_neg):
K = scores_neg.size(1)
# Compute unnormalized distributions
exp_scores_pos = torch.exp(scores_pos)
exp_scores_neg = torch.exp(scores_neg)
# Compute partition function and normalize
with torch.no_grad():
avg_exp_score = self.compute_partition_function(exp_scores_neg)
# eq 5.1 : P(origin=model) = Pmt / (Pmt + k*Pnt)
Pmt = torch.div(exp_scores_pos, exp_scores_pos + K * avg_exp_score)
lnPmtSum = -torch.log(Pmt).mean(-1)
# eq 5.2 : P(origin=noise) = k*Pn / (Pms + k*Pn)
Pon = torch.div(K * avg_exp_score, exp_scores_neg + K * avg_exp_score)
lnPonSum = -torch.log(Pon).sum(-1)
loss = (lnPmtSum + lnPonSum).mean()
return loss
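# --- Added usage sketch (not part of the original file): NCECriterion on random scores.
# Runs on CPU; `nLem` is the training-set size and the random tensors stand in for the
# positive/negative similarities produced by the memory bank. ---
if __name__ == '__main__':
    criterion = NCECriterion(nLem=10000)
    scores_pos = torch.randn(8, 1)     # one positive score per instance
    scores_neg = torch.randn(8, 1024)  # K negative scores per instance
    print(criterion(scores_pos, scores_neg).item())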
|
AVID-CMA-main
|
criterions/nce.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import multiprocessing as mp
mp.set_start_method('spawn', force=True)
import torch
from torch import nn
from torch.nn import functional as F
import torch.distributed as dist
import numpy as np
import pprint
from utils.alias_method import AliasMethod
from criterions.nce import NCECriterion
from criterions.avid import AVIDSimilarityMemoryBank
__all__ = ['AVID_CMA']
class CMASampler:
def __init__(self, video_mem, audio_mem, sampling_args):
'''
Class responsible for finding the audio-visual correspondences from the audio and video memory banks.
Correspondences are computed by calling the sample() method.
To speed things up, this code will be distributed over different GPUs and synced at the end.
:param video_mem: Video memory bank
:param audio_mem: Audio memory bank
:param sampling_args: Dictionary with two fields.
`type`: Type of correspondence. Options are `consensus`, `union`, `video` and `audio`.
Refer to the paper for their meaning.
`pos_k`: number of positive correspondences to sample for each instance.
'''
self.video_mem = video_mem.cpu()
self.audio_mem = audio_mem.cpu()
self.sampling_args = sampling_args
def sample_instance(self, gpu, q_job, q_data):
video_mem = self.video_mem.cuda(gpu)
audio_mem = self.audio_mem.cuda(gpu)
while True:
query_idx = q_job.get()
if query_idx is None:
break
# Compute video and audio cosine similarities
video_sim = torch.mm(video_mem, video_mem[query_idx].t())
audio_sim = torch.mm(audio_mem, audio_mem[query_idx].t())
# Compute agreement score
if self.sampling_args['type'] == 'consensus':
similarity = torch.stack([video_sim, audio_sim], 0).min(dim=0)[0]
elif self.sampling_args['type'] == 'union':
similarity = torch.stack([video_sim, audio_sim], 0).max(dim=0)[0]
elif self.sampling_args['type'] == 'video':
similarity = video_sim
elif self.sampling_args['type'] == 'audio':
similarity = audio_sim
else:
raise ValueError
# Return top-k correspondences
pos_sim, pos_idx = torch.topk(similarity, self.sampling_args['pos_k']+1, dim=0, sorted=True)
pos_index = pos_idx[1:].t().cpu().numpy() # Avoid self
pos_index = np.sort(pos_index, axis=1) # Sort indexes so that negative sampling can be done efficiently
q_data.put((query_idx, pos_index))
q_data.put((None, None))
def sample_dispatcher(self, q_job, workers=80):
num_items = self.video_mem.shape[0]
batch_size = 16
for i in range(0, num_items, batch_size):
query_idx = list(range(i, min(i+batch_size, num_items)))
q_job.put(query_idx)
for _ in range(workers):
q_job.put(None)
def sample_gather(self, q_data, workers=80):
num_items = self.video_mem.shape[0]
positive_index = np.zeros((num_items, self.sampling_args['pos_k'])).astype(int)
workers_done, done = 0, 0
while workers_done < workers:
query_idx, pos_idx = q_data.get()
if query_idx is None:
workers_done += 1
else:
done += pos_idx.shape[0]
positive_index[query_idx] = pos_idx
if done % (64*1000) == 0:
print(f"Done {done}/{num_items}")
return positive_index
def sample(self):
# Compute CMA correspondences. Runs on only one process. Distributes work over all gpus.
num_workers = torch.cuda.device_count()
q_job = mp.Queue(maxsize=1000)
q_data = mp.Queue(maxsize=1000)
# Start job launcher
disp = mp.Process(target=self.sample_dispatcher, args=(q_job, num_workers), daemon=True)
disp.start()
# Start workers
workers = []
for gpu in range(num_workers):
w = mp.Process(target=self.sample_instance, args=(gpu, q_job, q_data), daemon=True)
w.start()
workers += [w]
# Gather results from workers
pos_index = self.sample_gather(q_data, num_workers)
# Wait for all jobs to finish
[w.join() for w in workers]
disp.join()
return pos_index
class AVIDSimilarityPositiveExpansion(AVIDSimilarityMemoryBank):
def __init__(self,
memory_size,
embedding_dim,
xModalInst=True,
wModalInst=False,
xModalPos=False,
wModalPos=True,
num_negatives=1024,
num_negatives_within=None,
sampling_args=None,
momentum=0.5,
device=0,
):
super().__init__(memory_size=memory_size, embedding_dim=embedding_dim, xModal=xModalInst, wModal=wModalInst, num_negatives=num_negatives, momentum=momentum, device=device)
self.num_negatives_within = num_negatives_within
self.multinomial = AliasMethod(torch.ones(memory_size - sampling_args['pos_k']))
self.sampling_args = sampling_args
self.xModalInst = xModalInst
self.wModalInst = wModalInst
self.xModalPos = xModalPos
self.wModalPos = wModalPos
def forward(self, video_emb, audio_emb, y):
bs, dim = video_emb.shape
video_emb = F.normalize(video_emb, p=2, dim=1).view(bs, dim, 1)
audio_emb = F.normalize(audio_emb, p=2, dim=1).view(bs, dim, 1)
# Sample memories
with torch.no_grad():
video_self_mem = self.view1_mem[y].view(bs, 1, dim)
audio_self_mem = self.view2_mem[y].view(bs, 1, dim)
pos_idx, neg_idx = self.memory_sampling(y)
video_pos_mem = self.view1_mem[pos_idx]
audio_pos_mem = self.view2_mem[pos_idx]
video_neg_mem = self.view1_mem[neg_idx]
audio_neg_mem = self.view2_mem[neg_idx]
# Compute scores
def compute_scores(context_emb, target_embs, T):
return [torch.bmm(trg, context_emb).squeeze(-1) / T for trg in target_embs]
# Instance Discrimination
scores = {}
if self.xModalInst: # Cross-modal discrimination
scores['inst-v2a'] = compute_scores(video_emb, [audio_self_mem, audio_neg_mem], self.temperature)
scores['inst-a2v'] = compute_scores(audio_emb, [video_self_mem, video_neg_mem], self.temperature)
if self.wModalInst: # Within-modal discrimination
scores['inst-v2v'] = compute_scores(video_emb, [video_self_mem, video_neg_mem], self.temperature)
scores['inst-a2a'] = compute_scores(audio_emb, [audio_self_mem, audio_neg_mem], self.temperature)
# Positive Set Discrimination
if self.xModalPos: # Cross-modal discrimination
scores['pos-v2a'] = compute_scores(video_emb, [audio_pos_mem, audio_neg_mem], self.temperature)
scores['pos-a2v'] = compute_scores(audio_emb, [video_pos_mem, video_neg_mem], self.temperature)
if self.wModalPos: # Within-modal discrimination
# Potentially reduce number of negatives for within-modal discrimination
wm_video_neg_mem, wm_audio_neg_mem = video_neg_mem, audio_neg_mem
if self.num_negatives_within is not None:
wm_video_neg_mem = video_neg_mem[:, :self.num_negatives_within]
wm_audio_neg_mem = audio_neg_mem[:, :self.num_negatives_within]
scores['pos-v2v'] = compute_scores(video_emb, [video_pos_mem, wm_video_neg_mem], self.temperature)
scores['pos-a2a'] = compute_scores(audio_emb, [audio_pos_mem, wm_audio_neg_mem], self.temperature)
# Update memory
self.update_memory(video_emb.squeeze(-1), audio_emb.squeeze(-1), y)
return scores
def memory_sampling(self, y):
# Draw positives
positive_indexes = self.positive_set[y].long()
# Draw negatives
bs = y.shape[0]
rand_idx = self.multinomial.draw(bs * self.num_negatives).view(bs, -1).to(y.device)
# Avoid positives while sampling negatives (Positive list is sorted.)
pos_idx = self.positive_set[y].long()
ref = pos_idx - torch.arange(0, pos_idx.shape[1], dtype=pos_idx.dtype).to(pos_idx.device).unsqueeze(0)
negative_indexes = rand_idx + (rand_idx.unsqueeze(2) >= ref.unsqueeze(1)).sum(2)
return positive_indexes, negative_indexes
def find_correspondences(self):
if self.sampling_args['pos_k'] <= 0:
return
# Find CMA correspondences. Only do this on one process if running in distributed mode and sync at the end.
positive_set = np.zeros((self.view1_mem.shape[0], self.sampling_args['pos_k'])).astype(int)
if not self.distributed or self.distributed and self.rank == 0:
torch.cuda.empty_cache()
positive_set = CMASampler(self.view1_mem, self.view2_mem, self.sampling_args).sample()
# Register the correspondences as a buffer and broadcast them to every process.
if positive_set is not None:
self.register_buffer('positive_set', torch.from_numpy(positive_set).int())
self.positive_set = self.positive_set.cuda(self.device)
if self.distributed:
dist.broadcast(self.positive_set, 0)
if self.distributed:
dist.barrier()
def __repr__(self):
num_negatives = int(self.num_negatives)
view1_mom = float(self.momentum[0])
view2_mom = float(self.momentum[1])
repr_dict = {
'name': self._get_name(),
'num_negatives': num_negatives,
'momentum': [view1_mom, view2_mom],
'view1_buffer_size': self.view1_mem.shape,
'view2_buffer_size': self.view2_mem.shape,
}
return pprint.pformat(repr_dict, indent=2)
class AVID_CMA(nn.Module):
def __init__(self, num_data, embedding_dim,
num_negatives=1024,
num_negatives_within=None,
momentum=0.5,
xModalInstCoeff=1.,
wModalInstCoeff=0.,
xModalPosCoeff=0.,
wModalPosCoeff=1.,
sampling_args=None,
checkpoint=None,
resample_freq=-1,
device=0):
super(AVID_CMA, self).__init__()
'''
AVID_CMA criterion.
This module receives the output embeddings of the video
and audio models, computes their non-linear projections,
manages the memory bank, draws positive correspondences,
and computes the final loss (weighted average between
instance discrimination and positive discrimination losses).
Args:
- num_data: number of instances in the training set.
- embedding_dim: output dimension of the non-linear projection.
- num_negatives: number of negatives to draw from memory bank to compute the NCE loss.
- num_negatives_within: optionally reduce the number of negatives for the within-modal loss.
- momentum: memory bank EMA momentum parameter.
- xModalInstCoeff: coefficient for the cross modal instance discrimination loss. (AVID-CMA: 1.0)
- wModalInstCoeff: coefficient for the within modal instance discrimination loss. (AVID-CMA: 0.0)
- xModalPosCoeff: coefficient for the cross modal positive discrimination loss. (AVID-CMA: 0.0)
- wModalPosCoeff: coefficient for the within modal positive discrimination loss. (AVID-CMA: 1.0)
- checkpoint: optionally specify a checkpoint path to restore the memory bank and partition function
'''
# first setup the NCEAverage method to get the scores of the output wrt. memory bank negatives
self.nce_average = AVIDSimilarityPositiveExpansion(
memory_size=num_data,
embedding_dim=embedding_dim,
num_negatives=num_negatives,
num_negatives_within=num_negatives_within,
momentum=momentum,
xModalInst=xModalInstCoeff>0.,
xModalPos=xModalPosCoeff>0.,
wModalInst=wModalInstCoeff>0.,
wModalPos=wModalPosCoeff>0.,
sampling_args=sampling_args,
device=device
)
self.nce_average = self.nce_average.cuda(device)
# Loss coefficients
sum_coeff = xModalInstCoeff + wModalInstCoeff + xModalPosCoeff + wModalPosCoeff
self.xModalInstCoeff = xModalInstCoeff / sum_coeff
self.wModalInstCoeff = wModalInstCoeff / sum_coeff
self.xModalPosCoeff = xModalPosCoeff / sum_coeff
self.wModalPosCoeff = wModalPosCoeff / sum_coeff
# Setup loss function
self.criterion = NCECriterion(num_data)
# Restore memory bank and partition function from AVID checkpoint
# Needs to be done before finding correspondences
if checkpoint is not None:
ckp = torch.load(checkpoint, map_location='cpu')['train_criterion']
state_dict = self.state_dict()
# Restore memory banks
state_dict['nce_average.view1_mem'] = ckp['nce_average.view1_mem']
state_dict['nce_average.view2_mem'] = ckp['nce_average.view2_mem']
# Restore partition function
Z = torch.stack([ckp[k] for k in ckp if 'avg_exp_score' in k]).mean()
for k in state_dict:
if 'avg_exp_score' in k:
state_dict[k] = Z
self.load_state_dict(state_dict)
# Find CMA correspondences
self.resample_freq = resample_freq
self.nce_average.find_correspondences()
def forward(self, emb1, emb2, target):
'''
Args
- emb1: Video embeddings `(N, D)`
- emb2: Audio embeddings `(N, D)`
- target: Instance labels `(N)`
'''
tb_log = {}
# Compare output embeddings to memory bank embeddings and get scores
scores = self.nce_average(emb1, emb2, target)
# Compute cross/within modal discrimination losses
xModalInst_loss, wModalInst_loss, xModalPos_loss, wModalPos_loss = 0., 0., 0., 0.
for k in scores:
loss = self.criterion(*scores[k])
if k in {'inst-v2a', 'inst-a2v'}:
xModalInst_loss += loss / 2.
elif k in {'inst-v2v', 'inst-a2a'}:
wModalInst_loss += loss / 2.
elif k in {'pos-v2a', 'pos-a2v'}:
xModalPos_loss += loss / 2.
elif k in {'pos-v2v', 'pos-a2a'}:
wModalPos_loss += loss / 2.
# Metrics for tensorboard
with torch.no_grad():
tb_log[f'Loss/{k}'] = loss
# Compute final loss
total_loss = xModalInst_loss * self.xModalInstCoeff
total_loss += wModalInst_loss * self.wModalInstCoeff
total_loss += xModalPos_loss * self.xModalPosCoeff
total_loss += wModalPos_loss * self.wModalPosCoeff
return total_loss, tb_log
def set_epoch(self, epoch):
# Recompute CMA correspondences every resample_freq epochs
if self.resample_freq > 0 and epoch > 0 and epoch % self.resample_freq == 0:
self.nce_average.find_correspondences()
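# --- Added CPU-only sketch (not part of the original file) of the index shift used in
# AVIDSimilarityPositiveExpansion.memory_sampling: negatives are drawn from a pool of size
# (memory_size - pos_k) and shifted past every sorted positive index they pass, so they can
# never collide with the positive set. ---
if __name__ == '__main__':
    memory_size, pos_k, n_neg = 10, 2, 6
    pos_idx = torch.tensor([[3, 7]])                              # sorted positives for one query
    rand_idx = torch.randint(0, memory_size - pos_k, (1, n_neg))  # draws from the reduced pool
    ref = pos_idx - torch.arange(0, pos_k, dtype=pos_idx.dtype)
    neg_idx = rand_idx + (rand_idx.unsqueeze(2) >= ref.unsqueeze(1)).sum(2)
    print(sorted(neg_idx[0].tolist()))                            # never contains 3 or 7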
|
AVID-CMA-main
|
criterions/avid_cma.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Evaluation script for object localization
import json
import argparse
import torch
import itertools
import numpy as np
from collections import defaultdict
from utils import bbox_overlaps_batch, get_frm_mask
from stanfordcorenlp import StanfordCoreNLP
from tqdm import tqdm
class ANetGrdEval(object):
def __init__(self, reference_file=None, submission_file=None,
split_file=None, val_split=None, iou_thresh=0.5, verbose=False):
if not reference_file:
raise IOError('Please input a valid reference file!')
if not submission_file:
raise IOError('Please input a valid submission file!')
self.iou_thresh = iou_thresh
self.verbose = verbose
self.val_split = val_split
self.import_ref(reference_file, split_file)
self.import_sub(submission_file)
def import_ref(self, reference_file=None, split_file=None):
with open(split_file) as f:
split_dict = json.load(f)
split = {}
for s in self.val_split:
split.update({i:i for i in split_dict[s]})
with open(reference_file) as f:
ref = json.load(f)['annotations']
ref = {k:v for k,v in ref.items() if k in split}
self.ref = ref
def import_sub(self, submission_file=None):
with open(submission_file) as f:
pred = json.load(f)['results']
self.pred = pred
def gt_grd_eval(self):
ref = self.ref
pred = self.pred
print('Number of videos in the reference: {}, number of videos in the submission: {}'.format(len(ref), len(pred)))
results = defaultdict(list)
for vid, anns in ref.items():
for seg, ann in anns['segments'].items():
if len(ann['frame_ind']) == 0:
continue # annotation not available
ref_bbox_all = torch.cat((torch.Tensor(ann['process_bnd_box']), \
torch.Tensor(ann['frame_ind']).unsqueeze(-1)), dim=1) # 5-D coordinates
sent_idx = set(itertools.chain.from_iterable(ann['process_idx'])) # index of word in sentence to evaluate
for idx in sent_idx:
sel_idx = [ind for ind, i in enumerate(ann['process_idx']) if idx in i]
ref_bbox = ref_bbox_all[sel_idx] # select matched boxes
# Note that despite discouraged, a single word could be annotated across multiple boxes/frames
assert(ref_bbox.size(0) > 0)
class_name = ann['process_clss'][sel_idx[0]][ann['process_idx'][sel_idx[0]].index(idx)]
if vid not in pred:
results[class_name].append(0) # video not grounded
elif seg not in pred[vid]:
results[class_name].append(0) # segment not grounded
elif idx not in pred[vid][seg]['idx_in_sent']:
results[class_name].append(0) # object not grounded
else:
pred_ind = pred[vid][seg]['idx_in_sent'].index(idx)
pred_bbox = torch.cat((torch.Tensor(pred[vid][seg]['bbox_for_all_frames'][pred_ind])[:,:4], \
torch.Tensor(range(10)).unsqueeze(-1)), dim=1)
frm_mask = torch.from_numpy(get_frm_mask(pred_bbox[:, 4].numpy(), \
ref_bbox[:, 4].numpy()).astype('uint8'))
overlap = bbox_overlaps_batch(pred_bbox[:, :5].unsqueeze(0), \
ref_bbox[:, :5].unsqueeze(0), frm_mask.unsqueeze(0))
results[class_name].append(1 if torch.max(overlap) > self.iou_thresh else 0)
print('Number of groundable objects in this split: {}'.format(len(results)))
grd_accu = np.mean([sum(hm)*1./len(hm) for i,hm in results.items()])
print('-' * 80)
print('The overall localization accuracy is {:.4f}'.format(grd_accu))
print('-' * 80)
if self.verbose:
print('Object frequency and grounding accuracy per class (descending by object frequency):')
accu_per_clss = {(i, sum(hm)*1./len(hm)):len(hm) for i,hm in results.items()}
accu_per_clss = sorted(accu_per_clss.items(), key=lambda x:x[1], reverse=True)
for accu in accu_per_clss:
print('{} ({}): {:.4f}'.format(accu[0][0], accu[1], accu[0][1]))
return grd_accu
def precision_recall_util(self, mode='all'):
ref = self.ref
pred = self.pred
print('Number of videos in the reference: {}, number of videos in the submission: {}'.format(len(ref), len(pred)))
nlp = StanfordCoreNLP('tools/stanford-corenlp-full-2018-02-27')
props={'annotators': 'lemma','pipelineLanguage':'en', 'outputFormat':'json'}
vocab_in_split = set()
prec = defaultdict(list)
prec_per_sent = defaultdict(list)
for vid, anns in tqdm(ref.items()):
for seg, ann in anns['segments'].items():
if len(ann['frame_ind']) == 0 or vid not in pred or seg not in pred[vid]:
continue # do not penalize if sentence not annotated
prec_per_sent_tmp = [] # for each sentence
ref_bbox_all = torch.cat((torch.Tensor(ann['process_bnd_box']),
torch.Tensor(ann['frame_ind']).unsqueeze(-1)), dim=1) # 5-D coordinates
idx_in_sent = {}
for box_idx, cls_lst in enumerate(ann['process_clss']):
vocab_in_split.update(set(cls_lst))
for cls_idx, cls in enumerate(cls_lst):
idx_in_sent[cls] = idx_in_sent.get(cls, []) + [ann['process_idx'][box_idx][cls_idx]]
sent_idx = set(itertools.chain.from_iterable(ann['process_idx'])) # index of gt object words
exclude_obj = {json.loads(nlp.annotate(token, properties=props)
)['sentences'][0]['tokens'][0]['lemma']: 1 for token_idx, token in
enumerate(ann['tokens']
) if (token_idx not in sent_idx and token != '')}
for pred_idx, class_name in enumerate(pred[vid][seg]['clss']):
if class_name in idx_in_sent:
gt_idx = min(idx_in_sent[class_name]) # always consider the first match...
sel_idx = [idx for idx, i in enumerate(ann['process_idx']) if gt_idx in i]
ref_bbox = ref_bbox_all[sel_idx] # select matched boxes
assert (ref_bbox.size(0) > 0)
pred_bbox = torch.cat((torch.Tensor(pred[vid][seg]['bbox_for_all_frames'][pred_idx])[:, :4],
torch.Tensor(range(10)).unsqueeze(-1)), dim=1)
frm_mask = torch.from_numpy(get_frm_mask(pred_bbox[:, 4].numpy(),
ref_bbox[:, 4].numpy()).astype('uint8'))
overlap = bbox_overlaps_batch(pred_bbox[:, :5].unsqueeze(0),
ref_bbox[:, :5].unsqueeze(0), frm_mask.unsqueeze(0))
prec[class_name].append(1 if torch.max(overlap) > self.iou_thresh else 0)
prec_per_sent_tmp.append(1 if torch.max(overlap) > self.iou_thresh else 0)
elif json.loads(nlp.annotate(class_name, properties=props))['sentences'][0]['tokens'][0]['lemma'] in exclude_obj:
pass # do not penalize if gt object word not annotated (missed)
else:
if mode == 'all':
prec[class_name].append(0) # hallucinated object
prec_per_sent_tmp.append(0)
prec_per_sent[vid + seg] = prec_per_sent_tmp
nlp.close()
# recall
recall = defaultdict(list)
recall_per_sent = defaultdict(list)
for vid, anns in ref.items():
for seg, ann in anns['segments'].items():
if len(ann['frame_ind']) == 0:
# print('no annotation available')
continue
recall_per_sent_tmp = [] # for each sentence
ref_bbox_all = torch.cat((torch.Tensor(ann['process_bnd_box']), \
torch.Tensor(ann['frame_ind']).unsqueeze(-1)), dim=1) # 5-D coordinates
sent_idx = set(itertools.chain.from_iterable(ann['process_idx'])) # index of gt object words
for gt_idx in sent_idx:
sel_idx = [idx for idx, i in enumerate(ann['process_idx']) if gt_idx in i]
ref_bbox = ref_bbox_all[sel_idx] # select matched boxes
# Note that despite discouraged, a single word could be annotated across multiple boxes/frames
assert(ref_bbox.size(0) > 0)
class_name = ann['process_clss'][sel_idx[0]][ann['process_idx'][sel_idx[0]].index(gt_idx)]
if vid not in pred:
recall[class_name].append(0) # video not grounded
recall_per_sent_tmp.append(0)
elif seg not in pred[vid]:
recall[class_name].append(0) # segment not grounded
recall_per_sent_tmp.append(0)
elif class_name in pred[vid][seg]['clss']:
pred_idx = pred[vid][seg]['clss'].index(class_name) # always consider the first match...
pred_bbox = torch.cat((torch.Tensor(pred[vid][seg]['bbox_for_all_frames'][pred_idx])[:,:4], \
torch.Tensor(range(10)).unsqueeze(-1)), dim=1)
frm_mask = torch.from_numpy(get_frm_mask(pred_bbox[:, 4].numpy(), \
ref_bbox[:, 4].numpy()).astype('uint8'))
overlap = bbox_overlaps_batch(pred_bbox[:, :5].unsqueeze(0), \
ref_bbox[:, :5].unsqueeze(0), frm_mask.unsqueeze(0))
recall[class_name].append(1 if torch.max(overlap) > self.iou_thresh else 0)
recall_per_sent_tmp.append(1 if torch.max(overlap) > self.iou_thresh else 0)
else:
if mode == 'all':
recall[class_name].append(0) # object not grounded
recall_per_sent_tmp.append(0)
recall_per_sent[vid + seg] = recall_per_sent_tmp
return prec, recall, prec_per_sent, recall_per_sent, vocab_in_split
def grd_eval(self, mode='all'):
if mode == 'all':
print('Evaluating on all object words.')
elif mode == 'loc':
print('Evaluating only on correctly-predicted object words.')
else:
raise Exception('Invalid loc mode!')
prec, recall, prec_per_sent, rec_per_sent, vocab_in_split = self.precision_recall_util(mode=mode)
# compute the per-class precision, recall, and F1 scores
num_vocab = len(vocab_in_split)
print('Number of groundable objects in this split: {}'.format(num_vocab))
print('Number of objects in prec and recall: {}, {}'.format(len(prec), len(recall)))
prec_cls = np.sum([sum(hm)*1./len(hm) for i,hm in prec.items()])*1./num_vocab
recall_cls = np.sum([sum(hm)*1./len(hm) for i,hm in recall.items()])*1./num_vocab
f1_cls = 2. * prec_cls * recall_cls / (prec_cls + recall_cls)
print('-' * 80)
print('The overall precision_{0} / recall_{0} / F1_{0} are {1:.4f} / {2:.4f} / {3:.4f}'.format(mode, prec_cls, recall_cls, f1_cls))
print('-' * 80)
if self.verbose:
print('Object frequency and grounding accuracy per class (descending by object frequency):')
accu_per_clss = {}
for i in vocab_in_split:
prec_clss = sum(prec[i])*1./len(prec[i]) if i in prec else 0
recall_clss = sum(recall[i])*1./len(recall[i]) if i in recall else 0
accu_per_clss[(i, prec_clss, recall_clss)] = (len(prec[i]), len(recall[i]))
accu_per_clss = sorted(accu_per_clss.items(), key=lambda x:x[1][1], reverse=True)
for accu in accu_per_clss:
print('{} ({} / {}): {:.4f} / {:.4f}'.format(accu[0][0], accu[1][0], accu[1][1], accu[0][1], accu[0][2]))
# compute the per-sent precision, recall, and F1 scores
num_segment_without_labels = 0
prec, rec, f1 = [], [], []
for seg_id, prec_list in prec_per_sent.items():
if rec_per_sent[seg_id] == []:
# skip the segment if no target objects
num_segment_without_labels += 1
else:
current_prec = 0 if prec_list == [] else np.mean(prec_list) # avoid empty prec_list
current_rec = np.mean(rec_per_sent[seg_id])
# if precision and recall are both 0, set the f1 to be 0
if current_prec == 0.0 and current_rec == 0.0:
current_f1_score = 0.0
else:
current_f1_score = 2. * current_prec * current_rec / (current_prec + current_rec) # per-sent F1
prec.append(current_prec)
rec.append(current_rec)
f1.append(current_f1_score)
num_predictions = 0
for _, pred_seg in self.pred.items():
num_predictions += len(pred_seg)
# divide the scores with the total number of predictions
avg_prec = np.sum(prec) / (num_predictions - num_segment_without_labels)
avg_rec = np.sum(rec) / (num_predictions - num_segment_without_labels)
avg_f1 = np.sum(f1) / (num_predictions - num_segment_without_labels)
print('-' * 80)
print('The overall precision_{0}_per_sent / recall_{0}_per_sent / F1_{0}_per_sent are {1:.4f} / {2:.4f} / {3:.4f}'.format(mode, avg_prec, avg_rec, avg_f1))
print('-' * 80)
return prec_cls, recall_cls, f1_cls, avg_prec, avg_rec, avg_f1
def main(args):
grd_evaluator = ANetGrdEval(reference_file=args.reference, submission_file=args.submission,
split_file=args.split_file, val_split=args.split,
iou_thresh=args.iou_thresh, verbose=args.verbose)
if args.eval_mode == 'GT':
print('Assuming the input boxes are based upon GT sentences.')
grd_evaluator.gt_grd_eval()
elif args.eval_mode == 'gen':
print('Assuming the input boxes are based upon generated sentences.')
grd_evaluator.grd_eval(mode=args.loc_mode)
else:
raise Exception('Invalid eval mode!')
if __name__=='__main__':
parser = argparse.ArgumentParser(description='ActivityNet-Entities object grounding evaluation script.')
parser.add_argument('-s', '--submission', type=str, default='', help='submission grounding result file')
parser.add_argument('-r', '--reference', type=str, default='data/anet_entities_cleaned_class_thresh50_trainval.json', help='reference file')
parser.add_argument('--split_file', type=str, default='data/split_ids_anet_entities.json', help='path to the split file')
parser.add_argument('--split', type=str, nargs='+', default=['validation'], help='which split(s) to evaluate')
parser.add_argument('--eval_mode', type=str, default='GT',
help='GT | gen, indicating whether the input is on GT sentences or generated sentences')
parser.add_argument('--loc_mode', type=str, default='all',
help='all | loc; when evaluating generated sentences, whether to also penalize language errors such as hallucinated objects (all) or to score only correctly-predicted object words (loc)')
parser.add_argument('--iou_thresh', type=float, default=0.5, help='the iou threshold for grounding correctness')
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
main(args)
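# --- Added example invocations (not part of the original file); the submission file name is
# a placeholder and the reference/split defaults above are assumed to exist. ---
#   python scripts/eval_grd_anet_entities.py -s submission.json --eval_mode GT
#   python scripts/eval_grd_anet_entities.py -s submission.json --eval_mode gen --loc_mode all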
|
ActivityNet-Entities-main
|
scripts/eval_grd_anet_entities.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Script to print stats on the NP annotation file
import numpy as np
import json
import csv
import sys
src_file = sys.argv[1] # 'anet_entities.json'
dataset_file = sys.argv[2] # 'anet_captions_all_splits.json'
split_file = sys.argv[3] # 'split_ids_anet_entities.json'
if __name__ == '__main__':
with open(src_file) as f:
data = json.load(f)['database']
with open(dataset_file) as f:
raw_data = json.load(f)
split_dict = {}
with open(split_file) as f:
split = json.load(f)
for s,ids in split.items():
split_dict.update({i:s for i in ids})
num_seg = np.sum([len(dat['segments']) for vid, dat in data.items()])
total_box = {}
total_dur = []
seg_splits = {}
for vid, dat in data.items():
for seg, ann in dat['segments'].items():
total_box[split_dict[vid]] = total_box.get(split_dict[vid], 0)+len(ann['objects'])
total_dur.append(float(raw_data[vid]['timestamps'][int(seg)][1]-raw_data[vid]['timestamps'][int(seg)][0]))
seg_splits[split_dict[vid]] = seg_splits.get(split_dict[vid], 0)+1
print('number of annotated video: {}'.format(len(data)))
print('number of annotated video segments: {}'.format(num_seg))
print('number of segments in each split: {}'.format(seg_splits))
print('total duration in hr: {}'.format(np.sum(total_dur)/3600))
print('total number of noun phrase boxes: {}'.format(total_box))
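# --- Added example invocation (not part of the original file); the three positional arguments
# follow the hints in the sys.argv comments at the top of the script. ---
#   python scripts/anet_entities_np_stats.py anet_entities.json anet_captions_all_splits.json split_ids_anet_entities.json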
|
ActivityNet-Entities-main
|
scripts/anet_entities_np_stats.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Based on
# https://github.com/jiasenlu/NeuralBabyTalk/blob/master/misc/bbox_transform.py
# Licensed under The MIT License
# Copyright (c) 2017 Jiasen Lu
# --------------------------------------------------------
# Reorganized and modified by Jianwei Yang and Jiasen Lu
# --------------------------------------------------------
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import torch
import numpy as np
def bbox_overlaps_batch(anchors, gt_boxes, frm_mask=None):
"""
anchors: (N, 4) ndarray of float
gt_boxes: (b, K, 5) ndarray of float
frm_mask: (b, N, K) ndarray of bool
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
"""
batch_size = gt_boxes.size(0)
if anchors.dim() == 2:
assert frm_mask is None, 'mask not implemented yet' # the 2-D anchors path does not support the frame mask
N = anchors.size(0)
K = gt_boxes.size(1)
anchors = anchors.view(1, N, 4).expand(batch_size, N, 4).contiguous()
gt_boxes = gt_boxes[:,:,:4].contiguous()
gt_boxes_x = (gt_boxes[:,:,2] - gt_boxes[:,:,0] + 1)
gt_boxes_y = (gt_boxes[:,:,3] - gt_boxes[:,:,1] + 1)
gt_boxes_area = (gt_boxes_x * gt_boxes_y).view(batch_size, 1, K)
anchors_boxes_x = (anchors[:,:,2] - anchors[:,:,0] + 1)
anchors_boxes_y = (anchors[:,:,3] - anchors[:,:,1] + 1)
anchors_area = (anchors_boxes_x * anchors_boxes_y).view(batch_size, N, 1)
gt_area_zero = (gt_boxes_x == 1) & (gt_boxes_y == 1)
anchors_area_zero = (anchors_boxes_x == 1) & (anchors_boxes_y == 1)
boxes = anchors.view(batch_size, N, 1, 4).expand(batch_size, N, K, 4)
query_boxes = gt_boxes.view(batch_size, 1, K, 4).expand(batch_size, N, K, 4)
iw = (torch.min(boxes[:,:,:,2], query_boxes[:,:,:,2]) -
torch.max(boxes[:,:,:,0], query_boxes[:,:,:,0]) + 1)
iw[iw < 0] = 0
ih = (torch.min(boxes[:,:,:,3], query_boxes[:,:,:,3]) -
torch.max(boxes[:,:,:,1], query_boxes[:,:,:,1]) + 1)
ih[ih < 0] = 0
ua = anchors_area + gt_boxes_area - (iw * ih)
overlaps = iw * ih / ua
# mask the overlap here.
overlaps.masked_fill_(gt_area_zero.view(batch_size, 1, K).expand(batch_size, N, K), 0)
overlaps.masked_fill_(anchors_area_zero.view(batch_size, N, 1).expand(batch_size, N, K), -1)
elif anchors.dim() == 3:
N = anchors.size(1)
K = gt_boxes.size(1)
if anchors.size(2) == 5:
anchors = anchors[:,:,:5].contiguous()
else:
anchors = anchors[:,:,1:6].contiguous()
gt_boxes = gt_boxes[:,:,:5].contiguous()
gt_boxes_x = (gt_boxes[:,:,2] - gt_boxes[:,:,0] + 1)
gt_boxes_y = (gt_boxes[:,:,3] - gt_boxes[:,:,1] + 1)
gt_boxes_area = (gt_boxes_x * gt_boxes_y).view(batch_size, 1, K)
anchors_boxes_x = (anchors[:,:,2] - anchors[:,:,0] + 1)
anchors_boxes_y = (anchors[:,:,3] - anchors[:,:,1] + 1)
anchors_area = (anchors_boxes_x * anchors_boxes_y).view(batch_size, N, 1)
gt_area_zero = (gt_boxes_x == 1) & (gt_boxes_y == 1)
anchors_area_zero = (anchors_boxes_x == 1) & (anchors_boxes_y == 1)
boxes = anchors.view(batch_size, N, 1, 5).expand(batch_size, N, K, 5)
query_boxes = gt_boxes.view(batch_size, 1, K, 5).expand(batch_size, N, K, 5)
iw = (torch.min(boxes[:,:,:,2], query_boxes[:,:,:,2]) -
torch.max(boxes[:,:,:,0], query_boxes[:,:,:,0]) + 1)
iw[iw < 0] = 0
ih = (torch.min(boxes[:,:,:,3], query_boxes[:,:,:,3]) -
torch.max(boxes[:,:,:,1], query_boxes[:,:,:,1]) + 1)
ih[ih < 0] = 0
ua = anchors_area + gt_boxes_area - (iw * ih)
if frm_mask is not None:
# proposal and gt should be on the same frame to overlap
# frm_mask = ~frm_mask # bitwise not (~) does not work with uint8 in pytorch 1.3
frm_mask = 1 - frm_mask
# print('Percentage of proposals that are in the annotated frame: {}'.format(torch.mean(frm_mask.float())))
overlaps = iw * ih / ua
overlaps *= frm_mask.type(overlaps.type())
# mask the overlap here.
overlaps.masked_fill_(gt_area_zero.view(batch_size, 1, K).expand(batch_size, N, K), 0)
overlaps.masked_fill_(anchors_area_zero.view(batch_size, N, 1).expand(batch_size, N, K), -1)
else:
raise ValueError('anchors input dimension is not correct.')
return overlaps
def get_frm_mask(proposals, gt_bboxs):
# proposals: num_pps
# gt_bboxs: num_box
num_pps = proposals.shape[0]
num_box = gt_bboxs.shape[0]
return (np.tile(proposals.reshape(-1,1), (1,num_box)) != np.tile(gt_bboxs, (num_pps,1)))
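# --- Added usage sketch (not part of the original file): batched IoU between 5-D boxes
# (x1, y1, x2, y2, frame); the frame mask zeroes overlaps across different frames. ---
if __name__ == '__main__':
    props = torch.tensor([[0., 0., 10., 10., 0.], [20., 20., 30., 30., 0.]]).unsqueeze(0)  # (1, N, 5)
    gts = torch.tensor([[0., 0., 10., 10., 0.]]).unsqueeze(0)                              # (1, K, 5)
    frm_mask = torch.from_numpy(get_frm_mask(np.array([0., 0.]), np.array([0.])).astype('uint8')).unsqueeze(0)
    print(bbox_overlaps_batch(props, gts, frm_mask))  # IoU 1.0 for the matching box, 0.0 for the disjoint one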
|
ActivityNet-Entities-main
|
scripts/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Script to preprocess the raw annotation output to NP/object annotation files
import os
import sys
import json
import argparse
import numpy as np
from collections import Counter, defaultdict
from stanfordcorenlp import StanfordCoreNLP
def define_split(database):
with open(args.train_cap_file) as f:
train_ids = list(json.load(f).keys())
with open(args.val_cap_file) as f:
valtest_ids = list(json.load(f).keys())
val_split = np.random.rand(len(valtest_ids))>=0.5 # split a half as the test split
val_ids = [valtest_ids[i] for i,j in enumerate(val_split) if j]
test_ids = [valtest_ids[i] for i,j in enumerate(val_split) if ~j]
vid_ids = set(database.keys())
train_ann_ids = vid_ids.intersection(set(train_ids))
val_ann_ids = vid_ids.intersection(set(val_ids))
test_ann_ids = vid_ids.intersection(set(test_ids))
print('All data - total: {}, train split: {}, val split: {}, test split: {}'.format(len(train_ids+val_ids+test_ids), len(train_ids), len(val_ids), len(test_ids)))
print('Annotated data - total: {}, train split: {}, val split: {}, and test split: {}'.format(
len(vid_ids), len(train_ann_ids), len(val_ann_ids), len(test_ann_ids)))
return [train_ids, val_ids, test_ids]
def extract_attr(database, splits):
split_dict = {}
for split in splits:
split_dict.update({s:s for s in split})
print('Object classes defined on {} videos, freq threshold is {}'.format(len(split_dict), args.freq_thresh))
attr_all = [] # all the attributes
for vid_id, vid in database.items():
if split_dict.get(vid_id, -1) != -1:
for seg_id, seg in vid['segments'].items():
for obj in seg['objects']:
assert(len(obj['frame_ind']) == 1)
for box_id, box in obj['frame_ind'].items():
tmp = []
attr_lst = []
sorted_attr = sorted(box['attributes'], key=lambda x:x[0]) # the attributes are unordered
for ind, attr in enumerate(sorted_attr):
assert(attr[0] >= 0)
if len(tmp) == 0:
tmp.append(attr[1].lower()) # convert to lowercase
else:
if attr[0] == (sorted_attr[ind-1][0]+1):
tmp.append(attr[1].lower())
else:
attr_lst.append(tmp)
tmp = [attr[1].lower()]
if len(tmp) > 0: # the last one
attr_lst.append(tmp)
# exclude empty box (no attribute)
# crowd boxes are ok for now
if len(attr_lst) == 0: # or box['crowds'] == 1
pass
# print('empty attribute at video {}, segment {}, box {}'.format(vid_id, seg_id, box_id))
else:
attr_all.extend([' '.join(i) for i in attr_lst])
return attr_all
def prep_all(database, database_cap, obj_cls_lst, w2l, nlp):
w2d = {}
for ind, obj in enumerate(obj_cls_lst):
w2d[obj] = ind
avg_box = [] # number of boxes per segment
avg_attr = [] # number of attributes per box
attr_all = [] # all the attributes
crowd_all = [] # all the crowd labels
attr_dict = defaultdict(list)
with open(args.attr_to_video_file) as f:
for line in f.readlines():
line_split = line.split(',')
attr_id = line_split[0]
vid_name = line_split[-1]
attr = ','.join(line_split[1:-1])
vid_id, seg_id = vid_name.strip().split('_segment_')
attr_dict[(vid_id, str(int(seg_id)))].append([int(attr_id), attr])
print('Number of segments with attributes: {}'.format(len(attr_dict)))
vid_seg_dict = {}
for vid_id, vid in database.items():
for seg_id, _ in vid['segments'].items():
vid_seg_dict[(vid_id, seg_id)] = vid_seg_dict.get((vid_id, seg_id), 0) + 1
new_database = {}
new_database_np = {}
seg_counter = 0
for vid_id, cap in database_cap.items():
new_database_np[vid_id] = {'segments':{}}
new_seg = {}
for cap_id in range(len(cap['sentences'])):
new_obj_lst = defaultdict(list)
seg_id = str(cap_id)
new_database_np[vid_id]['segments'][seg_id] = {'objects':[]}
if vid_seg_dict.get((vid_id, seg_id), 0) == 0:
new_obj_lst['tokens'] = nlp.word_tokenize(cap['sentences'][cap_id].encode('utf-8')) # sentences not in ANet-BB
else:
vid = database[vid_id]
seg = vid['segments'][seg_id]
# preprocess attributes
attr_sent = sorted(attr_dict[(vid_id, seg_id)], key=lambda x:x[0])
start_ind = attr_sent[0][0]
# legacy token issues from our annotation tool
for ind, tup in enumerate(attr_sent):
if attr_sent[ind][1] == '\\,':
attr_sent[ind][1] = ','
new_obj_lst['tokens'] = [i[1] for i in attr_sent] # all the word tokens
for obj in seg['objects']:
assert(len(obj['frame_ind']) == 1)
np_ann = {}
                    box_id = list(obj['frame_ind'].keys())[0]
                    box = list(obj['frame_ind'].values())[0]
np_ann['frame_ind'] = int(box_id)
np_ann.update(box)
if len(box['attributes']) > 0: # just in case the attribute is empty, though it should not be
tmp = []
tmp_ind = []
tmp_obj = []
attr_lst = []
attr_ind_lst = []
tmp_np_ind = []
np_lst = []
sorted_attr = sorted(box['attributes'], key=lambda x:x[0]) # the attributes are unordered
sorted_attr = [(x[0]-start_ind, x[1]) for x in sorted_attr] # index relative to the sent
for ind, attr in enumerate(sorted_attr):
assert(attr[0] >= 0)
attr_w = attr[1].lower()
if len(tmp) == 0:
tmp.append(attr_w) # convert to lowercase
tmp_np_ind.append(attr[0])
if w2l.get(attr_w, -1) != -1:
attr_l = w2l[attr_w]
if w2d.get(attr_l, -1) != -1:
tmp_obj.append(attr_l)
tmp_ind.append(attr[0])
else:
if attr[0] == (sorted_attr[ind-1][0]+1):
tmp.append(attr_w)
tmp_np_ind.append(attr[0])
if w2l.get(attr_w, -1) != -1:
attr_l = w2l[attr_w]
if w2d.get(attr_l, -1) != -1:
tmp_obj.append(attr_l)
tmp_ind.append(attr[0])
else:
np_lst.append([' '.join(tmp), tmp_np_ind])
if len(tmp_obj) >= 1:
attr_lst.append(tmp_obj[-1]) # the last noun is usually the head noun
attr_ind_lst.append(tmp_ind[-1])
tmp = [attr_w]
tmp_np_ind = [attr[0]]
if w2l.get(attr_w, -1) != -1:
attr_l = w2l[attr_w]
if w2d.get(attr_l, -1) != -1:
tmp_obj = [attr_l]
tmp_ind = [attr[0]]
else:
tmp_obj = []
tmp_ind = []
else:
tmp_obj = []
tmp_ind = []
if len(tmp) > 0: # the last one
np_lst.append([' '.join(tmp), tmp_np_ind])
if len(tmp_obj) >= 1:
attr_lst.append(tmp_obj[-1]) # the last noun is usually the head noun
attr_ind_lst.append(tmp_ind[-1])
assert(len(np_lst) > 0)
np_ann['noun_phrases'] = np_lst
np_ann.pop('attributes', None)
new_database_np[vid_id]['segments'][seg_id]['objects'].append(np_ann)
# exclude empty box (no attribute)
# crowd boxes are ok for now
if len(attr_lst) == 0: # or box['crowds'] == 1
pass
# print('empty attribute at video {}, segment {}, box {}'.format(vid_id, seg_id, box_id))
else:
new_obj_lst['process_bnd_box'].append([box['xtl'], box['ytl'], box['xbr'], box['ybr']])
new_obj_lst['frame_ind'].append(int(box_id))
new_obj_lst['crowds'].append(box['crowds'])
new_obj_lst['process_clss'].append(attr_lst)
new_obj_lst['process_idx'].append(attr_ind_lst)
avg_attr.append(len(attr_lst))
attr_all.extend([' '.join(i) for i in attr_lst])
crowd_all.append(box['crowds'])
avg_box.append(len(new_obj_lst['frame_ind'])) # cound be 0
if len(new_obj_lst['frame_ind']) == 0:
new_obj_lst['process_bnd_box'] = []
new_obj_lst['frame_ind'] = [] # all empty
new_obj_lst['crowds'] = []
new_obj_lst['process_clss'] = []
new_obj_lst['process_idx'] = []
seg_counter += 1
new_seg[seg_id] = new_obj_lst
new_database_np[vid_id]['segments'][seg_id]['tokens'] = new_obj_lst['tokens']
new_database[vid_id] = {'segments':new_seg}
# quick stats
print('Number of videos: {} (including empty ones)'.format(len(new_database)))
print('Number of segments: {}'.format(seg_counter))
print('Average number of valid segments per video: {}'.format(np.mean([len(vid['segments']) for vid_id, vid in new_database.items()])))
print('Average number of box per segment: {} and frequency: {}'.format(np.mean(avg_box), Counter(avg_box)))
print('Average number of attributes per box: {} and frequency: {} (for valid box only)'.format(np.mean(avg_attr), Counter(avg_attr)))
crowd_freq = Counter(crowd_all)
print('Percentage of crowds: {} (for valid box only)'.format(crowd_freq[1]*1./(crowd_freq[1]+crowd_freq[0])))
return new_database, new_database_np
def freq_obj_list(attr_all, nlp, props):
# generate a list of object classes
num_nn_per_attr = []
anet_obj_cls = []
nn_wo_noun = [] # noun phrases that contain no nouns
w2lemma = defaultdict(list)
for i, v in enumerate(attr_all):
if i%10000 == 0:
print(i)
out = json.loads(nlp.annotate(v.encode('utf-8'), properties=props))
        assert(len(out['sentences']) > 0)
counter = 0
for token in out['sentences'][0]['tokens']:
if ('NN' in token['pos']) or ('PRP' in token['pos']):
lemma_w = token['lemma']
anet_obj_cls.append(lemma_w)
w2lemma[token['word']].append(lemma_w)
counter += 1
num_nn_per_attr.append(counter)
if counter == 0:
nn_wo_noun.append(v)
top_nn_wo_noun = Counter(nn_wo_noun)
print('Frequency of NPs w/o nouns:')
print(top_nn_wo_noun.most_common(10))
print('Frequency of number of nouns per attribute:')
print(Counter(num_nn_per_attr))
top_obj_cls = Counter(anet_obj_cls)
print('Top 10 objects:', top_obj_cls.most_common(20))
obj_cls_lst = []
for w,freq in top_obj_cls.items():
if freq >= args.freq_thresh:
obj_cls_lst.append(w.encode('ascii'))
w2l = {}
for w, l in w2lemma.items():
# manually correct some machine lemmatization mistakes
spec_w2l = {'outfits':'outfit', 'mariachi':'mariachi', 'barrios':'barrio', 'mans':'man', 'bags':'bag', 'aerobics':'aerobic', 'motobikes':'motobike', 'graffiti':'graffiti', 'semi':'semi', 'los':'los', 'tutus':'tutu'}
if spec_w2l.get(w, -1) != -1: # one special case...
w2l[w] = spec_w2l[w]
print('Ambiguous lemma for: {}'.format(w))
else:
assert(len(set(l)) == 1)
w2l[w] = list(set(l))[0]
print('Number of words derived from lemma visual words {}'.format(len(w2l)))
return obj_cls_lst, w2l
def main(args):
nlp = StanfordCoreNLP(args.corenlp_path)
props={'annotators': 'ssplit, tokenize, lemma','pipelineLanguage':'en', 'outputFormat':'json'}
# load anet captions
with open(args.train_cap_file) as f:
database_cap = json.load(f)
with open(args.val_cap_file) as f:
database_cap.update(json.load(f))
print('Number of videos in ActivityNet Captions (train+val): {}'.format(len(database_cap)))
# load raw annotation output anet bb
with open(args.src_file) as f:
database = json.load(f)['database']
print('Number of videos in ActivityNet-BB (train+val): {}'.format(len(database)))
if os.path.isfile(args.split_file):
with open(args.split_file) as f:
all_splits = json.load(f)
splits = [all_splits['training'], all_splits['validation'], all_splits['testing']]
else:
        raise ValueError('[WARNING] Cannot find the split file! Comment out this line if you want to create a new split.')
splits = define_split(database)
all_splits = {'training':splits[0], 'validation':splits[1], 'testing':splits[2]}
with open(args.split_file, 'w') as f:
json.dump(all_splits, f)
attr_all = extract_attr(database, splits[:2]) # define object classes on train/val data
obj_cls_lst, w2l = freq_obj_list(attr_all, nlp, props)
new_database, new_database_np = prep_all(database, database_cap, obj_cls_lst, w2l, nlp)
# write raw annotation file
new_database_np = {'database':new_database_np}
with open(args.target_np_file, 'w') as f:
json.dump(new_database_np, f)
# write pre-processed annotation file
new_database = {'vocab':obj_cls_lst, 'annotations':new_database}
with open(args.target_file, 'w') as f:
json.dump(new_database, f)
nlp.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='ActivityNet-Entities dataset preprocessing script.')
parser.add_argument('--dataset_root', type=str, default='/private/home/luoweizhou/subsystem/BottomUpAttnVid/data/anet/', help='dataset root directory')
parser.add_argument('--corenlp_path', type=str, default='/private/home/luoweizhou/subsystem/BottomUpAttn/tools/stanford-corenlp-full-2018-02-27', help='path to stanford core nlp toolkit')
parser.add_argument('--freq_thresh', type=int, default=50, help='frequency threshold for determining object classes')
parser.add_argument('--train_cap_file', type=str, default='/private/home/luoweizhou/subsystem/BottomUpAttnVid/data/anet/raw_annotation_file/train.json')
parser.add_argument('--val_cap_file', type=str, default='/private/home/luoweizhou/subsystem/BottomUpAttnVid/data/anet/raw_annotation_file/val_1.json')
args = parser.parse_args()
args.src_file = args.dataset_root+'anet_bb.json' # the raw annotation file
args.target_np_file = args.dataset_root+'anet_entities.json' # output np file
args.target_file = args.dataset_root+'anet_entities_cleaned_class_thresh'+str(args.freq_thresh)+'.json' # output object file
args.attr_to_video_file = args.dataset_root+'attr_to_video.txt' # from annotation tool
args.split_file = args.dataset_root+'split_ids_anet_entities.json' # split file
np.random.seed(123) # make reproducible
main(args)
|
ActivityNet-Entities-main
|
scripts/attr_prep_tag_NP.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Script to print stats on the object annotation file
import numpy as np
import json
import csv
# import visdom
import sys
from collections import Counter
src_file = sys.argv[1] # 'anet_entities_cleaned_class_thresh50_trainval.json'
dataset_file = sys.argv[2] # 'anet_captions_all_splits.json'
split_file = sys.argv[3] # 'split_ids_anet_entities.json'
if __name__=='__main__':
with open(src_file) as f:
data = json.load(f)['annotations']
with open(dataset_file) as f:
raw_data = json.load(f)
split_dict = {}
with open(split_file) as f:
split = json.load(f)
for s,ids in split.items():
split_dict.update({i:s for i in ids})
num_seg = np.sum([len(dat['segments']) for vid, dat in data.items()])
total_box = {}
total_dur = []
seg_splits = {}
box_per_seg = []
obj_per_box = []
count_obj = []
for vid, dat in data.items():
for seg, ann in dat['segments'].items():
total_box[split_dict[vid]] = total_box.get(split_dict[vid], 0)+len(ann['process_bnd_box'])
total_dur.append(float(raw_data[vid]['timestamps'][int(seg)][1]-raw_data[vid]['timestamps'][int(seg)][0]))
seg_splits[split_dict[vid]] = seg_splits.get(split_dict[vid], 0)+1
box_per_seg.append(len(ann['process_bnd_box']))
for c in ann['process_clss']:
obj_per_box.append(len(c))
count_obj.extend(c)
print('number of annotated video: {}'.format(len(data)))
print('number of annotated video segments: {}'.format(num_seg))
print('number of segments in each split: {}'.format(seg_splits))
print('total duration in hr: {}'.format(np.sum(total_dur)/3600))
print('total number of phrase (not object) boxes: {}'.format(total_box))
print('box per segment, mean {}, std {}, count {}'.format(np.mean(box_per_seg), np.std(box_per_seg), Counter(box_per_seg)))
print('object per box, mean {}, std {}, count {}'.format(np.mean(obj_per_box), np.std(obj_per_box), Counter(obj_per_box)))
print('Top 10 object labels: {}'.format(Counter(count_obj).most_common(10)))
"""
# visualization
vis = visdom.Visdom()
vis.histogram(X=[i for i in box_per_seg if i < 20],
opts={'numbins': 20, 'xtickmax':20, 'xtickmin':0, 'xmax':20, 'xmin':0, 'title':'Distribution of number of boxes per segment', 'xtickfont':{'size':14}, \
'ytickfont':{'size':14}, 'xlabel':'Number of boxes', 'ylabel': 'Counts'})
vis.histogram(X=[i for i in obj_per_box if i < 100],
opts={'numbins': 100, 'xtickmax':100, 'xtickmin':0, 'xmax':100, 'xmin':0, 'title':'Distribution of number of object labels per box', 'xtickfont':{'size':14}, \
'ytickfont':{'size':14}, 'xlabel':'Number of object labels', 'ylabel': 'Counts'})
"""
|
ActivityNet-Entities-main
|
scripts/anet_entities_object_stats.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
class Path(object):
"""
User-specific path configuration.
Please complete the /path/to/* paths to point into valid directories.
"""
@staticmethod
def db_root_dir(database=''):
db_root = '/path/to/databases'
db_names = {'PASCAL_MT', 'NYUD_MT', 'BSDS500', 'NYUD_raw',
'PASCAL', 'COCO', 'FSV', 'MSRA10K', 'PASCAL-S'}
if database in db_names:
return os.path.join(db_root, database)
elif not database:
return db_root
else:
raise NotImplementedError
@staticmethod
def save_root_dir():
return './'
@staticmethod
def exp_dir():
return './'
@staticmethod
def models_dir():
return '/path/to/pre-trained/models/'
@staticmethod
def seism_root_dir():
# For edge detection evaluation (optional)
return '/path/to/seism'
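# Usage sketch (illustrative; assumes db_root has been pointed at a real directory):
#   Path.db_root_dir('PASCAL')  # -> '/path/to/databases/PASCAL'
#   Path.db_root_dir()          # -> '/path/to/databases'
#   Path.db_root_dir('FOO')     # -> raises NotImplementedError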
|
astmt-master
|
mypath.py
|
astmt-master
|
experiments/__init__.py
|
|
astmt-master
|
experiments/classification/__init__.py
|
|
astmt-master
|
experiments/classification/imagenet/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import os
import copy
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from fblib.util.classification.utils import convert_secs2time, time_string, time_file_str, AverageMeter
from fblib.networks.classification import se_resnet, mobilenet_v2, resnet, resnext
from fblib.util.mypath import Path
def parse_args():
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--data', default=Path.db_root_dir('Imagenet'),
help='path to dataset')
parser.add_argument('--arch', '-a', metavar='ARCH', default='x50')
parser.add_argument('-j', '--workers', default=16, type=int, metavar='N',
help='number of data loading workers (default: 16)')
parser.add_argument('--epochs', default=100, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N',
help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, metavar='LR',
help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W',
help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=200, type=int, metavar='N',
                        help='print frequency (default: 200)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--n_gpu', type=int, default=8,
help='number of GPUs')
parser.add_argument('--group_norm', type=str2bool, default=False,
help='Group Normalization')
args = parser.parse_args()
args.prefix = time_file_str()
return args
def main():
args = parse_args()
best_prec1 = 0
if not args.group_norm:
save_dir = os.path.join(Path.exp_dir(), 'imagenet', args.arch)
else:
save_dir = os.path.join(Path.exp_dir(), 'imagenet', args.arch + '-GN')
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
log = open(os.path.join(save_dir, '{}.{}.log'.format(args.arch, args.prefix)), 'w')
# create model
print_log("=> creating model '{}'".format(args.arch), log)
resol = 224
if args.arch == 'res26':
model = resnet.resnet26(pretrained=False, group_norm=args.group_norm)
elif args.arch == 'res50':
model = resnet.resnet50(pretrained=False, group_norm=args.group_norm)
elif args.arch == 'res101':
model = resnet.resnet101(pretrained=False, group_norm=args.group_norm)
elif args.arch == 'x50':
model = resnext.resnext50_32x4d(pretrained=False)
elif args.arch == 'x101':
model = resnext.resnext101_32x4d(pretrained=False)
elif args.arch == 'res26-se':
model = se_resnet.se_resnet26(num_classes=1000)
elif args.arch == 'res50-se':
model = se_resnet.se_resnet50(num_classes=1000)
elif args.arch == 'res101-se':
model = se_resnet.se_resnet101(num_classes=1000)
elif args.arch == 'mobilenet-v2':
model = mobilenet_v2.mobilenet_v2(pretrained=False, n_class=1000, last_channel=2048)
print_log("=> Model : {}".format(model), log)
print_log("=> parameter : {}".format(args), log)
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features, device_ids=list(range(args.n_gpu)))
model.cuda()
else:
model = torch.nn.DataParallel(model, device_ids=list(range(args.n_gpu))).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
nesterov=True)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print_log("=> loading checkpoint '{}'".format(args.resume), log)
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print_log("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']), log)
else:
raise ValueError("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(resol),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True, sampler=None)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(resol),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion)
return
filename = os.path.join(save_dir, 'checkpoint.{}.{}.pth.tar'.format(args.arch, args.prefix))
bestname = os.path.join(save_dir, 'best.{}.{}.pth.tar'.format(args.arch, args.prefix))
start_time = time.time()
epoch_time = AverageMeter()
for epoch in range(args.start_epoch, args.epochs):
lr = adjust_learning_rate(optimizer, epoch, args)
need_hour, need_mins, need_secs = convert_secs2time(epoch_time.val * (args.epochs - epoch))
need_time = '[Need: {:02d}:{:02d}:{:02d}]'.format(need_hour, need_mins, need_secs)
print_log(' [{:s}] :: {:3d}/{:3d} ----- [{:s}] {:s} LR={:}'.format(args.arch, epoch, args.epochs, time_string(),
need_time, lr), log)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, log, args)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion, log, args)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer': optimizer.state_dict(),
'args': copy.deepcopy(args),
}, is_best, filename, bestname)
# measure elapsed time
epoch_time.update(time.time() - start_time)
start_time = time.time()
log.close()
def train(train_loader, model, criterion, optimizer, epoch, log, args):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
target_var = target.cuda(non_blocking=True)
input_var = input.requires_grad_()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target_var, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
top5.update(prec5.item(), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print_log('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5), log)
def validate(val_loader, model, criterion, log, args):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
with torch.no_grad():
for i, (input_var, target) in enumerate(val_loader):
target_var = target.cuda(non_blocking=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target_var, topk=(1, 5))
losses.update(loss.item(), input_var.size(0))
top1.update(prec1.item(), input_var.size(0))
top5.update(prec5.item(), input_var.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print_log('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5), log)
print_log(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} Error@1 {error1:.3f}'.format(top1=top1, top5=top5,
error1=100 - top1.avg), log)
return top1.avg
def save_checkpoint(state, is_best, filename, bestname):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, bestname)
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
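# Worked example for the step schedule above (illustrative, not part of the original file):
# with args.lr = 0.1, epochs 0-29 run at lr = 0.1, epochs 30-59 at 0.01, epochs 60-89 at 0.001.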
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
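# Minimal sanity check for accuracy() above (hypothetical tensors, not from the original file):
#   logits = torch.tensor([[0.1, 0.9], [0.8, 0.2]])   # batch of 2, 2 classes
#   targets = torch.tensor([1, 1])
#   accuracy(logits, targets, topk=(1,))  # -> [tensor([50.])], i.e. 50% top-1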
def print_log(print_string, log):
print("{}".format(print_string))
log.write('{}\n'.format(print_string))
log.flush()
if __name__ == '__main__':
main()
|
astmt-master
|
experiments/classification/imagenet/train.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
from torchvision import transforms
from torch.utils.data import DataLoader
from fblib.util.helpers import worker_seed
import fblib.util.pdf_visualizer as viz
# Losses
from fblib.layers.loss import BalancedCrossEntropyLoss, SoftMaxwithLoss, NormalsLoss, DepthLoss
# Dataloaders
import fblib.dataloaders as dbs
from fblib.dataloaders.combine_im_dbs import CombineIMDBs
from fblib.layers.loss import normal_ize
# Transformations
from fblib.dataloaders import custom_transforms as tr
# Collate for MIL
from fblib.util.custom_collate import collate_mil
def accuracy(output, target, topk=(1,), ignore_label=255):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = (target != ignore_label).sum().item()
if batch_size == 0:
return -1
_, pred = output.topk(maxk, 1, True, True)
if pred.shape[-1] == 1:
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
else:
correct = pred.eq(target.unsqueeze(1))
res = []
for _ in topk:
correct_k = correct[:].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def eval_all_results(p):
if 'DO_SEMSEG' in p and p.DO_SEMSEG:
from fblib.evaluation.eval_semseg import eval_and_store_semseg
for db in p['infer_db_names']:
eval_and_store_semseg(database=db,
save_dir=p['save_dir_root'],
exp_name=p['exp_name'],
overfit=p.overfit)
if 'DO_HUMAN_PARTS' in p and p.DO_HUMAN_PARTS:
from fblib.evaluation.eval_human_parts import eval_and_store_human_parts
for db in p['infer_db_names']:
eval_and_store_human_parts(database=db,
save_dir=p['save_dir_root'],
exp_name=p['exp_name'],
overfit=p.overfit)
if 'DO_NORMALS' in p and p.DO_NORMALS:
from fblib.evaluation.eval_normals import eval_and_store_normals
for db in p['infer_db_names']:
eval_and_store_normals(database=db,
save_dir=p['save_dir_root'],
exp_name=p['exp_name'],
overfit=p.overfit)
if 'DO_SAL' in p and p.DO_SAL:
from fblib.evaluation.eval_sal import eval_and_store_sal
for db in p['infer_db_names']:
eval_and_store_sal(database=db,
save_dir=p['save_dir_root'],
exp_name=p['exp_name'],
overfit=p.overfit)
if 'DO_DEPTH' in p and p.DO_DEPTH:
from fblib.evaluation.eval_depth import eval_and_store_depth
for db in p['infer_db_names']:
eval_and_store_depth(database=db,
save_dir=p['save_dir_root'],
exp_name=p['exp_name'],
overfit=p.overfit)
if 'DO_ALBEDO' in p and p.DO_ALBEDO:
from fblib.evaluation.eval_albedo import eval_and_store_albedo
for db in p['infer_db_names']:
eval_and_store_albedo(database=db,
save_dir=p['save_dir_root'],
exp_name=p['exp_name'],
overfit=p.overfit)
if 'DO_EDGE' in p and p.DO_EDGE and p['eval_edge']:
from fblib.evaluation.eval_edges import sync_and_evaluate_one_folder
for db in p['infer_db_names']:
sync_and_evaluate_one_folder(database=db,
save_dir=p['save_dir_root'],
exp_name=p['exp_name'],
prefix=p['tasks_name'],
all_tasks_present=(p.MINI if 'MINI' in p else False))
def get_transformations(p):
"""
Get the transformations for training and testing
"""
# Training transformations
# Horizontal flips with probability of 0.5
transforms_tr = [tr.RandomHorizontalFlip()]
# Rotations and scaling
transforms_tr.extend([tr.ScaleNRotate(rots=(-20, 20), scales=(.75, 1.25),
flagvals={x: p.TASKS.FLAGVALS[x] for x in p.TASKS.FLAGVALS})])
# Fixed Resize to input resolution
transforms_tr.extend([tr.FixedResize(resolutions={x: tuple(p.TRAIN.SCALE) for x in p.TASKS.FLAGVALS},
flagvals={x: p.TASKS.FLAGVALS[x] for x in p.TASKS.FLAGVALS})])
transforms_tr.extend([tr.AddIgnoreRegions(), tr.ToTensor()])
transforms_tr = transforms.Compose(transforms_tr)
# Testing (during training transforms)
transforms_ts = []
transforms_ts.extend([tr.FixedResize(resolutions={x: tuple(p.TEST.SCALE) for x in p.TASKS.FLAGVALS},
flagvals={x: p.TASKS.FLAGVALS[x] for x in p.TASKS.FLAGVALS})])
transforms_ts.extend([tr.AddIgnoreRegions(), tr.ToTensor()])
transforms_ts = transforms.Compose(transforms_ts)
# Transformations to be used during inference
transforms_infer = transforms_ts
return transforms_tr, transforms_ts, transforms_infer
def get_loss(p, task=None):
if task == 'edge':
criterion = BalancedCrossEntropyLoss(size_average=True, pos_weight=p['edge_w'])
elif task == 'semseg' or task == 'human_parts':
criterion = SoftMaxwithLoss()
elif task == 'normals':
criterion = NormalsLoss(normalize=True, size_average=True, norm=p['normloss'])
elif task == 'sal':
criterion = BalancedCrossEntropyLoss(size_average=True)
elif task == 'depth':
criterion = DepthLoss()
elif task == 'albedo':
        criterion = torch.nn.L1Loss(reduction='mean')
else:
raise NotImplementedError('Undefined Loss: Choose a task among '
'edge, semseg, human_parts, sal, depth, albedo, or normals')
return criterion
def get_train_loader(p, db_name, transforms):
print('Preparing train loader for db: {}'.format(db_name))
db_names = [db_name] if isinstance(db_name, str) else db_name
dbs_train = {}
for db in db_names:
if db == 'PASCALContext':
dbs_train[db] = dbs.PASCALContext(split=['train'], transform=transforms, retname=True,
do_edge=p.DO_EDGE, do_human_parts=p.DO_HUMAN_PARTS,
do_semseg=p.DO_SEMSEG, do_normals=p.DO_NORMALS, do_sal=p.DO_SAL,
overfit=p['overfit'])
elif db == 'VOC12':
dbs_train[db] = dbs.VOC12(split=['train'], transform=transforms, retname=True,
do_semseg=p.DO_SEMSEG, overfit=p['overfit'])
elif db == 'SBD':
dbs_train[db] = dbs.SBD(split=['train', 'val'], transform=transforms, retname=True,
do_semseg=p.DO_SEMSEG, overfit=p['overfit'])
elif db == 'NYUD_nrm':
dbs_train[db] = dbs.NYUDRaw(split='train', transform=transforms, overfit=p['overfit'])
elif db == 'NYUD':
dbs_train[db] = dbs.NYUD_MT(split='train', transform=transforms, do_edge=p.DO_EDGE, do_semseg=p.DO_SEMSEG,
do_normals=p.DO_NORMALS, do_depth=p.DO_DEPTH, overfit=p['overfit'])
elif db == 'COCO':
dbs_train[db] = dbs.COCOSegmentation(split='train2017', transform=transforms, retname=True,
area_range=[1000, float("inf")], only_pascal_categories=True,
overfit=p['overfit'])
elif db == 'FSV':
dbs_train[db] = dbs.FSVGTA(split='train', mini=False, transform=transforms, retname=True,
do_semseg=p.DO_SEMSEG, do_albedo=p.DO_ALBEDO, do_depth=p.DO_DEPTH,
overfit=p['overfit'])
else:
raise NotImplemented("train_db_name: Choose among BSDS500, PASCALContext, VOC12, COCO, FSV, and NYUD")
if len(dbs_train) == 1:
db_train = dbs_train[list(dbs_train.keys())[0]]
else:
db_exclude = dbs.VOC12(split=['val'], transform=transforms, retname=True,
do_semseg=p.DO_SEMSEG, overfit=p['overfit'])
db_train = CombineIMDBs([dbs_train[x] for x in dbs_train], excluded=[db_exclude], repeat=[1, 1])
trainloader = DataLoader(db_train, batch_size=p['trBatch'], shuffle=True, drop_last=True,
num_workers=4, worker_init_fn=worker_seed, collate_fn=collate_mil)
return trainloader
def get_test_loader(p, db_name, transforms, infer=False):
print('Preparing test loader for db: {}'.format(db_name))
if db_name == 'BSDS500':
db_test = dbs.BSDS500(split=['test'], transform=transforms, overfit=p['overfit'])
elif db_name == 'PASCALContext':
db_test = dbs.PASCALContext(split=['val'], transform=transforms,
retname=True, do_edge=p.DO_EDGE, do_human_parts=p.DO_HUMAN_PARTS,
do_semseg=p.DO_SEMSEG, do_normals=p.DO_NORMALS, do_sal=p.DO_SAL,
overfit=p['overfit'])
elif db_name == 'VOC12':
db_test = dbs.VOC12(split=['val'], transform=transforms,
retname=True, do_semseg=p.DO_SEMSEG, overfit=p['overfit'])
elif db_name == 'NYUD':
db_test = dbs.NYUD_MT(split='val', transform=transforms, do_edge=p.DO_EDGE, do_semseg=p.DO_SEMSEG,
do_normals=p.DO_NORMALS, do_depth=p.DO_DEPTH, overfit=p['overfit'])
elif db_name == 'COCO':
db_test = dbs.COCOSegmentation(split='val2017', transform=transforms, retname=True,
area_range=[1000, float("inf")], only_pascal_categories=True,
overfit=p['overfit'])
elif db_name == 'FSV':
db_test = dbs.FSVGTA(split='test', mini=True, transform=transforms, retname=True,
do_semseg=p.DO_SEMSEG, do_albedo=p.DO_ALBEDO, do_depth=p.DO_DEPTH,
overfit=p['overfit'])
else:
raise NotImplemented("test_db_name: Choose among BSDS500, PASCALContext, VOC12, COCO, FSV, and NYUD")
drop_last = False if infer else True
testloader = DataLoader(db_test, batch_size=p.TEST.BATCH_SIZE, shuffle=False, drop_last=drop_last,
num_workers=2, worker_init_fn=worker_seed)
return testloader
def get_output(output, task):
output = output.permute(0, 2, 3, 1)
if task == 'normals':
output = (normal_ize(output, dim=3) + 1.0) * 255 / 2.0
elif task in {'semseg', 'human_parts'}:
_, output = torch.max(output, dim=3)
elif task in {'edge', 'sal'}:
output = torch.squeeze(255 * 1 / (1 + torch.exp(-output)))
elif task in {'depth'}:
pass
else:
raise ValueError('Select one of the valid tasks')
return output.cpu().data.numpy()
|
astmt-master
|
experiments/dense_predict/common_configs.py
|
MAX_N_IMAGES_PER_GPU = {
'res26-8': 8,
'res26-16': 12,
'res50-8': 8,
'res50-16': 10,
'res101-8': 4,
'res101-16': 10,
'x50-8': 4,
'x50-16': 10,
'x101-8': 2,
'x101-16': 6,
}
|
astmt-master
|
experiments/dense_predict/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import sys
import cv2
import argparse
import torch
import tarfile
from six.moves import urllib
from easydict import EasyDict as edict
# Networks
import fblib.networks.deeplab_multi_task.deeplab_se_resnet_multitask as se_resnet_multitask
# Common configs
from experiments.dense_predict.common_configs import get_loss, get_train_loader, get_test_loader, get_transformations
from fblib.util.mypath import Path
def parse_args():
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(description='Multi-task learning with PASCAL')
# Select tasks
parser.add_argument('--active_tasks', type=int, nargs='+', default=[1, 1, 1, 1, 1],
help='Which tasks to train?')
parser.add_argument('--onlyVOC', type=str2bool, default=False,
help='Use standard training and testing for semantic segmentation')
# General parameters
parser.add_argument('--arch', type=str, default='se_res26',
help='network: se_res26, se_res50, se_res101')
parser.add_argument('--pretr', type=str, default='imagenet',
help='pre-trained model: "imagenet" or "scratch"')
parser.add_argument('--trBatch', type=int, default=8,
help='training batch size')
parser.add_argument('-lr', type=float, default=0.001,
help='initial learning rate. poly-learning rate is used.')
parser.add_argument('--lr_dec', type=float, default=1,
help='decoder learning rate multiplier')
parser.add_argument('-wd', type=float, default=1e-4,
help='Weight decay')
parser.add_argument('--epochs', type=int, default=60,
help='Total number of epochs for training')
parser.add_argument('--resume_epoch', type=int, default=0,
help='Resume Epoch #')
parser.add_argument('--cls', type=str, default='atrous-v3',
help='Classifier type')
parser.add_argument('--stride', type=int, default=16,
help='Output stride of ResNet backbone. If set to 16 saves significant memory')
parser.add_argument('--trNorm', type=str2bool, default=True,
help='train normalization layers of Backbone?')
parser.add_argument('--dec_w', type=int, default=64,
                        help='decoder width (default 256 in Deeplab v3+)')
parser.add_argument('--overfit', type=str2bool, default=False,
help='overfit to small subset of data for debugging purposes')
# Modulation Parameters
parser.add_argument('--seenc', type=str2bool, default=True,
help='Squeeze and excitation per task for encoder? False will still use 1 SE for all tasks')
parser.add_argument('--sedec', type=str2bool, default=True,
help='Squeeze and excitation per task for decoder? False will not use SE modules')
parser.add_argument('--adapt', type=str2bool, default=True,
help='Use parallel residual adapters?')
parser.add_argument('--lr_tsk', type=float, default=-1,
help='Task Specific layer learning rate multiplier')
# Discriminator parameters
parser.add_argument('--dscr', type=str, default='fconv',
help='Use discriminator?')
parser.add_argument('--lr_dscr', type=int, default=10,
help='learning rate multiplier of discriminator?')
parser.add_argument('--dscr_w', type=float, default=0.01,
help='weight of discriminator in the range [0, 1]')
parser.add_argument('--dscrd', type=int, default=2,
help='Depth of discriminator')
parser.add_argument('--dscrk', type=int, default=1,
help='kernel size of discriminator')
# Task-specific parameters
parser.add_argument('--edge_w', type=float, default=0.95,
help='weighting the positive loss for boundary detection as w * L_pos + (1 - w) * L_neg')
return parser.parse_args()
def create_config():
cfg = edict()
args = parse_args()
# Parse tasks
assert (len(args.active_tasks) == 5)
args.do_edge = args.active_tasks[0]
args.do_semseg = args.active_tasks[1]
args.do_human_parts = args.active_tasks[2]
args.do_normals = args.active_tasks[3]
args.do_sal = args.active_tasks[4]
print('\nThis script was run with the following parameters:')
for x in vars(args):
print('{}: {}'.format(x, str(getattr(args, x))))
cfg.resume_epoch = args.resume_epoch
cfg.DO_EDGE = args.do_edge
cfg.DO_SEMSEG = args.do_semseg
cfg.DO_HUMAN_PARTS = args.do_human_parts
cfg.DO_NORMALS = args.do_normals
cfg.DO_SAL = args.do_sal
if not cfg.DO_EDGE and not cfg.DO_SEMSEG and not cfg.DO_HUMAN_PARTS and not cfg.DO_NORMALS and not cfg.DO_SAL:
raise ValueError("Select at least one task")
cfg['arch'] = args.arch
cfg['pretr'] = args.pretr
cfg['trBatch'] = args.trBatch
cfg['lr'] = args.lr
cfg['lr_dec'] = args.lr_dec
cfg['wd'] = args.wd
cfg['cls'] = args.cls
cfg['epochs'] = args.epochs
cfg['stride'] = args.stride
cfg['trNorm'] = args.trNorm
cfg['dec_w'] = args.dec_w
# Set Modulation (Squeeze and Exciation, Residual Adapters) parameters
cfg['seenc'] = args.seenc
cfg['sedec'] = args.sedec
cfg['adapters'] = args.adapt
if cfg['sedec']:
cfg['norm_per_task'] = True
else:
cfg['norm_per_task'] = False
if args.dscr == 'None':
args.dscr = None
cfg['dscr_type'] = args.dscr
cfg['lr_dscr'] = args.lr_dscr
cfg['dscr_w'] = args.dscr_w
cfg['dscrd'] = args.dscrd
cfg['dscrk'] = args.dscrk
task_args, name_args = get_exp_name(args)
cfg['exp_folder_name'] = 'pascal_resnet'
cfg['exp_name'] = "_".join(name_args)
cfg['tasks_name'] = "_".join(task_args)
cfg['save_dir_root'] = os.path.join(Path.exp_dir(), cfg['exp_folder_name'], cfg['tasks_name'])
if args.onlyVOC:
cfg['train_db_name'] = ['VOC12', 'SBD']
cfg['test_db_name'] = 'VOC12'
cfg['infer_db_names'] = ['VOC12', ]
else:
cfg['train_db_name'] = ['PASCALContext', ]
cfg['test_db_name'] = 'PASCALContext'
cfg['infer_db_names'] = ['PASCALContext', ]
# Which tasks?
cfg.TASKS = edict()
cfg.TASKS.NAMES = []
cfg.TASKS.NUM_OUTPUT = {} # How many outputs per task?
cfg.TASKS.TB_MIN = {}
cfg.TASKS.TB_MAX = {}
cfg.TASKS.LOSS_MULT = {}
cfg.TASKS.FLAGVALS = {'image': cv2.INTER_CUBIC}
cfg.TASKS.INFER_FLAGVALS = {}
if cfg.DO_EDGE:
# Edge Detection
print('Adding task: Edge Detection')
tmp = 'edge'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 1
cfg.TASKS.TB_MIN[tmp] = 0
cfg.TASKS.TB_MAX[tmp] = cfg.TASKS.NUM_OUTPUT[tmp]
cfg.TASKS.LOSS_MULT[tmp] = 50
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_LINEAR
# Add task-specific parameters from parser
cfg['edge_w'] = args.edge_w
cfg['eval_edge'] = False
if cfg.DO_SEMSEG:
# Semantic Segmentation
print('Adding task: Semantic Segmentation')
tmp = 'semseg'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 21
cfg.TASKS.TB_MIN[tmp] = 0
cfg.TASKS.TB_MAX[tmp] = cfg.TASKS.NUM_OUTPUT[tmp] - 1
cfg.TASKS.LOSS_MULT[tmp] = 1
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_NEAREST
if cfg.DO_HUMAN_PARTS:
# Human Parts Segmentation
print('Adding task: Human Part Segmentation')
tmp = 'human_parts'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 7
cfg.TASKS.TB_MIN[tmp] = 0
cfg.TASKS.TB_MAX[tmp] = cfg.TASKS.NUM_OUTPUT[tmp] - 1
cfg.TASKS.LOSS_MULT[tmp] = 2
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_NEAREST
if cfg.DO_NORMALS:
# Human Parts Segmentation
print('Adding task: Normals')
tmp = 'normals'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 3
cfg.TASKS.TB_MIN[tmp] = -1
cfg.TASKS.TB_MAX[tmp] = 1
cfg.TASKS.LOSS_MULT[tmp] = 10
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_CUBIC
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_LINEAR
cfg['normloss'] = 1 # Hard-coded L1 loss for normals
if cfg.DO_SAL:
# Saliency Estimation
print('Adding task: Saliency')
tmp = 'sal'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 1
cfg.TASKS.TB_MIN[tmp] = 0
cfg.TASKS.TB_MAX[tmp] = 1
cfg.TASKS.LOSS_MULT[tmp] = 5
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_LINEAR
cfg['lr_tsk'] = len(cfg.TASKS.NAMES) if args.lr_tsk < 0 else args.lr_tsk
cfg.NETWORK = edict()
# Visualize the network on Tensorboard / pdf?
cfg.NETWORK.VIS_NET = False
cfg.TRAIN = edict()
cfg.TRAIN.SCALE = (512, 512)
cfg.TRAIN.MOMENTUM = 0.9
cfg.TRAIN.TENS_VIS = True
cfg.TRAIN.TENS_VIS_INTER = 1000
cfg.TRAIN.TEMP_LOSS_INTER = 1000
cfg.TEST = edict()
# See evolution of the test set when training?
cfg.TEST.USE_TEST = True
cfg.TEST.TEST_INTER = 10
cfg.TEST.SCALE = (512, 512)
cfg.SEED = 0
cfg.EVALUATE = True
cfg.DEBUG = False
cfg['overfit'] = args.overfit
if cfg['overfit']:
cfg['save_dir_root'] = os.path.join(Path.exp_dir(), cfg['exp_folder_name'])
cfg['exp_name'] = 'test'
cfg['save_dir'] = os.path.join(cfg['save_dir_root'], cfg['exp_name'])
return cfg
def check_downloaded(p):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> %s %.1f%%' %
(_fpath, float(count * block_size) /
float(total_size) * 100.0))
sys.stdout.flush()
def _create_url(name):
return 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL//models/astmt/{}.tgz'.format(name)
_model_urls = {
'pascal_resnet_edge_semseg_human_parts_normals_sal_'
'arch-se_res26_pretr-imagenet_trBatch-8_lr-0.001_epochs-60_trNorm_seenc_sedec_adapt_dscr-fconv_lr_dscr'
'-10_dscr_w-0.01_dscrd-2_dscrk-1_edge_w-0.95_60',
}
ans = False
_check = p['exp_folder_name'] + '_' + p['tasks_name'] + '_' + p['exp_name'] + '_' + str(p['resume_epoch'])
_fpath = os.path.join(Path.exp_dir(), _check + '.tgz')
if _check in _model_urls:
if not os.path.isfile(os.path.join(p['save_dir'], 'models',
'model_epoch-' + str(p['resume_epoch'] - 1) + '.pth')):
urllib.request.urlretrieve(_create_url(_check), _fpath, _progress)
# extract file
cwd = os.getcwd()
print('\nExtracting tar file')
tar = tarfile.open(_fpath)
os.chdir(Path.exp_dir())
tar.extractall()
tar.close()
os.chdir(cwd)
print('Done!')
ans = True
return ans
def get_net_resnet(p):
"""
Define the network (standard Deeplab ResNet101) and the trainable parameters
"""
if p['arch'] == 'se_res26':
network = se_resnet_multitask.se_resnet26
elif p['arch'] == 'se_res50':
network = se_resnet_multitask.se_resnet50
elif p['arch'] == 'se_res101':
network = se_resnet_multitask.se_resnet101
else:
raise NotImplementedError('ResNet: Choose between among se_res26, se_res50, and se_res101')
print('Creating ResNet model: {}'.format(p.NETWORK))
net = network(tasks=p.TASKS.NAMES, n_classes=p.TASKS.NUM_OUTPUT, pretrained=p['pretr'], classifier=p['cls'],
output_stride=p['stride'], train_norm_layers=p['trNorm'], width_decoder=p['dec_w'],
squeeze_enc=p['seenc'], squeeze_dec=p['sedec'], adapters=p['adapters'],
norm_per_task=p['norm_per_task'], dscr_type=p['dscr_type'], dscr_d=p['dscrd'], dscr_k=p['dscrk'])
if p['resume_epoch'] != 0:
print("Initializing weights from: {}".format(
os.path.join(p['save_dir'], 'models', 'model_epoch-' + str(p['resume_epoch'] - 1) + '.pth')))
state_dict_checkpoint = torch.load(
os.path.join(p['save_dir'], 'models', 'model_epoch-' + str(p['resume_epoch'] - 1) + '.pth')
, map_location=lambda storage, loc: storage)
net.load_state_dict(state_dict_checkpoint)
return net
def get_train_params(net, p):
train_params = [{'params': se_resnet_multitask.get_lr_params(net, part='backbone', tasks=p.TASKS.NAMES),
'lr': p['lr']},
{'params': se_resnet_multitask.get_lr_params(net, part='decoder', tasks=p.TASKS.NAMES),
'lr': p['lr'] * p['lr_dec']},
{'params': se_resnet_multitask.get_lr_params(net, part='task_specific', tasks=p.TASKS.NAMES),
'lr': p['lr'] * p['lr_tsk']}]
if p['dscr_type'] is not None:
train_params.append(
{'params': se_resnet_multitask.get_lr_params(net, part='discriminator', tasks=p.TASKS.NAMES),
'lr': p['lr'] * p['lr_dscr']})
return train_params
def get_exp_name(args):
"""
Creates the name experiment from the configuration file and the arguments
"""
task_dict = {
'do_edge': 0,
'do_semseg': 0,
'do_human_parts': 0,
'do_normals': 0,
'do_sal': 0
}
name_dict = {
'arch': None,
'onlyVOC': False,
'pretr': None,
'trBatch': None,
'lr': None,
'wd': 1e-04,
'epochs': None,
'cls': 'atrous-v3',
'stride': 16,
'trNorm': False,
'dec_w': 64,
'seenc': False,
'sedec': False,
'adapt': False,
'dscr': None,
'lr_dscr': 1,
'dscr_w': ('dscr', None),
'dscrd': ('dscr', None),
'dscrk': ('dscr', None),
'edge_w': ('do_edge', None),
'lr_dec': 1,
'lr_tsk': -1,
}
# Experiment folder (task) string
task_args = [x.replace('do_', '') for x in task_dict if getattr(args, x) != task_dict[x]]
# Experiment name string
name_args = []
for x in name_dict:
# Check dependencies in tuple
if isinstance(name_dict[x], tuple):
elem = name_dict if name_dict[x][0] in name_dict else task_dict
if elem[name_dict[x][0]] == getattr(args, name_dict[x][0]):
continue
if getattr(args, x) != name_dict[x]:
tmp = getattr(args, x)
if isinstance(tmp, list):
tmp = "_".join([str(x) for x in tmp])
else:
tmp = str(tmp)
name_args.append(x + '-' + tmp)
name_args = [x.replace('-True', '') for x in name_args]
return task_args, name_args
|
astmt-master
|
experiments/dense_predict/pascal_resnet/config.py
|
MAX_N_IMAGES_PER_GPU = {
'se_res26-8': 10,
'se_res26-16': 16,
'se_res50-8': 8,
'se_res50-16': 10,
'se_res101-8': 2,
'se_res101-16': 8,
}
|
astmt-master
|
experiments/dense_predict/pascal_resnet/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import socket
import timeit
import cv2
from datetime import datetime
import imageio
import numpy as np
# PyTorch includes
import torch
import torch.optim as optim
from torch.nn.functional import interpolate
# Custom includes
from fblib.util.helpers import generate_param_report
from fblib.util.dense_predict.utils import lr_poly
from experiments.dense_predict import common_configs
from fblib.util.mtl_tools.multitask_visualizer import TBVisualizer, visualize_network
from fblib.util.model_resources.flops import compute_gflops
from fblib.util.model_resources.num_parameters import count_parameters
from fblib.util.dense_predict.utils import AverageMeter
# Custom optimizer
from fblib.util.optimizer_mtl.select_used_modules import make_closure
# Configuration file
from experiments.dense_predict.pascal_resnet import config as config
# Tensorboard include
from tensorboardX import SummaryWriter
def main():
p = config.create_config()
gpu_id = 0
device = torch.device("cuda:" + str(gpu_id) if torch.cuda.is_available() else "cpu")
p.TEST.BATCH_SIZE = 32
# Setting parameters
n_epochs = p['epochs']
print("Total training epochs: {}".format(n_epochs))
print(p)
print('Training on {}'.format(p['train_db_name']))
snapshot = 10 # Store a model every snapshot epochs
test_interval = p.TEST.TEST_INTER # Run on test set every test_interval epochs
torch.manual_seed(p.SEED)
exp_name = os.path.dirname(os.path.abspath(__file__)).split('/')[-1]
if not os.path.exists(os.path.join(p['save_dir'], 'models')):
if p['resume_epoch'] == 0:
os.makedirs(os.path.join(p['save_dir'], 'models'))
else:
if not config.check_downloaded(p):
                print('Folder does not exist. No checkpoint to resume from. Exiting')
exit(1)
net = config.get_net_resnet(p)
# Visualize the network
if p.NETWORK.VIS_NET:
visualize_network(net, p)
gflops = compute_gflops(net, in_shape=(p['trBatch'], 3, p.TRAIN.SCALE[0], p.TRAIN.SCALE[1]),
tasks=p.TASKS.NAMES[0])
print('GFLOPS per task: {}'.format(gflops / p['trBatch']))
print('\nNumber of parameters (in millions): {0:.3f}'.format(count_parameters(net) / 1e6))
print('Number of parameters (in millions) for decoder: {0:.3f}\n'.format(count_parameters(net.decoder) / 1e6))
net.to(device)
if p['resume_epoch'] != n_epochs:
criteria_tr = {}
criteria_ts = {}
running_loss_tr = {task: 0. for task in p.TASKS.NAMES}
running_loss_ts = {task: 0. for task in p.TASKS.NAMES}
curr_loss_task = {task: 0. for task in p.TASKS.NAMES}
counter_tr = {task: 0 for task in p.TASKS.NAMES}
counter_ts = {task: 0 for task in p.TASKS.NAMES}
# Discriminator loss variables for logging
running_loss_tr_dscr = 0
running_loss_ts_dscr = 0
# Logging into Tensorboard
log_dir = os.path.join(p['save_dir'], 'models',
datetime.now().strftime('%b%d_%H-%M-%S') + '_' + socket.gethostname())
writer = SummaryWriter(log_dir=log_dir)
# Training parameters and their optimizer
train_params = config.get_train_params(net, p)
optimizer = optim.SGD(train_params, lr=p['lr'], momentum=p.TRAIN.MOMENTUM, weight_decay=p['wd'])
for task in p.TASKS.NAMES:
# Losses
criteria_tr[task] = config.get_loss(p, task)
criteria_ts[task] = config.get_loss(p, task)
criteria_tr[task].to(device)
criteria_ts[task].to(device)
# Preparation of the data loaders
transforms_tr, transforms_ts, _ = config.get_transformations(p)
trainloader = config.get_train_loader(p, db_name=p['train_db_name'], transforms=transforms_tr)
testloader = config.get_test_loader(p, db_name=p['test_db_name'], transforms=transforms_ts)
# TensorBoard Image Visualizer
tb_vizualizer = TBVisualizer(tasks=p.TASKS.NAMES, min_ranges=p.TASKS.TB_MIN, max_ranges=p.TASKS.TB_MAX,
batch_size=p['trBatch'])
generate_param_report(os.path.join(p['save_dir'], exp_name + '.txt'), p)
# Train variables
num_img_tr = len(trainloader)
num_img_ts = len(testloader)
print("Training Network")
# Main Training and Testing Loop
for epoch in range(p['resume_epoch'], n_epochs):
top1_dscr = AverageMeter()
start_time = timeit.default_timer()
# One training epoch
net.train()
alpha = 2. / (1. + np.exp(-10 * ((epoch + 1) / n_epochs))) - 1 # Ganin et al for gradient reversal
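            # Illustrative values of the schedule above (comment added, not in the original file):
            # alpha ≈ 0.08 after the first of 60 epochs and approaches 1.0 by the end of
            # training, gradually increasing the weight on the reversed (adversarial) gradient.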
if p['dscr_type'] is not None:
print('Value of alpha: {}'.format(alpha))
for ii, sample in enumerate(trainloader):
curr_loss_dscr = 0
# Grab the input
inputs = sample['image']
inputs.requires_grad_()
inputs = inputs.to(device)
task_gts = list(sample.keys())
tasks = net.tasks
gt_elems = {x: sample[x].to(device, non_blocking=True) for x in tasks}
uniq = {x: gt_elems[x].unique() for x in gt_elems}
outputs = {}
for task in tasks:
if task not in task_gts:
continue
if len(uniq[task]) == 1 and uniq[task][0] == 255:
continue
# Forward pass
output = {}
features = {}
output[task], features[task] = net.forward(inputs, task=task)
losses_tasks, losses_dscr, outputs_dscr, grads, task_labels \
= net.compute_losses(output, features, criteria_tr, gt_elems, alpha, p)
loss_tasks = losses_tasks[task]
running_loss_tr[task] += losses_tasks[task].item()
curr_loss_task[task] = losses_tasks[task].item()
counter_tr[task] += 1
# Store output for logging
outputs[task] = output[task].detach()
if p['dscr_type'] is not None:
# measure loss, accuracy and record accuracy for discriminator
loss_dscr = losses_dscr[task]
running_loss_tr_dscr += losses_dscr[task].item()
curr_loss_dscr += loss_dscr.item()
prec1 = common_configs.accuracy(outputs_dscr[task].data, task_labels[task], topk=(1,))
if prec1 != -1:
top1_dscr.update(prec1[0].item(), task_labels[task].size(0))
loss = (1 - p['dscr_w']) * loss_tasks + p['dscr_w'] * loss_dscr
else:
loss = loss_tasks
# Backward pass inside make_closure to update only weights that were used during fw pass
optimizer.zero_grad()
optimizer.step(closure=make_closure(loss=loss, net=net))
# Print stuff and log epoch loss into Tensorboard
if ii % num_img_tr == num_img_tr - 1:
print('[Epoch: %d, numImages: %5d]' % (epoch, ii + 1))
for task in p.TASKS.NAMES:
running_loss_tr[task] = running_loss_tr[task] / counter_tr[task]
writer.add_scalar('data/total_loss_epoch' + task,
running_loss_tr[task] / p.TASKS.LOSS_MULT[task], epoch)
print('Loss %s: %f' % (task, running_loss_tr[task]))
running_loss_tr[task] = 0
counter_tr[task] = 0
if p['dscr_type'] is not None:
running_loss_tr_dscr = running_loss_tr_dscr / num_img_tr / len(p.TASKS.NAMES)
writer.add_scalar('data/total_loss_epoch_dscr', running_loss_tr_dscr, epoch)
print('Loss Discriminator: %f' % running_loss_tr_dscr)
print('Train Accuracy Discriminator: Prec@1 {top1.avg:.3f} Error@1 {error1:.3f}'.format(
top1=top1_dscr, error1=100 - top1_dscr.avg))
writer.add_scalar('data/train_accuracy_dscr', top1_dscr.avg, epoch)
stop_time = timeit.default_timer()
print("Execution time: " + str(stop_time - start_time) + "\n")
# Log current train loss into Tensorboard
for task in p.TASKS.NAMES:
writer.add_scalar('data/train_loss_iter_' + task, curr_loss_task[task], ii + num_img_tr * epoch)
curr_loss_task[task] = 0.
if p['dscr_type'] is not None:
writer.add_scalar('data/train_loss_dscr_iter', curr_loss_dscr, ii + num_img_tr * epoch)
curr_loss_dscr = 0.
# Log train images to Tensorboard
if p['overfit'] and p.TRAIN.TENS_VIS and ii % p.TRAIN.TENS_VIS_INTER == 0:
curr_iter = ii + num_img_tr * epoch
tb_vizualizer.visualize_images_tb(writer, sample, outputs,
global_step=curr_iter, tag=ii, phase='train')
if ii % num_img_tr == num_img_tr - 1:
lr_ = lr_poly(p['lr'], iter_=epoch, max_iter=n_epochs)
print('(poly lr policy) learning rate: {0:.6f}'.format(lr_))
train_params = config.get_train_params(net, p)
optimizer = optim.SGD(train_params, lr=lr_, momentum=p.TRAIN.MOMENTUM, weight_decay=p['wd'])
optimizer.zero_grad()
# Save the model
if (epoch % snapshot) == snapshot - 1 and epoch != 0:
torch.save(net.state_dict(), os.path.join(p['save_dir'], 'models',
'model_epoch-' + str(epoch) + '.pth'))
# One testing epoch
if p.TEST.USE_TEST and epoch % test_interval == (test_interval - 1):
print('Testing Phase')
top1_dscr = AverageMeter()
net.eval()
start_time = timeit.default_timer()
for ii, sample in enumerate(testloader):
inputs = sample['image'].to(device)
task_gts = list(sample.keys())
tasks = net.tasks
gt_elems = {x: sample[x].to(device, non_blocking=True) for x in tasks}
uniq = {x: gt_elems[x].unique() for x in gt_elems}
outputs = {}
for task in tasks:
if task not in task_gts:
continue
if len(uniq[task]) == 1 and uniq[task][0] == 255:
continue
# forward pass of the mini-batch
output = {}
features = {}
output[task], features[task] = net.forward(inputs, task=task)
losses_tasks, losses_dscr, outputs_dscr, grads, task_labels \
= net.compute_losses(output, features, criteria_tr, gt_elems, alpha, p)
running_loss_ts[task] += losses_tasks[task].item()
counter_ts[task] += 1
# for logging
outputs[task] = output[task].detach()
if p['dscr_type'] is not None:
running_loss_ts_dscr += losses_dscr[task].item()
# measure accuracy and record loss for discriminator
prec1 = common_configs.accuracy(outputs_dscr[task].data, task_labels[task], topk=(1,))
if prec1 != -1:
top1_dscr.update(prec1[0].item(), task_labels[task].size(0))
# Print stuff
if ii % num_img_ts == num_img_ts - 1:
print('[Epoch: %d, numTestImages: %5d]' % (epoch, ii + 1))
for task in p.TASKS.NAMES:
running_loss_ts[task] = running_loss_ts[task] / counter_ts[task]
writer.add_scalar('data/test_loss_' + task + '_epoch',
running_loss_ts[task] / p.TASKS.LOSS_MULT[task], epoch)
print('Testing Loss %s: %f' % (task, running_loss_ts[task]))
running_loss_ts[task] = 0
counter_ts[task] = 0
# Free the graph
losses_tasks = {}
if p['dscr_type'] is not None:
running_loss_ts_dscr = running_loss_ts_dscr / num_img_ts / len(p.TASKS.NAMES)
writer.add_scalar('data/test_loss_dscr', running_loss_ts_dscr, epoch)
print('Loss Discriminator: %f' % running_loss_ts_dscr)
writer.add_scalar('data/test_accuracy_dscr', top1_dscr.avg, epoch)
print('Test Accuracy Discriminator: Prec@1 {top1.avg:.3f} Error@1 {error1:.3f}'.format(
top1=top1_dscr, error1=100 - top1_dscr.avg))
# Free the graph
losses_dscr = {}
stop_time = timeit.default_timer()
print("Execution time: " + str(stop_time - start_time) + "\n")
# Log test images to Tensorboard
if p.TRAIN.TENS_VIS and ii % p.TRAIN.TENS_VIS_INTER == 0:
curr_iter = ii + num_img_tr * epoch
tb_vizualizer.visualize_images_tb(writer, sample, outputs,
global_step=curr_iter, tag=ii, phase='test')
writer.close()
# Generate Results
net.eval()
_, _, transforms_infer = config.get_transformations(p)
for db_name in p['infer_db_names']:
testloader = config.get_test_loader(p, db_name=db_name, transforms=transforms_infer, infer=True)
save_dir_res = os.path.join(p['save_dir'], 'Results_' + db_name)
print('Testing Network')
# Main Testing Loop
with torch.no_grad():
for ii, sample in enumerate(testloader):
img, meta = sample['image'], sample['meta']
# Forward pass of the mini-batch
inputs = img.to(device)
tasks = net.tasks
for task in tasks:
output, _ = net.forward(inputs, task=task)
save_dir_task = os.path.join(save_dir_res, task)
if not os.path.exists(save_dir_task):
os.makedirs(save_dir_task)
output = interpolate(output, size=(inputs.size()[-2], inputs.size()[-1]),
mode='bilinear', align_corners=False)
output = common_configs.get_output(output, task)
for jj in range(int(inputs.size()[0])):
if len(sample[task][jj].unique()) == 1 and sample[task][jj].unique() == 255:
continue
fname = meta['image'][jj]
result = cv2.resize(output[jj], dsize=(meta['im_size'][1][jj], meta['im_size'][0][jj]),
interpolation=p.TASKS.INFER_FLAGVALS[task])
imageio.imwrite(os.path.join(save_dir_task, fname + '.png'), result.astype(np.uint8))
if p.EVALUATE:
common_configs.eval_all_results(p)
if __name__ == '__main__':
main()
|
astmt-master
|
experiments/dense_predict/pascal_resnet/main.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import sys
import cv2
import argparse
import torch
import tarfile
from six.moves import urllib
from easydict import EasyDict as edict
# Networks
import fblib.networks.deeplab_multi_task.deeplab_se_mobilenet_v2_multitask as se_mobilenet_v2
# Common configs
from experiments.dense_predict.common_configs import get_loss, get_train_loader, get_test_loader, get_transformations
from fblib.util.mypath import Path
def parse_args():
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(description='Multi-task Learning with PASCAL and MobileNet-v2')
# Select tasks
parser.add_argument('--active_tasks', type=int, nargs='+', default=[1, 1, 1, 1, 1],
help='Which tasks to train?')
# General parameters
parser.add_argument('--arch', type=str, default='mnetv2',
help='network: Mobilenet v2')
parser.add_argument('--pretr', type=str, default='imagenet',
help='pre-trained model: "imagenet" or "scratch"')
parser.add_argument('--trBatch', type=int, default=16,
help='training batch size')
parser.add_argument('-lr', type=float, default=0.001,
help='initial learning rate. poly-learning rate is used.')
parser.add_argument('--lr_dec', type=float, default=1,
help='decoder learning rate multiplier')
parser.add_argument('-wd', type=float, default=1e-4,
help='Weight decay')
parser.add_argument('--epochs', type=int, default=130,
help='total number of epochs for training')
parser.add_argument('--resume_epoch', type=int, default=0,
help='Resume Epoch #')
parser.add_argument('--stride', type=int, default=16,
                        help='output stride of the MobileNet-V2 backbone. If set to 16 saves significant memory')
parser.add_argument('--trNorm', type=str2bool, default=True,
help='train normalization layers of backbone?')
parser.add_argument('--poly', type=str2bool, default=True,
help='Use poly learning rate')
parser.add_argument('--overfit', type=str2bool, default=False,
help='overfit to small subset of data for debugging purposes')
# Squeeze and Excitation Parameters
parser.add_argument('--seenc', type=str2bool, default=True,
help='Squeeze and excitation per task on encoder?')
parser.add_argument('--sedec', type=str2bool, default=True,
help='Squeeze and excitation per task on decoder?')
parser.add_argument('--lr_tsk', type=float, default=-1,
help='Task Specific layer learning rate multiplier')
# Discriminator parameters
parser.add_argument('--dscr', type=str, default='None',
help='Use discriminator?')
parser.add_argument('--lr_dscr', type=int, default=1,
help='learning rate multiplier of discriminator?')
parser.add_argument('--dscr_w', type=float, default=0,
help='weight of discriminator in the range [0, 1]')
parser.add_argument('--dscrd', type=int, default=2,
help='Depth of discriminator')
parser.add_argument('--dscrk', type=int, default=1,
help='kernel size of discriminator')
# Task-specific parameters
parser.add_argument('--edge_w', type=float, default=0.95,
help='weighting the positive loss for boundary detection as w * L_pos + (1 - w) * L_neg')
return parser.parse_args()
def create_config():
cfg = edict()
args = parse_args()
# Parse tasks
assert (len(args.active_tasks) == 5)
args.do_edge = args.active_tasks[0]
args.do_semseg = args.active_tasks[1]
args.do_human_parts = args.active_tasks[2]
args.do_normals = args.active_tasks[3]
args.do_sal = args.active_tasks[4]
print('\nThis script was run with the following parameters:')
for x in vars(args):
print('{}: {}'.format(x, str(getattr(args, x))))
cfg.resume_epoch = args.resume_epoch
cfg.DO_EDGE = args.do_edge
cfg.DO_SEMSEG = args.do_semseg
cfg.DO_HUMAN_PARTS = args.do_human_parts
cfg.DO_NORMALS = args.do_normals
cfg.DO_SAL = args.do_sal
if not cfg.DO_EDGE and not cfg.DO_SEMSEG and not cfg.DO_HUMAN_PARTS and not cfg.DO_NORMALS and not cfg.DO_SAL:
raise ValueError("Select at least one task")
cfg['arch'] = args.arch
cfg['pretr'] = args.pretr
cfg['trBatch'] = args.trBatch
cfg['lr'] = args.lr
cfg['lr_dec'] = args.lr_dec
cfg['wd'] = args.wd
cfg['epochs'] = args.epochs
cfg['stride'] = args.stride
cfg['trNorm'] = args.trNorm
cfg['poly'] = args.poly
# Set squeeze and excitation parameters
cfg['seenc'] = args.seenc
cfg['sedec'] = args.sedec
if args.dscr == 'None':
args.dscr = None
cfg['dscr_type'] = args.dscr
cfg['lr_dscr'] = args.lr_dscr
cfg['dscr_w'] = args.dscr_w
cfg['dscrd'] = args.dscrd
cfg['dscrk'] = args.dscrk
task_args, name_args = get_exp_name(args)
cfg['exp_folder_name'] = 'pascal_mnet'
cfg['exp_name'] = "_".join(name_args)
cfg['tasks_name'] = "_".join(task_args)
cfg['save_dir_root'] = os.path.join(Path.exp_dir(), cfg['exp_folder_name'], cfg['tasks_name'])
cfg['train_db_name'] = ['PASCALContext', ]
cfg['test_db_name'] = 'PASCALContext'
cfg['infer_db_names'] = ['PASCALContext', ]
# Which tasks?
cfg.TASKS = edict()
cfg.TASKS.NAMES = []
cfg.TASKS.NUM_OUTPUT = {} # How many outputs per task?
cfg.TASKS.TB_MIN = {}
cfg.TASKS.TB_MAX = {}
cfg.TASKS.LOSS_MULT = {}
cfg.TASKS.FLAGVALS = {'image': cv2.INTER_CUBIC}
cfg.TASKS.INFER_FLAGVALS = {}
if cfg.DO_EDGE:
# Edge Detection
print('Adding task: Edge Detection')
tmp = 'edge'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 1
cfg.TASKS.TB_MIN[tmp] = 0
cfg.TASKS.TB_MAX[tmp] = cfg.TASKS.NUM_OUTPUT[tmp]
cfg.TASKS.LOSS_MULT[tmp] = 50
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_LINEAR
# Add task-specific parameters from parser
cfg['edge_w'] = args.edge_w
cfg['eval_edge'] = False
if cfg.DO_SEMSEG:
# Semantic Segmentation
print('Adding task: Semantic Segmentation')
tmp = 'semseg'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 21
cfg.TASKS.TB_MIN[tmp] = 0
cfg.TASKS.TB_MAX[tmp] = cfg.TASKS.NUM_OUTPUT[tmp] - 1
cfg.TASKS.LOSS_MULT[tmp] = 1
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_NEAREST
if cfg.DO_HUMAN_PARTS:
# Human Parts Segmentation
print('Adding task: Human Part Segmentation')
tmp = 'human_parts'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 7
cfg.TASKS.TB_MIN[tmp] = 0
cfg.TASKS.TB_MAX[tmp] = cfg.TASKS.NUM_OUTPUT[tmp] - 1
cfg.TASKS.LOSS_MULT[tmp] = 2
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_NEAREST
if cfg.DO_NORMALS:
        # Surface Normals Estimation
print('Adding task: Normals')
tmp = 'normals'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 3
cfg.TASKS.TB_MIN[tmp] = -1
cfg.TASKS.TB_MAX[tmp] = 1
cfg.TASKS.LOSS_MULT[tmp] = 10
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_CUBIC
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_LINEAR
cfg['normloss'] = 1 # Hard-coded L1 loss for normals
if cfg.DO_SAL:
# Saliency Estimation
print('Adding task: Saliency')
tmp = 'sal'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 1
cfg.TASKS.TB_MIN[tmp] = 0
cfg.TASKS.TB_MAX[tmp] = 1
cfg.TASKS.LOSS_MULT[tmp] = 5
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_LINEAR
cfg['lr_tsk'] = len(cfg.TASKS.NAMES) if args.lr_tsk < 0 else args.lr_tsk
cfg.NETWORK = edict()
# Visualize the network on Tensorboard / pdf?
cfg.NETWORK.VIS_NET = False
cfg.TRAIN = edict()
cfg.TRAIN.SCALE = (512, 512)
cfg.TRAIN.MOMENTUM = 0.9
cfg.TRAIN.TENS_VIS = False
cfg.TRAIN.TENS_VIS_INTER = 1000
cfg.TRAIN.TEMP_LOSS_INTER = 1000
cfg.TEST = edict()
# See evolution of the test set when training?
cfg.TEST.USE_TEST = True
cfg.TEST.TEST_INTER = 10
cfg.TEST.SCALE = (512, 512)
cfg.SEED = 0
cfg.EVALUATE = True
cfg.DEBUG = False
cfg['overfit'] = args.overfit
if cfg['overfit']:
cfg['save_dir_root'] = os.path.join(Path.exp_dir(), cfg['exp_folder_name'])
cfg['exp_name'] = 'test'
cfg['save_dir'] = os.path.join(cfg['save_dir_root'], cfg['exp_name'])
return cfg
def check_downloaded(p):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> %s %.1f%%' %
(_fpath, float(count * block_size) /
float(total_size) * 100.0))
sys.stdout.flush()
def _create_url(name):
return 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL//models/astmt/{}.tgz'.format(name)
_model_urls = {
'pascal_mnet_edge_semseg_human_parts_normals_sal_'
'arch-mnetv2_pretr-imagenet_trBatch-16_lr-0.001_epochs-130_trNorm_poly_seenc_sedec_edge_w-0.95_130'
}
ans = False
_check = p['exp_folder_name'] + '_' + p['tasks_name'] + '_' + p['exp_name'] + '_' + str(p['resume_epoch'])
_fpath = os.path.join(Path.exp_dir(), _check + '.tgz')
if _check in _model_urls:
if not os.path.isfile(os.path.join(p['save_dir'], 'models',
'model_epoch-' + str(p['resume_epoch'] - 1) + '.pth')):
urllib.request.urlretrieve(_create_url(_check), _fpath, _progress)
# extract file
cwd = os.getcwd()
print('\nExtracting tar file')
tar = tarfile.open(_fpath)
os.chdir(Path.exp_dir())
tar.extractall()
tar.close()
os.chdir(cwd)
print('Done!')
ans = True
return ans
def get_net_mnet(p):
"""
    Define the network (DeepLab with a MobileNet-V2 backbone) and load weights when resuming from a checkpoint
"""
print('Creating DeepLab with Mobilenet-V2 model: {}'.format(p.NETWORK))
network = se_mobilenet_v2.se_mobilenet_v2
net = network(n_classes=p.TASKS.NUM_OUTPUT,
pretrained=p['pretr'],
tasks=p.TASKS.NAMES,
output_stride=p['stride'],
train_norm_layers=p['trNorm'],
mod_enc=p['seenc'],
mod_dec=p['sedec'],
use_dscr=(p['dscr_type'] == 'fconv'),
dscr_k=p['dscrk'],
dscr_d=p['dscrd'])
if p['resume_epoch'] != 0:
print("Initializing weights from: {}".format(
os.path.join(p['save_dir'], 'models', 'model_epoch-' + str(p['resume_epoch'] - 1) + '.pth')))
state_dict_checkpoint = torch.load(
os.path.join(p['save_dir'], 'models', 'model_epoch-' + str(p['resume_epoch'] - 1) + '.pth')
, map_location=lambda storage, loc: storage)
net.load_state_dict(state_dict_checkpoint)
return net
def get_train_params(net, p, lr):
print('Adjusting learning rate')
print('Base lr: {}'.format(lr))
print('Decoder lr: {}'.format(lr * p['lr_dec']))
print('Task-specific lr: {}'.format(lr * p['lr_tsk']))
train_params = [{'params': se_mobilenet_v2.get_lr_params(net, part='backbone', tasks=p.TASKS.NAMES),
'lr': lr},
{'params': se_mobilenet_v2.get_lr_params(net, part='decoder', tasks=p.TASKS.NAMES),
'lr': lr * p['lr_dec']},
{'params': se_mobilenet_v2.get_lr_params(net, part='task_specific', tasks=p.TASKS.NAMES),
'lr': lr * p['lr_tsk']}]
if p['dscr_type'] is not None:
print('Discriminator lr: {}'.format(lr * p['lr_dscr']))
train_params.append(
{'params': se_mobilenet_v2.get_lr_params(net, part='discriminator', tasks=p.TASKS.NAMES),
'lr': lr * p['lr_dscr']})
return train_params
def get_exp_name(args):
"""
    Creates the experiment name from the configuration file and the arguments
"""
task_dict = {
'do_edge': 0,
'do_semseg': 0,
'do_human_parts': 0,
'do_normals': 0,
'do_sal': 0
}
name_dict = {
'arch': None,
'pretr': None,
'trBatch': None,
'lr': None,
'wd': 1e-04,
'epochs': None,
'stride': 16,
'trNorm': False,
'poly': False,
'seenc': False,
'sedec': False,
'dscr': None,
'lr_dscr': 1,
'dscr_w': ('dscr', None),
'dscrd': ('dscr', None),
'dscrk': ('dscr', None),
'edge_w': ('do_edge', None),
'lr_dec': 1,
'lr_tsk': -1,
}
# Experiment folder (task) string
task_args = [x.replace('do_', '') for x in task_dict if getattr(args, x) != task_dict[x]]
# Experiment name string
name_args = []
for x in name_dict:
# Check dependencies in tuple
if isinstance(name_dict[x], tuple):
elem = name_dict if name_dict[x][0] in name_dict else task_dict
if elem[name_dict[x][0]] == getattr(args, name_dict[x][0]):
continue
if getattr(args, x) != name_dict[x]:
tmp = getattr(args, x)
if isinstance(tmp, list):
tmp = "_".join([str(x) for x in tmp])
else:
tmp = str(tmp)
name_args.append(x + '-' + tmp)
name_args = [x.replace('-True', '') for x in name_args]
return task_args, name_args
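# Illustrative sketch (appended for clarity, not part of the original file): how get_exp_name()
# turns parser values into the experiment-name suffix used in _model_urls above. The Namespace
# below is a hand-built stand-in for parse_args(); its values mirror the parser defaults.
if __name__ == '__main__':
    _demo_args = argparse.Namespace(
        do_edge=1, do_semseg=1, do_human_parts=1, do_normals=1, do_sal=1,
        arch='mnetv2', pretr='imagenet', trBatch=16, lr=0.001, lr_dec=1, wd=1e-4,
        epochs=130, stride=16, trNorm=True, poly=True, overfit=False,
        seenc=True, sedec=True, lr_tsk=-1,
        dscr=None, lr_dscr=1, dscr_w=0, dscrd=2, dscrk=1, edge_w=0.95)
    _tasks, _names = get_exp_name(_demo_args)
    print("_".join(_tasks))  # edge_semseg_human_parts_normals_sal
    print("_".join(_names))  # arch-mnetv2_pretr-imagenet_trBatch-16_lr-0.001_epochs-130_trNorm_poly_seenc_sedec_edge_w-0.95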
|
astmt-master
|
experiments/dense_predict/pascal_mnet/config.py
|
MAX_N_IMAGES_PER_GPU = {
'mnetv2-8': 10,
'mnetv2-16': 16,
}
|
astmt-master
|
experiments/dense_predict/pascal_mnet/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import socket
import timeit
import cv2
from datetime import datetime
import imageio
import numpy as np
# PyTorch includes
import torch
import torch.optim as optim
from torch.nn.functional import interpolate
# Custom includes
from fblib.util.helpers import generate_param_report
from fblib.util.dense_predict.utils import lr_poly
from experiments.dense_predict import common_configs
from fblib.util.mtl_tools.multitask_visualizer import TBVisualizer
from fblib.util.model_resources.flops import compute_gflops
from fblib.util.model_resources.num_parameters import count_parameters
from fblib.util.dense_predict.utils import AverageMeter
# Custom optimizer
from fblib.util.optimizer_mtl.select_used_modules import make_closure
# Configuration
from experiments.dense_predict.pascal_mnet import config
# Tensorboard include
from tensorboardX import SummaryWriter
def main():
p = config.create_config()
gpu_id = 0
device = torch.device("cuda:" + str(gpu_id) if torch.cuda.is_available() else "cpu")
p.TEST.BATCH_SIZE = 32
# Setting parameters
n_epochs = p['epochs']
print("Total training epochs: {}".format(n_epochs))
print(p)
print('Training on {}'.format(p['train_db_name']))
snapshot = 10 # Store a model every snapshot epochs
test_interval = p.TEST.TEST_INTER # Run on test set every test_interval epochs
torch.manual_seed(p.SEED)
exp_name = os.path.dirname(os.path.abspath(__file__)).split('/')[-1]
if not os.path.exists(os.path.join(p['save_dir'], 'models')):
if p['resume_epoch'] == 0:
os.makedirs(os.path.join(p['save_dir'], 'models'))
else:
if not config.check_downloaded(p):
                print('Folder does not exist. No checkpoint to resume from. Exiting')
exit(1)
net = config.get_net_mnet(p)
gflops = compute_gflops(net, in_shape=(p['trBatch'], 3, p.TRAIN.SCALE[0], p.TRAIN.SCALE[1]),
tasks=p.TASKS.NAMES[0])
print('GFLOPS per task: {}'.format(gflops / p['trBatch']))
print('\nNumber of parameters (in millions): {0:.3f}'.format(count_parameters(net) / 1e6))
print('Number of parameters (in millions) for decoder: {0:.3f}\n'.format(count_parameters(net.decoder) / 1e6))
net.to(device)
if p['resume_epoch'] != n_epochs:
criteria_tr = {}
criteria_ts = {}
running_loss_tr = {task: 0. for task in p.TASKS.NAMES}
running_loss_ts = {task: 0. for task in p.TASKS.NAMES}
curr_loss_task = {task: 0. for task in p.TASKS.NAMES}
counter_tr = {task: 0 for task in p.TASKS.NAMES}
counter_ts = {task: 0 for task in p.TASKS.NAMES}
# Discriminator loss variables for logging
running_loss_tr_dscr = 0
running_loss_ts_dscr = 0
# Logging into Tensorboard
log_dir = os.path.join(p['save_dir'], 'models',
datetime.now().strftime('%b%d_%H-%M-%S') + '_' + socket.gethostname())
writer = SummaryWriter(log_dir=log_dir)
# Training parameters and their optimizer
train_params = config.get_train_params(net, p, p['lr'])
optimizer = optim.SGD(train_params, lr=p['lr'], momentum=p.TRAIN.MOMENTUM, weight_decay=p['wd'])
for task in p.TASKS.NAMES:
# Losses
criteria_tr[task] = config.get_loss(p, task)
criteria_ts[task] = config.get_loss(p, task)
criteria_tr[task].to(device)
criteria_ts[task].to(device)
# Preparation of the data loaders
transforms_tr, transforms_ts, _ = config.get_transformations(p)
trainloader = config.get_train_loader(p, db_name=p['train_db_name'], transforms=transforms_tr)
testloader = config.get_test_loader(p, db_name=p['test_db_name'], transforms=transforms_ts)
# TensorBoard Image Visualizer
tb_vizualizer = TBVisualizer(tasks=p.TASKS.NAMES, min_ranges=p.TASKS.TB_MIN, max_ranges=p.TASKS.TB_MAX,
batch_size=p['trBatch'])
generate_param_report(os.path.join(p['save_dir'], exp_name + '.txt'), p)
# Train variables
num_img_tr = len(trainloader)
num_img_ts = len(testloader)
print("Training Network")
# Main Training and Testing Loop
for epoch in range(p['resume_epoch'], n_epochs):
top1_dscr = AverageMeter()
start_time = timeit.default_timer()
# One training epoch
net.train()
alpha = 2. / (1. + np.exp(-10 * ((epoch + 1) / n_epochs))) - 1 # Ganin et al for gradient reversal
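            # (added note) alpha ramps smoothly from ~0 at the first epoch towards 1 at the last
            # one, so the adversarial gradient-reversal signal from the discriminator is phased
            # in gradually rather than applied at full strength from the start.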
if p['dscr_type'] is not None:
print('Value of alpha: {}'.format(alpha))
for ii, sample in enumerate(trainloader):
curr_loss_dscr = 0
# Grab the input
inputs = sample['image']
inputs.requires_grad_()
inputs = inputs.to(device)
task_gts = list(sample.keys())
tasks = net.tasks
gt_elems = {x: sample[x].to(device, non_blocking=True) for x in tasks}
uniq = {x: gt_elems[x].unique() for x in gt_elems}
outputs = {}
for task in tasks:
if task not in task_gts:
continue
if len(uniq[task]) == 1 and uniq[task][0] == 255:
continue
# Forward pass
output = {}
features = {}
output[task], features[task] = net.forward(inputs, task=task)
losses_tasks, losses_dscr, outputs_dscr, grads, task_labels \
= net.compute_losses(output, features, criteria_tr, gt_elems, alpha, p)
loss_tasks = losses_tasks[task]
running_loss_tr[task] += losses_tasks[task].item()
curr_loss_task[task] = losses_tasks[task].item()
counter_tr[task] += 1
# Store output for logging
outputs[task] = output[task].detach()
if p['dscr_type'] is not None:
# measure loss, accuracy and record accuracy for discriminator
loss_dscr = losses_dscr[task]
running_loss_tr_dscr += losses_dscr[task].item()
curr_loss_dscr += loss_dscr.item()
prec1 = common_configs.accuracy(outputs_dscr[task].data, task_labels[task], topk=(1,))
if prec1 != -1:
top1_dscr.update(prec1[0].item(), task_labels[task].size(0))
loss = (1 - p['dscr_w']) * loss_tasks + p['dscr_w'] * loss_dscr
else:
loss = loss_tasks
# Backward pass inside make_closure to update only weights that were used during fw pass
optimizer.zero_grad()
optimizer.step(closure=make_closure(loss=loss, net=net))
# Print stuff and log epoch loss into Tensorboard
if ii % num_img_tr == num_img_tr - 1:
print('[Epoch: %d, numImages: %5d]' % (epoch, ii + 1))
for task in p.TASKS.NAMES:
running_loss_tr[task] = running_loss_tr[task] / counter_tr[task]
writer.add_scalar('data/total_loss_epoch' + task,
running_loss_tr[task] / p.TASKS.LOSS_MULT[task], epoch)
print('Loss %s: %f' % (task, running_loss_tr[task]))
running_loss_tr[task] = 0
counter_tr[task] = 0
if p['dscr_type'] is not None:
running_loss_tr_dscr = running_loss_tr_dscr / num_img_tr / len(p.TASKS.NAMES)
writer.add_scalar('data/total_loss_epoch_dscr', running_loss_tr_dscr, epoch)
print('Loss Discriminator: %f' % running_loss_tr_dscr)
print('Train Accuracy Discriminator: Prec@1 {top1.avg:.3f} Error@1 {error1:.3f}'.format(
top1=top1_dscr, error1=100 - top1_dscr.avg))
writer.add_scalar('data/train_accuracy_dscr', top1_dscr.avg, epoch)
stop_time = timeit.default_timer()
print("Execution time: " + str(stop_time - start_time) + "\n")
# Log current train loss into Tensorboard
for task in p.TASKS.NAMES:
writer.add_scalar('data/train_loss_iter_' + task, curr_loss_task[task], ii + num_img_tr * epoch)
curr_loss_task[task] = 0.
if p['dscr_type'] is not None:
writer.add_scalar('data/train_loss_dscr_iter', curr_loss_dscr, ii + num_img_tr * epoch)
curr_loss_dscr = 0.
# Log train images to Tensorboard
if p.TRAIN.TENS_VIS and ii % p.TRAIN.TENS_VIS_INTER == 0:
curr_iter = ii + num_img_tr * epoch
tb_vizualizer.visualize_images_tb(writer, sample, outputs,
global_step=curr_iter, tag=ii, phase='train')
if p['poly'] and ii % num_img_tr == num_img_tr - 1:
lr_ = lr_poly(p['lr'], iter_=epoch, max_iter=n_epochs)
print('(poly lr policy) learning rate: {0:.6f}'.format(lr_))
train_params = config.get_train_params(net, p, lr_)
optimizer = optim.SGD(train_params, lr=lr_, momentum=p.TRAIN.MOMENTUM, weight_decay=p['wd'])
optimizer.zero_grad()
# Save the model
if (epoch % snapshot) == snapshot - 1 and epoch != 0:
torch.save(net.state_dict(), os.path.join(p['save_dir'], 'models',
'model_epoch-' + str(epoch) + '.pth'))
# One testing epoch
if p.TEST.USE_TEST and epoch % test_interval == (test_interval - 1):
print('Testing Phase')
top1_dscr = AverageMeter()
net.eval()
start_time = timeit.default_timer()
for ii, sample in enumerate(testloader):
inputs = sample['image'].to(device)
task_gts = list(sample.keys())
tasks = net.tasks
gt_elems = {x: sample[x].to(device, non_blocking=True) for x in tasks}
uniq = {x: gt_elems[x].unique() for x in gt_elems}
outputs = {}
for task in tasks:
if task not in task_gts:
continue
if len(uniq[task]) == 1 and uniq[task][0] == 255:
continue
# Forward pass of the mini-batch
output = {}
features = {}
output[task], features[task] = net.forward(inputs, task=task)
losses_tasks, losses_dscr, outputs_dscr, grads, task_labels \
= net.compute_losses(output, features, criteria_tr, gt_elems, alpha, p)
running_loss_ts[task] += losses_tasks[task].item()
counter_ts[task] += 1
# For logging
outputs[task] = output[task].detach()
if p['dscr_type'] is not None:
running_loss_ts_dscr += losses_dscr[task].item()
# measure accuracy and record loss for discriminator
prec1 = common_configs.accuracy(outputs_dscr[task].data, task_labels[task], topk=(1,))
if prec1 != -1:
top1_dscr.update(prec1[0].item(), task_labels[task].size(0))
# Print stuff
if ii % num_img_ts == num_img_ts - 1:
print('[Epoch: %d, numTestImages: %5d]' % (epoch, ii + 1))
for task in p.TASKS.NAMES:
running_loss_ts[task] = running_loss_ts[task] / counter_ts[task]
writer.add_scalar('data/test_loss_' + task + '_epoch',
running_loss_ts[task] / p.TASKS.LOSS_MULT[task], epoch)
print('Testing Loss %s: %f' % (task, running_loss_ts[task]))
running_loss_ts[task] = 0
counter_ts[task] = 0
# Free the graph
losses_tasks = {}
if p['dscr_type'] is not None:
running_loss_ts_dscr = running_loss_ts_dscr / num_img_ts / len(p.TASKS.NAMES)
writer.add_scalar('data/test_loss_dscr', running_loss_ts_dscr, epoch)
print('Loss Discriminator: %f' % running_loss_ts_dscr)
writer.add_scalar('data/test_accuracy_dscr', top1_dscr.avg, epoch)
print('Test Accuracy Discriminator: Prec@1 {top1.avg:.3f} Error@1 {error1:.3f}'.format(
top1=top1_dscr, error1=100 - top1_dscr.avg))
# Free the graph
losses_dscr = {}
stop_time = timeit.default_timer()
print("Execution time: " + str(stop_time - start_time) + "\n")
# Log test images to Tensorboard
if p.TRAIN.TENS_VIS and ii % p.TRAIN.TENS_VIS_INTER == 0:
curr_iter = ii + num_img_tr * epoch
tb_vizualizer.visualize_images_tb(writer, sample, outputs,
global_step=curr_iter, tag=ii, phase='test')
writer.close()
# Generate Results
net.eval()
_, _, transforms_infer = config.get_transformations(p)
for db_name in p['infer_db_names']:
testloader = config.get_test_loader(p, db_name=db_name, transforms=transforms_infer, infer=True)
save_dir_res = os.path.join(p['save_dir'], 'Results_' + db_name)
print('Testing network')
# Main Testing Loop
with torch.no_grad():
for ii, sample in enumerate(testloader):
img, meta = sample['image'], sample['meta']
# Forward pass of the mini-batch
inputs = img.to(device)
tasks = net.tasks
for task in tasks:
output, _ = net.forward(inputs, task=task)
save_dir_task = os.path.join(save_dir_res, task)
if not os.path.exists(save_dir_task):
os.makedirs(save_dir_task)
output = interpolate(output, size=(inputs.size()[-2], inputs.size()[-1]),
mode='bilinear', align_corners=False)
output = common_configs.get_output(output, task)
for jj in range(int(inputs.size()[0])):
if len(sample[task][jj].unique()) == 1 and sample[task][jj].unique() == 255:
continue
fname = meta['image'][jj]
result = cv2.resize(output[jj], dsize=(meta['im_size'][1][jj], meta['im_size'][0][jj]),
interpolation=p.TASKS.INFER_FLAGVALS[task])
imageio.imwrite(os.path.join(save_dir_task, fname + '.png'), result.astype(np.uint8))
if p.EVALUATE:
common_configs.eval_all_results(p)
if __name__ == '__main__':
main()
|
astmt-master
|
experiments/dense_predict/pascal_mnet/main.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import sys
import cv2
import argparse
import torch
import tarfile
from six.moves import urllib
from easydict import EasyDict as edict
# Networks
import fblib.networks.deeplab_multi_task.deeplab_se_resnet_multitask as se_resnet_multitask
# Common configs
from experiments.dense_predict.common_configs import get_loss, get_train_loader, get_test_loader, get_transformations
from fblib.util.mypath import Path
def parse_args():
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(description='Multi-task Learning for NYUD')
# Select tasks
parser.add_argument('--active_tasks', type=int, nargs='+', default=[1, 1, 1, 1],
help='Which tasks to train?')
# General parameters
parser.add_argument('--arch', type=str, default='se_res50',
help='network: se_res26, se_res50, se_res101')
parser.add_argument('--pretr', type=str, default='imagenet',
help='pre-trained model: "imagenet" or "scratch"')
parser.add_argument('--trBatch', type=int, default=8,
help='training batch size')
parser.add_argument('-lr', type=float, default=0.001,
help='initial learning rate. poly-learning rate is used.')
parser.add_argument('--lr_dec', type=float, default=1,
help='decoder learning rate multiplier')
parser.add_argument('-wd', type=float, default=1e-4,
help='Weight decay')
parser.add_argument('--epochs', type=int, default=200,
help='Total number of epochs for training')
parser.add_argument('--resume_epoch', type=int, default=0,
help='Resume Epoch #')
parser.add_argument('--cls', type=str, default='atrous-v3',
help='Classifier type')
parser.add_argument('--stride', type=int, default=16,
help='Output stride of ResNet backbone. If set to 16 saves significant memory')
parser.add_argument('--trNorm', type=str2bool, default=True,
help='train normalization layers of Backbone?')
parser.add_argument('--dec_w', type=int, default=64,
                        help='decoder width (default 256 in Deeplab v3+)')
parser.add_argument('--overfit', type=str2bool, default=False,
help='overfit to small subset of data for debugging purposes')
# Modulation Parameters
parser.add_argument('--seenc', type=str2bool, default=True,
help='Squeeze and excitation per task for encoder? False will still use 1 SE for all tasks')
parser.add_argument('--sedec', type=str2bool, default=True,
help='Squeeze and excitation per task for decoder? False will not use SE modules')
parser.add_argument('--adapt', type=str2bool, default=True,
help='Use parallel residual adapters?')
parser.add_argument('--lr_tsk', type=float, default=-1,
help='Task Specific layer learning rate multiplier')
# Discriminator parameters
parser.add_argument('--dscr', type=str, default='fconv',
help='Use discriminator?')
parser.add_argument('--lr_dscr', type=int, default=10,
help='learning rate multiplier of discriminator?')
parser.add_argument('--dscr_w', type=float, default=0.01,
help='weight of discriminator in the range [0, 1]')
parser.add_argument('--dscrd', type=int, default=2,
help='Depth of discriminator')
parser.add_argument('--dscrk', type=int, default=3,
help='Kernel size of discriminator')
# Task-specific parameters
parser.add_argument('--edge_w', type=float, default=0.8,
help='Weighting the positive loss for boundary detection as w * L_pos + (1 - w) * L_neg')
return parser.parse_args()
def create_config():
cfg = edict()
args = parse_args()
# Parse tasks
assert (len(args.active_tasks) == 4)
args.do_edge = args.active_tasks[0]
args.do_semseg = args.active_tasks[1]
args.do_normals = args.active_tasks[2]
args.do_depth = args.active_tasks[3]
print('\nThis script was run with the following parameters:')
for x in vars(args):
print('{}: {}'.format(x, str(getattr(args, x))))
cfg.resume_epoch = args.resume_epoch
cfg.DO_EDGE = args.do_edge
cfg.DO_SEMSEG = args.do_semseg
cfg.DO_NORMALS = args.do_normals
cfg.DO_DEPTH = args.do_depth
if not cfg.DO_EDGE and not cfg.DO_SEMSEG and not cfg.DO_NORMALS and not cfg.DO_DEPTH:
raise ValueError("Select at least one task")
cfg['arch'] = args.arch
cfg['pretr'] = args.pretr
cfg['trBatch'] = args.trBatch
cfg['lr'] = args.lr
cfg['lr_dec'] = args.lr_dec
cfg['wd'] = args.wd
cfg['cls'] = args.cls
cfg['epochs'] = args.epochs
cfg['stride'] = args.stride
cfg['trNorm'] = args.trNorm
cfg['dec_w'] = args.dec_w
# Set Modulation (Squeeze and Exciation, Residual Adapters) parameters
cfg['seenc'] = args.seenc
cfg['sedec'] = args.sedec
cfg['adapters'] = args.adapt
if cfg['sedec']:
cfg['norm_per_task'] = True
else:
cfg['norm_per_task'] = False
if args.dscr == 'None':
args.dscr = None
cfg['dscr_type'] = args.dscr
cfg['lr_dscr'] = args.lr_dscr
cfg['dscr_w'] = args.dscr_w
cfg['dscrd'] = args.dscrd
cfg['dscrk'] = args.dscrk
task_args, name_args = get_exp_name(args)
cfg['exp_folder_name'] = 'nyud_resnet'
cfg['exp_name'] = "_".join(name_args)
cfg['tasks_name'] = "_".join(task_args)
cfg['save_dir_root'] = os.path.join(Path.exp_dir(), cfg['exp_folder_name'], cfg['tasks_name'])
cfg['train_db_name'] = ['NYUD', ]
cfg['test_db_name'] = 'NYUD'
cfg['infer_db_names'] = ['NYUD', ]
# Which tasks?
cfg.TASKS = edict()
cfg.TASKS.NAMES = []
cfg.TASKS.NUM_OUTPUT = {} # How many outputs per task?
cfg.TASKS.TB_MIN = {}
cfg.TASKS.TB_MAX = {}
cfg.TASKS.LOSS_MULT = {}
cfg.TASKS.FLAGVALS = {'image': cv2.INTER_CUBIC}
cfg.TASKS.INFER_FLAGVALS = {}
if cfg.DO_EDGE:
# Edge Detection
print('Adding task: Edge Detection')
tmp = 'edge'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 1
cfg.TASKS.TB_MIN[tmp] = 0
cfg.TASKS.TB_MAX[tmp] = cfg.TASKS.NUM_OUTPUT[tmp]
cfg.TASKS.LOSS_MULT[tmp] = 50
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_LINEAR
# Add task-specific parameters from parser
cfg['edge_w'] = args.edge_w
cfg['eval_edge'] = False
if cfg.DO_SEMSEG:
# Semantic Segmentation
print('Adding task: Semantic Segmentation')
tmp = 'semseg'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 41
cfg.TASKS.TB_MIN[tmp] = 0
cfg.TASKS.TB_MAX[tmp] = cfg.TASKS.NUM_OUTPUT[tmp] - 1
cfg.TASKS.LOSS_MULT[tmp] = 1
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_NEAREST
if cfg.DO_NORMALS:
        # Surface Normals Estimation
print('Adding task: Normals')
tmp = 'normals'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 3
cfg.TASKS.TB_MIN[tmp] = -1
cfg.TASKS.TB_MAX[tmp] = 1
cfg.TASKS.LOSS_MULT[tmp] = 10
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_CUBIC
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_LINEAR
cfg['normloss'] = 1 # Hard-coded L1 loss for normals
if cfg.DO_DEPTH:
# Depth Estimation
print('Adding task: Depth')
tmp = 'depth'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 1
cfg.TASKS.TB_MIN[tmp] = 1
cfg.TASKS.TB_MAX[tmp] = 10
cfg.TASKS.LOSS_MULT[tmp] = 1
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_LINEAR
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_LINEAR
cfg['lr_tsk'] = len(cfg.TASKS.NAMES) if args.lr_tsk < 0 else args.lr_tsk
cfg.NETWORK = edict()
# Visualize the network on Tensorboard / pdf?
cfg.NETWORK.VIS_NET = False
cfg.TRAIN = edict()
cfg.TRAIN.SCALE = (512, 512)
cfg.TRAIN.MOMENTUM = 0.9
cfg.TRAIN.TENS_VIS = True
cfg.TRAIN.TENS_VIS_INTER = 1000
cfg.TRAIN.TEMP_LOSS_INTER = 1000
cfg.TEST = edict()
# See evolution of the test set when training?
cfg.TEST.USE_TEST = True
cfg.TEST.TEST_INTER = 10
cfg.TEST.SCALE = (512, 512)
cfg.SEED = 0
cfg.EVALUATE = True
cfg.DEBUG = False
cfg['overfit'] = args.overfit
if cfg['overfit']:
cfg['save_dir_root'] = os.path.join(Path.exp_dir(), cfg['exp_folder_name'])
cfg['exp_name'] = 'test'
cfg['save_dir'] = os.path.join(cfg['save_dir_root'], cfg['exp_name'])
return cfg
def check_downloaded(p):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> %s %.1f%%' %
(_fpath, float(count * block_size) /
float(total_size) * 100.0))
sys.stdout.flush()
def _create_url(name):
return 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL//models/astmt/{}.tgz'.format(name)
_model_urls = {
'nyud_resnet_edge_semseg_normals_depth_'
'arch-se_res50_pretr-imagenet_trBatch-8_lr-0.001_epochs-200_trNorm_seenc_sedec_adapt_dscr-fconv_lr_dscr'
'-10_dscr_w-0.01_dscrd-2_dscrk-3_edge_w-0.8_200',
}
ans = False
_check = p['exp_folder_name'] + '_' + p['tasks_name'] + '_' + p['exp_name'] + '_' + str(p['resume_epoch'])
_fpath = os.path.join(Path.exp_dir(), _check + '.tgz')
if _check in _model_urls:
if not os.path.isfile(os.path.join(p['save_dir'], 'models',
'model_epoch-' + str(p['resume_epoch'] - 1) + '.pth')):
urllib.request.urlretrieve(_create_url(_check), _fpath, _progress)
# extract file
cwd = os.getcwd()
print('\nExtracting tar file')
tar = tarfile.open(_fpath)
os.chdir(Path.exp_dir())
tar.extractall()
tar.close()
os.chdir(cwd)
print('Done!')
ans = True
return ans
def get_net_resnet(p):
"""
    Define the network (DeepLab with an SE-ResNet backbone) and load weights when resuming from a checkpoint
"""
if p['arch'] == 'se_res26':
network = se_resnet_multitask.se_resnet26
elif p['arch'] == 'se_res50':
network = se_resnet_multitask.se_resnet50
elif p['arch'] == 'se_res101':
network = se_resnet_multitask.se_resnet101
else:
        raise NotImplementedError('ResNet: Choose among se_res26, se_res50, and se_res101')
print('Creating ResNet model: {}'.format(p.NETWORK))
net = network(tasks=p.TASKS.NAMES, n_classes=p.TASKS.NUM_OUTPUT, pretrained=p['pretr'], classifier=p['cls'],
output_stride=p['stride'], train_norm_layers=p['trNorm'], width_decoder=p['dec_w'],
squeeze_enc=p['seenc'], squeeze_dec=p['sedec'], adapters=p['adapters'],
norm_per_task=p['norm_per_task'], dscr_type=p['dscr_type'], dscr_d=p['dscrd'], dscr_k=p['dscrk'])
if p['resume_epoch'] != 0:
print("Initializing weights from: {}".format(
os.path.join(p['save_dir'], 'models', 'model_epoch-' + str(p['resume_epoch'] - 1) + '.pth')))
state_dict_checkpoint = torch.load(
os.path.join(p['save_dir'], 'models', 'model_epoch-' + str(p['resume_epoch'] - 1) + '.pth')
, map_location=lambda storage, loc: storage)
net.load_state_dict(state_dict_checkpoint)
return net
def get_train_params(net, p):
train_params = [{'params': se_resnet_multitask.get_lr_params(net, part='backbone', tasks=p.TASKS.NAMES),
'lr': p['lr']},
{'params': se_resnet_multitask.get_lr_params(net, part='decoder', tasks=p.TASKS.NAMES),
'lr': p['lr'] * p['lr_dec']},
{'params': se_resnet_multitask.get_lr_params(net, part='task_specific', tasks=p.TASKS.NAMES),
'lr': p['lr'] * p['lr_tsk']}]
if p['dscr_type'] is not None:
train_params.append(
{'params': se_resnet_multitask.get_lr_params(net, part='discriminator', tasks=p.TASKS.NAMES),
'lr': p['lr'] * p['lr_dscr']})
return train_params
def get_exp_name(args):
"""
    Creates the experiment name from the configuration file and the arguments
"""
task_dict = {
'do_edge': 0,
'do_semseg': 0,
'do_normals': 0,
'do_depth': 0
}
name_dict = {
'arch': None,
'pretr': None,
'trBatch': None,
'lr': None,
'wd': 1e-04,
'epochs': None,
'cls': 'atrous-v3',
'stride': 16,
'trNorm': False,
'dec_w': 64,
'seenc': False,
'sedec': False,
'adapt': False,
'dscr': None,
'lr_dscr': 1,
'dscr_w': ('dscr', None),
'dscrd': ('dscr', None),
'dscrk': ('dscr', None),
'edge_w': ('do_edge', None),
'lr_dec': 1,
'lr_tsk': -1,
}
# Experiment folder (task) string
task_args = [x.replace('do_', '') for x in task_dict if getattr(args, x) != task_dict[x]]
# Experiment name string
name_args = []
for x in name_dict:
# Check dependencies in tuple
if isinstance(name_dict[x], tuple):
elem = name_dict if name_dict[x][0] in name_dict else task_dict
if elem[name_dict[x][0]] == getattr(args, name_dict[x][0]):
continue
if getattr(args, x) != name_dict[x]:
tmp = getattr(args, x)
if isinstance(tmp, list):
tmp = "_".join([str(x) for x in tmp])
else:
tmp = str(tmp)
name_args.append(x + '-' + tmp)
name_args = [x.replace('-True', '') for x in name_args]
return task_args, name_args
|
astmt-master
|
experiments/dense_predict/nyud_resnet/config.py
|
MAX_N_IMAGES_PER_GPU = {
'se_res26-8': 10,
'se_res26-16': 16,
'se_res50-8': 8,
'se_res50-16': 16,
'se_res101-8': 2,
'se_res101-16': 10,
}
|
astmt-master
|
experiments/dense_predict/nyud_resnet/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import socket
import timeit
import cv2
from datetime import datetime
import imageio
import scipy.io as sio
import numpy as np
# PyTorch includes
import torch
import torch.optim as optim
from torch.nn.functional import interpolate
# Custom includes
from fblib.util.helpers import generate_param_report
from fblib.util.dense_predict.utils import lr_poly
from experiments.dense_predict import common_configs
from fblib.util.mtl_tools.multitask_visualizer import TBVisualizer, visualize_network
from fblib.util.model_resources.flops import compute_gflops
from fblib.util.model_resources.num_parameters import count_parameters
from fblib.util.dense_predict.utils import AverageMeter
# Custom optimizer
from fblib.util.optimizer_mtl.select_used_modules import make_closure
# Configuration file
from experiments.dense_predict.nyud_resnet import config as config
# Tensorboard include
from tensorboardX import SummaryWriter
def main():
p = config.create_config()
gpu_id = 0
device = torch.device("cuda:" + str(gpu_id) if torch.cuda.is_available() else "cpu")
p.TEST.BATCH_SIZE = 32
# Setting parameters
n_epochs = p['epochs']
print("Total training epochs: {}".format(n_epochs))
print(p)
print('Training on {}'.format(p['train_db_name']))
snapshot = 10 # Store a model every snapshot epochs
test_interval = p.TEST.TEST_INTER # Run on test set every test_interval epochs
torch.manual_seed(p.SEED)
exp_name = os.path.dirname(os.path.abspath(__file__)).split('/')[-1]
if not os.path.exists(os.path.join(p['save_dir'], 'models')):
if p['resume_epoch'] == 0:
os.makedirs(os.path.join(p['save_dir'], 'models'))
else:
if not config.check_downloaded(p):
                print('Folder does not exist. No checkpoint to resume from. Exiting')
exit(1)
net = config.get_net_resnet(p)
# Visualize the network
if p.NETWORK.VIS_NET:
visualize_network(net, p)
gflops = compute_gflops(net, in_shape=(p['trBatch'], 3, p.TRAIN.SCALE[0], p.TRAIN.SCALE[1]),
tasks=p.TASKS.NAMES[0])
print('GFLOPS per task: {}'.format(gflops / p['trBatch']))
print('\nNumber of parameters (in millions): {0:.3f}'.format(count_parameters(net) / 1e6))
print('Number of parameters (in millions) for decoder: {0:.3f}\n'.format(count_parameters(net.decoder) / 1e6))
net.to(device)
if p['resume_epoch'] != n_epochs:
criteria_tr = {}
criteria_ts = {}
running_loss_tr = {task: 0. for task in p.TASKS.NAMES}
running_loss_ts = {task: 0. for task in p.TASKS.NAMES}
curr_loss_task = {task: 0. for task in p.TASKS.NAMES}
counter_tr = {task: 0 for task in p.TASKS.NAMES}
counter_ts = {task: 0 for task in p.TASKS.NAMES}
# Discriminator loss variables for logging
running_loss_tr_dscr = 0
running_loss_ts_dscr = 0
# Logging into Tensorboard
log_dir = os.path.join(p['save_dir'], 'models',
datetime.now().strftime('%b%d_%H-%M-%S') + '_' + socket.gethostname())
writer = SummaryWriter(log_dir=log_dir)
# Training parameters and their optimizer
train_params = config.get_train_params(net, p)
optimizer = optim.SGD(train_params, lr=p['lr'], momentum=p.TRAIN.MOMENTUM, weight_decay=p['wd'])
for task in p.TASKS.NAMES:
# Losses
criteria_tr[task] = config.get_loss(p, task)
criteria_ts[task] = config.get_loss(p, task)
criteria_tr[task].to(device)
criteria_ts[task].to(device)
# Preparation of the data loaders
transforms_tr, transforms_ts, _ = config.get_transformations(p)
trainloader = config.get_train_loader(p, db_name=p['train_db_name'], transforms=transforms_tr)
testloader = config.get_test_loader(p, db_name=p['test_db_name'], transforms=transforms_ts)
# TensorBoard Image Visualizer
tb_vizualizer = TBVisualizer(tasks=p.TASKS.NAMES, min_ranges=p.TASKS.TB_MIN, max_ranges=p.TASKS.TB_MAX,
batch_size=p['trBatch'])
generate_param_report(os.path.join(p['save_dir'], exp_name + '.txt'), p)
# Train variables
num_img_tr = len(trainloader)
num_img_ts = len(testloader)
print("Training Network")
# Main Training and Testing Loop
for epoch in range(p['resume_epoch'], n_epochs):
top1_dscr = AverageMeter()
start_time = timeit.default_timer()
# One training epoch
net.train()
alpha = 2. / (1. + np.exp(-10 * ((epoch + 1) / n_epochs))) - 1 # Ganin et al for gradient reversal
if p['dscr_type'] is not None:
print('Value of alpha: {}'.format(alpha))
for ii, sample in enumerate(trainloader):
curr_loss_dscr = 0
# Grab the input
inputs = sample['image']
inputs.requires_grad_()
inputs = inputs.to(device)
task_gts = list(sample.keys())
tasks = net.tasks
gt_elems = {x: sample[x].to(device, non_blocking=True) for x in tasks}
uniq = {x: gt_elems[x].unique() for x in gt_elems}
outputs = {}
for task in tasks:
if task not in task_gts:
continue
if len(uniq[task]) == 1 and uniq[task][0] == 255:
continue
# Forward pass
output = {}
features = {}
output[task], features[task] = net.forward(inputs, task=task)
losses_tasks, losses_dscr, outputs_dscr, grads, task_labels \
= net.compute_losses(output, features, criteria_tr, gt_elems, alpha, p)
loss_tasks = losses_tasks[task]
running_loss_tr[task] += losses_tasks[task].item()
curr_loss_task[task] = losses_tasks[task].item()
counter_tr[task] += 1
# Store output for logging
outputs[task] = output[task].detach()
if p['dscr_type'] is not None:
# measure loss, accuracy and record accuracy for discriminator
loss_dscr = losses_dscr[task]
running_loss_tr_dscr += losses_dscr[task].item()
curr_loss_dscr += loss_dscr.item()
prec1 = common_configs.accuracy(outputs_dscr[task].data, task_labels[task], topk=(1,))
if prec1 != -1:
top1_dscr.update(prec1[0].item(), task_labels[task].size(0))
loss = (1 - p['dscr_w']) * loss_tasks + p['dscr_w'] * loss_dscr
else:
loss = loss_tasks
# Backward pass inside make_closure to update only weights that were used during fw pass
optimizer.zero_grad()
optimizer.step(closure=make_closure(loss=loss, net=net))
# Print stuff and log epoch loss into Tensorboard
if ii % num_img_tr == num_img_tr - 1:
print('[Epoch: %d, numImages: %5d]' % (epoch, ii + 1))
for task in p.TASKS.NAMES:
running_loss_tr[task] = running_loss_tr[task] / counter_tr[task]
writer.add_scalar('data/total_loss_epoch' + task,
running_loss_tr[task] / p.TASKS.LOSS_MULT[task], epoch)
print('Loss %s: %f' % (task, running_loss_tr[task]))
running_loss_tr[task] = 0
counter_tr[task] = 0
if p['dscr_type'] is not None:
running_loss_tr_dscr = running_loss_tr_dscr / num_img_tr / len(p.TASKS.NAMES)
writer.add_scalar('data/total_loss_epoch_dscr', running_loss_tr_dscr, epoch)
print('Loss Discriminator: %f' % running_loss_tr_dscr)
print('Train Accuracy Discriminator: Prec@1 {top1.avg:.3f} Error@1 {error1:.3f}'.format(
top1=top1_dscr, error1=100 - top1_dscr.avg))
writer.add_scalar('data/train_accuracy_dscr', top1_dscr.avg, epoch)
stop_time = timeit.default_timer()
print("Execution time: " + str(stop_time - start_time) + "\n")
# Log current train loss into Tensorboard
for task in p.TASKS.NAMES:
writer.add_scalar('data/train_loss_iter_' + task, curr_loss_task[task], ii + num_img_tr * epoch)
curr_loss_task[task] = 0.
if p['dscr_type'] is not None:
writer.add_scalar('data/train_loss_dscr_iter', curr_loss_dscr, ii + num_img_tr * epoch)
curr_loss_dscr = 0.
# Log train images to Tensorboard
if p['overfit'] and p.TRAIN.TENS_VIS and ii % p.TRAIN.TENS_VIS_INTER == 0:
curr_iter = ii + num_img_tr * epoch
tb_vizualizer.visualize_images_tb(writer, sample, outputs,
global_step=curr_iter, tag=ii, phase='train')
if ii % num_img_tr == num_img_tr - 1:
lr_ = lr_poly(p['lr'], iter_=epoch, max_iter=n_epochs)
print('(poly lr policy) learning rate: {0:.6f}'.format(lr_))
train_params = config.get_train_params(net, p)
optimizer = optim.SGD(train_params, lr=lr_, momentum=p.TRAIN.MOMENTUM, weight_decay=p['wd'])
optimizer.zero_grad()
# Save the model
if (epoch % snapshot) == snapshot - 1 and epoch != 0:
torch.save(net.state_dict(), os.path.join(p['save_dir'], 'models',
'model_epoch-' + str(epoch) + '.pth'))
# One testing epoch
if p.TEST.USE_TEST and epoch % test_interval == (test_interval - 1):
print('Testing Phase')
top1_dscr = AverageMeter()
net.eval()
start_time = timeit.default_timer()
for ii, sample in enumerate(testloader):
inputs = sample['image'].to(device)
task_gts = list(sample.keys())
tasks = net.tasks
gt_elems = {x: sample[x].to(device, non_blocking=True) for x in tasks}
uniq = {x: gt_elems[x].unique() for x in gt_elems}
outputs = {}
for task in tasks:
if task not in task_gts:
continue
if len(uniq[task]) == 1 and uniq[task][0] == 255:
continue
# Forward pass of the mini-batch
output = {}
features = {}
output[task], features[task] = net.forward(inputs, task=task)
losses_tasks, losses_dscr, outputs_dscr, grads, task_labels \
= net.compute_losses(output, features, criteria_tr, gt_elems, alpha, p)
running_loss_ts[task] += losses_tasks[task].item()
counter_ts[task] += 1
# For logging
outputs[task] = output[task].detach()
if p['dscr_type'] is not None:
running_loss_ts_dscr += losses_dscr[task].item()
# measure accuracy and record loss for discriminator
prec1 = common_configs.accuracy(outputs_dscr[task].data, task_labels[task], topk=(1,))
if prec1 != -1:
top1_dscr.update(prec1[0].item(), task_labels[task].size(0))
# Print stuff
if ii % num_img_ts == num_img_ts - 1:
print('[Epoch: %d, numTestImages: %5d]' % (epoch, ii + 1))
for task in p.TASKS.NAMES:
running_loss_ts[task] = running_loss_ts[task] / counter_ts[task]
writer.add_scalar('data/test_loss_' + task + '_epoch',
running_loss_ts[task] / p.TASKS.LOSS_MULT[task], epoch)
print('Testing Loss %s: %f' % (task, running_loss_ts[task]))
running_loss_ts[task] = 0
counter_ts[task] = 0
# Free the graph
losses_tasks = {}
if p['dscr_type'] is not None:
running_loss_ts_dscr = running_loss_ts_dscr / num_img_ts / len(p.TASKS.NAMES)
writer.add_scalar('data/test_loss_dscr', running_loss_ts_dscr, epoch)
print('Loss Discriminator: %f' % running_loss_ts_dscr)
writer.add_scalar('data/test_accuracy_dscr', top1_dscr.avg, epoch)
print('Test Accuracy Discriminator: Prec@1 {top1.avg:.3f} Error@1 {error1:.3f}'.format(
top1=top1_dscr, error1=100 - top1_dscr.avg))
# Free the graph
losses_dscr = {}
stop_time = timeit.default_timer()
print("Execution time: " + str(stop_time - start_time) + "\n")
# Log test images to Tensorboard
if p.TRAIN.TENS_VIS and ii % p.TRAIN.TENS_VIS_INTER == 0:
curr_iter = ii + num_img_tr * epoch
tb_vizualizer.visualize_images_tb(writer, sample, outputs,
global_step=curr_iter, tag=ii, phase='test')
writer.close()
# Generate Results
net.eval()
_, _, transforms_infer = config.get_transformations(p)
for db_name in p['infer_db_names']:
testloader = config.get_test_loader(p, db_name=db_name, transforms=transforms_infer, infer=True)
save_dir_res = os.path.join(p['save_dir'], 'Results_' + db_name)
print('Testing Network')
# Main Testing Loop
with torch.no_grad():
for ii, sample in enumerate(testloader):
img, meta = sample['image'], sample['meta']
# Forward pass of the mini-batch
inputs = img.to(device)
tasks = net.tasks
for task in tasks:
output, _ = net.forward(inputs, task=task)
save_dir_task = os.path.join(save_dir_res, task)
if not os.path.exists(save_dir_task):
os.makedirs(save_dir_task)
output = interpolate(output, size=(inputs.size()[-2], inputs.size()[-1]),
mode='bilinear', align_corners=False)
output = common_configs.get_output(output, task)
for jj in range(int(inputs.size()[0])):
if len(sample[task][jj].unique()) == 1 and sample[task][jj].unique() == 255:
continue
# Parameters
fname = meta['image'][jj]
result = cv2.resize(output[jj], dsize=(meta['im_size'][1][jj], meta['im_size'][0][jj]),
interpolation=p.TASKS.INFER_FLAGVALS[task])
if task == 'depth':
sio.savemat(os.path.join(save_dir_task, fname + '.mat'), {'depth': result})
else:
imageio.imwrite(os.path.join(save_dir_task, fname + '.png'), result.astype(np.uint8))
if p.EVALUATE:
common_configs.eval_all_results(p)
if __name__ == '__main__':
main()
|
astmt-master
|
experiments/dense_predict/nyud_resnet/main.py
|
import os
PROJECT_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
|
astmt-master
|
fblib/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
class AttentionModuleFree(nn.Module):
"""
    Per-channel attention module with free (unconstrained) real-valued weights
"""
def __init__(self, input_size, offset=0.):
super(AttentionModuleFree, self).__init__()
# randomly initialize parameters
self.weight = nn.Parameter(torch.rand(1, input_size, 1, 1) + offset)
def forward(self, x):
return torch.mul(self.weight, x)
class AttentionModule(AttentionModuleFree):
"""
    AttentionModuleFree with real-valued parameters restricted to the range [0, 1] through a sigmoid
"""
def __init__(self, input_size):
super(AttentionModule, self).__init__(input_size, offset=10)
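        # (added note) with offset=10 the free weights start deep inside the sigmoid's saturated
        # region, so sigm(weight) is ~1 at initialization and the attention is close to identity.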
        # squash the free weights into [0, 1] with a sigmoid at forward time
self.sigm = nn.Sigmoid()
def forward(self, x):
return torch.mul(self.sigm(self.weight), x)
class Conv2dAttentionAdapters(nn.Module):
"""
2D convolution followed by optional per-task transformation. The transformation can include the following:
- Residual adapters (in parallel)
- Attention modules (per-task feature multiplications) with gating, which can be binary or real-valued
    During the forward pass, the index of the task is required in addition to the input tensor
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=False,
n_tasks=1,
adapters=False,
attention=False,
bn_per_task=False,
binary_attention=False):
super(Conv2dAttentionAdapters, self).__init__()
self.adapters = adapters
self.attention = attention
self.bn_per_task = bn_per_task and (self.adapters or self.attention)
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
groups=groups, bias=bias)
if self.attention:
print('Constructing attention modules.')
if binary_attention:
print('Binary attention!')
att_module = AttentionModuleFree
else:
att_module = AttentionModule
self.attend = nn.ModuleList([att_module(out_channels) for i in range(n_tasks)])
if self.adapters:
print('Constructing parallel residual adapters.')
self.adapt = nn.ModuleList([
            nn.Conv2d(in_channels, out_channels, stride=stride, kernel_size=1, bias=False) for i in range(n_tasks)])
if self.bn_per_task:
print('Constructing per task batchnorm layers')
self.bn = nn.ModuleList([nn.BatchNorm2d(out_channels) for i in range(n_tasks)])
else:
self.bn = nn.BatchNorm2d(out_channels)
def forward(self, x, task=None):
if self.adapters:
adapt = self.adapt[task](x)
x = self.conv(x)
if self.attention:
# print('Attend, task {}'.format(task))
x = self.attend[task](x)
if self.adapters:
# print('adapt, task {}'.format(task))
x += adapt
if self.bn_per_task:
# print('Bnorm, task {}'.format(task))
x = self.bn[task](x)
else:
x = self.bn(x)
return x
class XPathLayer(nn.Module):
"""
Create per task ResNeXt path
"""
def __init__(self,
in_channels,
interm_channels,
out_channels,
stride,
n_tasks):
super(XPathLayer, self).__init__()
self.conv_reduce = nn.ModuleList([nn.Conv2d(in_channels=in_channels,
out_channels=interm_channels,
kernel_size=1,
stride=1,
padding=0,
bias=False) for i in range(n_tasks)])
self.bn_reduce = nn.ModuleList([nn.BatchNorm2d(interm_channels) for i in range(n_tasks)])
self.conv = nn.ModuleList([nn.Conv2d(in_channels=interm_channels,
out_channels=interm_channels,
kernel_size=3,
stride=stride,
padding=1) for i in range(n_tasks)])
self.bn = nn.ModuleList([nn.BatchNorm2d(interm_channels) for i in range(n_tasks)])
self.conv_expand = nn.ModuleList([nn.Conv2d(in_channels=interm_channels,
out_channels=out_channels,
kernel_size=1,
stride=1,
padding=0,
bias=False) for i in range(n_tasks)])
self.bn_expand = nn.ModuleList([nn.BatchNorm2d(out_channels) for i in range(n_tasks)])
def forward(self, x, task=None):
if task is None:
raise NotImplementedError('XPathLayer: Task not given at forward pass')
# Reduce
x = self.conv_reduce[task](x)
x = self.bn_reduce[task](x)
x = F.relu(x, inplace=True)
# Process
x = self.conv[task](x)
x = self.bn[task](x)
x = F.relu(x, inplace=True)
# Expand
x = self.conv_expand[task](x)
x = self.bn_expand[task](x)
return x
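# Illustrative sketch (appended for clarity, not part of the original file): a minimal forward
# pass through Conv2dAttentionAdapters with per-task adapters, attention and batchnorm enabled.
# The tensor shape and the number of tasks are arbitrary example values.
if __name__ == '__main__':
    _x = torch.randn(2, 16, 32, 32)
    _layer = Conv2dAttentionAdapters(in_channels=16, out_channels=32, kernel_size=3, padding=1,
                                     n_tasks=3, adapters=True, attention=True, bn_per_task=True)
    _layer.eval()
    with torch.no_grad():
        for _task_id in range(3):
            _out = _layer(_x, task=_task_id)    # same input, task-specific modulation
            print(_task_id, tuple(_out.shape))  # (2, 32, 32, 32)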
|
astmt-master
|
fblib/layers/attention.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.nn as nn
import torch.nn.functional as F
class Normalize(object):
"""Given mean: (R, G, B) and std: (R, G, B),
will normalize each channel of the torch.*Tensor, i.e.
channel = (channel - mean) / std
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
for i in range(len(self.mean)):
tensor[:, i, :, :].sub_(self.mean[i]).div_(self.std[i])
return tensor
class ImageFeatures(nn.Module):
"""
    Forward pass of an image through a pre-trained ImageNet model.
    Returns the output and the intermediate features of the forward pass.
"""
def __init__(self, net, mean=None, std=None):
super(ImageFeatures, self).__init__()
if not mean:
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
self.normalize = Normalize(mean=mean, std=std)
self.net = net
def forward(self, x):
x = (x - x.min()) / (x.max() - x.min())
x = F.interpolate(x, size=(224, 224), mode='bilinear', align_corners=False)
x = self.normalize(x)
out, features = self.net(x)
return out, features
def main():
import os
import torch
import pickle
import cv2
import numpy as np
import urllib.request
from fblib import PROJECT_ROOT_DIR
from fblib.networks.classification.resnet import resnet101
classes = pickle.load(urllib.request.urlopen(
'https://gist.githubusercontent.com/yrevar/6135f1bd8dcf2e0cc683/raw/d133d61a09d7e5a3b36b8c111a8dd5c4b5d560ee'
'/imagenet1000_clsid_to_human.pkl'))
model = resnet101(pretrained=True, features=True)
model = ImageFeatures(model)
img = cv2.imread(os.path.join(PROJECT_ROOT_DIR, 'util/img/cat.jpg')).astype(np.float32)
img = img[:, :, :, np.newaxis]
img = img.transpose((3, 2, 0, 1))
img = torch.from_numpy(img.astype(np.float32))
model = model.eval()
with torch.no_grad():
output, features = model(img)
output = torch.nn.functional.softmax(output, dim=1)
print(output.max())
print(output.argmax())
print(classes[np.asscalar(output.argmax().numpy())])
if __name__ == '__main__':
main()
|
astmt-master
|
fblib/layers/image_features.py
|
astmt-master
|
fblib/layers/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from torch.autograd import Function
class ReverseLayerF(Function):
@staticmethod
def forward(ctx, x, alpha):
ctx.alpha = alpha
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
output = grad_output.neg() * ctx.alpha
return output, None
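# Illustrative sketch (appended for clarity, not part of the original file): the reversal layer
# is the identity on the forward pass but negates and scales gradients by alpha on the way back,
# which is what makes the shared features adversarial to the task discriminator.
if __name__ == '__main__':
    import torch
    _x = torch.randn(4, 8, requires_grad=True)
    _y = ReverseLayerF.apply(_x, 0.5)   # forward: _y equals _x
    _y.sum().backward()
    print(_x.grad[0, 0].item())         # -0.5 everywhere: gradients are flipped and scaled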
|
astmt-master
|
fblib/layers/reverse_grad.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.module import Module
import numpy as np
class SoftMaxwithLoss(Module):
"""
    Cross-entropy loss for semantic segmentation; pixels labelled 255 are ignored
"""
def __init__(self):
super(SoftMaxwithLoss, self).__init__()
self.softmax = nn.LogSoftmax(dim=1)
self.criterion = nn.NLLLoss(ignore_index=255)
def forward(self, out, label):
assert not label.requires_grad
# out shape batch_size x channels x h x w
# label shape batch_size x 1 x h x w
label = label[:, 0, :, :].long()
loss = self.criterion(self.softmax(out), label)
return loss
class BalancedCrossEntropyLoss(Module):
"""
Balanced Cross Entropy Loss with optional ignore regions
"""
def __init__(self, size_average=True, batch_average=True, pos_weight=None):
super(BalancedCrossEntropyLoss, self).__init__()
self.size_average = size_average
self.batch_average = batch_average
self.pos_weight = pos_weight
def forward(self, output, label, void_pixels=None):
assert (output.size() == label.size())
labels = torch.ge(label, 0.5).float()
# Weighting of the loss, default is HED-style
if self.pos_weight is None:
num_labels_pos = torch.sum(labels)
num_labels_neg = torch.sum(1.0 - labels)
num_total = num_labels_pos + num_labels_neg
w = num_labels_neg / num_total
else:
w = self.pos_weight
output_gt_zero = torch.ge(output, 0).float()
loss_val = torch.mul(output, (labels - output_gt_zero)) - torch.log(
1 + torch.exp(output - 2 * torch.mul(output, output_gt_zero)))
loss_pos_pix = -torch.mul(labels, loss_val)
loss_neg_pix = -torch.mul(1.0 - labels, loss_val)
if void_pixels is not None and not self.pos_weight:
w_void = torch.le(void_pixels, 0.5).float()
loss_pos_pix = torch.mul(w_void, loss_pos_pix)
loss_neg_pix = torch.mul(w_void, loss_neg_pix)
num_total = num_total - torch.ge(void_pixels, 0.5).float().sum()
w = num_labels_neg / num_total
loss_pos = torch.sum(loss_pos_pix)
loss_neg = torch.sum(loss_neg_pix)
final_loss = w * loss_pos + (1 - w) * loss_neg
if self.size_average:
final_loss /= float(np.prod(label.size()))
elif self.batch_average:
final_loss /= label.size()[0]
return final_loss
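# Hedged usage sketch (added for illustration, not part of the original file):
# BalancedCrossEntropyLoss takes raw logits and a binary ground-truth map of
# the same shape and weights positives and negatives by their frequency
# (HED-style). The shapes and the sparsity of the target below are assumptions.
def _balanced_bce_example():
    criterion = BalancedCrossEntropyLoss(size_average=True)
    logits = torch.randn(2, 1, 64, 64)                   # raw network outputs
    target = (torch.rand(2, 1, 64, 64) > 0.9).float()    # sparse positives, e.g. edges
    return criterion(logits, target)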
class BinaryCrossEntropyLoss(Module):
"""
Binary Cross Entropy with ignore regions, not balanced.
"""
def __init__(self, size_average=True, batch_average=True):
super(BinaryCrossEntropyLoss, self).__init__()
self.size_average = size_average
self.batch_average = batch_average
def forward(self, output, label, void_pixels=None):
assert (output.size() == label.size())
labels = torch.ge(label, 0.5).float()
output_gt_zero = torch.ge(output, 0).float()
loss_val = torch.mul(output, (labels - output_gt_zero)) - torch.log(
1 + torch.exp(output - 2 * torch.mul(output, output_gt_zero)))
loss_pos_pix = -torch.mul(labels, loss_val)
loss_neg_pix = -torch.mul(1.0 - labels, loss_val)
if void_pixels is not None:
w_void = torch.le(void_pixels, 0.5).float()
loss_pos_pix = torch.mul(w_void, loss_pos_pix)
loss_neg_pix = torch.mul(w_void, loss_neg_pix)
loss_pos = torch.sum(loss_pos_pix)
loss_neg = torch.sum(loss_neg_pix)
final_loss = loss_pos + loss_neg
if self.size_average:
final_loss /= float(np.prod(label.size()))
elif self.batch_average:
final_loss /= label.size()[0]
return final_loss
class ImGrad(nn.Module):
"""
Compute the spatial gradients of input with Sobel filter, in order to penalize gradient mismatch.
Used for depth prediction
"""
def __init__(self):
super(ImGrad, self).__init__()
self.convx = nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=1, bias=False)
self.convy = nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=1, bias=False)
fx = np.array([[1, 0, -1],
[2, 0, -2],
[1, 0, -1]])
fy = np.array([[1, 2, 1],
[0, 0, 0],
[-1, -2, -1]])
weight_x = torch.from_numpy(fx).float().unsqueeze(0).unsqueeze(0)
weight_y = torch.from_numpy(fy).float().unsqueeze(0).unsqueeze(0)
self.convx.weight.data = weight_x
self.convy.weight.data = weight_y
for p in self.parameters():
p.requires_grad = False
def forward(self, x):
grad_x = self.convx(x)
grad_y = self.convy(x)
return grad_x, grad_y
class GradLoss(nn.Module):
"""
Compute gradient loss using ImGrad
"""
def __init__(self, ignore_label=255):
super(GradLoss, self).__init__()
self.imgrad = ImGrad()
self.ignore_label = ignore_label
def forward(self, out, label):
if self.ignore_label:
n_valid = torch.sum(label != self.ignore_label).item()
label[label == self.ignore_label] = 0
out_grad_x, out_grad_y = self.imgrad(out)
label_grad_x, label_grad_y = self.imgrad(label)
out_grad = torch.cat((out_grad_y, out_grad_x), dim=1)
label_grad = torch.cat((label_grad_y, label_grad_x), dim=1)
# L1 norm
loss = torch.abs(out_grad - label_grad)
if self.ignore_label:
loss = torch.sum(loss) / n_valid
else:
loss = torch.mean(loss)
return loss
class RMSE_log(nn.Module):
def __init__(self, ignore_label=255):
super(RMSE_log, self).__init__()
self.ignore_label = ignore_label
def forward(self, out, label):
out[out <= 0] = 1e-6
log_mse = (torch.log(label) - torch.log(out)) ** 2
# Only inside valid pixels
if self.ignore_label:
n_valid = torch.sum(label != self.ignore_label).item()
log_mse[label == self.ignore_label] = 0
log_mse = torch.sum(log_mse) / n_valid
else:
log_mse = torch.mean(log_mse)
loss = torch.sqrt(log_mse)
return loss
class L1loss(nn.Module):
"""
L1 loss with ignore labels
"""
def __init__(self, ignore_label=255):
super(L1loss, self).__init__()
self.loss_func = F.l1_loss
self.ignore_label = ignore_label
def forward(self, out, label):
if self.ignore_label:
n_valid = torch.sum(label != self.ignore_label).item()
loss = torch.abs(out - label)
loss[label == self.ignore_label] = 0
loss = loss.sum()
if self.ignore_label:
loss.div_(max(n_valid, 1e-6))
else:
            loss.div_(float(np.prod(label.size())))
return loss
class DepthLoss(nn.Module):
"""
Loss for depth prediction. Combination of L1 loss and Gradient loss
"""
def __init__(self):
super(DepthLoss, self).__init__()
self.diff_loss = L1loss(ignore_label=255)
self.grad_loss = GradLoss(ignore_label=255)
def forward(self, out, label):
loss_diff = self.diff_loss(out, label)
loss_grad = self.grad_loss(out, label)
loss = loss_diff + loss_grad
return loss
def normal_ize(bottom, dim=1):
qn = torch.norm(bottom, p=2, dim=dim).unsqueeze(dim=dim) + 1e-12
return bottom.div(qn)
class Normalize(nn.Module):
def __init__(self):
super(Normalize, self).__init__()
def forward(self, bottom):
qn = torch.norm(bottom, p=2, dim=1).unsqueeze(dim=1) + 1e-12
top = bottom.div(qn)
return top
class NormalsLoss(Module):
"""
L1 loss with ignore labels
normalize: normalization for surface normals
"""
def __init__(self, size_average=True, normalize=False, norm=1):
super(NormalsLoss, self).__init__()
self.size_average = size_average
if normalize:
self.normalize = Normalize()
else:
self.normalize = None
if norm == 1:
print('Using L1 loss for surface normals')
self.loss_func = F.l1_loss
elif norm == 2:
print('Using L2 loss for surface normals')
self.loss_func = F.mse_loss
else:
raise NotImplementedError
def forward(self, out, label, ignore_label=255):
assert not label.requires_grad
if ignore_label:
n_valid = torch.sum(label != ignore_label).item()
out[label == ignore_label] = 0
label[label == ignore_label] = 0
if self.normalize is not None:
out = self.normalize(out)
loss = self.loss_func(out, label, reduction='sum')
if self.size_average:
if ignore_label:
loss.div_(max(n_valid, 1e-6))
else:
loss.div_(float(np.prod(label.size())))
return loss
def normals_test():
from fblib.dataloaders.pascal_context import PASCALContext
flagvals = {'image': cv2.INTER_CUBIC,
'edge': cv2.INTER_NEAREST,
'semseg': cv2.INTER_NEAREST,
'human_parts': cv2.INTER_NEAREST,
'normals': cv2.INTER_CUBIC}
transform = Compose([tr.RandomHorizontalFlip(),
tr.ScaleNRotate(rots=(-90, 90), scales=(1., 1.),
flagvals=flagvals),
tr.FixedResize(resolutions={x: (512, 512) for x in flagvals},
flagvals=flagvals),
tr.AddIgnoreRegions(),
tr.ToTensor()])
dataset_human = PASCALContext(split=['train', 'val'], transform=transform, retname=True,
do_edge=True, do_human_parts=True, do_semseg=True, do_normals=True)
dataloader = torch.utils.data.DataLoader(dataset_human, batch_size=2, shuffle=False, num_workers=0)
criterion = NormalsLoss(normalize=True)
for i, sample in enumerate(dataloader):
assert (sample['normals'].size()[2:] == sample['image'].size()[2:])
loss = criterion(sample['normals'], sample['normals'])
print('Sample number: {}. Loss: {} (should be very close to 0)'.format(i, loss.item()))
def depth_test():
from fblib.dataloaders.nyud import NYUD_MT
flagvals = {'image': cv2.INTER_CUBIC,
'edge': cv2.INTER_NEAREST,
'semseg': cv2.INTER_NEAREST,
'normals': cv2.INTER_LINEAR,
'depth': cv2.INTER_LINEAR}
transform = Compose([tr.RandomHorizontalFlip(),
tr.ScaleNRotate(rots=(-90, 90), scales=(1., 1.),
flagvals=flagvals),
tr.FixedResize(resolutions={x: (512, 512) for x in flagvals},
flagvals=flagvals),
tr.AddIgnoreRegions(),
tr.ToTensor()])
dataset_human = NYUD_MT(split=['train', 'val'], transform=transform, retname=True,
do_edge=True, do_semseg=True, do_normals=True, do_depth=True)
dataloader = torch.utils.data.DataLoader(dataset_human, batch_size=2, shuffle=False, num_workers=0)
criterion = DepthLoss()
for i, sample in enumerate(dataloader):
loss = criterion(sample['depth'], sample['depth'])
print('Sample number: {}. Loss: {} (should be 0)'.format(i, loss.item()))
if __name__ == '__main__':
import cv2
from torchvision.transforms import Compose
import fblib.dataloaders.custom_transforms as tr
normals_test()
|
astmt-master
|
fblib/layers/loss.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import torch
from torch.nn import functional as F
def logit(x):
return np.log(x/(1-x+1e-08)+1e-08)
def sigmoid_np(x):
return 1/(1+np.exp(-x))
def center_crop(x, height, width):
crop_h = torch.FloatTensor([x.size()[2]]).sub(height).div(-2)
crop_w = torch.FloatTensor([x.size()[3]]).sub(width).div(-2)
# fixed indexing for PyTorch 0.4
return F.pad(x, [int(crop_w.ceil()[0]), int(crop_w.floor()[0]), int(crop_h.ceil()[0]), int(crop_h.floor()[0])])
def upsample_filt(size):
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
return (1 - abs(og[0] - center) / factor) * \
(1 - abs(og[1] - center) / factor)
def interp_surgery(lay):
"""
Set parameters s.t. deconvolutional layers compute bilinear interpolation
Only for deconvolution without groups
"""
m, k, h, w = lay.weight.data.size()
if m != k:
print('input + output channels need to be the same')
raise ValueError
if h != w:
print('filters need to be square')
raise ValueError
filt = upsample_filt(h)
for i in range(m):
lay.weight[i, i, :, :].data.copy_(torch.from_numpy(filt))
return lay.weight.data
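# Hedged usage sketch (added for illustration, not part of the original file):
# interp_surgery is typically applied to a square, group-free ConvTranspose2d
# so that the layer performs fixed bilinear upsampling. The channel count and
# kernel size below are illustrative assumptions.
def _bilinear_deconv_example():
    import torch.nn as nn
    up = nn.ConvTranspose2d(16, 16, kernel_size=4, stride=2, padding=1, bias=False)
    interp_surgery(up)  # fills up.weight in place with a bilinear kernel
    return up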
|
astmt-master
|
fblib/layers/misc_layers.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from torch import nn
from fblib.util.custom_container import SequentialMultiTask
class SELayer(nn.Module):
"""
Squeeze and Excitation Layer
"""
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y
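# Hedged usage sketch (added for illustration, not part of the original file):
# SELayer gates each channel of a feature map using global average pooling
# followed by a small bottleneck MLP. The channel count and spatial size below
# are illustrative assumptions.
def _se_layer_example():
    import torch
    se = SELayer(channel=64, reduction=16)
    x = torch.randn(2, 64, 32, 32)
    return se(x)  # same shape as x, with channels rescaled by learned gates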
class SELayerMultiTaskDict(nn.Module):
"""
Squeeze and Excitation Layer for multiple tasks (dict)
"""
def __init__(self, channel, reduction=16, tasks=None):
super(SELayerMultiTaskDict, self).__init__()
self.tasks = tasks
self.avg_pool = nn.AdaptiveAvgPool2d(1)
if self.tasks is None:
self.fc = nn.Sequential(nn.Linear(channel, channel // reduction),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel),
nn.Sigmoid())
else:
print('Initializing squeeze and excitation modules:')
self.fc = nn.ModuleDict()
for task in self.tasks:
print('SE for task: {}'.format(task))
self.fc[task] = SequentialMultiTask(nn.Linear(channel, channel // reduction),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel),
nn.Sigmoid())
def forward(self, x, task=None):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
if self.tasks:
y = self.fc[task](y).view(b, c, 1, 1)
else:
y = self.fc(y).view(b, c, 1, 1)
return x * y
class ConvCoupledSE(nn.Module):
"""
SE-layer per task, coupled with convolutions and batchnorm.
Possibility to place convolutions before/after bn, deploy bn per task, and use/not use SE attention.
"""
def __init__(self, tasks,
process_layers=None,
norm=None,
norm_kwargs=None,
norm_per_task=False,
squeeze=False,
adapters=False,
se_after_relu=True,
reduction=16):
super(ConvCoupledSE, self).__init__()
self.norm_per_task = norm_per_task
self.squeeze = squeeze
self.adapters = adapters
self.se_after_relu = se_after_relu
if not isinstance(process_layers, list):
process_layers = [process_layers]
self.process = nn.Sequential(*process_layers)
se_module = SELayerMultiTaskDict
if self.squeeze:
self.se = se_module(process_layers[-1].out_channels, tasks=tasks, reduction=reduction)
if self.adapters:
print('Using parallel adapters')
self.adapt = nn.ModuleDict({task: nn.Conv2d(process_layers[-1].in_channels, process_layers[-1].out_channels,
kernel_size=1, bias=False) for task in tasks})
if self.norm_per_task:
self.norm = nn.ModuleDict({task: norm(**norm_kwargs) for task in tasks})
else:
self.norm = norm(**norm_kwargs)
self.relu = nn.ReLU(inplace=True)
def forward(self, x, task):
if self.adapters:
x = self.process(x) + self.adapt[task](x)
else:
x = self.process(x)
if self.squeeze and not self.se_after_relu:
x = self.se(x, task)
if self.norm_per_task:
x = self.norm[task](x)
else:
x = self.norm(x)
x = self.relu(x)
if self.squeeze and self.se_after_relu:
x = self.se(x, task)
return x
|
astmt-master
|
fblib/layers/squeeze.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
from torch.autograd import Variable
from torchvision import models
from graphviz import Digraph
def make_dot(var, params):
""" Produces Graphviz representation of PyTorch autograd graph
Blue nodes are the Variables that require grad, orange are Tensors
saved for backward in torch.autograd.Function
Args:
var: output Variable
params: dict of (name, Variable) to add names to node that
require grad
"""
param_map = {id(v): k for k, v in params.items()}
node_attr = dict(style='filled',
shape='box',
align='left',
fontsize='12',
ranksep='0.1',
height='0.2')
dot = Digraph(node_attr=node_attr, graph_attr=dict(size="12,12"))
seen = set()
def size_to_str(size):
return '(' + (', ').join(['%d' % v for v in size]) + ')'
def add_nodes(var):
if var not in seen:
if torch.is_tensor(var):
dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange')
elif hasattr(var, 'variable'):
u = var.variable
node_name = '%s\n %s' % (param_map.get(id(u)), size_to_str(u.size()))
dot.node(str(id(var)), node_name, fillcolor='lightblue')
else:
dot.node(str(id(var)), str(type(var).__name__))
seen.add(var)
if hasattr(var, 'next_functions'):
for u in var.next_functions:
if u[0] is not None:
dot.edge(str(id(u[0])), str(id(var)))
add_nodes(u[0])
if hasattr(var, 'saved_tensors'):
for t in var.saved_tensors:
dot.edge(str(id(t)), str(id(var)))
add_nodes(t)
if type(var) == list:
for ii in range(0, len(var)):
add_nodes(var[ii].grad_fn)
elif type(var) == dict:
for x in var:
add_nodes(var[x].grad_fn)
else:
add_nodes(var.grad_fn)
return dot
if __name__ == "__main__":
inputs = torch.randn(1, 3, 224, 224)
resnet18 = models.resnet18()
y = resnet18(Variable(inputs))
g = make_dot(y, resnet18.state_dict())
g.view()
|
astmt-master
|
fblib/util/pdf_visualizer.py
|
astmt-master
|
fblib/util/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import collections
import collections.abc
import re
try:
    from torch._six import string_classes, int_classes
except ImportError:  # torch._six was removed in newer PyTorch releases
    string_classes, int_classes = (str, bytes), int
_use_shared_memory = False
r"""Whether to use shared memory in default_collate"""
numpy_type_map = {
'float64': torch.DoubleTensor,
'float32': torch.FloatTensor,
'float16': torch.HalfTensor,
'int64': torch.LongTensor,
'int32': torch.IntTensor,
'int16': torch.ShortTensor,
'int8': torch.CharTensor,
'uint8': torch.ByteTensor,
}
def collate_mil(batch):
"""
Puts each data field into a tensor with outer dimension batch size.
Custom-made for supporting MIL
"""
error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
elem_type = type(batch[0])
if isinstance(batch[0], torch.Tensor):
out = None
if _use_shared_memory:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum([x.numel() for x in batch])
storage = batch[0].storage()._new_shared(numel)
out = batch[0].new(storage)
return torch.stack(batch, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
elem = batch[0]
if elem_type.__name__ == 'ndarray':
# array of string classes and object
if re.search('[SaUO]', elem.dtype.str) is not None:
raise TypeError(error_msg.format(elem.dtype))
return torch.stack([torch.from_numpy(b) for b in batch], 0)
if elem.shape == (): # scalars
py_type = float if elem.dtype.name.startswith('float') else int
return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
elif isinstance(batch[0], int_classes):
return torch.LongTensor(batch)
elif isinstance(batch[0], float):
return torch.DoubleTensor(batch)
elif isinstance(batch[0], string_classes):
return batch
    elif isinstance(batch[0], collections.abc.Mapping):
batch_modified = {key: collate_mil([d[key] for d in batch]) for key in batch[0] if key.find('idx') < 0}
if 'edgeidx' in batch[0]:
batch_modified['edgeidx'] = [batch[x]['edgeidx'] for x in range(len(batch))]
return batch_modified
    elif isinstance(batch[0], collections.abc.Sequence):
transposed = zip(*batch)
return [collate_mil(samples) for samples in transposed]
raise TypeError((error_msg.format(type(batch[0]))))
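# Hedged usage sketch (added for illustration, not part of the original file):
# collate_mil is meant to be passed as collate_fn to a DataLoader so that
# dict-valued samples are stacked field by field while 'edgeidx'-style index
# fields are kept as plain lists. The dataset argument is a placeholder.
def make_mil_loader(dataset, batch_size=8, shuffle=True, num_workers=0):
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle,
                                       num_workers=num_workers, collate_fn=collate_mil)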
|
astmt-master
|
fblib/util/custom_collate.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import cv2
import numpy as np
# set random seed in each worker
worker_seed = lambda x: np.random.seed((torch.initial_seed()) % 2 ** 32)
def tens2image(tens):
"""Converts tensor with 2 or 3 dimensions to numpy array"""
im = tens.numpy()
if im.shape[0] == 1:
im = np.squeeze(im, axis=0)
if im.ndim == 3:
im = im.transpose((1, 2, 0))
return im
def pascal_color_map(N=256, normalized=False):
"""
Python implementation of the color map function for the PASCAL VOC data set.
Official Matlab version can be found in the PASCAL VOC devkit
http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#devkit
"""
def bitget(byteval, idx):
return (byteval & (1 << idx)) != 0
dtype = 'float32' if normalized else 'uint8'
cmap = np.zeros((N, 3), dtype=dtype)
for i in range(N):
r = g = b = 0
c = i
for j in range(8):
r = r | (bitget(c, 0) << 7 - j)
g = g | (bitget(c, 1) << 7 - j)
b = b | (bitget(c, 2) << 7 - j)
c = c >> 3
cmap[i] = np.array([r, g, b])
cmap = cmap / 255 if normalized else cmap
return cmap
def fixed_resize(sample, resolution, flagval=None):
"""
    Fixed resize of the input sample.
    resolution (tuple): resize image to the size specified by the tuple, e.g. (512, 512).
    resolution (int): bring the smaller side to resolution, e.g. an image of shape 321 x 481 -> 512 x 767
"""
if flagval is None:
if ((sample == 0) | (sample == 1)).all():
flagval = cv2.INTER_NEAREST
else:
flagval = cv2.INTER_CUBIC
if isinstance(resolution, int):
tmp = [resolution, resolution]
tmp[int(np.argmax(sample.shape[:2]))] = int(
round(float(resolution) / np.min(sample.shape[:2]) * np.max(sample.shape[:2])))
resolution = tuple(tmp)
if sample.ndim == 2 or (sample.ndim == 3 and sample.shape[2] == 3):
sample = cv2.resize(sample, resolution[::-1], interpolation=flagval)
else:
tmp = sample
sample = np.zeros(np.append(resolution, tmp.shape[2]), dtype=np.float32)
for ii in range(sample.shape[2]):
sample[:, :, ii] = cv2.resize(tmp[:, :, ii], resolution[::-1], interpolation=flagval)
return sample
def im_normalize(im, max_value=1):
"""
Normalize image to range 0 - max_value
"""
imn = max_value * (im - im.min()) / max((im.max() - im.min()), 1e-8)
return imn
def generate_param_report(logfile, param):
log_file = open(logfile, 'w')
for key, val in param.items():
log_file.write(key + ':' + str(val) + '\n')
log_file.close()
def ind2sub(array_shape, inds):
rows, cols = [], []
for k in range(len(inds)):
if inds[k] == 0:
continue
cols.append((inds[k].astype('int') // array_shape[1]))
rows.append((inds[k].astype('int') % array_shape[1]))
return rows, cols
def main():
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from fblib.dataloaders.bsds import BSDS500
from fblib.dataloaders.custom_transforms import ToTensor
db = BSDS500(transform=ToTensor())
dataloader = DataLoader(db, batch_size=1)
for i, sample in enumerate(dataloader):
img = sample['image']
plt.imshow(im_normalize(fixed_resize(tens2image(img), resolution=512)))
plt.show()
if __name__ == "__main__":
main()
|
astmt-master
|
fblib/util/helpers.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from collections import OrderedDict
from torch.nn.modules.container import Sequential
class SequentialMultiTask(Sequential):
"""A sequential container for multiple tasks.
Forward pass re-written to incorporate multiple tasks
"""
def __init__(self, *args):
super(SequentialMultiTask, self).__init__(*args)
def __getitem__(self, idx):
if isinstance(idx, slice):
return SequentialMultiTask(OrderedDict(list(self._modules.items())[idx]))
else:
return self._get_item_by_idx(self._modules.values(), idx)
def forward(self, input, task=None):
for module in self._modules.values():
if task is None:
input = module(input)
else:
input = module(input, task)
return input
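# Hedged usage sketch (added for illustration, not part of the original file):
# with task=None, SequentialMultiTask behaves like a plain nn.Sequential; when
# a task name is given, it is forwarded to every submodule (which must then
# accept a task argument). The layer sizes below are illustrative assumptions.
def _sequential_multitask_example():
    import torch
    from torch import nn
    block = SequentialMultiTask(nn.Linear(16, 32), nn.ReLU(inplace=True))
    x = torch.randn(4, 16)
    return block(x)  # task=None path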
|
astmt-master
|
fblib/util/custom_container.py
|
astmt-master
|
fblib/util/classification/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import time
import random
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def time_string():
ISOTIMEFORMAT = '%Y-%m-%d %X'
string = '[{}]'.format(time.strftime(ISOTIMEFORMAT, time.gmtime(time.time())))
return string
def convert_secs2time(epoch_time):
need_hour = int(epoch_time / 3600)
need_mins = int((epoch_time - 3600 * need_hour) / 60)
need_secs = int(epoch_time - 3600 * need_hour - 60 * need_mins)
return need_hour, need_mins, need_secs
def time_file_str():
ISOTIMEFORMAT = '%Y-%m-%d'
string = '{}'.format(time.strftime(ISOTIMEFORMAT, time.gmtime(time.time())))
return string + '-{}'.format(random.randint(1, 10000))
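# Hedged usage sketch (added for illustration, not part of the original file):
# AverageMeter keeps a running average weighted by the number of samples in
# each update, e.g. for tracking a loss across mini-batches. The values below
# are made up.
def _average_meter_example():
    meter = AverageMeter()
    for val, n in [(0.9, 32), (0.7, 32), (0.5, 16)]:
        meter.update(val, n)
    return meter.avg  # (0.9*32 + 0.7*32 + 0.5*16) / 80 = 0.74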
|
astmt-master
|
fblib/util/classification/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
from torchvision import utils as vutils
import fblib.util.pdf_visualizer as viz
from fblib.util.mypath import Path
def visualize_network(net, p):
net.eval()
x = torch.randn(1, 3, 512, 512)
x.requires_grad_()
# pdf visualizer
y = {}
for task in p.TASKS.NAMES:
y[task], _ = net.forward(x, task)
g = viz.make_dot(y, net.state_dict())
g.view(directory=Path.save_root_dir())
class TBVisualizer(object):
def __init__(self, tasks, min_ranges, max_ranges, batch_size):
# Visualization settings
self.grid_input = {
'image': {
'range': (0, 255),
'normalize': True,
'scale_each': True,
'nrow': batch_size
}}
self.grid_output = {}
for task in tasks:
min_range = min_ranges[task]
max_range = max_ranges[task]
self.grid_input[task] = {
'range': (min_range, max_range),
'normalize': True,
'scale_each': True,
'nrow': batch_size
}
self.grid_output[task+'_pred'] = {
'range': (min_range, max_range),
'normalize': True,
'scale_each': True,
'nrow': batch_size
}
def visualize_images_tb(self, writer, sample, outputs, global_step, tag, phase='train'):
"""Vizualize images into Tensorboard
writer: Tensorboardx summary writer
sample: dataloader sample that contains a dict of tensors, aka images and groundtruths
grid_input: see function get_visualizer()
grid_output: see function get_visualizer()
global_step: global iteration num
tag: current iteration num to tag on tensorboard
        phase: 'train' or 'test'
"""
for k in list(self.grid_input.keys()):
if k in sample.keys():
elem = sample[k].detach()
if k in {'normals', 'depth'}:
elem[elem == 255] = 0
img_grid = vutils.make_grid(elem, **self.grid_input[k])
writer.add_image(f'{k}_gt/{phase}_{tag}', img_grid, global_step)
for k in list(outputs.keys()):
if (k + '_pred') in self.grid_output.keys():
output = outputs[k].detach()
if k == 'normals':
elem = self._normalize(output)
elif k in {'depth', 'albedo'}:
elem = output
elif output.size()[1] == 1:
elem = 1 / (1 + torch.exp(-output))
else:
_, argmax_pred = torch.max(output, dim=1)
argmax_pred = argmax_pred.type(torch.FloatTensor)
elem = torch.unsqueeze(argmax_pred, 1)
img_grid = vutils.make_grid(elem, **self.grid_output[k + '_pred'])
writer.add_image(f'{k}_pred/{phase}_{tag}', img_grid, global_step)
@staticmethod
def _normalize(bottom):
qn = torch.norm(bottom, p=2, dim=1).unsqueeze(dim=1) + 1e-12
return bottom.div(qn)
|
astmt-master
|
fblib/util/mtl_tools/multitask_visualizer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def imagenet_categ_names():
return { 0: 'tench, Tinca tinca',
1: 'goldfish, Carassius auratus',
2: 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias',
3: 'tiger shark, Galeocerdo cuvieri',
4: 'hammerhead, hammerhead shark',
5: 'electric ray, crampfish, numbfish, torpedo',
6: 'stingray',
7: 'cock',
8: 'hen',
9: 'ostrich, Struthio camelus',
10: 'brambling, Fringilla montifringilla',
11: 'goldfinch, Carduelis carduelis',
12: 'house finch, linnet, Carpodacus mexicanus',
13: 'junco, snowbird',
14: 'indigo bunting, indigo finch, indigo bird, Passerina cyanea',
15: 'robin, American robin, Turdus migratorius',
16: 'bulbul',
17: 'jay',
18: 'magpie',
19: 'chickadee',
20: 'water ouzel, dipper',
21: 'kite',
22: 'bald eagle, American eagle, Haliaeetus leucocephalus',
23: 'vulture',
24: 'great grey owl, great gray owl, Strix nebulosa',
25: 'European fire salamander, Salamandra salamandra',
26: 'common newt, Triturus vulgaris',
27: 'eft',
28: 'spotted salamander, Ambystoma maculatum',
29: 'axolotl, mud puppy, Ambystoma mexicanum',
30: 'bullfrog, Rana catesbeiana',
31: 'tree frog, tree-frog',
32: 'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui',
33: 'loggerhead, loggerhead turtle, Caretta caretta',
34: 'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea',
35: 'mud turtle',
36: 'terrapin',
37: 'box turtle, box tortoise',
38: 'banded gecko',
39: 'common iguana, iguana, Iguana iguana',
40: 'American chameleon, anole, Anolis carolinensis',
41: 'whiptail, whiptail lizard',
42: 'agama',
43: 'frilled lizard, Chlamydosaurus kingi',
44: 'alligator lizard',
45: 'Gila monster, Heloderma suspectum',
46: 'green lizard, Lacerta viridis',
47: 'African chameleon, Chamaeleo chamaeleon',
48: 'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis',
49: 'African crocodile, Nile crocodile, Crocodylus niloticus',
50: 'American alligator, Alligator mississipiensis',
51: 'triceratops',
52: 'thunder snake, worm snake, Carphophis amoenus',
53: 'ringneck snake, ring-necked snake, ring snake',
54: 'hognose snake, puff adder, sand viper',
55: 'green snake, grass snake',
56: 'king snake, kingsnake',
57: 'garter snake, grass snake',
58: 'water snake',
59: 'vine snake',
60: 'night snake, Hypsiglena torquata',
61: 'boa constrictor, Constrictor constrictor',
62: 'rock python, rock snake, Python sebae',
63: 'Indian cobra, Naja naja',
64: 'green mamba',
65: 'sea snake',
66: 'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus',
67: 'diamondback, diamondback rattlesnake, Crotalus adamanteus',
68: 'sidewinder, horned rattlesnake, Crotalus cerastes',
69: 'trilobite',
70: 'harvestman, daddy longlegs, Phalangium opilio',
71: 'scorpion',
72: 'black and gold garden spider, Argiope aurantia',
73: 'barn spider, Araneus cavaticus',
74: 'garden spider, Aranea diademata',
75: 'black widow, Latrodectus mactans',
76: 'tarantula',
77: 'wolf spider, hunting spider',
78: 'tick',
79: 'centipede',
80: 'black grouse',
81: 'ptarmigan',
82: 'ruffed grouse, partridge, Bonasa umbellus',
83: 'prairie chicken, prairie grouse, prairie fowl',
84: 'peacock',
85: 'quail',
86: 'partridge',
87: 'African grey, African gray, Psittacus erithacus',
88: 'macaw',
89: 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita',
90: 'lorikeet',
91: 'coucal',
92: 'bee eater',
93: 'hornbill',
94: 'hummingbird',
95: 'jacamar',
96: 'toucan',
97: 'drake',
98: 'red-breasted merganser, Mergus serrator',
99: 'goose',
100: 'black swan, Cygnus atratus',
101: 'tusker',
102: 'echidna, spiny anteater, anteater',
103: 'platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus',
104: 'wallaby, brush kangaroo',
105: 'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus',
106: 'wombat',
107: 'jellyfish',
108: 'sea anemone, anemone',
109: 'brain coral',
110: 'flatworm, platyhelminth',
111: 'nematode, nematode worm, roundworm',
112: 'conch',
113: 'snail',
114: 'slug',
115: 'sea slug, nudibranch',
116: 'chiton, coat-of-mail shell, sea cradle, polyplacophore',
117: 'chambered nautilus, pearly nautilus, nautilus',
118: 'Dungeness crab, Cancer magister',
119: 'rock crab, Cancer irroratus',
120: 'fiddler crab',
121: 'king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica',
122: 'American lobster, Northern lobster, Maine lobster, Homarus americanus',
123: 'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish',
124: 'crayfish, crawfish, crawdad, crawdaddy',
125: 'hermit crab',
126: 'isopod',
127: 'white stork, Ciconia ciconia',
128: 'black stork, Ciconia nigra',
129: 'spoonbill',
130: 'flamingo',
131: 'little blue heron, Egretta caerulea',
132: 'American egret, great white heron, Egretta albus',
133: 'bittern',
134: 'crane',
135: 'limpkin, Aramus pictus',
136: 'European gallinule, Porphyrio porphyrio',
137: 'American coot, marsh hen, mud hen, water hen, Fulica americana',
138: 'bustard',
139: 'ruddy turnstone, Arenaria interpres',
140: 'red-backed sandpiper, dunlin, Erolia alpina',
141: 'redshank, Tringa totanus',
142: 'dowitcher',
143: 'oystercatcher, oyster catcher',
144: 'pelican',
145: 'king penguin, Aptenodytes patagonica',
146: 'albatross, mollymawk',
147: 'grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus',
148: 'killer whale, killer, orca, grampus, sea wolf, Orcinus orca',
149: 'dugong, Dugong dugon',
150: 'sea lion',
151: 'Chihuahua',
152: 'Japanese spaniel',
153: 'Maltese dog, Maltese terrier, Maltese',
154: 'Pekinese, Pekingese, Peke',
155: 'Shih-Tzu',
156: 'Blenheim spaniel',
157: 'papillon',
158: 'toy terrier',
159: 'Rhodesian ridgeback',
160: 'Afghan hound, Afghan',
161: 'basset, basset hound',
162: 'beagle',
163: 'bloodhound, sleuthhound',
164: 'bluetick',
165: 'black-and-tan coonhound',
166: 'Walker hound, Walker foxhound',
167: 'English foxhound',
168: 'redbone',
169: 'borzoi, Russian wolfhound',
170: 'Irish wolfhound',
171: 'Italian greyhound',
172: 'whippet',
173: 'Ibizan hound, Ibizan Podenco',
174: 'Norwegian elkhound, elkhound',
175: 'otterhound, otter hound',
176: 'Saluki, gazelle hound',
177: 'Scottish deerhound, deerhound',
178: 'Weimaraner',
179: 'Staffordshire bullterrier, Staffordshire bull terrier',
180: 'American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier',
181: 'Bedlington terrier',
182: 'Border terrier',
183: 'Kerry blue terrier',
184: 'Irish terrier',
185: 'Norfolk terrier',
186: 'Norwich terrier',
187: 'Yorkshire terrier',
188: 'wire-haired fox terrier',
189: 'Lakeland terrier',
190: 'Sealyham terrier, Sealyham',
191: 'Airedale, Airedale terrier',
192: 'cairn, cairn terrier',
193: 'Australian terrier',
194: 'Dandie Dinmont, Dandie Dinmont terrier',
195: 'Boston bull, Boston terrier',
196: 'miniature schnauzer',
197: 'giant schnauzer',
198: 'standard schnauzer',
199: 'Scotch terrier, Scottish terrier, Scottie',
200: 'Tibetan terrier, chrysanthemum dog',
201: 'silky terrier, Sydney silky',
202: 'soft-coated wheaten terrier',
203: 'West Highland white terrier',
204: 'Lhasa, Lhasa apso',
205: 'flat-coated retriever',
206: 'curly-coated retriever',
207: 'golden retriever',
208: 'Labrador retriever',
209: 'Chesapeake Bay retriever',
210: 'German short-haired pointer',
211: 'vizsla, Hungarian pointer',
212: 'English setter',
213: 'Irish setter, red setter',
214: 'Gordon setter',
215: 'Brittany spaniel',
216: 'clumber, clumber spaniel',
217: 'English springer, English springer spaniel',
218: 'Welsh springer spaniel',
219: 'cocker spaniel, English cocker spaniel, cocker',
220: 'Sussex spaniel',
221: 'Irish water spaniel',
222: 'kuvasz',
223: 'schipperke',
224: 'groenendael',
225: 'malinois',
226: 'briard',
227: 'kelpie',
228: 'komondor',
229: 'Old English sheepdog, bobtail',
230: 'Shetland sheepdog, Shetland sheep dog, Shetland',
231: 'collie',
232: 'Border collie',
233: 'Bouvier des Flandres, Bouviers des Flandres',
234: 'Rottweiler',
235: 'German shepherd, German shepherd dog, German police dog, alsatian',
236: 'Doberman, Doberman pinscher',
237: 'miniature pinscher',
238: 'Greater Swiss Mountain dog',
239: 'Bernese mountain dog',
240: 'Appenzeller',
241: 'EntleBucher',
242: 'boxer',
243: 'bull mastiff',
244: 'Tibetan mastiff',
245: 'French bulldog',
246: 'Great Dane',
247: 'Saint Bernard, St Bernard',
248: 'Eskimo dog, husky',
249: 'malamute, malemute, Alaskan malamute',
250: 'Siberian husky',
251: 'dalmatian, coach dog, carriage dog',
252: 'affenpinscher, monkey pinscher, monkey dog',
253: 'basenji',
254: 'pug, pug-dog',
255: 'Leonberg',
256: 'Newfoundland, Newfoundland dog',
257: 'Great Pyrenees',
258: 'Samoyed, Samoyede',
259: 'Pomeranian',
260: 'chow, chow chow',
261: 'keeshond',
262: 'Brabancon griffon',
263: 'Pembroke, Pembroke Welsh corgi',
264: 'Cardigan, Cardigan Welsh corgi',
265: 'toy poodle',
266: 'miniature poodle',
267: 'standard poodle',
268: 'Mexican hairless',
269: 'timber wolf, grey wolf, gray wolf, Canis lupus',
270: 'white wolf, Arctic wolf, Canis lupus tundrarum',
271: 'red wolf, maned wolf, Canis rufus, Canis niger',
272: 'coyote, prairie wolf, brush wolf, Canis latrans',
273: 'dingo, warrigal, warragal, Canis dingo',
274: 'dhole, Cuon alpinus',
275: 'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus',
276: 'hyena, hyaena',
277: 'red fox, Vulpes vulpes',
278: 'kit fox, Vulpes macrotis',
279: 'Arctic fox, white fox, Alopex lagopus',
280: 'grey fox, gray fox, Urocyon cinereoargenteus',
281: 'tabby, tabby cat',
282: 'tiger cat',
283: 'Persian cat',
284: 'Siamese cat, Siamese',
285: 'Egyptian cat',
286: 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor',
287: 'lynx, catamount',
288: 'leopard, Panthera pardus',
289: 'snow leopard, ounce, Panthera uncia',
290: 'jaguar, panther, Panthera onca, Felis onca',
291: 'lion, king of beasts, Panthera leo',
292: 'tiger, Panthera tigris',
293: 'cheetah, chetah, Acinonyx jubatus',
294: 'brown bear, bruin, Ursus arctos',
295: 'American black bear, black bear, Ursus americanus, Euarctos americanus',
296: 'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus',
297: 'sloth bear, Melursus ursinus, Ursus ursinus',
298: 'mongoose',
299: 'meerkat, mierkat',
300: 'tiger beetle',
301: 'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle',
302: 'ground beetle, carabid beetle',
303: 'long-horned beetle, longicorn, longicorn beetle',
304: 'leaf beetle, chrysomelid',
305: 'dung beetle',
306: 'rhinoceros beetle',
307: 'weevil',
308: 'fly',
309: 'bee',
310: 'ant, emmet, pismire',
311: 'grasshopper, hopper',
312: 'cricket',
313: 'walking stick, walkingstick, stick insect',
314: 'cockroach, roach',
315: 'mantis, mantid',
316: 'cicada, cicala',
317: 'leafhopper',
318: 'lacewing, lacewing fly',
319: "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk",
320: 'damselfly',
321: 'admiral',
322: 'ringlet, ringlet butterfly',
323: 'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus',
324: 'cabbage butterfly',
325: 'sulphur butterfly, sulfur butterfly',
326: 'lycaenid, lycaenid butterfly',
327: 'starfish, sea star',
328: 'sea urchin',
329: 'sea cucumber, holothurian',
330: 'wood rabbit, cottontail, cottontail rabbit',
331: 'hare',
332: 'Angora, Angora rabbit',
333: 'hamster',
334: 'porcupine, hedgehog',
335: 'fox squirrel, eastern fox squirrel, Sciurus niger',
336: 'marmot',
337: 'beaver',
338: 'guinea pig, Cavia cobaya',
339: 'sorrel',
340: 'zebra',
341: 'hog, pig, grunter, squealer, Sus scrofa',
342: 'wild boar, boar, Sus scrofa',
343: 'warthog',
344: 'hippopotamus, hippo, river horse, Hippopotamus amphibius',
345: 'ox',
346: 'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis',
347: 'bison',
348: 'ram, tup',
349: 'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis',
350: 'ibex, Capra ibex',
351: 'hartebeest',
352: 'impala, Aepyceros melampus',
353: 'gazelle',
354: 'Arabian camel, dromedary, Camelus dromedarius',
355: 'llama',
356: 'weasel',
357: 'mink',
358: 'polecat, fitch, foulmart, foumart, Mustela putorius',
359: 'black-footed ferret, ferret, Mustela nigripes',
360: 'otter',
361: 'skunk, polecat, wood pussy',
362: 'badger',
363: 'armadillo',
364: 'three-toed sloth, ai, Bradypus tridactylus',
365: 'orangutan, orang, orangutang, Pongo pygmaeus',
366: 'gorilla, Gorilla gorilla',
367: 'chimpanzee, chimp, Pan troglodytes',
368: 'gibbon, Hylobates lar',
369: 'siamang, Hylobates syndactylus, Symphalangus syndactylus',
370: 'guenon, guenon monkey',
371: 'patas, hussar monkey, Erythrocebus patas',
372: 'baboon',
373: 'macaque',
374: 'langur',
375: 'colobus, colobus monkey',
376: 'proboscis monkey, Nasalis larvatus',
377: 'marmoset',
378: 'capuchin, ringtail, Cebus capucinus',
379: 'howler monkey, howler',
380: 'titi, titi monkey',
381: 'spider monkey, Ateles geoffroyi',
382: 'squirrel monkey, Saimiri sciureus',
383: 'Madagascar cat, ring-tailed lemur, Lemur catta',
384: 'indri, indris, Indri indri, Indri brevicaudatus',
385: 'Indian elephant, Elephas maximus',
386: 'African elephant, Loxodonta africana',
387: 'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens',
388: 'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca',
389: 'barracouta, snoek',
390: 'eel',
391: 'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch',
392: 'rock beauty, Holocanthus tricolor',
393: 'anemone fish',
394: 'sturgeon',
395: 'gar, garfish, garpike, billfish, Lepisosteus osseus',
396: 'lionfish',
397: 'puffer, pufferfish, blowfish, globefish',
398: 'abacus',
399: 'abaya',
400: "academic gown, academic robe, judge's robe",
401: 'accordion, piano accordion, squeeze box',
402: 'acoustic guitar',
403: 'aircraft carrier, carrier, flattop, attack aircraft carrier',
404: 'airliner',
405: 'airship, dirigible',
406: 'altar',
407: 'ambulance',
408: 'amphibian, amphibious vehicle',
409: 'analog clock',
410: 'apiary, bee house',
411: 'apron',
412: 'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin',
413: 'assault rifle, assault gun',
414: 'backpack, back pack, knapsack, packsack, rucksack, haversack',
415: 'bakery, bakeshop, bakehouse',
416: 'balance beam, beam',
417: 'balloon',
418: 'ballpoint, ballpoint pen, ballpen, Biro',
419: 'Band Aid',
420: 'banjo',
421: 'bannister, banister, balustrade, balusters, handrail',
422: 'barbell',
423: 'barber chair',
424: 'barbershop',
425: 'barn',
426: 'barometer',
427: 'barrel, cask',
428: 'barrow, garden cart, lawn cart, wheelbarrow',
429: 'baseball',
430: 'basketball',
431: 'bassinet',
432: 'bassoon',
433: 'bathing cap, swimming cap',
434: 'bath towel',
435: 'bathtub, bathing tub, bath, tub',
436: 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon',
437: 'beacon, lighthouse, beacon light, pharos',
438: 'beaker',
439: 'bearskin, busby, shako',
440: 'beer bottle',
441: 'beer glass',
442: 'bell cote, bell cot',
443: 'bib',
444: 'bicycle-built-for-two, tandem bicycle, tandem',
445: 'bikini, two-piece',
446: 'binder, ring-binder',
447: 'binoculars, field glasses, opera glasses',
448: 'birdhouse',
449: 'boathouse',
450: 'bobsled, bobsleigh, bob',
451: 'bolo tie, bolo, bola tie, bola',
452: 'bonnet, poke bonnet',
453: 'bookcase',
454: 'bookshop, bookstore, bookstall',
455: 'bottlecap',
456: 'bow',
457: 'bow tie, bow-tie, bowtie',
458: 'brass, memorial tablet, plaque',
459: 'brassiere, bra, bandeau',
460: 'breakwater, groin, groyne, mole, bulwark, seawall, jetty',
461: 'breastplate, aegis, egis',
462: 'broom',
463: 'bucket, pail',
464: 'buckle',
465: 'bulletproof vest',
466: 'bullet train, bullet',
467: 'butcher shop, meat market',
468: 'cab, hack, taxi, taxicab',
469: 'caldron, cauldron',
470: 'candle, taper, wax light',
471: 'cannon',
472: 'canoe',
473: 'can opener, tin opener',
474: 'cardigan',
475: 'car mirror',
476: 'carousel, carrousel, merry-go-round, roundabout, whirligig',
477: "carpenter's kit, tool kit",
478: 'carton',
479: 'car wheel',
480: 'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM',
481: 'cassette',
482: 'cassette player',
483: 'castle',
484: 'catamaran',
485: 'CD player',
486: 'cello, violoncello',
487: 'cellular telephone, cellular phone, cellphone, cell, mobile phone',
488: 'chain',
489: 'chainlink fence',
490: 'chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour',
491: 'chain saw, chainsaw',
492: 'chest',
493: 'chiffonier, commode',
494: 'chime, bell, gong',
495: 'china cabinet, china closet',
496: 'Christmas stocking',
497: 'church, church building',
498: 'cinema, movie theater, movie theatre, movie house, picture palace',
499: 'cleaver, meat cleaver, chopper',
500: 'cliff dwelling',
501: 'cloak',
502: 'clog, geta, patten, sabot',
503: 'cocktail shaker',
504: 'coffee mug',
505: 'coffeepot',
506: 'coil, spiral, volute, whorl, helix',
507: 'combination lock',
508: 'computer keyboard, keypad',
509: 'confectionery, confectionary, candy store',
510: 'container ship, containership, container vessel',
511: 'convertible',
512: 'corkscrew, bottle screw',
513: 'cornet, horn, trumpet, trump',
514: 'cowboy boot',
515: 'cowboy hat, ten-gallon hat',
516: 'cradle',
517: 'crane',
518: 'crash helmet',
519: 'crate',
520: 'crib, cot',
521: 'Crock Pot',
522: 'croquet ball',
523: 'crutch',
524: 'cuirass',
525: 'dam, dike, dyke',
526: 'desk',
527: 'desktop computer',
528: 'dial telephone, dial phone',
529: 'diaper, nappy, napkin',
530: 'digital clock',
531: 'digital watch',
532: 'dining table, board',
533: 'dishrag, dishcloth',
534: 'dishwasher, dish washer, dishwashing machine',
535: 'disk brake, disc brake',
536: 'dock, dockage, docking facility',
537: 'dogsled, dog sled, dog sleigh',
538: 'dome',
539: 'doormat, welcome mat',
540: 'drilling platform, offshore rig',
541: 'drum, membranophone, tympan',
542: 'drumstick',
543: 'dumbbell',
544: 'Dutch oven',
545: 'electric fan, blower',
546: 'electric guitar',
547: 'electric locomotive',
548: 'entertainment center',
549: 'envelope',
550: 'espresso maker',
551: 'face powder',
552: 'feather boa, boa',
553: 'file, file cabinet, filing cabinet',
554: 'fireboat',
555: 'fire engine, fire truck',
556: 'fire screen, fireguard',
557: 'flagpole, flagstaff',
558: 'flute, transverse flute',
559: 'folding chair',
560: 'football helmet',
561: 'forklift',
562: 'fountain',
563: 'fountain pen',
564: 'four-poster',
565: 'freight car',
566: 'French horn, horn',
567: 'frying pan, frypan, skillet',
568: 'fur coat',
569: 'garbage truck, dustcart',
570: 'gasmask, respirator, gas helmet',
571: 'gas pump, gasoline pump, petrol pump, island dispenser',
572: 'goblet',
573: 'go-kart',
574: 'golf ball',
575: 'golfcart, golf cart',
576: 'gondola',
577: 'gong, tam-tam',
578: 'gown',
579: 'grand piano, grand',
580: 'greenhouse, nursery, glasshouse',
581: 'grille, radiator grille',
582: 'grocery store, grocery, food market, market',
583: 'guillotine',
584: 'hair slide',
585: 'hair spray',
586: 'half track',
587: 'hammer',
588: 'hamper',
589: 'hand blower, blow dryer, blow drier, hair dryer, hair drier',
590: 'hand-held computer, hand-held microcomputer',
591: 'handkerchief, hankie, hanky, hankey',
592: 'hard disc, hard disk, fixed disk',
593: 'harmonica, mouth organ, harp, mouth harp',
594: 'harp',
595: 'harvester, reaper',
596: 'hatchet',
597: 'holster',
598: 'home theater, home theatre',
599: 'honeycomb',
600: 'hook, claw',
601: 'hoopskirt, crinoline',
602: 'horizontal bar, high bar',
603: 'horse cart, horse-cart',
604: 'hourglass',
605: 'iPod',
606: 'iron, smoothing iron',
607: "jack-o'-lantern",
608: 'jean, blue jean, denim',
609: 'jeep, landrover',
610: 'jersey, T-shirt, tee shirt',
611: 'jigsaw puzzle',
612: 'jinrikisha, ricksha, rickshaw',
613: 'joystick',
614: 'kimono',
615: 'knee pad',
616: 'knot',
617: 'lab coat, laboratory coat',
618: 'ladle',
619: 'lampshade, lamp shade',
620: 'laptop, laptop computer',
621: 'lawn mower, mower',
622: 'lens cap, lens cover',
623: 'letter opener, paper knife, paperknife',
624: 'library',
625: 'lifeboat',
626: 'lighter, light, igniter, ignitor',
627: 'limousine, limo',
628: 'liner, ocean liner',
629: 'lipstick, lip rouge',
630: 'Loafer',
631: 'lotion',
632: 'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system',
633: "loupe, jeweler's loupe",
634: 'lumbermill, sawmill',
635: 'magnetic compass',
636: 'mailbag, postbag',
637: 'mailbox, letter box',
638: 'maillot',
639: 'maillot, tank suit',
640: 'manhole cover',
641: 'maraca',
642: 'marimba, xylophone',
643: 'mask',
644: 'matchstick',
645: 'maypole',
646: 'maze, labyrinth',
647: 'measuring cup',
648: 'medicine chest, medicine cabinet',
649: 'megalith, megalithic structure',
650: 'microphone, mike',
651: 'microwave, microwave oven',
652: 'military uniform',
653: 'milk can',
654: 'minibus',
655: 'miniskirt, mini',
656: 'minivan',
657: 'missile',
658: 'mitten',
659: 'mixing bowl',
660: 'mobile home, manufactured home',
661: 'Model T',
662: 'modem',
663: 'monastery',
664: 'monitor',
665: 'moped',
666: 'mortar',
667: 'mortarboard',
668: 'mosque',
669: 'mosquito net',
670: 'motor scooter, scooter',
671: 'mountain bike, all-terrain bike, off-roader',
672: 'mountain tent',
673: 'mouse, computer mouse',
674: 'mousetrap',
675: 'moving van',
676: 'muzzle',
677: 'nail',
678: 'neck brace',
679: 'necklace',
680: 'nipple',
681: 'notebook, notebook computer',
682: 'obelisk',
683: 'oboe, hautboy, hautbois',
684: 'ocarina, sweet potato',
685: 'odometer, hodometer, mileometer, milometer',
686: 'oil filter',
687: 'organ, pipe organ',
688: 'oscilloscope, scope, cathode-ray oscilloscope, CRO',
689: 'overskirt',
690: 'oxcart',
691: 'oxygen mask',
692: 'packet',
693: 'paddle, boat paddle',
694: 'paddlewheel, paddle wheel',
695: 'padlock',
696: 'paintbrush',
697: "pajama, pyjama, pj's, jammies",
698: 'palace',
699: 'panpipe, pandean pipe, syrinx',
700: 'paper towel',
701: 'parachute, chute',
702: 'parallel bars, bars',
703: 'park bench',
704: 'parking meter',
705: 'passenger car, coach, carriage',
706: 'patio, terrace',
707: 'pay-phone, pay-station',
708: 'pedestal, plinth, footstall',
709: 'pencil box, pencil case',
710: 'pencil sharpener',
711: 'perfume, essence',
712: 'Petri dish',
713: 'photocopier',
714: 'pick, plectrum, plectron',
715: 'pickelhaube',
716: 'picket fence, paling',
717: 'pickup, pickup truck',
718: 'pier',
719: 'piggy bank, penny bank',
720: 'pill bottle',
721: 'pillow',
722: 'ping-pong ball',
723: 'pinwheel',
724: 'pirate, pirate ship',
725: 'pitcher, ewer',
726: "plane, carpenter's plane, woodworking plane",
727: 'planetarium',
728: 'plastic bag',
729: 'plate rack',
730: 'plow, plough',
731: "plunger, plumber's helper",
732: 'Polaroid camera, Polaroid Land camera',
733: 'pole',
734: 'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria',
735: 'poncho',
736: 'pool table, billiard table, snooker table',
737: 'pop bottle, soda bottle',
738: 'pot, flowerpot',
739: "potter's wheel",
740: 'power drill',
741: 'prayer rug, prayer mat',
742: 'printer',
743: 'prison, prison house',
744: 'projectile, missile',
745: 'projector',
746: 'puck, hockey puck',
747: 'punching bag, punch bag, punching ball, punchball',
748: 'purse',
749: 'quill, quill pen',
750: 'quilt, comforter, comfort, puff',
751: 'racer, race car, racing car',
752: 'racket, racquet',
753: 'radiator',
754: 'radio, wireless',
755: 'radio telescope, radio reflector',
756: 'rain barrel',
757: 'recreational vehicle, RV, R.V.',
758: 'reel',
759: 'reflex camera',
760: 'refrigerator, icebox',
761: 'remote control, remote',
762: 'restaurant, eating house, eating place, eatery',
763: 'revolver, six-gun, six-shooter',
764: 'rifle',
765: 'rocking chair, rocker',
766: 'rotisserie',
767: 'rubber eraser, rubber, pencil eraser',
768: 'rugby ball',
769: 'rule, ruler',
770: 'running shoe',
771: 'safe',
772: 'safety pin',
773: 'saltshaker, salt shaker',
774: 'sandal',
775: 'sarong',
776: 'sax, saxophone',
777: 'scabbard',
778: 'scale, weighing machine',
779: 'school bus',
780: 'schooner',
781: 'scoreboard',
782: 'screen, CRT screen',
783: 'screw',
784: 'screwdriver',
785: 'seat belt, seatbelt',
786: 'sewing machine',
787: 'shield, buckler',
788: 'shoe shop, shoe-shop, shoe store',
789: 'shoji',
790: 'shopping basket',
791: 'shopping cart',
792: 'shovel',
793: 'shower cap',
794: 'shower curtain',
795: 'ski',
796: 'ski mask',
797: 'sleeping bag',
798: 'slide rule, slipstick',
799: 'sliding door',
800: 'slot, one-armed bandit',
801: 'snorkel',
802: 'snowmobile',
803: 'snowplow, snowplough',
804: 'soap dispenser',
805: 'soccer ball',
806: 'sock',
807: 'solar dish, solar collector, solar furnace',
808: 'sombrero',
809: 'soup bowl',
810: 'space bar',
811: 'space heater',
812: 'space shuttle',
813: 'spatula',
814: 'speedboat',
815: "spider web, spider's web",
816: 'spindle',
817: 'sports car, sport car',
818: 'spotlight, spot',
819: 'stage',
820: 'steam locomotive',
821: 'steel arch bridge',
822: 'steel drum',
823: 'stethoscope',
824: 'stole',
825: 'stone wall',
826: 'stopwatch, stop watch',
827: 'stove',
828: 'strainer',
829: 'streetcar, tram, tramcar, trolley, trolley car',
830: 'stretcher',
831: 'studio couch, day bed',
832: 'stupa, tope',
833: 'submarine, pigboat, sub, U-boat',
834: 'suit, suit of clothes',
835: 'sundial',
836: 'sunglass',
837: 'sunglasses, dark glasses, shades',
838: 'sunscreen, sunblock, sun blocker',
839: 'suspension bridge',
840: 'swab, swob, mop',
841: 'sweatshirt',
842: 'swimming trunks, bathing trunks',
843: 'swing',
844: 'switch, electric switch, electrical switch',
845: 'syringe',
846: 'table lamp',
847: 'tank, army tank, armored combat vehicle, armoured combat vehicle',
848: 'tape player',
849: 'teapot',
850: 'teddy, teddy bear',
851: 'television, television system',
852: 'tennis ball',
853: 'thatch, thatched roof',
854: 'theater curtain, theatre curtain',
855: 'thimble',
856: 'thresher, thrasher, threshing machine',
857: 'throne',
858: 'tile roof',
859: 'toaster',
860: 'tobacco shop, tobacconist shop, tobacconist',
861: 'toilet seat',
862: 'torch',
863: 'totem pole',
864: 'tow truck, tow car, wrecker',
865: 'toyshop',
866: 'tractor',
867: 'trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi',
868: 'tray',
869: 'trench coat',
870: 'tricycle, trike, velocipede',
871: 'trimaran',
872: 'tripod',
873: 'triumphal arch',
874: 'trolleybus, trolley coach, trackless trolley',
875: 'trombone',
876: 'tub, vat',
877: 'turnstile',
878: 'typewriter keyboard',
879: 'umbrella',
880: 'unicycle, monocycle',
881: 'upright, upright piano',
882: 'vacuum, vacuum cleaner',
883: 'vase',
884: 'vault',
885: 'velvet',
886: 'vending machine',
887: 'vestment',
888: 'viaduct',
889: 'violin, fiddle',
890: 'volleyball',
891: 'waffle iron',
892: 'wall clock',
893: 'wallet, billfold, notecase, pocketbook',
894: 'wardrobe, closet, press',
895: 'warplane, military plane',
896: 'washbasin, handbasin, washbowl, lavabo, wash-hand basin',
897: 'washer, automatic washer, washing machine',
898: 'water bottle',
899: 'water jug',
900: 'water tower',
901: 'whiskey jug',
902: 'whistle',
903: 'wig',
904: 'window screen',
905: 'window shade',
906: 'Windsor tie',
907: 'wine bottle',
908: 'wing',
909: 'wok',
910: 'wooden spoon',
911: 'wool, woolen, woollen',
912: 'worm fence, snake fence, snake-rail fence, Virginia fence',
913: 'wreck',
914: 'yawl',
915: 'yurt',
916: 'web site, website, internet site, site',
917: 'comic book',
918: 'crossword puzzle, crossword',
919: 'street sign',
920: 'traffic light, traffic signal, stoplight',
921: 'book jacket, dust cover, dust jacket, dust wrapper',
922: 'menu',
923: 'plate',
924: 'guacamole',
925: 'consomme',
926: 'hot pot, hotpot',
927: 'trifle',
928: 'ice cream, icecream',
929: 'ice lolly, lolly, lollipop, popsicle',
930: 'French loaf',
931: 'bagel, beigel',
932: 'pretzel',
933: 'cheeseburger',
934: 'hotdog, hot dog, red hot',
935: 'mashed potato',
936: 'head cabbage',
937: 'broccoli',
938: 'cauliflower',
939: 'zucchini, courgette',
940: 'spaghetti squash',
941: 'acorn squash',
942: 'butternut squash',
943: 'cucumber, cuke',
944: 'artichoke, globe artichoke',
945: 'bell pepper',
946: 'cardoon',
947: 'mushroom',
948: 'Granny Smith',
949: 'strawberry',
950: 'orange',
951: 'lemon',
952: 'fig',
953: 'pineapple, ananas',
954: 'banana',
955: 'jackfruit, jak, jack',
956: 'custard apple',
957: 'pomegranate',
958: 'hay',
959: 'carbonara',
960: 'chocolate sauce, chocolate syrup',
961: 'dough',
962: 'meat loaf, meatloaf',
963: 'pizza, pizza pie',
964: 'potpie',
965: 'burrito',
966: 'red wine',
967: 'espresso',
968: 'cup',
969: 'eggnog',
970: 'alp',
971: 'bubble',
972: 'cliff, drop, drop-off',
973: 'coral reef',
974: 'geyser',
975: 'lakeside, lakeshore',
976: 'promontory, headland, head, foreland',
977: 'sandbar, sand bar',
978: 'seashore, coast, seacoast, sea-coast',
979: 'valley, vale',
980: 'volcano',
981: 'ballplayer, baseball player',
982: 'groom, bridegroom',
983: 'scuba diver',
984: 'rapeseed',
985: 'daisy',
986: "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum",
987: 'corn',
988: 'acorn',
989: 'hip, rose hip, rosehip',
990: 'buckeye, horse chestnut, conker',
991: 'coral fungus',
992: 'agaric',
993: 'gyromitra',
994: 'stinkhorn, carrion fungus',
995: 'earthstar',
996: 'hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa',
997: 'bolete',
998: 'ear, spike, capitulum',
999: 'toilet tissue, toilet paper, bathroom tissue'}
|
astmt-master
|
fblib/util/db_info/imagenet_categ.py
|
astmt-master
|
fblib/util/db_info/__init__.py
|
|
astmt-master
|
fblib/util/model_resources/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
# ---- Public functions
def compute_gflops(net, in_shape=(1, 3, 224, 224), tasks=None):
net = add_flops_counting_methods(net)
net = net.cuda().train()
net.start_flops_count()
inputs = torch.rand(in_shape).requires_grad_().cuda()
if not tasks:
_ = net(inputs)
else:
_ = net.forward(inputs, tasks)
gflops = net.compute_average_flops_cost() / 1e9 / 2
return gflops
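# Hedged usage sketch (added for illustration, not part of the original file):
# compute_gflops moves the network to the GPU, so a CUDA device is required.
# torchvision's resnet18 is used here purely as an example backbone.
def _gflops_example():
    from torchvision import models
    net = models.resnet18()
    return compute_gflops(net, in_shape=(1, 3, 224, 224))  # GFLOPs per image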
def add_flops_counting_methods(net_main_module):
"""Adds flops counting functions to an existing model. After that
the flops count should be activated and the model should be run on an input
image.
Example:
fcn = add_flops_counting_methods(fcn)
fcn = fcn.cuda().train()
fcn.start_flops_count()
_ = fcn(batch)
fcn.compute_average_flops_cost() / 1e9 / 2 # Result in GFLOPs per image in batch
Important: dividing by 2 only works for resnet models -- see below for the details
of flops computation.
    Attention: we are counting multiply-add as two flops in this work, because in
    most resnet models convolutions are bias-free (BN layers act as bias there),
    so it makes sense to count multiply and add as separate flops.
    This is why in the above example we divide by 2 in order to be consistent with
    most modern benchmarks. For example, in "Spatially Adaptive Computation Time for Residual
    Networks" by Figurnov et al., multiply-add was counted as two flops.
    This module computes the average flops, which is necessary for dynamic networks that
    execute a different number of layers per input. For static networks it is enough to run the
    network once and get statistics (above example).
Implementation:
The module works by adding batch_count to the main module which tracks the sum
of all batch sizes that were run through the network.
Also each convolutional layer of the network tracks the overall number of flops
performed.
The parameters are updated with the help of registered hook-functions which
are being called each time the respective layer is executed.
Parameters
----------
net_main_module : torch.nn.Module
Main module containing network
Returns
-------
net_main_module : torch.nn.Module
Updated main module with new methods/attributes that are used
to compute flops.
"""
# adding additional methods to the existing module object,
# this is done this way so that each function has access to self object
net_main_module.start_flops_count = start_flops_count.__get__(net_main_module)
net_main_module.stop_flops_count = stop_flops_count.__get__(net_main_module)
net_main_module.reset_flops_count = reset_flops_count.__get__(net_main_module)
net_main_module.compute_average_flops_cost = compute_average_flops_cost.__get__(net_main_module)
net_main_module.reset_flops_count()
    # Adding variables necessary for masked flops computation
net_main_module.apply(add_flops_mask_variable_or_reset)
return net_main_module
def compute_average_flops_cost(self):
"""
A method that will be available after add_flops_counting_methods() is called
on a desired net object.
Returns current mean flops consumption per image.
"""
batches_count = self.__batch_counter__
if batches_count == 0:
        print('Please divide manually by the batch size')
batches_count = 1
flops_sum = 0
for module in self.modules():
if is_supported_instance(module):
flops_sum += module.__flops__
return flops_sum / batches_count
def start_flops_count(self):
"""
A method that will be available after add_flops_counting_methods() is called
on a desired net object.
Activates the computation of mean flops consumption per image.
Call it before you run the network.
"""
add_batch_counter_hook_function(self)
self.apply(add_flops_counter_hook_function)
def stop_flops_count(self):
"""
A method that will be available after add_flops_counting_methods() is called
on a desired net object.
Stops computing the mean flops consumption per image.
Call whenever you want to pause the computation.
"""
remove_batch_counter_hook_function(self)
self.apply(remove_flops_counter_hook_function)
def reset_flops_count(self):
"""
A method that will be available after add_flops_counting_methods() is called
on a desired net object.
Resets statistics computed so far.
"""
add_batch_counter_variables_or_reset(self)
self.apply(add_flops_counter_variable_or_reset)
def add_flops_mask(module, mask):
def add_flops_mask_func(module):
if isinstance(module, torch.nn.Conv2d):
module.__mask__ = mask
module.apply(add_flops_mask_func)
def remove_flops_mask(module):
module.apply(add_flops_mask_variable_or_reset)
# ---- Internal functions
def is_supported_instance(module):
if isinstance(module, torch.nn.Conv2d) \
or isinstance(module, torch.nn.Linear) \
or isinstance(module, torch.nn.Upsample):
return True
return False
def empty_flops_counter_hook(module, input, output):
module.__flops__ += 0
def conv_flops_counter_hook(conv_module, input, output):
# Can have multiple inputs, getting the first one
input = input[0]
batch_size = input.shape[0]
output_height, output_width = output.shape[2:]
kernel_height, kernel_width = conv_module.kernel_size
in_channels = conv_module.in_channels
out_channels = conv_module.out_channels
# We count multiply-add as 2 flops
if conv_module.groups == 1:
# Normal convolution
conv_per_position_flops = 2 * kernel_height * kernel_width * in_channels * out_channels
else:
# Grouped convolution
d_in = in_channels // conv_module.groups
d_out = out_channels // conv_module.groups
conv_per_position_flops = 2 * kernel_height * kernel_width * d_in * d_out * conv_module.groups
active_elements_count = batch_size * output_height * output_width
if conv_module.__mask__ is not None:
# (b, 1, h, w)
flops_mask = conv_module.__mask__.expand(batch_size, 1, output_height, output_width)
active_elements_count = flops_mask.sum()
overall_conv_flops = conv_per_position_flops * active_elements_count
bias_flops = 0
if conv_module.bias is not None:
bias_flops = out_channels * active_elements_count
overall_flops = overall_conv_flops + bias_flops
conv_module.__flops__ += overall_flops
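# Worked example (added note, not part of the original astmt source): for a 3x3
# convolution with 64 input and 128 output channels, groups=1, and a 56x56 output
# map at batch size 1, conv_per_position_flops = 2 * 3 * 3 * 64 * 128 = 147456 and
# active_elements_count = 1 * 56 * 56 = 3136, i.e. roughly 462 MFLOPs before bias.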
def upsample_flops_counter_hook(module, input, output):
output_size = output[0]
batch_size = output_size.shape[0]
output_elements_count = batch_size
for val in output_size.shape[1:]:
output_elements_count *= val
module.__flops__ += output_elements_count
def linear_flops_counter_hook(module, input, output):
input = input[0]
batch_size = input.shape[0]
module.__flops__ += batch_size * input.shape[1] * output.shape[1]
def batch_counter_hook(module, input, output):
# Can have multiple inputs, getting the first one
input = input[0]
# print(input)
batch_size = input.shape[0]
module.__batch_counter__ += batch_size
def add_batch_counter_variables_or_reset(module):
module.__batch_counter__ = 0
def add_batch_counter_hook_function(module):
if hasattr(module, '__batch_counter_handle__'):
return
handle = module.register_forward_hook(batch_counter_hook)
module.__batch_counter_handle__ = handle
def remove_batch_counter_hook_function(module):
if hasattr(module, '__batch_counter_handle__'):
module.__batch_counter_handle__.remove()
del module.__batch_counter_handle__
def add_flops_counter_variable_or_reset(module):
if is_supported_instance(module):
module.__flops__ = 0
def add_flops_counter_hook_function(module):
if is_supported_instance(module):
if hasattr(module, '__flops_handle__'):
return
if isinstance(module, torch.nn.Conv2d):
handle = module.register_forward_hook(conv_flops_counter_hook)
elif isinstance(module, torch.nn.Linear):
handle = module.register_forward_hook(linear_flops_counter_hook)
elif isinstance(module, torch.nn.Upsample):
handle = module.register_forward_hook(upsample_flops_counter_hook)
else:
handle = module.register_forward_hook(empty_flops_counter_hook)
module.__flops_handle__ = handle
def remove_flops_counter_hook_function(module):
if is_supported_instance(module):
if hasattr(module, '__flops_handle__'):
module.__flops_handle__.remove()
del module.__flops_handle__
# Also being run in the initialization
def add_flops_mask_variable_or_reset(module):
if is_supported_instance(module):
module.__mask__ = None
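# Hedged usage sketch (added for illustration, not part of the original astmt file).
# It assumes a CUDA device is available, since compute_gflops() moves the network to
# the GPU, and that torchvision is installed; resnet18 is only an example model.
if __name__ == '__main__':
    if torch.cuda.is_available():
        from torchvision.models import resnet18
        example_net = resnet18()
        print('resnet18 GFLOPs @ 224x224: {:.2f}'.format(compute_gflops(example_net)))
    else:
        print('CUDA not available; skipping the GFLOPs example')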
|
astmt-master
|
fblib/util/model_resources/flops.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
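# Hedged usage sketch (added for illustration, not part of the original file):
# counts the trainable parameters of a small torch module; nn.Linear(10, 5) has
# 10 * 5 weights + 5 biases = 55 parameters.
if __name__ == '__main__':
    import torch.nn as nn
    print(count_parameters(nn.Linear(10, 5)))  # expected: 55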
|
astmt-master
|
fblib/util/model_resources/num_parameters.py
|
astmt-master
|
fblib/util/dense_predict/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def lr_poly(base_lr, iter_, max_iter=100, power=0.9):
return base_lr * ((1 - float(iter_) / max_iter) ** power)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
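# Hedged usage sketch (added for illustration, not part of the original file):
# lr_poly decays the base learning rate polynomially towards zero at max_iter,
# and AverageMeter keeps a running average weighted by the count n.
if __name__ == '__main__':
    print(lr_poly(0.01, iter_=0))   # 0.01
    print(lr_poly(0.01, iter_=50))  # ~0.0054 with the default power=0.9
    meter = AverageMeter()
    meter.update(2.0, n=1)
    meter.update(4.0, n=3)
    print(meter.avg)                # (2.0 * 1 + 4.0 * 3) / 4 = 3.5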
|
astmt-master
|
fblib/util/dense_predict/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
def traverse_graph(var):
"""
Args:
var: output Variable
"""
seen = set()
var_lst = []
def add_nodes(var):
if var not in seen:
if hasattr(var, 'variable'):
u = var.variable
if isinstance(u, nn.Parameter):
var_lst.append(u)
seen.add(var)
if hasattr(var, 'next_functions'):
for u in var.next_functions:
if u[0] is not None:
add_nodes(u[0])
# handle multiple outputs
if isinstance(var, tuple):
for v in var:
add_nodes(v.grad_fn)
else:
add_nodes(var.grad_fn)
return var_lst
def make_closure(loss, net):
def closure():
used_vars = traverse_graph(loss)
loss.backward()
for p in net.parameters():
exists = False
for v in used_vars:
exists = (p is v)
if exists:
break
if not exists:
p.grad = None
return loss
return closure
def make_closure_fast(loss, net):
def closure():
used_vars = set(traverse_graph(loss))
loss.backward()
for p in net.parameters():
if p not in used_vars:
p.grad = None
return loss
return closure
class MWENet(nn.Module):
def __init__(self):
super(MWENet, self).__init__()
self.a = nn.Parameter(torch.rand(1))
self.b = nn.Parameter(torch.rand(1))
self.c = nn.Parameter(torch.rand(1))
def forward_b(self, x):
x = self.a * x
x = x ** self.b
return x
def forward_c(self, x):
x = self.a * x
x = x ** self.c
return x
def print_params(self, txt='Before'):
print('{0}: a: {1:.7f}, b: {2:.7f}, c: {3:.7f}'.format(
txt, self.a[0].detach().numpy(), self.b[0].detach().numpy(), self.c[0].detach().numpy()))
def perform_first_iter(net, optimizer, x):
out_b = net.forward_b(x)
out_c = net.forward_c(x)
loss = (1 - out_b) + (2 - out_c)
optimizer.zero_grad()
loss.backward()
optimizer.step()
def test_default_optimizer():
print('\n Using default optimizer. All parameters should change')
x = torch.rand(1, requires_grad=True)
net = MWENet()
optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.99, weight_decay=0.001)
# First backward to get some momentum going
perform_first_iter(net, optimizer, x)
# Without modified optimizer
out_b = net.forward_b(x)
loss = (1 - out_b)
# Before
net.print_params()
optimizer.zero_grad()
loss.backward()
optimizer.step()
# After: c must get updated without being part of the graph
net.print_params('After ')
def test_modified_optimizer():
print('\n Using modified optimizer. parameter c should not change')
x = torch.rand(1, requires_grad=True)
net = MWENet()
optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.99, weight_decay=0.0001)
# First backward to get some momentum going
perform_first_iter(net, optimizer, x)
# With modified optimizer
out_b = net.forward_b(x)
loss = (1 - out_b)
# Before
net.print_params()
optimizer.zero_grad()
optimizer.step(closure=make_closure(loss, net))
# After: c SHOULD NOT get updated because it's not part of the graph
net.print_params('After ')
if __name__ == '__main__':
test_default_optimizer()
test_modified_optimizer()
|
astmt-master
|
fblib/util/optimizer_mtl/select_used_modules.py
|
astmt-master
|
fblib/util/optimizer_mtl/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
from fblib.util.mypath import Path
import numpy as np
import torch.utils.data as data
import cv2
class FSVGTA(data.Dataset):
def __init__(self,
root=Path.db_root_dir('FSV'),
split='test',
mini=True,
transform=None,
retname=True,
overfit=False,
do_semseg=False,
do_albedo=False,
do_depth=False,
prune_rare_classes=True,
):
self.root = root
self.transform = transform
self.prune = []
if prune_rare_classes:
self.prune = [1, 4, 5, 6, 7]
self.split = split
self.retname = retname
# Original Images
self.im_ids = []
self.images = []
_image_dir = os.path.join(root, 'gta_' + split)
# Semantic segmentation
self.do_semseg = do_semseg
self.semsegs = []
# Albedo
self.do_albedo = do_albedo
self.albedos = []
# Depth
self.do_depth = do_depth
self.depths = []
# train/val/test splits are pre-cut
_splits_dir = os.path.join(root, 'gt_sets')
print("Initializing dataloader for FSV GTA {} set".format(self.split))
with open(os.path.join(os.path.join(_splits_dir, 'gta_' + self.split + '.txt')), "r") as f:
lines = f.read().splitlines()
if split == 'test' and mini:
lines = lines[0:len(lines):int(len(lines)/5000)]
for ii, line in enumerate(lines):
# Images
_image = os.path.join(_image_dir, line + "_final.webp")
# assert os.path.isfile(_image)
self.images.append(_image)
self.im_ids.append(line.rstrip('\n'))
# Semantic Segmentation
_semseg = os.path.join(_image_dir, line + "_object_id.png")
# assert os.path.isfile(_semseg)
self.semsegs.append(_semseg)
# Albedo
_albedo = os.path.join(_image_dir, line + "_albedo.webp")
# assert os.path.isfile(_albedo)
self.albedos.append(_albedo)
# Depth Estimation
_depth = os.path.join(_image_dir, line + "_disparity.webp")
# assert os.path.isfile(_depth)
self.depths.append(_depth)
if self.do_semseg:
assert (len(self.images) == len(self.semsegs))
if self.do_albedo:
assert (len(self.images) == len(self.albedos))
if self.do_depth:
assert (len(self.images) == len(self.depths))
# Uncomment to overfit to one image
if overfit:
n_of = 64
self.images = self.images[:n_of]
self.im_ids = self.im_ids[:n_of]
# Display stats
print('Number of dataset images: {:d}'.format(len(self.images)))
def __getitem__(self, index):
# if index == 1102:
# print('hi')
sample = {}
_img = self._load_img(index)
sample['image'] = _img
if self.do_semseg:
_semseg = self._load_semseg(index)
if _semseg.shape != _img.shape[:2]:
_semseg = cv2.resize(_semseg, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['semseg'] = _semseg
if self.do_albedo:
_albedo = self._load_albedo(index)
if _albedo.shape[:2] != _img.shape[:2]:
                _albedo = cv2.resize(_albedo, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['albedo'] = _albedo
if self.do_depth:
_depth = self._load_depth(index)
if _depth.shape[:2] != _img.shape[:2]:
_depth = cv2.resize(_depth, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['depth'] = _depth
if self.retname:
sample['meta'] = {'image': str(self.im_ids[index]),
'im_size': (_img.shape[0], _img.shape[1])}
if self.transform is not None:
sample = self.transform(sample)
return sample
def __len__(self):
return len(self.images)
def _load_img(self, index):
_img = cv2.imread(self.images[index])[:, :, ::-1].astype(np.float32)
return _img
def _load_semseg(self, index):
_semseg = cv2.imread(self.semsegs[index])[:, :, -1].astype(np.float32)
# Prune rare classes
if self.prune:
uniq = np.unique(_semseg)
for cls in self.prune:
if cls in uniq:
_semseg[_semseg == cls] = 0
_semseg = np.maximum(_semseg - 1, 0)
return _semseg
def _load_albedo(self, index):
_albedo = cv2.imread(self.albedos[index])[:, :, ::-1].astype(np.float32) / 255.
return _albedo
def _load_depth(self, index):
_depth = cv2.imread(self.depths[index])
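        # The stored disparity is packed across the three 8-bit channels as
        # channel0 * 65536 + channel1 * 256 + channel2, then scaled by 1 / 8192.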
_depth = (_depth[:, :, 0] * 256 * 256 + _depth[:, :, 1] * 256 + _depth[:, :, 2]).astype(np.float32) / 8192
return _depth
def __str__(self):
return 'FSV GTA Multitask (split=' + str(self.split) + ')'
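# Hedged usage sketch (added for illustration, not part of the original file): it
# assumes the FSV GTA data is already available under Path.db_root_dir('FSV') with
# the folder layout this class expects.
if __name__ == '__main__':
    dataset = FSVGTA(split='test', mini=True, do_semseg=True, do_depth=True)
    sample = dataset[0]
    print(sample['meta'], sample['image'].shape, sample['semseg'].shape, sample['depth'].shape)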
|
astmt-master
|
fblib/dataloaders/fsv.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import os.path
from pycocotools.coco import COCO
import torch.utils.data as data
from PIL import Image
import numpy as np
from fblib.util.mypath import Path
class CocoCaptions(data.Dataset):
"""`MS Coco Captions <http://mscoco.org/dataset/#captions-challenge2015>`_ Dataset.
Args:
root (string): Root directory where images are downloaded to.
annFile (string): Path to json annotation file.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.ToTensor``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
Example:
.. code:: python
import torchvision.datasets as dset
import torchvision.transforms as transforms
cap = dset.CocoCaptions(root = 'dir where images are',
annFile = 'json annotation file',
transform=transforms.ToTensor())
print('Number of samples: ', len(cap))
img, target = cap[3] # load 4th sample
print("Image Size: ", img.size())
print(target)
Output: ::
Number of samples: 82783
Image Size: (3L, 427L, 640L)
[u'A plane emitting smoke stream flying over a mountain.',
u'A plane darts across a bright blue sky behind a mountain covered in snow',
u'A plane leaves a contrail above the snowy mountain top.',
u'A mountain that has a plane flying overheard in the distance.',
u'A mountain view with a plume of smoke in the background']
"""
def __init__(self, root, annFile, transform=None, target_transform=None):
self.root = os.path.expanduser(root)
self.coco = COCO(annFile)
self.ids = list(self.coco.imgs.keys())
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: Tuple (image, target). target is a list of captions for the image.
"""
coco = self.coco
img_id = self.ids[index]
ann_ids = coco.getAnnIds(imgIds=img_id)
anns = coco.loadAnns(ann_ids)
target = [ann['caption'] for ann in anns]
path = coco.loadImgs(img_id)[0]['file_name']
img = Image.open(os.path.join(self.root, path)).convert('RGB')
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.ids)
class CocoDetection(data.Dataset):
"""`MS Coco Captions <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.
Args:
root (string): Root directory where images are downloaded to.
annFile (string): Path to json annotation file.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.ToTensor``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
"""
def __init__(self, root, annFile, transform=None, target_transform=None):
self.root = root
self.coco = COCO(annFile)
self.ids = list(self.coco.imgs.keys())
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``.
"""
coco = self.coco
img_id = self.ids[index]
ann_ids = coco.getAnnIds(imgIds=img_id)
target = coco.loadAnns(ann_ids)
path = coco.loadImgs(img_id)[0]['file_name']
img = Image.open(os.path.join(self.root, path)).convert('RGB')
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.ids)
class COCOSegmentation(data.Dataset):
"""`MS Coco Captions <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.
Args:
split (string): Select split of the dataset, eg 'val2014' or 'train2014'
area_range (list): Select min and max size of the objects eg [500, float("inf")]
pascal_categories (boolean): Select only the categories of pascal
db_root (string): Root folder where the coco dataset is stored, folder containing annotation and images folders.
transform (callable, optional): A function/transform that takes in a sample
and returns a transformed version. E.g, ``transforms.ToTensor``
retname (boolean): Return metadata about the sample
"""
PASCAL_CAT_DICT = {'airplane': 1, 'bicycle': 2, 'bird': 3, 'boat': 4, 'bottle': 5,
'bus': 6, 'car': 7, 'cat': 8, 'chair': 9, 'cow': 10,
'dining table': 11, 'dog': 12, 'horse': 13, 'motorcycle': 14, 'person': 15,
'potted plant': 16, 'sheep': 17, 'couch': 18, 'train': 19, 'tv': 20}
def __init__(self,
split,
area_range=[],
only_pascal_categories=False,
mask_per_class=True,
db_root=Path.db_root_dir('COCO'),
n_samples=-1,
transform=None,
retname=True,
overfit=False
):
self.split = split
self.root = os.path.join(db_root, 'images', split)
annFile = os.path.join(db_root, 'annotations', 'instances_' + split + '.json')
self.coco = COCO(annFile)
self.pascal_cat_name = ['person', 'bird', 'cat', 'cow', 'dog', 'horse', 'sheep', 'airplane',
'bicycle', 'boat', 'bus', 'car', 'motorcycle', 'train', 'bottle', 'chair',
'dining table', 'potted plant', 'couch', 'tv']
self.only_pascal_categories = only_pascal_categories
if self.only_pascal_categories:
cat_ids = self.coco.getCatIds(catNms=self.pascal_cat_name)
else:
cat_ids = self.coco.getCatIds()
self.img_ids = list(self.coco.imgs.keys())
self.ids = self.coco.getAnnIds(imgIds=self.img_ids, areaRng=area_range, catIds=cat_ids)
self.transform = transform
self.area_range = area_range
self.cat_ids = cat_ids
self.mask_per_class = mask_per_class
self.retname = retname
if self.mask_per_class:
self._select_imgs()
if n_samples > 0:
if self.mask_per_class:
self.img_ids = list(self.img_ids)[:n_samples]
else:
self.ids = self.ids[:n_samples]
if overfit:
n_of = 64
self.img_ids = list(self.img_ids)[:n_of]
# Display stats
if self.mask_per_class:
print("Number of images: {:d}".format(len(self.img_ids)))
else:
print('Number of images: {:d}\nNumber of objects: {:d}'.format(len(self.coco.imgs), len(self.ids)))
def _select_imgs(self):
lst = []
for x in self.img_ids:
ids_area = self.coco.getAnnIds(imgIds=x, areaRng=self.area_range, catIds=self.cat_ids)
ids = self.coco.getAnnIds(imgIds=x, areaRng=[0, float('Inf')], catIds=self.cat_ids)
if ids_area and len(ids) == len(ids_area):
lst.append(x)
self.img_ids = lst
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``.
"""
coco = self.coco
if self.mask_per_class:
img_id = self.img_ids[index]
ann_meta = []
for cat_id in self.cat_ids:
ids = coco.getAnnIds(imgIds=img_id, catIds=cat_id)
ann_meta.append(coco.loadAnns(ids))
cat_id = self.cat_ids
else:
ann_meta = coco.loadAnns(self.ids[index])
img_id = ann_meta[0]["image_id"]
cat_id = ann_meta[0]['category_id']
img_meta = coco.loadImgs(img_id)[0]
path = img_meta['file_name']
sample = {}
if self.retname:
sample['meta'] = {'image': str(path).split('.')[0],
'object': str(self.ids[index]),
'category': cat_id,
'im_size': (img_meta['height'], img_meta['width'])}
try:
img = np.array(Image.open(os.path.join(self.root, path)).convert('RGB')).astype(np.float32)
if self.mask_per_class:
target = np.zeros([img.shape[0], img.shape[1]])
for ii in range(len(cat_id)):
ann_meta_class = ann_meta[ii]
target_tmp = np.zeros([img.shape[0], img.shape[1]])
for ann in ann_meta_class:
target_tmp = np.logical_or(target_tmp > 0, np.array(coco.annToMask(ann)) > 0)
if self.only_pascal_categories:
coco_cat_name = self.coco.cats[self.cat_ids[ii]]['name']
if coco_cat_name in self.pascal_cat_name:
target[target_tmp > 0] = self.PASCAL_CAT_DICT[coco_cat_name]
else:
target[target_tmp > 0] = ii + 1
else:
target = np.zeros([img.shape[0], img.shape[1], 1])
for ann in ann_meta:
target = np.logical_or(target, np.array(coco.annToMask(ann).reshape([img.shape[0], img.shape[1], 1])))
target = target.astype(np.float32)
except ValueError:
img = np.zeros((100, 100, 3))
target = np.zeros((100, 100))
print('Error reading image ' + str(path) + ' with object id ' + str(self.ids[index]))
sample['image'] = img
if self.mask_per_class:
sample['semseg'] = target
else:
sample['gt'] = target
if self.transform is not None:
sample = self.transform(sample)
return sample
def __len__(self):
if self.mask_per_class:
return len(self.img_ids)
else:
return len(self.ids)
def __str__(self):
return 'COCOSegmentation(split='+str(self.split)+', area_range='+str(self.area_range) + ')'
if __name__ == "__main__":
from matplotlib.pyplot import imshow, show
import torchvision.transforms as transforms
import fblib.dataloaders.custom_transforms as tr
transform = transforms.Compose([tr.ToTensor()])
dataset = COCOSegmentation(split='val2017', transform=None, retname=True,
area_range=[1000, float("inf")], only_pascal_categories=True, overfit=True)
for i in range(len(dataset)):
sample = dataset[i]
imshow(sample['semseg'])
show()
|
astmt-master
|
fblib/dataloaders/coco.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.utils.data as data
class CombineIMDBs(data.Dataset):
"""
Combine two datasets, for example to create VOC and SBD training set
"""
def __init__(self, dataloaders, excluded=None, repeat=None):
self.dataloaders = dataloaders
self.excluded = excluded
self.im_ids = []
# Combine object lists
for dl in dataloaders:
for elem in dl.im_ids:
if elem not in self.im_ids:
self.im_ids.append(elem)
# Exclude
if excluded:
for dl in excluded:
for elem in dl.im_ids:
if elem in self.im_ids:
self.im_ids.remove(elem)
if repeat:
self.repeat = repeat
assert(len(repeat) == len(dataloaders))
else:
self.repeat = [1] * len(dataloaders)
# Get object pointers
self.im_list = []
new_im_ids = []
num_images = 0
for ii, dl in enumerate(dataloaders):
for jj, curr_im_id in enumerate(dl.im_ids):
if (curr_im_id in self.im_ids) and (curr_im_id not in new_im_ids):
for r in range(self.repeat[ii]):
new_im_ids.append(curr_im_id)
self.im_list.append({'db_ii': ii, 'im_ii': jj})
num_images += 1
self.im_ids = new_im_ids
print('Combined number of images: {:d}\n'.format(num_images))
def __getitem__(self, index):
_db_ii = self.im_list[index]["db_ii"]
_im_ii = self.im_list[index]['im_ii']
# print("db_id: {}, im_id: {}".format(_db_ii, _im_ii))
sample = self.dataloaders[_db_ii].__getitem__(_im_ii)
if 'meta' in sample.keys():
sample['meta']['db'] = str(self.dataloaders[_db_ii])
return sample
def __len__(self):
return len(self.im_ids)
def __str__(self):
include_db = [str(db) for db in self.dataloaders]
exclude_db = [str(db) for db in self.excluded]
return 'Included datasets:'+str(include_db)+'\n'+'Excluded datasets:'+str(exclude_db)
if __name__ == '__main__':
from matplotlib import pyplot as plt
import fblib.dataloaders as dataloaders
pascal_train = dataloaders.VOC12(split='train', do_semseg=True)
sbd = dataloaders.SBD(split=['train', 'val'], do_semseg=True)
pascal_val = dataloaders.VOC12(split='val', do_semseg=True)
dataset = CombineIMDBs([pascal_train, sbd], excluded=[pascal_val])
for i, sample in enumerate(dataset):
plt.imshow(sample['image']/255.)
plt.show()
plt.imshow(sample['semseg'])
plt.show()
|
astmt-master
|
fblib/dataloaders/combine_im_dbs.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import sys
import tarfile
import cv2
import numpy as np
import torch.utils.data as data
from six.moves import urllib
from fblib.util.mypath import Path
class MSRA(data.Dataset):
"""
MSRA10k dataset for Saliency Estimation
"""
URL = 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL/MSRA10K.tgz'
FILE = 'MSRA10K.tgz'
def __init__(self,
root=Path.db_root_dir('MSRA10K'),
download=True,
split='trainval',
transform=None,
retname=True,
overfit=False):
if download:
self._download()
self.transform = transform
self.retname = retname
self.root = root
self.gt_dir = os.path.join(self.root, 'gt')
self.image_dir = os.path.join(self.root, 'Imgs')
_splits_dir = os.path.join(self.root, 'gt_sets')
self.split = split
if isinstance(self.split, str):
self.split = [self.split]
self.images = []
self.gts = []
self.im_ids = []
for sp in self.split:
with open(os.path.join(os.path.join(_splits_dir, sp + '.txt')), "r") as f:
lines = f.readlines()
for line in lines:
line = line.strip()
_image = os.path.join(self.image_dir, line + ".jpg")
_gt = os.path.join(self.gt_dir, line + ".png")
assert os.path.isfile(_image)
assert os.path.isfile(_gt)
self.im_ids.append(line)
self.images.append(_image)
self.gts.append(_gt)
assert (len(self.images) == len(self.gts) == len(self.im_ids))
if overfit:
n_of = 64
self.images = self.images[:n_of]
self.im_ids = self.im_ids[:n_of]
# Display stats
print('Number of images: {:d}'.format(len(self.im_ids)))
def __getitem__(self, index):
sample = {}
_img = self._load_img(index)
sample['image'] = _img
_sal = self._load_sal(index)
sample['sal'] = _sal
if self.retname:
sample['meta'] = {'image': str(self.im_ids[index]),
'im_size': (_img.shape[0], _img.shape[1])}
if self.transform is not None:
sample = self.transform(sample)
return sample
def __len__(self):
return len(self.im_ids)
def _load_img(self, index):
# Read Image
_img = cv2.imread(self.images[index])[:, :, ::-1].astype(np.float32)
return _img
def _load_sal(self, index):
# Read Target object
_gt = cv2.imread(self.gts[index], flags=0).astype(np.float32) / 255.
return _gt
def _download(self):
_fpath = os.path.join(Path.db_root_dir(), self.FILE)
if os.path.isfile(_fpath):
print('Files already downloaded')
return
else:
print('Downloading ' + self.URL + ' to ' + _fpath)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> %s %.1f%%' %
(_fpath, float(count * block_size) /
float(total_size) * 100.0))
sys.stdout.flush()
urllib.request.urlretrieve(self.URL, _fpath, _progress)
# extract file
cwd = os.getcwd()
print('\nExtracting tar file')
tar = tarfile.open(_fpath)
os.chdir(Path.db_root_dir())
tar.extractall()
tar.close()
os.chdir(cwd)
print('Done!')
def __str__(self):
return 'MSRA(split=' + str(self.split) + ')'
if __name__ == '__main__':
from matplotlib import pyplot as plt
dataset = MSRA()
for i, sample in enumerate(dataset):
plt.imshow(sample['image']/255)
plt.show()
plt.imshow(sample['sal'])
plt.show()
|
astmt-master
|
fblib/dataloaders/msra10k.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import sys
import tarfile
import json
import cv2
import numpy as np
import scipy.io as sio
import torch.utils.data as data
from PIL import Image
from skimage.morphology import thin
from six.moves import urllib
from fblib import PROJECT_ROOT_DIR
from fblib.util.mypath import Path
class PASCALContext(data.Dataset):
"""
PASCAL-Context dataset, for multiple tasks
Included tasks:
1. Edge detection,
2. Semantic Segmentation,
3. Human Part Segmentation,
4. Surface Normal prediction (distilled),
5. Saliency (distilled)
"""
URL = 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL/PASCAL_MT.tgz'
FILE = 'PASCAL_MT.tgz'
HUMAN_PART = {1: {'hair': 1, 'head': 1, 'lear': 1, 'lebrow': 1, 'leye': 1, 'lfoot': 1,
'lhand': 1, 'llarm': 1, 'llleg': 1, 'luarm': 1, 'luleg': 1, 'mouth': 1,
'neck': 1, 'nose': 1, 'rear': 1, 'rebrow': 1, 'reye': 1, 'rfoot': 1,
'rhand': 1, 'rlarm': 1, 'rlleg': 1, 'ruarm': 1, 'ruleg': 1, 'torso': 1},
4: {'hair': 1, 'head': 1, 'lear': 1, 'lebrow': 1, 'leye': 1, 'lfoot': 4,
'lhand': 3, 'llarm': 3, 'llleg': 4, 'luarm': 3, 'luleg': 4, 'mouth': 1,
'neck': 2, 'nose': 1, 'rear': 1, 'rebrow': 1, 'reye': 1, 'rfoot': 4,
'rhand': 3, 'rlarm': 3, 'rlleg': 4, 'ruarm': 3, 'ruleg': 4, 'torso': 2},
6: {'hair': 1, 'head': 1, 'lear': 1, 'lebrow': 1, 'leye': 1, 'lfoot': 6,
'lhand': 4, 'llarm': 4, 'llleg': 6, 'luarm': 3, 'luleg': 5, 'mouth': 1,
'neck': 2, 'nose': 1, 'rear': 1, 'rebrow': 1, 'reye': 1, 'rfoot': 6,
'rhand': 4, 'rlarm': 4, 'rlleg': 6, 'ruarm': 3, 'ruleg': 5, 'torso': 2},
14: {'hair': 1, 'head': 1, 'lear': 1, 'lebrow': 1, 'leye': 1, 'lfoot': 14,
'lhand': 8, 'llarm': 7, 'llleg': 13, 'luarm': 6, 'luleg': 12, 'mouth': 1,
'neck': 2, 'nose': 1, 'rear': 1, 'rebrow': 1, 'reye': 1, 'rfoot': 11,
'rhand': 5, 'rlarm': 4, 'rlleg': 10, 'ruarm': 3, 'ruleg': 9, 'torso': 2}
}
VOC_CATEGORY_NAMES = ['background',
'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
CONTEXT_CATEGORY_LABELS = [0,
2, 23, 25, 31, 34,
45, 59, 65, 72, 98,
397, 113, 207, 258, 284,
308, 347, 368, 416, 427]
def __init__(self,
root=Path.db_root_dir('PASCAL_MT'),
download=True,
split='val',
transform=None,
area_thres=0,
retname=True,
overfit=False,
do_edge=True,
do_human_parts=False,
do_semseg=False,
do_normals=False,
do_sal=False,
num_human_parts=6,
):
self.root = root
if download:
self._download()
image_dir = os.path.join(self.root, 'JPEGImages')
self.transform = transform
if isinstance(split, str):
self.split = [split]
else:
split.sort()
self.split = split
self.area_thres = area_thres
self.retname = retname
# Edge Detection
self.do_edge = do_edge
self.edges = []
edge_gt_dir = os.path.join(self.root, 'pascal-context', 'trainval')
# Semantic Segmentation
self.do_semseg = do_semseg
self.semsegs = []
# Human Part Segmentation
self.do_human_parts = do_human_parts
part_gt_dir = os.path.join(self.root, 'human_parts')
self.parts = []
self.human_parts_category = 15
self.cat_part = json.load(open(os.path.join(os.path.dirname(__file__),
'../util/db_info/pascal_part.json'), 'r'))
self.cat_part["15"] = self.HUMAN_PART[num_human_parts]
self.parts_file = os.path.join(os.path.join(self.root, 'ImageSets', 'Parts'),
''.join(self.split) + '.txt')
# Surface Normal Estimation
self.do_normals = do_normals
_normal_gt_dir = os.path.join(self.root, 'normals_distill')
self.normals = []
if self.do_normals:
with open(os.path.join(PROJECT_ROOT_DIR, 'util/db_info/nyu_classes.json')) as f:
cls_nyu = json.load(f)
with open(os.path.join(PROJECT_ROOT_DIR, 'util/db_info/context_classes.json')) as f:
cls_context = json.load(f)
self.normals_valid_classes = []
for cl_nyu in cls_nyu:
if cl_nyu in cls_context and cl_nyu != 'unknown':
self.normals_valid_classes.append(cls_context[cl_nyu])
# Custom additions due to incompatibilities
self.normals_valid_classes.append(cls_context['tvmonitor'])
# Saliency
self.do_sal = do_sal
_sal_gt_dir = os.path.join(self.root, 'sal_distill')
self.sals = []
# train/val/test splits are pre-cut
_splits_dir = os.path.join(self.root, 'ImageSets', 'Context')
self.im_ids = []
self.images = []
print("Initializing dataloader for PASCAL {} set".format(''.join(self.split)))
for splt in self.split:
with open(os.path.join(os.path.join(_splits_dir, splt + '.txt')), "r") as f:
lines = f.read().splitlines()
for ii, line in enumerate(lines):
# Images
_image = os.path.join(image_dir, line + ".jpg")
assert os.path.isfile(_image)
self.images.append(_image)
self.im_ids.append(line.rstrip('\n'))
# Edges
_edge = os.path.join(edge_gt_dir, line + ".mat")
assert os.path.isfile(_edge)
self.edges.append(_edge)
# Semantic Segmentation
_semseg = self._get_semseg_fname(line)
assert os.path.isfile(_semseg)
self.semsegs.append(_semseg)
# Human Parts
_human_part = os.path.join(self.root, part_gt_dir, line + ".mat")
assert os.path.isfile(_human_part)
self.parts.append(_human_part)
_normal = os.path.join(self.root, _normal_gt_dir, line + ".png")
assert os.path.isfile(_normal)
self.normals.append(_normal)
_sal = os.path.join(self.root, _sal_gt_dir, line + ".png")
assert os.path.isfile(_sal)
self.sals.append(_sal)
if self.do_edge:
assert (len(self.images) == len(self.edges))
if self.do_human_parts:
assert (len(self.images) == len(self.parts))
if self.do_semseg:
assert (len(self.images) == len(self.semsegs))
if self.do_normals:
assert (len(self.images) == len(self.normals))
if self.do_sal:
assert (len(self.images) == len(self.sals))
if not self._check_preprocess_parts():
print('Pre-processing PASCAL dataset for human parts, this will take long, but will be done only once.')
self._preprocess_parts()
if self.do_human_parts:
# Find images which have human parts
self.has_human_parts = []
for ii in range(len(self.im_ids)):
if self.human_parts_category in self.part_obj_dict[self.im_ids[ii]]:
self.has_human_parts.append(1)
else:
self.has_human_parts.append(0)
# If the other tasks are disabled, select only the images that contain human parts, to allow batching
if not self.do_edge and not self.do_semseg and not self.do_sal and not self.do_normals:
print('Ignoring images that do not contain human parts')
for i in range(len(self.parts) - 1, -1, -1):
if self.has_human_parts[i] == 0:
del self.im_ids[i]
del self.images[i]
del self.parts[i]
del self.has_human_parts[i]
print('Number of images with human parts: {:d}'.format(np.sum(self.has_human_parts)))
# Overfit to n_of images
if overfit:
n_of = 64
self.images = self.images[:n_of]
self.im_ids = self.im_ids[:n_of]
if self.do_edge:
self.edges = self.edges[:n_of]
if self.do_semseg:
self.semsegs = self.semsegs[:n_of]
if self.do_human_parts:
self.parts = self.parts[:n_of]
if self.do_normals:
self.normals = self.normals[:n_of]
if self.do_sal:
self.sals = self.sals[:n_of]
# Display stats
print('Number of dataset images: {:d}'.format(len(self.images)))
def __getitem__(self, index):
sample = {}
_img = self._load_img(index)
sample['image'] = _img
if self.do_edge:
_edge = self._load_edge(index)
if _edge.shape != _img.shape[:2]:
_edge = cv2.resize(_edge, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['edge'] = _edge
if self.do_human_parts:
_human_parts, _ = self._load_human_parts(index)
if _human_parts.shape != _img.shape[:2]:
_human_parts = cv2.resize(_human_parts, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['human_parts'] = _human_parts
if self.do_semseg:
_semseg = self._load_semseg(index)
if _semseg.shape != _img.shape[:2]:
_semseg = cv2.resize(_semseg, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['semseg'] = _semseg
if self.do_normals:
_normals = self._load_normals_distilled(index)
if _normals.shape[:2] != _img.shape[:2]:
_normals = cv2.resize(_normals, _img.shape[:2][::-1], interpolation=cv2.INTER_CUBIC)
sample['normals'] = _normals
if self.do_sal:
_sal = self._load_sal_distilled(index)
if _sal.shape[:2] != _img.shape[:2]:
_sal = cv2.resize(_sal, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['sal'] = _sal
if self.retname:
sample['meta'] = {'image': str(self.im_ids[index]),
'im_size': (_img.shape[0], _img.shape[1])}
if self.transform is not None:
sample = self.transform(sample)
return sample
def __len__(self):
return len(self.images)
def _load_img(self, index):
_img = np.array(Image.open(self.images[index]).convert('RGB')).astype(np.float32)
return _img
def _load_edge(self, index):
# Read Target object
_tmp = sio.loadmat(self.edges[index])
_edge = cv2.Laplacian(_tmp['LabelMap'], cv2.CV_64F)
_edge = thin(np.abs(_edge) > 0).astype(np.float32)
return _edge
def _load_human_parts(self, index):
if self.has_human_parts[index]:
# Read Target object
_part_mat = sio.loadmat(self.parts[index])['anno'][0][0][1][0]
_inst_mask = _target = None
for _obj_ii in range(len(_part_mat)):
has_human = _part_mat[_obj_ii][1][0][0] == self.human_parts_category
has_parts = len(_part_mat[_obj_ii][3]) != 0
if has_human and has_parts:
if _inst_mask is None:
_inst_mask = _part_mat[_obj_ii][2].astype(np.float32)
_target = np.zeros(_inst_mask.shape)
else:
_inst_mask = np.maximum(_inst_mask, _part_mat[_obj_ii][2].astype(np.float32))
n_parts = len(_part_mat[_obj_ii][3][0])
for part_i in range(n_parts):
cat_part = str(_part_mat[_obj_ii][3][0][part_i][0][0])
mask_id = self.cat_part[str(self.human_parts_category)][cat_part]
mask = _part_mat[_obj_ii][3][0][part_i][1].astype(bool)
_target[mask] = mask_id
if _target is not None:
_target, _inst_mask = _target.astype(np.float32), _inst_mask.astype(np.float32)
else:
_target, _inst_mask = np.zeros((512, 512), dtype=np.float32), np.zeros((512, 512), dtype=np.float32)
return _target, _inst_mask
else:
return np.zeros((512, 512), dtype=np.float32), np.zeros((512, 512), dtype=np.float32)
def _load_semseg(self, index):
_semseg = np.array(Image.open(self.semsegs[index])).astype(np.float32)
return _semseg
def _load_normals_distilled(self, index):
_tmp = np.array(Image.open(self.normals[index])).astype(np.float32)
_tmp = 2.0 * _tmp / 255.0 - 1.0
labels = sio.loadmat(os.path.join(self.root, 'pascal-context', 'trainval', self.im_ids[index] + '.mat'))
labels = labels['LabelMap']
        _normals = np.zeros(_tmp.shape, dtype=np.float32)
for x in np.unique(labels):
if x in self.normals_valid_classes:
_normals[labels == x, :] = _tmp[labels == x, :]
return _normals
def _load_sal_distilled(self, index):
_sal = np.array(Image.open(self.sals[index])).astype(np.float32) / 255.
_sal = (_sal > 0.5).astype(np.float32)
return _sal
def _get_semseg_fname(self, fname):
fname_voc = os.path.join(self.root, 'semseg', 'VOC12', fname + '.png')
fname_context = os.path.join(self.root, 'semseg', 'pascal-context', fname + '.png')
if os.path.isfile(fname_voc):
seg = fname_voc
elif os.path.isfile(fname_context):
seg = fname_context
else:
seg = None
print('Segmentation for im: {} was not found'.format(fname))
return seg
def _check_preprocess_parts(self):
_obj_list_file = self.parts_file
if not os.path.isfile(_obj_list_file):
return False
else:
self.part_obj_dict = json.load(open(_obj_list_file, 'r'))
return list(np.sort([str(x) for x in self.part_obj_dict.keys()])) == list(np.sort(self.im_ids))
def _preprocess_parts(self):
self.part_obj_dict = {}
obj_counter = 0
for ii in range(len(self.im_ids)):
# Read object masks and get number of objects
if ii % 100 == 0:
print("Processing image: {}".format(ii))
part_mat = sio.loadmat(
os.path.join(self.root, 'human_parts', '{}.mat'.format(self.im_ids[ii])))
n_obj = len(part_mat['anno'][0][0][1][0])
# Get the categories from these objects
_cat_ids = []
for jj in range(n_obj):
obj_area = np.sum(part_mat['anno'][0][0][1][0][jj][2])
obj_cat = int(part_mat['anno'][0][0][1][0][jj][1])
if obj_area > self.area_thres:
_cat_ids.append(int(part_mat['anno'][0][0][1][0][jj][1]))
else:
_cat_ids.append(-1)
obj_counter += 1
self.part_obj_dict[self.im_ids[ii]] = _cat_ids
with open(self.parts_file, 'w') as outfile:
outfile.write('{{\n\t"{:s}": {:s}'.format(self.im_ids[0], json.dumps(self.part_obj_dict[self.im_ids[0]])))
for ii in range(1, len(self.im_ids)):
outfile.write(
',\n\t"{:s}": {:s}'.format(self.im_ids[ii], json.dumps(self.part_obj_dict[self.im_ids[ii]])))
outfile.write('\n}\n')
print('Preprocessing for parts finished')
def _download(self):
_fpath = os.path.join(Path.db_root_dir(), self.FILE)
if os.path.isfile(_fpath):
print('Files already downloaded')
return
else:
print('Downloading ' + self.URL + ' to ' + _fpath)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> %s %.1f%%' %
(_fpath, float(count * block_size) /
float(total_size) * 100.0))
sys.stdout.flush()
urllib.request.urlretrieve(self.URL, _fpath, _progress)
# extract file
cwd = os.getcwd()
print('\nExtracting tar file')
tar = tarfile.open(_fpath)
os.chdir(Path.db_root_dir())
tar.extractall()
tar.close()
os.chdir(cwd)
print('Done!')
def __str__(self):
return 'PASCAL_MT(split=' + str(self.split) + ')'
def test_all():
import matplotlib.pyplot as plt
import torch
import fblib.dataloaders.custom_transforms as tr
from torchvision import transforms
from fblib.util.custom_collate import collate_mil
transform = transforms.Compose([tr.RandomHorizontalFlip(),
tr.ScaleNRotate(rots=(-90, 90), scales=(1., 1.),
flagvals={'image': cv2.INTER_CUBIC,
'edge': cv2.INTER_NEAREST,
'semseg': cv2.INTER_NEAREST,
'human_parts': cv2.INTER_NEAREST,
'normals': cv2.INTER_CUBIC,
'sal': cv2.INTER_NEAREST}),
tr.FixedResize(resolutions={'image': (512, 512),
'edge': (512, 512),
'semseg': (512, 512),
'human_parts': (512, 512),
'normals': (512, 512),
'sal': (512, 512)},
flagvals={'image': cv2.INTER_CUBIC,
'edge': cv2.INTER_NEAREST,
'semseg': cv2.INTER_NEAREST,
'human_parts': cv2.INTER_NEAREST,
'normals': cv2.INTER_CUBIC,
'sal': cv2.INTER_NEAREST}),
tr.AddIgnoreRegions(),
tr.ToTensor()])
dataset = PASCALContext(split='train', transform=transform, retname=True,
do_edge=True,
do_semseg=True,
do_human_parts=True,
do_normals=True,
do_sal=True)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=False, num_workers=0)
# plt.ion()
for i, sample in enumerate(dataloader):
print(i)
for j in range(sample['image'].shape[0]):
f, ax_arr = plt.subplots(2, 3)
for k in range(len(ax_arr)):
for l in range(len(ax_arr[k])):
ax_arr[k][l].cla()
ax_arr[0][0].set_title('Input Image')
ax_arr[0][0].imshow(np.transpose(sample['image'][j], (1, 2, 0))/255.)
ax_arr[0][1].set_title('Edge')
ax_arr[0][1].imshow(np.transpose(sample['edge'][j], (1, 2, 0))[:, :, 0])
ax_arr[0][2].set_title('Semantic Segmentation')
ax_arr[0][2].imshow(np.transpose(sample['semseg'][j], (1, 2, 0))[:, :, 0] / 20.)
ax_arr[1][0].set_title('Human Part Segmentation')
ax_arr[1][0].imshow(np.transpose(sample['human_parts'][j], (1, 2, 0))[:, :, 0] / 6.)
ax_arr[1][1].set_title('Surface Normals')
ax_arr[1][1].imshow(np.transpose(sample['normals'][j], (1, 2, 0)))
ax_arr[1][2].set_title('Saliency')
ax_arr[1][2].imshow(np.transpose(sample['sal'][j], (1, 2, 0))[:, :, 0])
plt.show()
break
if __name__ == '__main__':
test_all()
|
astmt-master
|
fblib/dataloaders/pascal_context.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import sys
import tarfile
import cv2
from PIL import Image
import numpy as np
import torch.utils.data as data
import scipy.io as sio
from six.moves import urllib
from fblib.util.mypath import Path
class NYUD_MT(data.Dataset):
"""
NYUD dataset for multi-task learning.
Includes edge detection, semantic segmentation, surface normals, and depth prediction
"""
URL = 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL/NYUD_MT.tgz'
FILE = 'NYUD_MT.tgz'
def __init__(self,
root=Path.db_root_dir('NYUD_MT'),
download=True,
split='val',
transform=None,
retname=True,
overfit=False,
do_edge=True,
do_semseg=False,
do_normals=False,
do_depth=False,
):
self.root = root
if download:
self._download()
self.transform = transform
if isinstance(split, str):
self.split = [split]
else:
split.sort()
self.split = split
self.retname = retname
# Original Images
self.im_ids = []
self.images = []
_image_dir = os.path.join(root, 'images')
# Edge Detection
self.do_edge = do_edge
self.edges = []
_edge_gt_dir = os.path.join(root, 'edge')
# Semantic segmentation
self.do_semseg = do_semseg
self.semsegs = []
_semseg_gt_dir = os.path.join(root, 'segmentation')
# Surface Normals
self.do_normals = do_normals
self.normals = []
_normal_gt_dir = os.path.join(root, 'normals')
# Depth
self.do_depth = do_depth
self.depths = []
_depth_gt_dir = os.path.join(root, 'depth')
# train/val/test splits are pre-cut
_splits_dir = os.path.join(root, 'gt_sets')
print('Initializing dataloader for NYUD {} set'.format(''.join(self.split)))
for splt in self.split:
with open(os.path.join(os.path.join(_splits_dir, splt + '.txt')), 'r') as f:
lines = f.read().splitlines()
for ii, line in enumerate(lines):
# Images
_image = os.path.join(_image_dir, line + '.jpg')
assert os.path.isfile(_image)
self.images.append(_image)
self.im_ids.append(line.rstrip('\n'))
# Edges
_edge = os.path.join(self.root, _edge_gt_dir, line + '.png')
assert os.path.isfile(_edge)
self.edges.append(_edge)
# Semantic Segmentation
_semseg = os.path.join(self.root, _semseg_gt_dir, line + '.mat')
assert os.path.isfile(_semseg)
self.semsegs.append(_semseg)
_normal = os.path.join(self.root, _normal_gt_dir, line + '.jpg')
assert os.path.isfile(_normal)
self.normals.append(_normal)
_depth = os.path.join(self.root, _depth_gt_dir, line + '.mat')
assert os.path.isfile(_depth)
self.depths.append(_depth)
if self.do_edge:
assert (len(self.images) == len(self.edges))
if self.do_semseg:
assert (len(self.images) == len(self.semsegs))
if self.do_normals:
assert (len(self.images) == len(self.normals))
if self.do_depth:
assert (len(self.images) == len(self.depths))
# Uncomment to overfit to one image
if overfit:
n_of = 64
self.images = self.images[:n_of]
self.im_ids = self.im_ids[:n_of]
# Display stats
print('Number of dataset images: {:d}'.format(len(self.images)))
def __getitem__(self, index):
# if index == 1102:
# print('hi')
sample = {}
_img = self._load_img(index)
sample['image'] = _img
if self.do_edge:
_edge = self._load_edge(index)
if _edge.shape != _img.shape[:2]:
_edge = cv2.resize(_edge, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['edge'] = _edge
if self.do_semseg:
_semseg = self._load_semseg(index)
if _semseg.shape != _img.shape[:2]:
_semseg = cv2.resize(_semseg, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['semseg'] = _semseg
if self.do_normals:
_normals = self._load_normals(index)
if _normals.shape[:2] != _img.shape[:2]:
_normals = cv2.resize(_normals, _img.shape[:2][::-1], interpolation=cv2.INTER_CUBIC)
sample['normals'] = _normals
if self.do_depth:
_depth = self._load_depth(index)
if _depth.shape[:2] != _img.shape[:2]:
_depth = cv2.resize(_depth, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['depth'] = _depth
if self.retname:
sample['meta'] = {'image': str(self.im_ids[index]),
'im_size': (_img.shape[0], _img.shape[1])}
if self.transform is not None:
sample = self.transform(sample)
return sample
def __len__(self):
return len(self.images)
def _load_img(self, index):
_img = np.array(Image.open(self.images[index]).convert('RGB')).astype(np.float32)
return _img
def _load_edge(self, index):
_edge = np.array(Image.open(self.edges[index])).astype(np.float32) / 255.
return _edge
def _load_semseg(self, index):
# Note: Related works are ignoring the background class (40-way classification), such as:
# _semseg = np.array(sio.loadmat(self.semsegs[index])['segmentation']).astype(np.float32) - 1
# _semseg[_semseg == -1] = 255
# However, all experiments of ASTMT were conducted by using 41-way classification:
_semseg = np.array(sio.loadmat(self.semsegs[index])['segmentation']).astype(np.float32)
return _semseg
def _load_normals(self, index):
_tmp = np.array(Image.open(self.normals[index])).astype(np.float32)
_normals = 2.0 * _tmp / 255.0 - 1.0
return _normals
def _load_depth(self, index):
_depth = np.array(sio.loadmat(self.depths[index])['depth']).astype(np.float32)
return _depth
def _download(self):
_fpath = os.path.join(Path.db_root_dir(), self.FILE)
if os.path.isfile(_fpath):
print('Files already downloaded')
return
else:
print('Downloading ' + self.URL + ' to ' + _fpath)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> %s %.1f%%' %
(_fpath, float(count * block_size) /
float(total_size) * 100.0))
sys.stdout.flush()
urllib.request.urlretrieve(self.URL, _fpath, _progress)
# extract file
cwd = os.getcwd()
print('\nExtracting tar file')
tar = tarfile.open(_fpath)
os.chdir(Path.db_root_dir())
tar.extractall()
tar.close()
os.chdir(cwd)
print('Done!')
def __str__(self):
return 'NYUD Multitask (split=' + str(self.split) + ')'
class NYUDRaw(data.Dataset):
"""
NYUD dataset for Surface Normal and Depth Estimation using NYUD raw data.
"""
def __init__(self,
root=Path.db_root_dir('NYUD_raw'),
split='train',
transform=None,
do_normals=True,
do_depth=False,
retname=True,
overfit=False,
):
self.root = root
self.transform = transform
self.split = split
self.retname = retname
self.do_normals = do_normals
self.do_depth = do_depth
# Original Images
self.im_ids = []
self.images = []
_image_dir = os.path.join(root, self.split, 'images')
_mask_gt_dir = os.path.join(root, self.split, 'masks')
# Surface Normals
self.normals = []
nrm_ext = '.png' if self.split == 'train' else '.jpg'
self.masks = []
_normal_gt_dir = os.path.join(root, self.split, 'normals')
# Monocular depth
self.depths = []
_depth_gt_dir = os.path.join(root, self.split, 'depth')
# train/val/test splits are pre-cut
_splits_dir = os.path.join(root, 'gt_sets')
print('Initializing dataloader for NYUD Raw, {} set'.format(self.split))
with open(os.path.join(os.path.join(_splits_dir, self.split + '.txt')), 'r') as f:
lines = f.read().splitlines()
for ii, line in enumerate(lines):
# Images
_image = os.path.join(_image_dir, line + '.jpg')
assert os.path.isfile(_image)
self.images.append(_image)
self.im_ids.append(line.rstrip('\n'))
if self.do_normals:
# Normals
_normal = os.path.join(self.root, _normal_gt_dir, line + nrm_ext)
assert os.path.isfile(_normal)
self.normals.append(_normal)
if self.do_depth:
# Depth
_depth = os.path.join(self.root, _depth_gt_dir, line + '.mat')
assert os.path.isfile(_depth)
self.depths.append(_depth)
if self.split == 'train':
# Masks (only available for train data)
_mask = os.path.join(self.root, _mask_gt_dir, line + '.png')
assert os.path.isfile(_mask)
self.masks.append(_mask)
if self.do_normals:
assert(len(self.images) == len(self.normals))
if self.do_depth:
assert(len(self.images) == len(self.depths))
if self.split == 'train':
assert(len(self.images) == len(self.masks))
        # Uncomment to overfit to one image
if overfit:
n_of = 64
self.images = self.images[:n_of]
self.im_ids = self.im_ids[:n_of]
        # Display stats
        print('Number of dataset images: {:d}'.format(len(self.images)))
def __getitem__(self, index):
sample = {}
_img = self._load_img(index)
sample['image'] = _img
if self.do_normals:
_normals = self._load_normals(index)
sample['normals'] = _normals
if self.do_depth:
_depth = self._load_depth(index)
sample['depth'] = _depth
if self.retname:
sample['meta'] = {'image': str(self.im_ids[index]),
'im_size': (_img.shape[0], _img.shape[1])}
if self.transform is not None:
sample = self.transform(sample)
return sample
def _load_img(self, index):
_img = cv2.imread(self.images[index])[:, :, ::-1].astype(np.float32)
return _img
def _load_normals(self, index):
_tmp = cv2.imread(self.normals[index])[:, :, ::-1].astype(np.float32)
_normals = 2.0 * _tmp / 255.0 - 1.0
if self.split == 'train':
_mask = cv2.imread(self.masks[index], 0)
_normals[_mask == 0, :] = 0
return _normals
def _load_depth(self, index):
_depth = np.array(sio.loadmat(self.depths[index])['depth']).astype(np.float32)
if self.split == 'train':
_mask = cv2.imread(self.masks[index], 0)
_depth[_mask == 0] = 0
return _depth
def __len__(self):
return len(self.images)
def __str__(self):
        return 'NYUD-v2 Raw(split=' + str(self.split) + ')'
def test_mt():
transform = transforms.Compose([tr.RandomHorizontalFlip(),
tr.ScaleNRotate(rots=(-90, 90), scales=(1., 1.),
flagvals={'image': cv2.INTER_CUBIC,
'edge': cv2.INTER_NEAREST,
'semseg': cv2.INTER_NEAREST,
'normals': cv2.INTER_LINEAR,
'depth': cv2.INTER_LINEAR}),
tr.FixedResize(resolutions={'image': (512, 512),
'edge': (512, 512),
'semseg': (512, 512),
'normals': (512, 512),
'depth': (512, 512)},
flagvals={'image': cv2.INTER_CUBIC,
'edge': cv2.INTER_NEAREST,
'semseg': cv2.INTER_NEAREST,
'normals': cv2.INTER_LINEAR,
'depth': cv2.INTER_LINEAR}),
tr.AddIgnoreRegions(),
tr.ToTensor()])
dataset = NYUD_MT(split='train', transform=transform, retname=True,
do_edge=True,
do_semseg=True,
do_normals=True,
do_depth=True)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=5, shuffle=False, num_workers=5)
for i, sample in enumerate(dataloader):
imshow(sample['image'][0, 0])
show()
imshow(sample['edge'][0, 0])
show()
imshow(sample['semseg'][0, 0])
show()
imshow(sample['normals'][0, 0])
show()
imshow(sample['depth'][0, 0])
show()
if __name__ == '__main__':
from matplotlib.pyplot import imshow, show
import torch
import fblib.dataloaders.custom_transforms as tr
from torchvision import transforms
test_mt()
|
astmt-master
|
fblib/dataloaders/nyud.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy.random as random
import numpy as np
import torch
import cv2
import math
import fblib.util.helpers as helpers
class ScaleNRotate(object):
"""Scale (zoom-in, zoom-out) and Rotate the image and the ground truth.
Args:
two possibilities:
1. rots (tuple): (minimum, maximum) rotation angle
scales (tuple): (minimum, maximum) scale
2. rots [list]: list of fixed possible rotation angles
scales [list]: list of fixed possible scales
"""
def __init__(self, rots=(-30, 30), scales=(.75, 1.25), semseg=False, flagvals=None):
assert (isinstance(rots, type(scales)))
self.rots = rots
self.scales = scales
self.semseg = semseg
self.flagvals = flagvals
def __call__(self, sample):
if type(self.rots) == tuple:
# Continuous range of scales and rotations
rot = (self.rots[1] - self.rots[0]) * random.random() - \
(self.rots[1] - self.rots[0])/2
sc = (self.scales[1] - self.scales[0]) * random.random() - \
(self.scales[1] - self.scales[0]) / 2 + 1
elif type(self.rots) == list:
# Fixed range of scales and rotations
rot = self.rots[random.randint(0, len(self.rots))]
sc = self.scales[random.randint(0, len(self.scales))]
for elem in sample.keys():
if 'meta' in elem:
continue
tmp = sample[elem]
h, w = tmp.shape[:2]
center = (w / 2, h / 2)
assert(center != 0) # Strange behaviour warpAffine
M = cv2.getRotationMatrix2D(center, rot, sc)
if self.flagvals is None:
if ((tmp == 0) | (tmp == 1)).all():
flagval = cv2.INTER_NEAREST
elif 'gt' in elem and self.semseg:
flagval = cv2.INTER_NEAREST
else:
flagval = cv2.INTER_CUBIC
else:
flagval = self.flagvals[elem]
if elem == 'normals':
# Rotate Normals properly
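                    # Only the in-plane (x, y) components are rotated: they are converted
                    # to an angle/magnitude representation, the rotation angle is added,
                    # and the z component is left unchanged.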
in_plane = np.arctan2(tmp[:, :, 0], tmp[:, :, 1])
nrm_0 = np.sqrt(tmp[:, :, 0] ** 2 + tmp[:, :, 1] ** 2)
                    rot_rad = rot * 2 * math.pi / 360
tmp[:, :, 0] = np.sin(in_plane + rot_rad) * nrm_0
tmp[:, :, 1] = np.cos(in_plane + rot_rad) * nrm_0
tmp = cv2.warpAffine(tmp, M, (w, h), flags=flagval)
sample[elem] = tmp
return sample
def __str__(self):
return 'ScaleNRotate:(rot='+str(self.rots)+',scale='+str(self.scales)+')'
class FixedResize(object):
"""Resize the image and the ground truth to specified resolution.
Args:
resolutions (dict): the list of resolutions
"""
def __init__(self, resolutions=None, flagvals=None):
self.resolutions = resolutions
self.flagvals = flagvals
if self.flagvals is not None:
assert(len(self.resolutions) == len(self.flagvals))
def __call__(self, sample):
# Fixed range of scales
if self.resolutions is None:
return sample
elems = list(sample.keys())
for elem in elems:
if 'meta' in elem or 'bbox' in elem:
continue
if elem in self.resolutions:
if self.resolutions[elem] is None:
continue
if isinstance(sample[elem], list):
if sample[elem][0].ndim == 3:
output_size = np.append(self.resolutions[elem], [3, len(sample[elem])])
else:
output_size = np.append(self.resolutions[elem], len(sample[elem]))
tmp = sample[elem]
sample[elem] = np.zeros(output_size, dtype=np.float32)
for ii, crop in enumerate(tmp):
if self.flagvals is None:
sample[elem][..., ii] = helpers.fixed_resize(crop, self.resolutions[elem])
else:
sample[elem][..., ii] = helpers.fixed_resize(crop, self.resolutions[elem], flagval=self.flagvals[elem])
else:
if self.flagvals is None:
sample[elem] = helpers.fixed_resize(sample[elem], self.resolutions[elem])
else:
sample[elem] = helpers.fixed_resize(sample[elem], self.resolutions[elem], flagval=self.flagvals[elem])
if elem == 'normals':
N1, N2, N3 = sample[elem][:, :, 0], sample[elem][:, :, 1], sample[elem][:, :, 2]
Nn = np.sqrt(N1 ** 2 + N2 ** 2 + N3 ** 2) + np.finfo(np.float32).eps
sample[elem][:, :, 0], sample[elem][:, :, 1], sample[elem][:, :, 2] = N1/Nn, N2/Nn, N3/Nn
else:
del sample[elem]
return sample
def __str__(self):
return 'FixedResize:'+str(self.resolutions)
class RandomResize(object):
"""Randomly resize the image and the ground truth to specified scales.
Args:
scales (list): the list of scales
"""
def __init__(self, scales=[0.5, 0.8, 1]):
self.scales = scales
def __call__(self, sample):
# Fixed range of scales
sc = self.scales[random.randint(0, len(self.scales))]
for elem in sample.keys():
if 'meta' in elem or 'bbox' in elem:
continue
tmp = sample[elem]
if ((tmp == 0) | (tmp == 1)).all():
flagval = cv2.INTER_NEAREST
else:
flagval = cv2.INTER_CUBIC
tmp = cv2.resize(tmp, None, fx=sc, fy=sc, interpolation=flagval)
sample[elem] = tmp
return sample
def __str__(self):
return 'RandomResize:'+str(self.scales)
class FixedResizeRatio(object):
"""Fixed resize for the image and the ground truth to specified scale.
Args:
scales (float): the scale
"""
def __init__(self, scale=None, flagvals=None):
self.scale = scale
self.flagvals = flagvals
def __call__(self, sample):
for elem in sample.keys():
if 'meta' in elem:
continue
if elem in self.flagvals:
if self.flagvals[elem] is None:
continue
tmp = sample[elem]
tmp = cv2.resize(tmp, None, fx=self.scale, fy=self.scale, interpolation=self.flagvals[elem])
sample[elem] = tmp
return sample
def __str__(self):
return 'FixedResizeRatio: '+str(self.scale)
class RandomHorizontalFlip(object):
"""Horizontally flip the given image and ground truth randomly with a probability of 0.5."""
def __call__(self, sample):
if random.random() < 0.5:
for elem in sample.keys():
if 'meta' in elem:
continue
else:
tmp = sample[elem]
tmp = cv2.flip(tmp, flipCode=1)
sample[elem] = tmp
if elem == 'normals':
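                        # A horizontal flip mirrors the x axis, so the x component
                        # of the surface normals changes sign as well.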
sample[elem][:, :, 0] *= -1
return sample
def __str__(self):
return 'RandomHorizontalFlip'
class NormalizeImage(object):
"""
Return the given elements between 0 and 1
"""
def __init__(self, norm_elem='image', clip=False):
self.norm_elem = norm_elem
self.clip = clip
def __call__(self, sample):
if isinstance(self.norm_elem, tuple):
for elem in self.norm_elem:
if np.max(sample[elem]) > 1:
sample[elem] /= 255.0
else:
if self.clip:
sample[self.norm_elem] = np.clip(sample[self.norm_elem], 0, 255)
if np.max(sample[self.norm_elem]) > 1:
sample[self.norm_elem] /= 255.0
return sample
def __str__(self):
return 'NormalizeImage'
class ToImage(object):
"""
Return the given elements between 0 and 255
"""
def __init__(self, norm_elem='image', custom_max=255.):
self.norm_elem = norm_elem
self.custom_max = custom_max
def __call__(self, sample):
if isinstance(self.norm_elem, tuple):
for elem in self.norm_elem:
tmp = sample[elem]
sample[elem] = self.custom_max * (tmp - tmp.min()) / (tmp.max() - tmp.min() + 1e-10)
else:
tmp = sample[self.norm_elem]
sample[self.norm_elem] = self.custom_max * (tmp - tmp.min()) / (tmp.max() - tmp.min() + 1e-10)
return sample
def __str__(self):
        return 'ToImage'
class AddIgnoreRegions(object):
"""Add Ignore Regions"""
def __call__(self, sample):
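        # Pixels without valid annotations (zero-norm normals, all-zero human part
        # maps, zero depth) are set to 255 so that they can be ignored by the losses.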
for elem in sample.keys():
tmp = sample[elem]
if elem == 'normals':
# Check areas with norm 0
Nn = np.sqrt(tmp[:, :, 0] ** 2 + tmp[:, :, 1] ** 2 + tmp[:, :, 2] ** 2)
tmp[Nn == 0, :] = 255.
sample[elem] = tmp
elif elem == 'human_parts':
# Check for images without human part annotations
if (tmp == 0).all():
tmp = 255 * np.ones(tmp.shape, dtype=tmp.dtype)
sample[elem] = tmp
elif elem == 'depth':
tmp[tmp == 0] = 255.
sample[elem] = tmp
return sample
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
for elem in sample.keys():
if 'meta' in elem:
continue
elif 'bbox' in elem:
tmp = sample[elem]
sample[elem] = torch.from_numpy(tmp)
continue
tmp = sample[elem]
if tmp.ndim == 2:
tmp = tmp[:, :, np.newaxis]
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
tmp = tmp.transpose((2, 0, 1))
sample[elem] = torch.from_numpy(tmp.astype(np.float32))
return sample
def __str__(self):
return 'ToTensor'
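# Illustrative sketch (not part of the original file): a minimal pipeline that chains the
# transforms defined above on a synthetic sample dict. The keys, shapes and interpolation
# flags are assumptions chosen for demonstration only.
if __name__ == '__main__':
    _sample = {
        'image': (np.random.rand(240, 320, 3) * 255.).astype(np.float32),
        'semseg': np.random.randint(0, 21, (240, 320)).astype(np.float32),
    }
    _pipeline = [
        RandomHorizontalFlip(),
        ScaleNRotate(rots=(-20, 20), scales=(0.9, 1.1),
                     flagvals={'image': cv2.INTER_CUBIC, 'semseg': cv2.INTER_NEAREST}),
        FixedResize(resolutions={'image': (512, 512), 'semseg': (512, 512)},
                    flagvals={'image': cv2.INTER_CUBIC, 'semseg': cv2.INTER_NEAREST}),
        ToTensor(),
    ]
    for _t in _pipeline:
        _sample = _t(_sample)
    # Expected output: image -> (3, 512, 512), semseg -> (1, 512, 512)
    print({k: tuple(v.shape) for k, v in _sample.items()})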
|
astmt-master
|
fblib/dataloaders/custom_transforms.py
|
from .bsds import BSDS500
from .coco import COCOSegmentation
from .fsv import FSVGTA
from .nyud import NYUD_MT, NYUDRaw
from .pascal_context import PASCALContext
from .pascal_voc import VOC12
from .sbd import SBD
from .msra10k import MSRA
from .pascal_sal import PASCALS
__all__ = ['BSDS500', 'COCOSegmentation', 'FSVGTA', 'NYUD_MT',
'NYUDRaw', 'PASCALContext', 'VOC12', 'SBD', 'MSRA', 'PASCALS']
|
astmt-master
|
fblib/dataloaders/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import sys
import errno
import cv2
import hashlib
import tarfile
import numpy as np
import scipy.io as sio
import torch.utils.data as data
from PIL import Image
from six.moves import urllib
from fblib.util.mypath import Path
class SBD(data.Dataset):
URL = 'http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz'
FILE = 'benchmark.tgz'
MD5 = '82b4d87ceb2ed10f6038a1cba92111cb'
BASE_DIR = 'benchmark_RELEASE/dataset'
VOC_CATEGORY_NAMES = ['background',
'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
def __init__(self,
root=Path.db_root_dir('PASCAL'),
download=True,
split='val',
transform=None,
retname=True,
do_semseg=True,
overfit=False,
):
self.root = root
_sbd_root = os.path.join(self.root, self.BASE_DIR)
_inst_dir = os.path.join(_sbd_root, 'inst')
_cat_dir = os.path.join(_sbd_root, 'cls')
_image_dir = os.path.join(_sbd_root, 'img')
if download:
self._download()
self.transform = transform
if isinstance(split, str):
self.split = [split]
else:
split.sort()
self.split = split
self.retname = retname
self.do_semseg = do_semseg
if self.do_semseg:
self.semsegs = []
# train/val/test splits are pre-cut
_splits_dir = os.path.join(_sbd_root)
self.im_ids = []
self.images = []
print("Initializing dataloader for SBD {} set".format(''.join(self.split)))
for splt in self.split:
with open(os.path.join(os.path.join(_splits_dir, splt + '.txt')), "r") as f:
lines = f.read().splitlines()
for ii, line in enumerate(lines):
# Images
_image = os.path.join(_image_dir, line + ".jpg")
assert os.path.isfile(_image)
self.images.append(_image)
self.im_ids.append(line.rstrip('\n'))
# Semantic Segmentation
if self.do_semseg:
_semseg = os.path.join(_cat_dir, line + '.mat')
assert os.path.isfile(_semseg)
self.semsegs.append(_semseg)
if self.do_semseg:
assert (len(self.images) == len(self.semsegs))
        # Overfit to a small subset of images for debugging
if overfit:
n_of = 32
self.im_ids = self.im_ids[:n_of]
self.images = self.images[:n_of]
if self.do_semseg:
self.semsegs = self.semsegs[:n_of]
# Display stats
print('Number of dataset images: {:d}'.format(len(self.images)))
def __getitem__(self, index):
sample = {}
_img = self._load_img(index)
sample['image'] = _img
if self.do_semseg:
_semseg = self._load_semseg(index)
if _semseg is not None:
if _semseg.shape != _img.shape[:2]:
_semseg = cv2.resize(_semseg, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['semseg'] = _semseg
if self.retname:
sample['meta'] = {'image': str(self.im_ids[index]),
'im_size': (_img.shape[0], _img.shape[1])}
if self.transform is not None:
sample = self.transform(sample)
return sample
def __len__(self):
return len(self.images)
def _load_img(self, index):
_img = np.array(Image.open(self.images[index]).convert('RGB')).astype(np.float32)
return _img
def _load_semseg(self, index):
_semseg = sio.loadmat(self.semsegs[index])['GTcls'][0][0][1]
_semseg = np.array(_semseg).astype(np.float32)
return _semseg
def _check_integrity(self):
_fpath = os.path.join(self.root, self.FILE)
if not os.path.isfile(_fpath):
print("{} does not exist".format(_fpath))
return False
_md5c = hashlib.md5(open(_fpath, 'rb').read()).hexdigest()
if _md5c != self.MD5:
print(" MD5({}) did not match MD5({}) expected for {}".format(
_md5c, self.MD5, _fpath))
return False
return True
def _download(self):
_fpath = os.path.join(self.root, self.FILE)
try:
os.makedirs(self.root)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
if self._check_integrity():
print('Files already downloaded and verified')
return
else:
print('Downloading ' + self.URL + ' to ' + _fpath)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> %s %.1f%%' %
(_fpath, float(count * block_size) /
float(total_size) * 100.0))
sys.stdout.flush()
urllib.request.urlretrieve(self.URL, _fpath, _progress)
# extract file
cwd = os.getcwd()
print('\nExtracting the tar file')
tar = tarfile.open(_fpath)
os.chdir(self.root)
tar.extractall()
tar.close()
os.chdir(cwd)
print('Done!')
def get_img_size(self, idx=0):
        img = Image.open(self.images[idx])
return list(reversed(img.size))
def __str__(self):
return 'SBD(split=' + str(self.split) + ')'
if __name__ == '__main__':
from matplotlib import pyplot as plt
dataset = SBD(split=['train', 'val'], retname=True, do_semseg=True)
for i, sample in enumerate(dataset):
plt.imshow(sample['image']/255.)
plt.show()
plt.imshow(sample['semseg'])
plt.show()
|
astmt-master
|
fblib/dataloaders/sbd.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import print_function
import torch.utils.data as data
from PIL import Image
import os
import os.path
import errno
import numpy as np
import torch
import codecs
import random
from fblib.util.mypath import Path
class MNIST(data.Dataset):
"""`MNIST <http://yann.lecun.com/exdb/mnist/>`_ Dataset.
Args:
root (string): Root directory of dataset where ``processed/training.pt``
and ``processed/test.pt`` exist.
train (bool, optional): If True, creates dataset from ``training.pt``,
otherwise from ``test.pt``.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
"""
urls = [
'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz',
]
raw_folder = 'raw'
processed_folder = 'processed'
training_file = 'training.pt'
test_file = 'test.pt'
def __init__(self, root=Path.db_root_dir('MNIST'), train=True, transform=None, target_transform=None, download=False,
multitask=False):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.train = train # training set or test set
self.multitask = multitask
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found.' +
' You can use download=True to download it')
if self.train:
self.train_data, self.train_labels = torch.load(
os.path.join(self.root, self.processed_folder, self.training_file))
else:
self.test_data, self.test_labels = torch.load(
os.path.join(self.root, self.processed_folder, self.test_file))
if multitask:
self._process_labels()
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
if not self.multitask:
if self.train:
img, target = self.train_data[index], self.train_labels[index]
else:
img, target = self.test_data[index], self.test_labels[index]
else:
if self.train:
img, target, orig = self.train_data[index], self.train_labels_multitask[index], self.train_labels[index]
else:
img, target, orig = self.test_data[index], self.test_labels_multitask[index], self.test_labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img.numpy(), mode='L')
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
if not self.multitask:
return img, target
else:
return img, target, orig
def __len__(self):
if self.train:
return len(self.train_data)
else:
return len(self.test_data)
def _process_labels(self):
elem = self.train_labels if self.train else self.test_labels
temp = [[0] * 2 for i in range(len(elem))]
for i in range(len(elem)):
# Create two conflicting tasks
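            # Task 0: the digit is >= 5; task 1: the digit is odd.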
if elem[i] >= 5:
temp[i][0] = 1
if elem[i] % 2 == 1:
temp[i][1] = 1
if self.train:
self.train_labels_multitask = temp
else:
self.test_labels_multitask = temp
def _check_exists(self):
return os.path.exists(os.path.join(self.root, self.processed_folder, self.training_file)) and \
os.path.exists(os.path.join(self.root, self.processed_folder, self.test_file))
def download(self):
"""Download the MNIST data if it doesn't exist in processed_folder already."""
from six.moves import urllib
import gzip
if self._check_exists():
return
# download files
try:
os.makedirs(os.path.join(self.root, self.raw_folder))
os.makedirs(os.path.join(self.root, self.processed_folder))
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
for url in self.urls:
print('Downloading ' + url)
data = urllib.request.urlopen(url)
filename = url.rpartition('/')[2]
file_path = os.path.join(self.root, self.raw_folder, filename)
with open(file_path, 'wb') as f:
f.write(data.read())
with open(file_path.replace('.gz', ''), 'wb') as out_f, \
gzip.GzipFile(file_path) as zip_f:
out_f.write(zip_f.read())
os.unlink(file_path)
# process and save as torch files
print('Processing...')
training_set = (
read_image_file(os.path.join(self.root, self.raw_folder, 'train-images-idx3-ubyte')),
read_label_file(os.path.join(self.root, self.raw_folder, 'train-labels-idx1-ubyte'))
)
test_set = (
read_image_file(os.path.join(self.root, self.raw_folder, 't10k-images-idx3-ubyte')),
read_label_file(os.path.join(self.root, self.raw_folder, 't10k-labels-idx1-ubyte'))
)
with open(os.path.join(self.root, self.processed_folder, self.training_file), 'wb') as f:
torch.save(training_set, f)
with open(os.path.join(self.root, self.processed_folder, self.test_file), 'wb') as f:
torch.save(test_set, f)
print('Done!')
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
tmp = 'train' if self.train is True else 'test'
fmt_str += ' Split: {}\n'.format(tmp)
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
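# Illustrative usage sketch (not part of the original file): with multitask=True each item
# is (img, [digit >= 5, digit is odd], original_digit). The torchvision transform below is
# an assumption chosen for demonstration only.
#
#     import torchvision.transforms as transforms
#     db = MNIST(train=True, download=True, multitask=True,
#                transform=transforms.ToTensor())
#     img, tasks, digit = db[0]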
class FashionMNIST(MNIST):
"""`Fashion-MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.
Args:
root (string): Root directory of dataset where ``processed/training.pt``
and ``processed/test.pt`` exist.
train (bool, optional): If True, creates dataset from ``training.pt``,
otherwise from ``test.pt``.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
"""
urls = [
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz',
]
class EMNIST(MNIST):
"""`EMNIST <https://www.nist.gov/itl/iad/image-group/emnist-dataset/>`_ Dataset.
Args:
root (string): Root directory of dataset where ``processed/training.pt``
and ``processed/test.pt`` exist.
split (string): The dataset has 6 different splits: ``byclass``, ``bymerge``,
``balanced``, ``letters``, ``digits`` and ``mnist``. This argument specifies
which one to use.
train (bool, optional): If True, creates dataset from ``training.pt``,
otherwise from ``test.pt``.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
"""
url = 'http://www.itl.nist.gov/iaui/vip/cs_links/EMNIST/gzip.zip'
splits = ('byclass', 'bymerge', 'balanced', 'letters', 'digits', 'mnist')
def __init__(self, root, split, **kwargs):
if split not in self.splits:
raise ValueError('Split "{}" not found. Valid splits are: {}'.format(
split, ', '.join(self.splits),
))
self.split = split
self.training_file = self._training_file(split)
self.test_file = self._test_file(split)
super(EMNIST, self).__init__(root, **kwargs)
def _training_file(self, split):
return 'training_{}.pt'.format(split)
def _test_file(self, split):
return 'test_{}.pt'.format(split)
def download(self):
"""Download the EMNIST data if it doesn't exist in processed_folder already."""
from six.moves import urllib
import gzip
import shutil
import zipfile
if self._check_exists():
return
# download files
try:
os.makedirs(os.path.join(self.root, self.raw_folder))
os.makedirs(os.path.join(self.root, self.processed_folder))
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
print('Downloading ' + self.url)
data = urllib.request.urlopen(self.url)
filename = self.url.rpartition('/')[2]
raw_folder = os.path.join(self.root, self.raw_folder)
file_path = os.path.join(raw_folder, filename)
with open(file_path, 'wb') as f:
f.write(data.read())
print('Extracting zip archive')
with zipfile.ZipFile(file_path) as zip_f:
zip_f.extractall(raw_folder)
os.unlink(file_path)
gzip_folder = os.path.join(raw_folder, 'gzip')
for gzip_file in os.listdir(gzip_folder):
if gzip_file.endswith('.gz'):
print('Extracting ' + gzip_file)
with open(os.path.join(raw_folder, gzip_file.replace('.gz', '')), 'wb') as out_f, \
gzip.GzipFile(os.path.join(gzip_folder, gzip_file)) as zip_f:
out_f.write(zip_f.read())
shutil.rmtree(gzip_folder)
# process and save as torch files
for split in self.splits:
print('Processing ' + split)
training_set = (
read_image_file(os.path.join(raw_folder, 'emnist-{}-train-images-idx3-ubyte'.format(split))),
read_label_file(os.path.join(raw_folder, 'emnist-{}-train-labels-idx1-ubyte'.format(split)))
)
test_set = (
read_image_file(os.path.join(raw_folder, 'emnist-{}-test-images-idx3-ubyte'.format(split))),
read_label_file(os.path.join(raw_folder, 'emnist-{}-test-labels-idx1-ubyte'.format(split)))
)
with open(os.path.join(self.root, self.processed_folder, self._training_file(split)), 'wb') as f:
torch.save(training_set, f)
with open(os.path.join(self.root, self.processed_folder, self._test_file(split)), 'wb') as f:
torch.save(test_set, f)
print('Done!')
class MultiMNIST(MNIST):
def __init__(self, root=Path.db_root_dir('MNIST'), train=True, transform=None, target_transform=None, download=False):
super(MultiMNIST, self).__init__(root, train, transform, target_transform, download, multitask=False)
def __getitem__(self, index1):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
if self.train:
image_lst = self.train_data
target_lst = self.train_labels
else:
image_lst = self.test_data
target_lst = self.test_labels
if self.train:
index2 = random.randint(0, self.__len__() - 1)
else:
index2 = self.__len__() - 1 - index1
img1, target1 = image_lst[index1], target_lst[index1]
img2, target2 = image_lst[index2], target_lst[index2]
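        # Overlay the two digits: img1 is shifted towards the top-left and img2 towards
        # the bottom-right by `shift` pixels; the overlap is merged with an element-wise max.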
shift = 2
img = torch.zeros(img1.size())
img[:28-shift, :28-shift] = img1[shift:, shift:]
img[shift:, shift:] = torch.max(img[shift:, shift:], img2[:28-shift, :28-shift].float())
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img.numpy().astype(np.uint8), mode='L')
if self.transform is not None:
img = self.transform(img)
return img, target1, target2
def get_int(b):
return int(codecs.encode(b, 'hex'), 16)
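# The readers below parse the raw IDX files: a big-endian header (magic number 2049 for
# label files, 2051 for image files, followed by the item count and, for images, the
# row/column counts) and then the raw uint8 payload.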
def read_label_file(path):
with open(path, 'rb') as f:
data = f.read()
assert get_int(data[:4]) == 2049
length = get_int(data[4:8])
parsed = np.frombuffer(data, dtype=np.uint8, offset=8)
return torch.from_numpy(parsed).view(length).long()
def read_image_file(path):
with open(path, 'rb') as f:
data = f.read()
assert get_int(data[:4]) == 2051
length = get_int(data[4:8])
num_rows = get_int(data[8:12])
num_cols = get_int(data[12:16])
parsed = np.frombuffer(data, dtype=np.uint8, offset=16)
return torch.from_numpy(parsed).view(length, num_rows, num_cols)
if __name__ == '__main__':
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from matplotlib import pyplot as plt
trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))])
db_train = MultiMNIST(train=True, transform=trans, download=True)
db_test = MultiMNIST(train=False, transform=trans, download=True)
trainloader = DataLoader(db_train, batch_size=64, shuffle=True, num_workers=2, pin_memory=True)
for ii, (img, label1, label2) in enumerate(trainloader):
plt.imshow(img[0, 0, :, :])
plt.show()
|
astmt-master
|
fblib/dataloaders/mnist_multitask.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import sys
import tarfile
from PIL import Image
import numpy as np
from glob import glob
import scipy.io as sio
import torch.utils.data as data
from six.moves import urllib
from fblib.util.mypath import Path
class BSDS500(data.Dataset):
"""
BSDS500 datasets for edge detection.
"""
URL = 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL/BSDS500.tgz'
FILE = 'BSDS500.tgz'
def __init__(self,
root=Path.db_root_dir('BSDS500'),
download=True,
split=['train', 'val'],
transform=None,
retname=True,
n_votes=1,
overfit=False):
if download:
self._download()
self.transform = transform
self.retname = retname
self.n_votes = n_votes
self.root = root
self.gt_dir = os.path.join(self.root, 'data', 'groundTruth')
self.image_dir = os.path.join(self.root, 'data', 'images')
_splits_dir = os.path.join(self.root, 'lists')
if not os.path.exists(os.path.join(_splits_dir)):
os.mkdir(os.path.join(_splits_dir))
self.split = split
self._get_images_trainval()
if isinstance(self.split, str):
self.split = [self.split]
self.images = []
self.gts = []
self.im_ids = []
for sp in self.split:
with open(os.path.join(os.path.join(_splits_dir, sp + '.txt')), "r") as f:
lines = f.readlines()
for line in lines:
line = line.strip()
_image = os.path.join(self.image_dir, sp, line + ".jpg")
_gt = os.path.join(self.gt_dir, sp, line + ".mat")
assert os.path.isfile(_image)
assert os.path.isfile(_gt)
self.im_ids.append(line)
self.images.append(_image)
self.gts.append(_gt)
assert (len(self.images) == len(self.gts) == len(self.im_ids))
if overfit:
n_of = 16
self.images = self.images[:n_of]
self.gts = self.gts[:n_of]
self.im_ids = self.im_ids[:n_of]
# Display stats
print('Number of images: {:d}'.format(len(self.im_ids)))
def __getitem__(self, index):
sample = {}
_img = self._load_img(index)
sample['image'] = _img
_edge = self._load_edge(index)
sample['edge'] = _edge
if self.retname:
sample['meta'] = {'image': str(self.im_ids[index]),
'im_size': (_img.shape[0], _img.shape[1])}
if self.transform is not None:
sample = self.transform(sample)
return sample
def __len__(self):
return len(self.im_ids)
def _get_images_trainval(self):
for sp in self.split:
if os.path.isfile(os.path.join(self.root, 'lists', sp + '.txt')):
continue
img_list = glob(os.path.join(self.gt_dir, sp, '*.mat'))
img_list = sorted([x.split('/')[-1].split('.')[-2] for x in img_list])
split_f = os.path.join(self.root, 'lists', sp + '.txt')
with open(split_f, 'w') as f:
for img in img_list:
assert os.path.isfile(os.path.join(self.image_dir, sp, img + '.jpg'))
f.write('{}\n'.format(img))
def _load_img(self, index):
# Read Image
_img = np.array(Image.open(self.images[index]).convert('RGB')).astype(np.float32)
return _img
def _load_edge(self, index):
# Read Target object
_gt_mat = sio.loadmat(self.gts[index])
_target = np.zeros(_gt_mat['groundTruth'][0][0]['Boundaries'][0][0].shape)
for i in range(len(_gt_mat['groundTruth'][0])):
_target += _gt_mat['groundTruth'][0][i]['Boundaries'][0][0]
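        # Each image has several human annotations; _target accumulates the per-annotator
        # boundary maps. With n_votes set, a pixel is an edge if at least n_votes annotators
        # marked it; otherwise the accumulated map is normalized to [0, 1].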
if self.n_votes and self.n_votes > 0:
_target = (_target >= self.n_votes).astype(np.float32)
else:
_target = (_target / max(1e-8, _target.max())).astype(np.float32)
return _target
def _download(self):
_fpath = os.path.join(Path.db_root_dir(), self.FILE)
if os.path.isfile(_fpath):
print('Files already downloaded')
return
else:
print('Downloading ' + self.URL + ' to ' + _fpath)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> %s %.1f%%' %
(_fpath, float(count * block_size) /
float(total_size) * 100.0))
sys.stdout.flush()
urllib.request.urlretrieve(self.URL, _fpath, _progress)
# extract file
cwd = os.getcwd()
print('\nExtracting tar file')
tar = tarfile.open(_fpath)
os.chdir(Path.db_root_dir())
tar.extractall()
tar.close()
os.chdir(cwd)
print('Done!')
def __str__(self):
return 'BSDS500(split=' + str(self.split) + ', n_votes=' + str(self.n_votes) + ')'
if __name__ == '__main__':
from matplotlib import pyplot as plt
dataset = BSDS500()
for i, sample in enumerate(dataset):
plt.imshow(sample['image'] / 255.)
plt.show()
plt.imshow(sample['edge'])
plt.show()
|
astmt-master
|
fblib/dataloaders/bsds.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import sys
import errno
import cv2
import hashlib
import tarfile
import numpy as np
import torch.utils.data as data
from PIL import Image
from six.moves import urllib
from fblib.util.mypath import Path
class VOC12(data.Dataset):
URL = "http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar"
FILE = "VOCtrainval_11-May-2012.tar"
MD5 = '6cd6e144f989b92b3379bac3b3de84fd'
BASE_DIR = 'VOCdevkit/VOC2012'
VOC_CATEGORY_NAMES = ['background',
'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
def __init__(self,
root=Path.db_root_dir('PASCAL'),
download=True,
split='val',
transform=None,
area_thres=0,
retname=True,
suppress_void_pixels=False,
do_semseg=True,
overfit=False,
):
self.root = root
_voc_root = os.path.join(self.root, self.BASE_DIR)
_inst_dir = os.path.join(_voc_root, 'SegmentationObject')
_cat_dir = os.path.join(_voc_root, 'SegmentationClass')
_image_dir = os.path.join(_voc_root, 'JPEGImages')
if download:
self._download()
self.transform = transform
if isinstance(split, str):
self.split = [split]
else:
split.sort()
self.split = split
self.area_thres = area_thres
self.retname = retname
self.suppress_void_pixels = suppress_void_pixels
self.do_semseg = do_semseg
if self.do_semseg:
self.semsegs = []
# train/val/test splits are pre-cut
_splits_dir = os.path.join(_voc_root, 'ImageSets', 'Segmentation')
self.im_ids = []
self.images = []
print("Initializing dataloader for PASCAL VOC12 {} set".format(''.join(self.split)))
for splt in self.split:
with open(os.path.join(os.path.join(_splits_dir, splt + '.txt')), "r") as f:
lines = f.read().splitlines()
for ii, line in enumerate(lines):
# Images
_image = os.path.join(_image_dir, line + ".jpg")
assert os.path.isfile(_image)
self.images.append(_image)
self.im_ids.append(line.rstrip('\n'))
# Semantic Segmentation
if self.do_semseg:
_semseg = os.path.join(_cat_dir, line + '.png')
assert os.path.isfile(_semseg)
self.semsegs.append(_semseg)
if self.do_semseg:
assert (len(self.images) == len(self.semsegs))
        # Overfit to a small subset of images for debugging
if overfit:
n_of = 32
self.im_ids = self.im_ids[:n_of]
self.images = self.images[:n_of]
if self.do_semseg:
self.semsegs = self.semsegs[:n_of]
# Display stats
print('Number of dataset images: {:d}'.format(len(self.images)))
def __getitem__(self, index):
sample = {}
_img = self._load_img(index)
sample['image'] = _img
if self.do_semseg:
_semseg = self._load_semseg(index)
if _semseg is not None:
if _semseg.shape != _img.shape[:2]:
_semseg = cv2.resize(_semseg, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['semseg'] = _semseg
if self.retname:
sample['meta'] = {'image': str(self.im_ids[index]),
'im_size': (_img.shape[0], _img.shape[1])}
if self.transform is not None:
sample = self.transform(sample)
return sample
def __len__(self):
return len(self.images)
def _check_integrity(self):
_fpath = os.path.join(self.root, self.FILE)
if not os.path.isfile(_fpath):
print("{} does not exist".format(_fpath))
return False
_md5c = hashlib.md5(open(_fpath, 'rb').read()).hexdigest()
if _md5c != self.MD5:
print(" MD5({}) did not match MD5({}) expected for {}".format(
_md5c, self.MD5, _fpath))
return False
return True
def _download(self):
_fpath = os.path.join(self.root, self.FILE)
try:
os.makedirs(self.root)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
if self._check_integrity():
print('Files already downloaded and verified')
return
else:
print('Downloading ' + self.URL + ' to ' + _fpath)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> %s %.1f%%' %
(_fpath, float(count * block_size) /
float(total_size) * 100.0))
sys.stdout.flush()
urllib.request.urlretrieve(self.URL, _fpath, _progress)
# extract file
cwd = os.getcwd()
print('\nExtracting the tar file')
tar = tarfile.open(_fpath)
os.chdir(self.root)
tar.extractall()
tar.close()
os.chdir(cwd)
print('Done!')
def _load_img(self, index):
_img = np.array(Image.open(self.images[index]).convert('RGB')).astype(np.float32)
return _img
def _load_semseg(self, index):
_semseg = np.array(Image.open(self.semsegs[index])).astype(np.float32)
if self.suppress_void_pixels:
_semseg[_semseg == 255] = 0
return _semseg
def get_img_size(self, idx=0):
        img = Image.open(self.images[idx])
return list(reversed(img.size))
def __str__(self):
return 'VOC12(split=' + str(self.split) + ',area_thres=' + str(self.area_thres) + ')'
if __name__ == '__main__':
from matplotlib import pyplot as plt
dataset = VOC12(split='train', retname=True, do_semseg=True, suppress_void_pixels=True)
for i, sample in enumerate(dataset):
plt.imshow(sample['image']/255.)
plt.show()
plt.imshow(sample['semseg'])
plt.show()
|
astmt-master
|
fblib/dataloaders/pascal_voc.py
|