repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string)
---|---|---|---|---|---|---
VLC-BERT | VLC-BERT-master/vqa/modules/resnet_vlbert_for_vqa.py | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from external.pytorch_pretrained_bert import BertTokenizer
from external.pytorch_pretrained_bert.modeling import BertPredictionHeadTransform
from common.module import Module
from common.fast_rcnn import FastRCNN
from common.visual_linguistic_bert import VisualLinguisticBert
BERT_WEIGHTS_NAME = 'pytorch_model.bin'
class ResNetVLBERT(Module):
def __init__(self, config):
super(ResNetVLBERT, self).__init__(config)
self.enable_cnn_reg_loss = config.NETWORK.ENABLE_CNN_REG_LOSS
if not config.NETWORK.BLIND:
self.image_feature_extractor = FastRCNN(config,
average_pool=True,
final_dim=config.NETWORK.IMAGE_FINAL_DIM,
enable_cnn_reg_loss=self.enable_cnn_reg_loss)
if config.NETWORK.VLBERT.object_word_embed_mode == 1:
self.object_linguistic_embeddings = nn.Embedding(81, config.NETWORK.VLBERT.hidden_size)
elif config.NETWORK.VLBERT.object_word_embed_mode == 2:
self.object_linguistic_embeddings = nn.Embedding(1, config.NETWORK.VLBERT.hidden_size)
elif config.NETWORK.VLBERT.object_word_embed_mode == 3:
self.object_linguistic_embeddings = None
else:
raise NotImplementedError
self.image_feature_bn_eval = config.NETWORK.IMAGE_FROZEN_BN
self.tokenizer = BertTokenizer.from_pretrained(config.NETWORK.BERT_MODEL_NAME)
language_pretrained_model_path = None
if config.NETWORK.BERT_PRETRAINED != '':
language_pretrained_model_path = '{}-{:04d}.model'.format(config.NETWORK.BERT_PRETRAINED,
config.NETWORK.BERT_PRETRAINED_EPOCH)
elif os.path.isdir(config.NETWORK.BERT_MODEL_NAME):
weight_path = os.path.join(config.NETWORK.BERT_MODEL_NAME, BERT_WEIGHTS_NAME)
if os.path.isfile(weight_path):
language_pretrained_model_path = weight_path
self.language_pretrained_model_path = language_pretrained_model_path
if language_pretrained_model_path is None:
print("Warning: no pretrained language model found, training from scratch!!!")
self.vlbert = VisualLinguisticBert(config.NETWORK.VLBERT,
language_pretrained_model_path=language_pretrained_model_path)
# self.hm_out = nn.Linear(config.NETWORK.VLBERT.hidden_size, config.NETWORK.VLBERT.hidden_size)
# self.hi_out = nn.Linear(config.NETWORK.VLBERT.hidden_size, config.NETWORK.VLBERT.hidden_size)
dim = config.NETWORK.VLBERT.hidden_size
if config.NETWORK.CLASSIFIER_TYPE == "2fc":
self.final_mlp = torch.nn.Sequential(
torch.nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
torch.nn.Linear(dim, config.NETWORK.CLASSIFIER_HIDDEN_SIZE),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
torch.nn.Linear(config.NETWORK.CLASSIFIER_HIDDEN_SIZE, config.DATASET.ANSWER_VOCAB_SIZE),
)
elif config.NETWORK.CLASSIFIER_TYPE == "1fc":
self.final_mlp = torch.nn.Sequential(
torch.nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
torch.nn.Linear(dim, config.DATASET.ANSWER_VOCAB_SIZE)
)
elif config.NETWORK.CLASSIFIER_TYPE == 'mlm':
transform = BertPredictionHeadTransform(config.NETWORK.VLBERT)
linear = nn.Linear(config.NETWORK.VLBERT.hidden_size, config.DATASET.ANSWER_VOCAB_SIZE)
self.final_mlp = nn.Sequential(
transform,
nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
linear
)
else:
raise ValueError("Not support classifier type: {}!".format(config.NETWORK.CLASSIFIER_TYPE))
# init weights
self.init_weight()
self.fix_params()
def init_weight(self):
# self.hm_out.weight.data.normal_(mean=0.0, std=0.02)
# self.hm_out.bias.data.zero_()
# self.hi_out.weight.data.normal_(mean=0.0, std=0.02)
# self.hi_out.bias.data.zero_()
self.image_feature_extractor.init_weight()
if self.object_linguistic_embeddings is not None:
self.object_linguistic_embeddings.weight.data.normal_(mean=0.0, std=0.02)
for m in self.final_mlp.modules():
if isinstance(m, torch.nn.Linear):
torch.nn.init.xavier_uniform_(m.weight)
torch.nn.init.constant_(m.bias, 0)
if self.config.NETWORK.CLASSIFIER_TYPE == 'mlm':
language_pretrained = torch.load(self.language_pretrained_model_path)
mlm_transform_state_dict = {}
pretrain_keys = []
for k, v in language_pretrained.items():
if k.startswith('cls.predictions.transform.'):
pretrain_keys.append(k)
k_ = k[len('cls.predictions.transform.'):]
if 'gamma' in k_:
k_ = k_.replace('gamma', 'weight')
if 'beta' in k_:
k_ = k_.replace('beta', 'bias')
mlm_transform_state_dict[k_] = v
print("loading pretrained classifier transform keys: {}.".format(pretrain_keys))
self.final_mlp[0].load_state_dict(mlm_transform_state_dict)
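            # Older pytorch_pretrained_bert checkpoints store LayerNorm parameters as
            # 'gamma'/'beta', while the transform module here expects 'weight'/'bias';
            # the key rewriting above bridges the two naming conventions before
            # load_state_dict is called.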
def train(self, mode=True):
super(ResNetVLBERT, self).train(mode)
# turn some frozen layers to eval mode
if self.image_feature_bn_eval:
self.image_feature_extractor.bn_eval()
def fix_params(self):
pass
def _collect_obj_reps(self, span_tags, object_reps):
"""
Collect span-level object representations
:param span_tags: [batch_size, ..leading_dims.., L]
:param object_reps: [batch_size, max_num_objs_per_batch, obj_dim]
:return:
"""
span_tags_fixed = torch.clamp(span_tags, min=0) # In case there were masked values here
row_id = span_tags_fixed.new_zeros(span_tags_fixed.shape)
row_id_broadcaster = torch.arange(0, row_id.shape[0], step=1, device=row_id.device)[:, None]
        # Add extra dimensions to the row broadcaster so it matches row_id
leading_dims = len(span_tags.shape) - 2
for i in range(leading_dims):
row_id_broadcaster = row_id_broadcaster[..., None]
row_id += row_id_broadcaster
return object_reps[row_id.view(-1), span_tags_fixed.view(-1)].view(*span_tags_fixed.shape, -1)
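    # Added illustration (not part of the original VLC-BERT code): a minimal,
    # self-contained sketch of what _collect_obj_reps computes, using hypothetical
    # shapes. Every text token picks up the visual feature of the object it is
    # grounded to; masked positions (negative tags) are clamped to object 0, which
    # is the whole-image box when add_image_as_a_box is enabled.
    @staticmethod
    def _collect_obj_reps_example():
        span_tags = torch.tensor([[0, 2, -1, 1],
                                  [1, 0, 0, -1]])                                  # [batch=2, L=4]
        object_reps = torch.arange(2 * 3 * 4, dtype=torch.float32).view(2, 3, 4)   # [2, objs=3, dim=4]
        span_tags_fixed = torch.clamp(span_tags, min=0)
        row_id = torch.arange(2)[:, None].expand_as(span_tags_fixed)
        gathered = object_reps[row_id.reshape(-1), span_tags_fixed.reshape(-1)]
        return gathered.view(2, 4, 4)                                              # [batch, L, obj_dim]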
def prepare_text_from_qa(self, question, question_tags, question_mask, answer, answer_tags, answer_mask):
batch_size, max_q_len = question.shape
_, max_a_len = answer.shape
max_len = (question_mask.sum(1) + answer_mask.sum(1)).max() + 3
cls_id, sep_id = self.tokenizer.convert_tokens_to_ids(['[CLS]', '[SEP]'])
q_end = 1 + question_mask.sum(1, keepdim=True)
a_end = q_end + 1 + answer_mask.sum(1, keepdim=True)
input_ids = torch.zeros((batch_size, max_len), dtype=question.dtype, device=question.device)
input_mask = torch.ones((batch_size, max_len), dtype=torch.uint8, device=question.device)
input_type_ids = torch.zeros((batch_size, max_len), dtype=question.dtype, device=question.device)
text_tags = input_type_ids.new_zeros((batch_size, max_len))
grid_i, grid_j = torch.meshgrid(torch.arange(batch_size, device=question.device),
torch.arange(max_len, device=question.device))
input_mask[grid_j > a_end] = 0
input_type_ids[(grid_j > q_end) & (grid_j <= a_end)] = 1
q_input_mask = (grid_j > 0) & (grid_j < q_end)
a_input_mask = (grid_j > q_end) & (grid_j < a_end)
input_ids[:, 0] = cls_id
input_ids[grid_j == q_end] = sep_id
input_ids[grid_j == a_end] = sep_id
input_ids[q_input_mask] = question[question_mask]
input_ids[a_input_mask] = answer[answer_mask]
text_tags[q_input_mask] = question_tags[question_mask]
text_tags[a_input_mask] = answer_tags[answer_mask]
return input_ids, input_type_ids, text_tags, input_mask, (a_end - 1).squeeze(1)
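    # Added sketch (not part of the original code): the sequence layout produced by
    # prepare_text_from_qa for one hypothetical example with three question tokens.
    # The single answer slot holds a [MASK] token whose final hidden state is later
    # fed to the classifier; ans_pos points at that position.
    @staticmethod
    def _prepare_text_layout_example():
        cls_id, sep_id, mask_id = 101, 102, 103          # hypothetical token ids
        question = [2054, 3609, 2003]                    # hypothetical question token ids
        input_ids = [cls_id] + question + [sep_id, mask_id, sep_id]
        token_type_ids = [0] * (len(question) + 2) + [1, 1]
        ans_pos = len(input_ids) - 2                     # index of [MASK], i.e. a_end - 1
        return input_ids, token_type_ids, ans_pos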
def train_forward(self,
image,
boxes,
im_info,
question,
label,
):
###########################################
# visual feature extraction
images = image
box_mask = (boxes[:, :, 0] > - 1.5)
max_len = int(box_mask.sum(1).max().item())
box_mask = box_mask[:, :max_len]
boxes = boxes[:, :max_len]
obj_reps = self.image_feature_extractor(images=images,
boxes=boxes,
box_mask=box_mask,
im_info=im_info,
classes=None,
segms=None)
question_ids = question
question_tags = question.new_zeros(question_ids.shape)
question_mask = (question > 0.5)
answer_ids = question_ids.new_zeros((question_ids.shape[0], 1)).fill_(
self.tokenizer.convert_tokens_to_ids(['[MASK]'])[0])
answer_mask = question_mask.new_zeros(answer_ids.shape).fill_(1)
answer_tags = question_tags.new_zeros(answer_ids.shape)
############################################
# prepare text
text_input_ids, text_token_type_ids, text_tags, text_mask, ans_pos = self.prepare_text_from_qa(question_ids,
question_tags,
question_mask,
answer_ids,
answer_tags,
answer_mask)
if self.config.NETWORK.NO_GROUNDING:
obj_rep_zeroed = obj_reps['obj_reps'].new_zeros(obj_reps['obj_reps'].shape)
text_tags.zero_()
text_visual_embeddings = self._collect_obj_reps(text_tags, obj_rep_zeroed)
else:
text_visual_embeddings = self._collect_obj_reps(text_tags, obj_reps['obj_reps'])
assert self.config.NETWORK.VLBERT.object_word_embed_mode == 2
object_linguistic_embeddings = self.object_linguistic_embeddings(
boxes.new_zeros((boxes.shape[0], boxes.shape[1])).long()
)
object_vl_embeddings = torch.cat((obj_reps['obj_reps'], object_linguistic_embeddings), -1)
###########################################
# Visual Linguistic BERT
hidden_states, hc = self.vlbert(text_input_ids,
text_token_type_ids,
text_visual_embeddings,
text_mask,
object_vl_embeddings,
box_mask,
output_all_encoded_layers=False)
_batch_inds = torch.arange(question.shape[0], device=question.device)
hm = hidden_states[_batch_inds, ans_pos]
# hm = F.tanh(self.hm_out(hidden_states[_batch_inds, ans_pos]))
# hi = F.tanh(self.hi_out(hidden_states[_batch_inds, ans_pos + 2]))
###########################################
outputs = {}
# classifier
# logits = self.final_mlp(hc * hm * hi)
# logits = self.final_mlp(hc)
logits = self.final_mlp(hm)
# loss
ans_loss = F.binary_cross_entropy_with_logits(logits, label) * label.size(1)
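        # binary_cross_entropy_with_logits averages over both the batch and the
        # answer-vocabulary dimensions, so multiplying by label.size(1) restores a
        # per-example sum over the answer vocabulary, averaged over the batch (the
        # usual convention for VQA soft targets).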
outputs.update({'label_logits': logits,
'label': label,
'ans_loss': ans_loss})
loss = ans_loss.mean()
return outputs, loss
def inference_forward(self,
image,
boxes,
im_info,
question):
###########################################
# visual feature extraction
images = image
box_mask = (boxes[:, :, 0] > - 1.5)
max_len = int(box_mask.sum(1).max().item())
box_mask = box_mask[:, :max_len]
boxes = boxes[:, :max_len]
obj_reps = self.image_feature_extractor(images=images,
boxes=boxes,
box_mask=box_mask,
im_info=im_info,
classes=None,
segms=None)
question_ids = question
question_tags = question.new_zeros(question_ids.shape)
question_mask = (question > 0.5)
answer_ids = question_ids.new_zeros((question_ids.shape[0], 1)).fill_(
self.tokenizer.convert_tokens_to_ids(['[MASK]'])[0])
answer_mask = question_mask.new_zeros(answer_ids.shape).fill_(1)
answer_tags = question_tags.new_zeros(answer_ids.shape)
############################################
# prepare text
text_input_ids, text_token_type_ids, text_tags, text_mask, ans_pos = self.prepare_text_from_qa(question_ids,
question_tags,
question_mask,
answer_ids,
answer_tags,
answer_mask)
if self.config.NETWORK.NO_GROUNDING:
obj_rep_zeroed = obj_reps['obj_reps'].new_zeros(obj_reps['obj_reps'].shape)
text_tags.zero_()
text_visual_embeddings = self._collect_obj_reps(text_tags, obj_rep_zeroed)
else:
text_visual_embeddings = self._collect_obj_reps(text_tags, obj_reps['obj_reps'])
assert self.config.NETWORK.VLBERT.object_word_embed_mode == 2
object_linguistic_embeddings = self.object_linguistic_embeddings(
boxes.new_zeros((boxes.shape[0], boxes.shape[1])).long()
)
object_vl_embeddings = torch.cat((obj_reps['obj_reps'], object_linguistic_embeddings), -1)
###########################################
# Visual Linguistic BERT
hidden_states, hc = self.vlbert(text_input_ids,
text_token_type_ids,
text_visual_embeddings,
text_mask,
object_vl_embeddings,
box_mask,
output_all_encoded_layers=False)
_batch_inds = torch.arange(question.shape[0], device=question.device)
hm = hidden_states[_batch_inds, ans_pos]
# hm = F.tanh(self.hm_out(hidden_states[_batch_inds, ans_pos]))
# hi = F.tanh(self.hi_out(hidden_states[_batch_inds, ans_pos + 2]))
###########################################
outputs = {}
# classifier
# logits = self.final_mlp(hc * hm * hi)
# logits = self.final_mlp(hc)
logits = self.final_mlp(hm)
outputs.update({'label_logits': logits})
return outputs
| 16,341 | 47.064706 | 117 | py |
VLC-BERT | VLC-BERT-master/vqa/data/collate_batch.py | import torch
from common.utils.clip_pad import *
class BatchCollator(object):
def __init__(self, dataset, append_ind=False):
self.dataset = dataset
self.test_mode = self.dataset.test_mode
self.data_names = self.dataset.data_names
self.append_ind = append_ind
def __call__(self, batch):
if not isinstance(batch, list):
batch = list(batch)
if batch[0][self.data_names.index('image')] is not None:
max_shape = tuple(max(s) for s in zip(*[data[self.data_names.index('image')].shape for data in batch]))
image_none = False
else:
image_none = True
max_boxes = max([data[self.data_names.index('boxes')].shape[0] for data in batch])
max_question_length = max([len(data[self.data_names.index('question')]) for data in batch])
for i, ibatch in enumerate(batch):
out = {}
if image_none:
out['image'] = None
else:
image = ibatch[self.data_names.index('image')]
out['image'] = clip_pad_images(image, max_shape, pad=0)
boxes = ibatch[self.data_names.index('boxes')]
out['boxes'] = clip_pad_boxes(boxes, max_boxes, pad=-2)
question = ibatch[self.data_names.index('question')]
out['question'] = clip_pad_1d(question, max_question_length, pad=0)
other_names = [data_name for data_name in self.data_names if data_name not in out]
for name in other_names:
out[name] = torch.as_tensor(ibatch[self.data_names.index(name)])
batch[i] = tuple(out[data_name] for data_name in self.data_names)
if self.append_ind:
batch[i] += (torch.tensor(i, dtype=torch.int64),)
out_tuple = ()
for items in zip(*batch):
if items[0] is None:
out_tuple += (None,)
else:
out_tuple += (torch.stack(tuple(items), dim=0), )
return out_tuple
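# Added usage sketch (not part of the original file): how the collator is typically
# plugged into a DataLoader; 'dataset' is assumed to expose the same test_mode and
# data_names attributes that BatchCollator reads above.
def _build_loader_example(dataset, batch_size=2):
    import torch.utils.data
    collator = BatchCollator(dataset=dataset, append_ind=False)
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size, collate_fn=collator)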
| 2,035 | 35.357143 | 115 | py |
VLC-BERT | VLC-BERT-master/vqa/data/build.py | import torch.utils.data
from .datasets import *
from . import samplers
from .transforms.build import build_transforms
from .collate_batch import BatchCollator
import pprint
DATASET_CATALOGS = {'vqa': VQA}
def build_dataset(dataset_name, *args, **kwargs):
assert dataset_name in DATASET_CATALOGS, "dataset not in catalogs"
return DATASET_CATALOGS[dataset_name](*args, **kwargs)
def make_data_sampler(dataset, shuffle, distributed, num_replicas, rank):
if distributed:
return samplers.DistributedSampler(dataset, shuffle=shuffle, num_replicas=num_replicas, rank=rank)
if shuffle:
sampler = torch.utils.data.sampler.RandomSampler(dataset)
else:
sampler = torch.utils.data.sampler.SequentialSampler(dataset)
return sampler
def make_batch_data_sampler(dataset, sampler, aspect_grouping, batch_size):
if aspect_grouping:
group_ids = dataset.group_ids
batch_sampler = samplers.GroupedBatchSampler(
sampler, group_ids, batch_size, drop_uneven=False
)
else:
batch_sampler = torch.utils.data.sampler.BatchSampler(
sampler, batch_size, drop_last=False
)
return batch_sampler
def make_dataloader(cfg, dataset=None, mode='train', distributed=False, num_replicas=None, rank=None,
expose_sampler=False):
assert mode in ['train', 'val', 'test']
if mode == 'train':
ann_file = cfg.DATASET.TRAIN_ANNOTATION_FILE
image_set = cfg.DATASET.TRAIN_IMAGE_SET
aspect_grouping = cfg.TRAIN.ASPECT_GROUPING
num_gpu = len(cfg.GPUS.split(','))
batch_size = cfg.TRAIN.BATCH_IMAGES * num_gpu
shuffle = cfg.TRAIN.SHUFFLE
num_workers = cfg.NUM_WORKERS_PER_GPU * num_gpu
elif mode == 'val':
ann_file = cfg.DATASET.VAL_ANNOTATION_FILE
image_set = cfg.DATASET.VAL_IMAGE_SET
aspect_grouping = False
num_gpu = len(cfg.GPUS.split(','))
batch_size = cfg.VAL.BATCH_IMAGES * num_gpu
shuffle = cfg.VAL.SHUFFLE
num_workers = cfg.NUM_WORKERS_PER_GPU * num_gpu
else:
ann_file = cfg.DATASET.TEST_ANNOTATION_FILE
image_set = cfg.DATASET.TEST_IMAGE_SET
aspect_grouping = False
num_gpu = len(cfg.GPUS.split(','))
batch_size = cfg.TEST.BATCH_IMAGES * num_gpu
shuffle = cfg.TEST.SHUFFLE
num_workers = cfg.NUM_WORKERS_PER_GPU * num_gpu
transform = build_transforms(cfg, mode)
if dataset is None:
dataset = build_dataset(dataset_name=cfg.DATASET.DATASET, ann_file=ann_file, image_set=image_set,
use_imdb=cfg.DATASET.USE_IMDB,
with_precomputed_visual_feat=cfg.NETWORK.IMAGE_FEAT_PRECOMPUTED,
boxes=cfg.DATASET.BOXES,
answer_vocab_file=cfg.DATASET.ANSWER_VOCAB_FILE,
root_path=cfg.DATASET.ROOT_PATH, data_path=cfg.DATASET.DATASET_PATH,
test_mode=(mode == 'test'), transform=transform,
zip_mode=cfg.DATASET.ZIP_MODE, cache_mode=cfg.DATASET.CACHE_MODE,
cache_db=True if (rank is None or rank == 0) else False,
ignore_db_cache=cfg.DATASET.IGNORE_DB_CACHE,
add_image_as_a_box=cfg.DATASET.ADD_IMAGE_AS_A_BOX,
aspect_grouping=aspect_grouping,
mask_size=(cfg.DATASET.MASK_SIZE, cfg.DATASET.MASK_SIZE),
pretrained_model_name=cfg.NETWORK.BERT_MODEL_NAME)
sampler = make_data_sampler(dataset, shuffle, distributed, num_replicas, rank)
batch_sampler = make_batch_data_sampler(dataset, sampler, aspect_grouping, batch_size)
collator = BatchCollator(dataset=dataset, append_ind=cfg.DATASET.APPEND_INDEX)
dataloader = torch.utils.data.DataLoader(dataset=dataset,
batch_sampler=batch_sampler,
num_workers=num_workers,
pin_memory=False,
collate_fn=collator)
if expose_sampler:
return dataloader, sampler
return dataloader
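# Added sketch (not part of the original file): the sampler pipeline that
# make_dataloader assembles, spelled out for a hypothetical non-distributed run
# without aspect grouping; cfg is assumed to provide the same fields referenced above.
def _sampler_pipeline_example(cfg, dataset):
    sampler = make_data_sampler(dataset, shuffle=True, distributed=False,
                                num_replicas=None, rank=None)
    batch_sampler = make_batch_data_sampler(dataset, sampler,
                                            aspect_grouping=False,
                                            batch_size=cfg.TRAIN.BATCH_IMAGES)
    collator = BatchCollator(dataset=dataset, append_ind=cfg.DATASET.APPEND_INDEX)
    return torch.utils.data.DataLoader(dataset=dataset,
                                       batch_sampler=batch_sampler,
                                       num_workers=0,
                                       pin_memory=False,
                                       collate_fn=collator)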
| 4,336 | 42.37 | 106 | py |
VLC-BERT | VLC-BERT-master/vqa/data/datasets/vqa.py | import os
import json
import _pickle as cPickle
from PIL import Image
import re
import base64
import numpy as np
import csv
import sys
import time
import pprint
import logging
import torch
from torch.utils.data import Dataset
from external.pytorch_pretrained_bert import BertTokenizer
from common.utils.zipreader import ZipReader
from common.utils.create_logger import makedirsExist
from pycocotools.coco import COCO
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ['image_id', 'image_w', 'image_h', 'num_boxes', 'boxes', 'features']
class VQA(Dataset):
def __init__(self, image_set, root_path, data_path, answer_vocab_file, use_imdb=True,
with_precomputed_visual_feat=False, boxes="36",
transform=None, test_mode=False,
zip_mode=False, cache_mode=False, cache_db=True, ignore_db_cache=True,
tokenizer=None, pretrained_model_name=None,
add_image_as_a_box=False, mask_size=(14, 14),
aspect_grouping=False, **kwargs):
"""
Visual Question Answering Dataset
:param image_set: image folder name
:param root_path: root path to cache database loaded from annotation file
        :param data_path: path to vqa dataset
:param transform: transform
:param test_mode: test mode means no labels available
:param zip_mode: reading images and metadata in zip archive
        :param cache_mode: cache whole dataset to RAM first, then __getitem__ reads from RAM
:param ignore_db_cache: ignore previous cached database, reload it from annotation file
:param tokenizer: default is BertTokenizer from pytorch_pretrained_bert
:param add_image_as_a_box: add whole image as a box
:param mask_size: size of instance mask of each object
        :param aspect_grouping: whether to group images by aspect ratio
:param kwargs:
"""
super(VQA, self).__init__()
        assert not cache_mode, 'cache mode is currently not supported!'
categories = ['__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck',
'boat',
'trafficlight', 'firehydrant', 'stopsign', 'parkingmeter', 'bench', 'bird', 'cat', 'dog', 'horse',
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sportsball', 'kite', 'baseballbat', 'baseballglove',
'skateboard', 'surfboard', 'tennisracket', 'bottle', 'wineglass', 'cup', 'fork', 'knife', 'spoon',
'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hotdog', 'pizza', 'donut',
'cake', 'chair', 'couch', 'pottedplant', 'bed', 'diningtable', 'toilet', 'tv', 'laptop', 'mouse',
'remote', 'keyboard', 'cellphone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book',
'clock', 'vase', 'scissors', 'teddybear', 'hairdrier', 'toothbrush']
vqa_question = {
"train2014": "vqa/v2_OpenEnded_mscoco_train2014_questions.json",
"valminusminival2014": "vqa/v2_OpenEnded_mscoco_valminusminival2014_questions.json",
"val2014": "vqa/v2_OpenEnded_mscoco_val2014_questions.json",
"minival2014": "vqa/v2_OpenEnded_mscoco_minival2014_questions.json",
"test-dev2015": "vqa/v2_OpenEnded_mscoco_test-dev2015_questions.json",
"test2015": "vqa/v2_OpenEnded_mscoco_test2015_questions.json",
}
vqa_annot = {
"train2014": "vqa/v2_mscoco_train2014_annotations.json",
"valminusminival2014": "vqa/v2_mscoco_valminusminival2014_annotations.json",
"val2014": "vqa/v2_mscoco_val2014_annotations.json",
"minival2014": "vqa/v2_mscoco_minival2014_annotations.json",
}
vqa_imdb = {
"train2014": "vqa/vqa_imdb/imdb_train2014.npy",
"val2014": "vqa/vqa_imdb/imdb_val2014.npy",
'test2015': "vqa/vqa_imdb/imdb_test2015.npy",
'minival2014': "vqa/vqa_imdb/imdb_minival2014.npy",
}
if boxes == "36":
precomputed_boxes = {
'train2014': ("vgbua_res101_precomputed", "trainval_resnet101_faster_rcnn_genome_36"),
"valminusminival2014": ("vgbua_res101_precomputed", "trainval_resnet101_faster_rcnn_genome_36"),
'val2014': ("vgbua_res101_precomputed", "trainval_resnet101_faster_rcnn_genome_36"),
"minival2014": ("vgbua_res101_precomputed", "trainval_resnet101_faster_rcnn_genome_36"),
"test-dev2015": ("vgbua_res101_precomputed", "test2015_resnet101_faster_rcnn_genome_36"),
"test2015": ("vgbua_res101_precomputed", "test2015_resnet101_faster_rcnn_genome_36"),
}
elif boxes == "10-100ada":
precomputed_boxes = {
'train2014': ("vgbua_res101_precomputed", "trainval2014_resnet101_faster_rcnn_genome"),
"valminusminival2014": ("vgbua_res101_precomputed", "trainval2014_resnet101_faster_rcnn_genome"),
'val2014': ("vgbua_res101_precomputed", "trainval2014_resnet101_faster_rcnn_genome"),
"minival2014": ("vgbua_res101_precomputed", "trainval2014_resnet101_faster_rcnn_genome"),
"test-dev2015": ("vgbua_res101_precomputed", "test2015_resnet101_faster_rcnn_genome"),
"test2015": ("vgbua_res101_precomputed", "test2015_resnet101_faster_rcnn_genome"),
}
else:
raise ValueError("Not support boxes: {}!".format(boxes))
coco_dataset = {
"train2014": ("train2014", "annotations/instances_train2014.json"),
"valminusminival2014": ("val2014", "annotations/instances_val2014.json"),
"val2014": ("val2014", "annotations/instances_val2014.json"),
"minival2014": ("val2014", "annotations/instances_val2014.json"),
"test-dev2015": ("test2015", "annotations/image_info_test2015.json"),
"test2015": ("test2015", "annotations/image_info_test2015.json"),
}
self.periodStrip = re.compile("(?!<=\d)(\.)(?!\d)")
self.commaStrip = re.compile("(\d)(\,)(\d)")
self.punct = [';', r"/", '[', ']', '"', '{', '}',
'(', ')', '=', '+', '\\', '_', '-',
'>', '<', '@', '`', ',', '?', '!']
self.use_imdb = use_imdb
self.boxes = boxes
self.test_mode = test_mode
self.with_precomputed_visual_feat = with_precomputed_visual_feat
self.category_to_idx = {c: i for i, c in enumerate(categories)}
self.data_path = data_path
self.root_path = root_path
with open(answer_vocab_file, 'r', encoding='utf8') as f:
self.answer_vocab = [w.lower().strip().strip('\r').strip('\n').strip('\r') for w in f.readlines()]
self.answer_vocab = list(filter(lambda x: x != '', self.answer_vocab))
if not self.use_imdb:
self.answer_vocab = [self.processPunctuation(w) for w in self.answer_vocab]
self.image_sets = [iset.strip() for iset in image_set.split('+')]
self.ann_files = [os.path.join(data_path, vqa_annot[iset]) for iset in self.image_sets] \
if not self.test_mode else [None for iset in self.image_sets]
self.q_files = [os.path.join(data_path, vqa_question[iset]) for iset in self.image_sets]
self.imdb_files = [os.path.join(data_path, vqa_imdb[iset]) for iset in self.image_sets]
self.precomputed_box_files = [
os.path.join(data_path, precomputed_boxes[iset][0],
'{0}.zip@/{0}'.format(precomputed_boxes[iset][1])
if zip_mode else precomputed_boxes[iset][1])
for iset in self.image_sets]
self.box_bank = {}
self.coco_datasets = [(os.path.join(data_path,
coco_dataset[iset][0],
'COCO_{}_{{:012d}}.jpg'.format(coco_dataset[iset][0]))
if not zip_mode else
os.path.join(data_path,
coco_dataset[iset][0] + '.zip@/' + coco_dataset[iset][0],
'COCO_{}_{{:012d}}.jpg'.format(coco_dataset[iset][0])),
os.path.join(data_path, coco_dataset[iset][1]))
for iset in self.image_sets]
self.transform = transform
self.zip_mode = zip_mode
self.cache_mode = cache_mode
self.cache_db = cache_db
self.ignore_db_cache = ignore_db_cache
self.aspect_grouping = aspect_grouping
self.cache_dir = os.path.join(root_path, 'cache')
self.add_image_as_a_box = add_image_as_a_box
self.mask_size = mask_size
if not os.path.exists(self.cache_dir):
makedirsExist(self.cache_dir)
self.tokenizer = tokenizer if tokenizer is not None \
else BertTokenizer.from_pretrained(
'bert-base-uncased' if pretrained_model_name is None else pretrained_model_name,
cache_dir=self.cache_dir)
if zip_mode:
self.zipreader = ZipReader()
self.database = self.load_annotations()
if self.aspect_grouping:
self.group_ids = self.group_aspect(self.database)
@property
def data_names(self):
if self.test_mode:
return ['image', 'boxes', 'im_info', 'question']
else:
return ['image', 'boxes', 'im_info', 'question', 'label']
def __getitem__(self, index):
idb = self.database[index]
# image, boxes, im_info
boxes_data = self._load_json(idb['box_fn'])
if self.with_precomputed_visual_feat:
image = None
w0, h0 = idb['width'], idb['height']
boxes_features = torch.as_tensor(
np.frombuffer(self.b64_decode(boxes_data['features']), dtype=np.float32).reshape((boxes_data['num_boxes'], -1))
)
else:
image = self._load_image(idb['image_fn'])
w0, h0 = image.size
boxes = torch.as_tensor(
np.frombuffer(self.b64_decode(boxes_data['boxes']), dtype=np.float32).reshape(
(boxes_data['num_boxes'], -1))
)
if self.add_image_as_a_box:
image_box = torch.as_tensor([[0.0, 0.0, w0 - 1, h0 - 1]])
boxes = torch.cat((image_box, boxes), dim=0)
if self.with_precomputed_visual_feat:
if 'image_box_feature' in boxes_data:
image_box_feature = torch.as_tensor(
np.frombuffer(
self.b64_decode(boxes_data['image_box_feature']), dtype=np.float32
).reshape((1, -1))
)
else:
image_box_feature = boxes_features.mean(0, keepdim=True)
boxes_features = torch.cat((image_box_feature, boxes_features), dim=0)
im_info = torch.tensor([w0, h0, 1.0, 1.0])
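        # im_info layout: [width, height, x_scale, y_scale]; the scales start at 1.0
        # and are updated by the Resize transform applied below.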
flipped = False
if self.transform is not None:
image, boxes, _, im_info, flipped = self.transform(image, boxes, None, im_info, flipped)
# clamp boxes
w = im_info[0].item()
h = im_info[1].item()
boxes[:, [0, 2]] = boxes[:, [0, 2]].clamp(min=0, max=w - 1)
boxes[:, [1, 3]] = boxes[:, [1, 3]].clamp(min=0, max=h - 1)
# flip: 'left' -> 'right', 'right' -> 'left'
if self.use_imdb:
q_tokens = idb['question_tokens']
else:
q_tokens = self.tokenizer.tokenize(idb['question'])
if flipped:
q_tokens = self.flip_tokens(q_tokens, verbose=False)
if not self.test_mode:
answers = idb['answers']
if flipped:
answers_tokens = [a.split(' ') for a in answers]
answers_tokens = [self.flip_tokens(a_toks, verbose=False) for a_toks in answers_tokens]
answers = [' '.join(a_toks) for a_toks in answers_tokens]
label = self.get_soft_target(answers)
# question
if self.use_imdb:
q_str = ' '.join(q_tokens)
q_retokens = self.tokenizer.tokenize(q_str)
else:
q_retokens = q_tokens
q_ids = self.tokenizer.convert_tokens_to_ids(q_retokens)
# concat box feature to box
if self.with_precomputed_visual_feat:
boxes = torch.cat((boxes, boxes_features), dim=-1)
if self.test_mode:
return image, boxes, im_info, q_ids
else:
# print([(self.answer_vocab[i], p.item()) for i, p in enumerate(label) if p.item() != 0])
return image, boxes, im_info, q_ids, label
@staticmethod
def flip_tokens(tokens, verbose=True):
changed = False
tokens_new = [tok for tok in tokens]
for i, tok in enumerate(tokens):
if tok == 'left':
tokens_new[i] = 'right'
changed = True
elif tok == 'right':
tokens_new[i] = 'left'
changed = True
if verbose and changed:
logging.info('[Tokens Flip] {} -> {}'.format(tokens, tokens_new))
return tokens_new
@staticmethod
def b64_decode(string):
return base64.decodebytes(string.encode())
def answer_to_ind(self, answer):
if answer in self.answer_vocab:
return self.answer_vocab.index(answer)
else:
return self.answer_vocab.index('<unk>')
def get_soft_target(self, answers):
soft_target = torch.zeros(len(self.answer_vocab), dtype=torch.float)
answer_indices = [self.answer_to_ind(answer) for answer in answers]
gt_answers = list(enumerate(answer_indices))
unique_answers = set(answer_indices)
for answer in unique_answers:
accs = []
for gt_answer in gt_answers:
other_answers = [item for item in gt_answers if item != gt_answer]
matching_answers = [item for item in other_answers if item[1] == answer]
acc = min(1, float(len(matching_answers)) / 3)
accs.append(acc)
avg_acc = sum(accs) / len(accs)
if answer != self.answer_vocab.index('<unk>'):
soft_target[answer] = avg_acc
return soft_target
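    # Added worked example (not part of the original VLC-BERT code): a tiny,
    # self-contained check of the leave-one-out soft score computed above, assuming
    # the standard 10 ground-truth answers per VQA question. An answer given by k of
    # the 10 annotators scores (k * min(1, (k-1)/3) + (10-k) * min(1, k/3)) / 10,
    # e.g. 0.9 for k=3 and 1.0 for k >= 4.
    @staticmethod
    def _soft_score_example(k=3, n_annotators=10):
        accs = []
        for i in range(n_annotators):
            matches_among_others = k - 1 if i < k else k
            accs.append(min(1.0, matches_among_others / 3.0))
        return sum(accs) / n_annotators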
def processPunctuation(self, inText):
if inText == '<unk>':
return inText
outText = inText
for p in self.punct:
if (p + ' ' in inText or ' ' + p in inText) or (re.search(self.commaStrip, inText) != None):
outText = outText.replace(p, '')
else:
outText = outText.replace(p, ' ')
outText = self.periodStrip.sub("",
outText,
re.UNICODE)
return outText
def load_annotations(self):
tic = time.time()
database = []
if self.use_imdb:
db_cache_name = 'vqa2_imdb_boxes{}_{}'.format(self.boxes, '+'.join(self.image_sets))
else:
db_cache_name = 'vqa2_nonimdb_boxes{}_{}'.format(self.boxes, '+'.join(self.image_sets))
if self.with_precomputed_visual_feat:
db_cache_name += 'visualprecomp'
if self.zip_mode:
db_cache_name = db_cache_name + '_zipmode'
if self.test_mode:
db_cache_name = db_cache_name + '_testmode'
db_cache_root = os.path.join(self.root_path, 'cache')
db_cache_path = os.path.join(db_cache_root, '{}.pkl'.format(db_cache_name))
if os.path.exists(db_cache_path):
if not self.ignore_db_cache:
# reading cached database
print('cached database found in {}.'.format(db_cache_path))
with open(db_cache_path, 'rb') as f:
print('loading cached database from {}...'.format(db_cache_path))
tic = time.time()
database = cPickle.load(f)
print('Done (t={:.2f}s)'.format(time.time() - tic))
return database
else:
print('cached database ignored.')
# ignore or not find cached database, reload it from annotation file
print('loading database of split {}...'.format('+'.join(self.image_sets)))
tic = time.time()
if self.use_imdb:
for imdb_file, (coco_path, coco_annot), box_file \
in zip(self.imdb_files, self.coco_datasets, self.precomputed_box_files):
print("loading imdb: {}".format(imdb_file))
imdb = np.load(imdb_file, allow_pickle=True)
print("imdb info:")
pprint.pprint(imdb[0])
coco = COCO(coco_annot)
for item in imdb[1:]:
idb = {'image_id': item['image_id'],
'image_fn': coco_path.format(item['image_id']),
'width': coco.imgs[item['image_id']]['width'],
'height': coco.imgs[item['image_id']]['height'],
'box_fn': os.path.join(box_file, '{}.json'.format(item['image_id'])),
'question_id': item['question_id'],
'question_tokens': item['question_tokens'],
'answers': item['answers'] if not self.test_mode else None,
}
database.append(idb)
else:
for ann_file, q_file, (coco_path, coco_annot), box_file \
in zip(self.ann_files, self.q_files, self.coco_datasets, self.precomputed_box_files):
qs = self._load_json(q_file)['questions']
anns = self._load_json(ann_file)['annotations'] if not self.test_mode else ([None] * len(qs))
coco = COCO(coco_annot)
for ann, q in zip(anns, qs):
idb = {'image_id': q['image_id'],
'image_fn': coco_path.format(q['image_id']),
'width': coco.imgs[q['image_id']]['width'],
'height': coco.imgs[q['image_id']]['height'],
'box_fn': os.path.join(box_file, '{}.json'.format(q['image_id'])),
'question_id': q['question_id'],
'question': q['question'],
'answers': [a['answer'] for a in ann['answers']] if not self.test_mode else None,
'multiple_choice_answer': ann['multiple_choice_answer'] if not self.test_mode else None,
"question_type": ann['question_type'] if not self.test_mode else None,
"answer_type": ann['answer_type'] if not self.test_mode else None,
}
database.append(idb)
print('Done (t={:.2f}s)'.format(time.time() - tic))
# cache database via cPickle
if self.cache_db:
print('caching database to {}...'.format(db_cache_path))
tic = time.time()
if not os.path.exists(db_cache_root):
makedirsExist(db_cache_root)
with open(db_cache_path, 'wb') as f:
cPickle.dump(database, f)
print('Done (t={:.2f}s)'.format(time.time() - tic))
return database
@staticmethod
def group_aspect(database):
print('grouping aspect...')
t = time.time()
# get shape of all images
widths = torch.as_tensor([idb['width'] for idb in database])
heights = torch.as_tensor([idb['height'] for idb in database])
# group
group_ids = torch.zeros(len(database))
horz = widths >= heights
vert = 1 - horz
group_ids[horz] = 0
group_ids[vert] = 1
print('Done (t={:.2f}s)'.format(time.time() - t))
return group_ids
def load_precomputed_boxes(self, box_file):
if box_file in self.box_bank:
return self.box_bank[box_file]
else:
in_data = {}
with open(box_file, "r") as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames=FIELDNAMES)
for item in reader:
item['image_id'] = int(item['image_id'])
item['image_h'] = int(item['image_h'])
item['image_w'] = int(item['image_w'])
item['num_boxes'] = int(item['num_boxes'])
for field in (['boxes', 'features'] if self.with_precomputed_visual_feat else ['boxes']):
item[field] = np.frombuffer(base64.decodebytes(item[field].encode()),
dtype=np.float32).reshape((item['num_boxes'], -1))
in_data[item['image_id']] = item
self.box_bank[box_file] = in_data
return in_data
def __len__(self):
return len(self.database)
def _load_image(self, path):
if '.zip@' in path:
return self.zipreader.imread(path).convert('RGB')
else:
return Image.open(path).convert('RGB')
def _load_json(self, path):
if '.zip@' in path:
f = self.zipreader.read(path)
return json.loads(f.decode())
else:
with open(path, 'r') as f:
return json.load(f)
| 21,774 | 45.527778 | 127 | py |
VLC-BERT | VLC-BERT-master/vqa/data/samplers/grouped_batch_sampler.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import itertools
import torch
from torch.utils.data.sampler import BatchSampler
from torch.utils.data.sampler import Sampler
class GroupedBatchSampler(BatchSampler):
"""
Wraps another sampler to yield a mini-batch of indices.
It enforces that elements from the same group should appear in groups of batch_size.
It also tries to provide mini-batches which follows an ordering which is
as close as possible to the ordering from the original sampler.
Arguments:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_uneven (bool): If ``True``, the sampler will drop the batches whose
size is less than ``batch_size``
"""
def __init__(self, sampler, group_ids, batch_size, drop_uneven=False):
if not isinstance(sampler, Sampler):
raise ValueError(
"sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}".format(sampler)
)
self.sampler = sampler
self.group_ids = torch.as_tensor(group_ids)
assert self.group_ids.dim() == 1
self.batch_size = batch_size
self.drop_uneven = drop_uneven
self.groups = torch.unique(self.group_ids).sort(0)[0]
self._can_reuse_batches = False
def _prepare_batches(self):
dataset_size = len(self.group_ids)
# get the sampled indices from the sampler
sampled_ids = torch.as_tensor(list(self.sampler))
# potentially not all elements of the dataset were sampled
# by the sampler (e.g., DistributedSampler).
# construct a tensor which contains -1 if the element was
# not sampled, and a non-negative number indicating the
# order where the element was sampled.
        # For example, if sampled_ids = [3, 1] and dataset_size = 5,
# the order is [-1, 1, -1, 0, -1]
order = torch.full((dataset_size,), -1, dtype=torch.int64)
order[sampled_ids] = torch.arange(len(sampled_ids))
# get a mask with the elements that were sampled
mask = order >= 0
# find the elements that belong to each individual cluster
clusters = [(self.group_ids == i) & mask for i in self.groups]
# get relative order of the elements inside each cluster
# that follows the order from the sampler
relative_order = [order[cluster] for cluster in clusters]
# with the relative order, find the absolute order in the
# sampled space
permutation_ids = [s[s.sort()[1]] for s in relative_order]
# permute each cluster so that they follow the order from
# the sampler
permuted_clusters = [sampled_ids[idx] for idx in permutation_ids]
# splits each cluster in batch_size, and merge as a list of tensors
splits = [c.split(self.batch_size) for c in permuted_clusters]
merged = tuple(itertools.chain.from_iterable(splits))
# now each batch internally has the right order, but
# they are grouped by clusters. Find the permutation between
# different batches that brings them as close as possible to
# the order that we have in the sampler. For that, we will consider the
# ordering as coming from the first element of each batch, and sort
# correspondingly
first_element_of_batch = [t[0].item() for t in merged]
        # get an inverse mapping from sampled indices to the position where
# they occur (as returned by the sampler)
inv_sampled_ids_map = {v: k for k, v in enumerate(sampled_ids.tolist())}
# from the first element in each batch, get a relative ordering
first_index_of_batch = torch.as_tensor(
[inv_sampled_ids_map[s] for s in first_element_of_batch]
)
# permute the batches so that they approximately follow the order
# from the sampler
permutation_order = first_index_of_batch.sort(0)[1].tolist()
# finally, permute the batches
batches = [merged[i].tolist() for i in permutation_order]
if self.drop_uneven:
kept = []
for batch in batches:
if len(batch) == self.batch_size:
kept.append(batch)
batches = kept
return batches
def __iter__(self):
if self._can_reuse_batches:
batches = self._batches
self._can_reuse_batches = False
else:
batches = self._prepare_batches()
self._batches = batches
return iter(batches)
def __len__(self):
if not hasattr(self, "_batches"):
self._batches = self._prepare_batches()
self._can_reuse_batches = True
return len(self._batches)
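# Added usage sketch (not part of the original file): batching a toy dataset of six
# samples whose group ids alternate between two aspect-ratio groups. Batches contain
# indices from a single group and approximately follow the base sampler's order.
def _grouped_batch_sampler_example():
    from torch.utils.data.sampler import SequentialSampler
    group_ids = [0, 1, 0, 1, 0, 1]
    sampler = SequentialSampler(range(len(group_ids)))
    batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size=2, drop_uneven=False)
    return list(iter(batch_sampler))   # [[0, 2], [1, 3], [4], [5]]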
| 4,846 | 40.42735 | 88 | py |
VLC-BERT | VLC-BERT-master/vqa/data/samplers/distributed.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Code is copy-pasted exactly as in torch.utils.data.distributed.
# FIXME remove this once c10d fixes the bug it has
import math
import torch
import torch.distributed as dist
from torch.utils.data.sampler import Sampler
class DistributedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
"""
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
def __iter__(self):
if self.shuffle:
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
indices += indices[: (self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
offset = self.num_samples * self.rank
indices = indices[offset : offset + self.num_samples]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
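    # Added sketch (not part of the original file): per-rank subsetting for a toy
    # dataset of 10 items split across 4 replicas with shuffle disabled. Indices are
    # padded up to a multiple of num_replicas, so every rank draws num_samples = 3
    # indices per epoch and the last rank wraps around.
    @staticmethod
    def _example_subsetting():
        dataset = list(range(10))
        # rank 0 -> [0, 1, 2], rank 1 -> [3, 4, 5], rank 2 -> [6, 7, 8], rank 3 -> [9, 0, 1]
        return [list(iter(DistributedSampler(dataset, num_replicas=4, rank=r, shuffle=False)))
                for r in range(4)]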
def set_epoch(self, epoch):
        self.epoch = epoch
| 2,568 | 37.924242 | 86 | py |
VLC-BERT | VLC-BERT-master/vqa/data/transforms/transforms.py | import random
import numpy as np
import torch
import torchvision
from torchvision.transforms import functional as F
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, boxes, masks, im_info, flipped):
for t in self.transforms:
image, boxes, masks, im_info, flipped = t(image, boxes, masks, im_info, flipped)
return image, boxes, masks, im_info, flipped
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
class Resize(object):
def __init__(self, min_size, max_size):
self.min_size = min_size
self.max_size = max_size
# modified from torchvision to add support for max size
def get_size(self, image_size):
w, h = image_size
size = self.min_size
max_size = self.max_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(max_size * min_original_size / max_original_size)
if (w <= h and w == size) or (h <= w and h == size):
return (w, h)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (ow, oh)
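    # Added worked example (not part of the original file), assuming min_size=600 and
    # max_size=1000: an 800x400 (w, h) input would naively have its short side scaled
    # to 600 (long side 1200 > max_size), so the scale is capped and the output size
    # becomes (1000, 500).
    @staticmethod
    def _get_size_example():
        return Resize(min_size=600, max_size=1000).get_size((800, 400))   # -> (1000, 500)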
def __call__(self, image, boxes, masks, im_info, flipped):
origin_size = im_info[:2]
size = self.get_size(origin_size)
if image is not None:
image = F.resize(image, (size[1], size[0]))
ratios = [size[0] * 1.0 / origin_size[0], size[1] * 1.0 / origin_size[1]]
if boxes is not None:
boxes[:, [0, 2]] *= ratios[0]
boxes[:, [1, 3]] *= ratios[1]
im_info[0], im_info[1] = size
im_info[2], im_info[3] = ratios
return image, boxes, masks, im_info, flipped
class RandomHorizontalFlip(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, boxes, masks, im_info, flipped):
if random.random() < self.prob:
w, h = im_info[:2]
if image is not None:
image = F.hflip(image)
if boxes is not None:
boxes[:, [0, 2]] = w - 1 - boxes[:, [2, 0]]
if masks is not None:
masks = torch.as_tensor(masks.numpy()[:, :, ::-1].tolist())
flipped = not flipped
return image, boxes, masks, im_info, flipped
class ToTensor(object):
def __call__(self, image, boxes, masks, im_info, flipped):
return F.to_tensor(image) if image is not None else image, boxes, masks, im_info, flipped
class Normalize(object):
def __init__(self, mean, std, to_bgr255=True):
self.mean = mean
self.std = std
self.to_bgr255 = to_bgr255
def __call__(self, image, boxes, masks, im_info, flipped):
if image is not None:
if self.to_bgr255:
image = image[[2, 1, 0]] * 255
image = F.normalize(image, mean=self.mean, std=self.std)
return image, boxes, masks, im_info, flipped
class FixPadding(object):
def __init__(self, min_size, max_size, pad=0):
self.min_size = min_size
self.max_size = max_size
self.pad = pad
def __call__(self, image, boxes, masks, im_info, flipped):
if image is not None:
            # pad to a fixed size so output shapes are deterministic
c, h, w = image.shape
if h <= w:
h1 = self.min_size
w1 = self.max_size
else:
h1 = self.max_size
w1 = self.min_size
padded_image = image.new_zeros((c, h1, w1)).fill_(self.pad)
padded_image[:, :h, :w] = image
image = padded_image
return image, boxes, masks, im_info, flipped
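# Added usage sketch (not part of the original file): one plausible eval-time
# pipeline composed from the transforms above. The mean/std/size values are
# placeholders, and the exact order used by this repo's build_transforms may differ.
def _build_eval_transform_example():
    return Compose([
        Resize(min_size=600, max_size=1000),
        ToTensor(),
        Normalize(mean=[102.98, 115.95, 122.77], std=[1.0, 1.0, 1.0], to_bgr255=True),
        FixPadding(min_size=600, max_size=1000, pad=0),
    ])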
| 4,104 | 30.821705 | 97 | py |
VLC-BERT | VLC-BERT-master/aokvqa/train_end2end.py | import _init_paths
import os
import argparse
import torch
import subprocess
from aokvqa.function.config import config, update_config
from aokvqa.function.train import train_net
from aokvqa.function.test import test_net
from external.PythonEvaluationTools.aokvqa_vqaEval import run_eval
def parse_args():
parser = argparse.ArgumentParser('Train Cognition Network')
parser.add_argument('--cfg', type=str, help='path to config file')
parser.add_argument('--model-dir', type=str, help='root path to store checkpoint')
parser.add_argument('--log-dir', type=str, help='tensorboard log dir')
parser.add_argument('--dist', help='whether to use distributed training', default=False, action='store_true')
parser.add_argument('--slurm', help='whether this is a slurm job', default=False, action='store_true')
    parser.add_argument('--do-test', help='whether to generate prediction results on the test set',
default=True, action='store_true')
parser.add_argument('--cudnn-off', help='disable cudnn', default=False, action='store_true')
# easy test pretrain model
parser.add_argument('--partial-pretrain', type=str)
args = parser.parse_args()
if args.cfg is not None:
update_config(args.cfg)
if args.model_dir is not None:
config.OUTPUT_PATH = os.path.join(args.model_dir, config.OUTPUT_PATH)
if args.partial_pretrain is not None:
config.NETWORK.PARTIAL_PRETRAIN = args.partial_pretrain
if args.slurm:
proc_id = int(os.environ['SLURM_PROCID'])
ntasks = int(os.environ['SLURM_NTASKS'])
node_list = os.environ['SLURM_NODELIST']
num_gpus = torch.cuda.device_count()
addr = subprocess.getoutput(
'scontrol show hostname {} | head -n1'.format(node_list))
os.environ['MASTER_PORT'] = str(29500)
os.environ['MASTER_ADDR'] = addr
os.environ['WORLD_SIZE'] = str(ntasks)
os.environ['RANK'] = str(proc_id)
os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
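        # Under SLURM the distributed backend is initialised purely from environment
        # variables: MASTER_ADDR/MASTER_PORT point at the first node of the
        # allocation, and WORLD_SIZE/RANK/LOCAL_RANK are derived from the per-task
        # SLURM variables set above.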
return args, config
def main():
args, config = parse_args()
rank, model = train_net(args, config)
if args.do_test and (rank is None or rank == 0):
res_path, save_path = test_net(args, config)
run_eval(res_path, split='val')
if __name__ == '__main__':
main()
| 2,328 | 34.830769 | 113 | py |
VLC-BERT | VLC-BERT-master/aokvqa/function/val.py | from collections import namedtuple
import torch
from common.trainer import to_cuda
@torch.no_grad()
def do_validation(net, val_loader, metrics, label_index_in_batch):
net.eval()
metrics.reset()
for nbatch, batch in enumerate(val_loader):
batch = to_cuda(batch)
label = batch[label_index_in_batch]
datas = [batch[i] for i in range(len(batch)) if i != label_index_in_batch % len(batch)]
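        # Drop the label from the inputs passed to the network; the modulo allows
        # label_index_in_batch to be given as a negative index (e.g. -1 for the last
        # element of the batch).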
outputs = net(*datas)
outputs.update({'label': label})
metrics.update(outputs)
| 528 | 26.842105 | 95 | py |
VLC-BERT | VLC-BERT-master/aokvqa/function/test.py | import os
import pprint
import shutil
import json
from tqdm import tqdm, trange
import numpy as np
import torch
import torch.nn.functional as F
from common.utils.load import smart_load_model_state_dict
from common.trainer import to_cuda
from common.utils.create_logger import create_logger
from aokvqa.data.build import make_dataloader
from aokvqa.modules import *
@torch.no_grad()
def test_net(args, config, ckpt_path=None, save_path=None, save_name=None):
print('test net...')
pprint.pprint(args)
pprint.pprint(config)
device_ids = [int(d) for d in config.GPUS.split(',')]
# os.environ['CUDA_VISIBLE_DEVICES'] = config.GPUS
torch.backends.cudnn.enabled = False
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if ckpt_path is None:
_, train_output_path = create_logger(config.OUTPUT_PATH, args.cfg, config.DATASET.TRAIN_IMAGE_SET,
split='train')
model_prefix = os.path.join(train_output_path, config.MODEL_PREFIX)
ckpt_path = '{}-latest.model'.format(model_prefix)
print('Use latest checkpoint {}...'.format(ckpt_path))
if save_path is None:
logger, test_output_path = create_logger(config.OUTPUT_PATH, args.cfg, config.DATASET.TEST_IMAGE_SET,
split='test')
save_path = test_output_path
if not os.path.exists(save_path):
os.makedirs(save_path)
# shutil.copy2(ckpt_path,
# os.path.join(save_path, '{}_test_ckpt_{}.model'.format(config.MODEL_PREFIX, config.DATASET.TASK)))
# get network
model = eval(config.MODULE)(config)
if len(device_ids) > 1:
model = torch.nn.DataParallel(model, device_ids=device_ids).cuda()
else:
torch.cuda.set_device(device_ids[0])
model = model.cuda()
checkpoint = torch.load(ckpt_path, map_location=lambda storage, loc: storage)
smart_load_model_state_dict(model, checkpoint['state_dict'])
# loader
test_loader = make_dataloader(config, mode='test', distributed=False)
test_dataset = test_loader.dataset
test_database = test_dataset.database
# test
q_ids = []
answer_ids = []
attn_weights = []
model.eval()
cur_id = 0
for nbatch, batch in zip(trange(len(test_loader)), test_loader):
# for nbatch, batch in tqdm(enumerate(test_loader)):
bs = test_loader.batch_sampler.batch_size if test_loader.batch_sampler is not None else test_loader.batch_size
q_ids.extend([test_database[id]['question_id'] for id in range(cur_id, min(cur_id + bs, len(test_database)))])
batch = to_cuda(batch)
output = model(*batch)
answer_ids.extend(output['label_logits'].argmax(dim=1).detach().cpu().tolist())
attn_weights.extend(output['attn_weights'].detach().cpu().tolist())
cur_id += bs
result = [{'question_id': q_id, 'answer': test_dataset.answer_vocab[a_id], 'attn_weights': attn} for q_id, a_id, attn in zip(q_ids, answer_ids, attn_weights)]
cfg_name = os.path.splitext(os.path.basename(args.cfg))[0]
result_json_path = os.path.join(save_path, '{}_aokvqa_{}.json'.format(cfg_name if save_name is None else save_name,
config.DATASET.TEST_IMAGE_SET))
with open(result_json_path, 'w') as f:
json.dump(result, f)
print('result json saved to {}.'.format(result_json_path))
return result_json_path, save_path
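# Added sketch (not part of the original file): the result file written above is a
# JSON list of {'question_id', 'answer', 'attn_weights'} records; a quick way to map
# question ids to predicted answers for downstream evaluation.
def _load_results_example(result_json_path):
    with open(result_json_path) as f:
        results = json.load(f)
    return {r['question_id']: r['answer'] for r in results}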
| 3,526 | 40.494118 | 162 | py |
VLC-BERT | VLC-BERT-master/aokvqa/function/train.py | import os
import pprint
import shutil
import inspect
from tensorboardX import SummaryWriter
import numpy as np
import torch
import torch.nn
import torch.optim as optim
import torch.distributed as distributed
from torch.nn.parallel import DistributedDataParallel as DDP
from common.utils.create_logger import create_logger
from common.utils.misc import summary_parameters, bn_fp16_half_eval
from common.utils.load import smart_resume, smart_partial_load_model_state_dict
from common.trainer import train
from common.metrics.composite_eval_metric import CompositeEvalMetric
from common.metrics import vqa_metrics
from common.callbacks.batch_end_callbacks.speedometer import Speedometer
from common.callbacks.epoch_end_callbacks.validation_monitor import ValidationMonitor
from common.callbacks.epoch_end_callbacks.checkpoint import Checkpoint
from common.lr_scheduler import WarmupMultiStepLR
from common.nlp.bert.optimization import AdamW, WarmupLinearSchedule
from aokvqa.data.build import make_dataloader, build_dataset, build_transforms
from aokvqa.modules import *
from aokvqa.function.val import do_validation
try:
from apex import amp
from apex.parallel import DistributedDataParallel as Apex_DDP
except ImportError:
pass
#raise ImportError("Please install apex from https://www.github.com/nvidia/apex if you want to use fp16.")
def train_net(args, config):
# setup logger
logger, final_output_path = create_logger(config.OUTPUT_PATH, args.cfg, config.DATASET.TRAIN_IMAGE_SET,
split='train')
model_prefix = os.path.join(final_output_path, config.MODEL_PREFIX)
if args.log_dir is None:
args.log_dir = os.path.join(final_output_path, 'tensorboard_logs')
pprint.pprint(args)
logger.info('training args:{}\n'.format(args))
pprint.pprint(config)
logger.info('training config:{}\n'.format(pprint.pformat(config)))
# manually set random seed
if config.RNG_SEED > -1:
np.random.seed(config.RNG_SEED)
torch.random.manual_seed(config.RNG_SEED)
torch.cuda.manual_seed_all(config.RNG_SEED)
# cudnn
torch.backends.cudnn.benchmark = False
if args.cudnn_off:
torch.backends.cudnn.enabled = False
if args.dist:
model = eval(config.MODULE)(config)
local_rank = int(os.environ.get('LOCAL_RANK') or 0)
config.GPUS = str(local_rank)
torch.cuda.set_device(local_rank)
master_address = os.environ['MASTER_ADDR']
master_port = int(os.environ['MASTER_PORT'] or 23456)
world_size = int(os.environ['WORLD_SIZE'] or 1)
rank = int(os.environ['RANK'] or 0)
if args.slurm:
distributed.init_process_group(backend='nccl')
else:
distributed.init_process_group(
backend='nccl',
init_method='tcp://{}:{}'.format(master_address, master_port),
world_size=world_size,
rank=rank,
group_name='mtorch')
print(f'native distributed, size: {world_size}, rank: {rank}, local rank: {local_rank}')
torch.cuda.set_device(local_rank)
config.GPUS = str(local_rank)
model = model.cuda()
if not config.TRAIN.FP16:
model = DDP(model, device_ids=[local_rank], output_device=local_rank)
if rank == 0:
summary_parameters(model.module if isinstance(model, torch.nn.parallel.DistributedDataParallel) else model,
logger)
shutil.copy(args.cfg, final_output_path)
shutil.copy(inspect.getfile(eval(config.MODULE)), final_output_path)
writer = None
if args.log_dir is not None:
tb_log_dir = os.path.join(args.log_dir, 'rank{}'.format(rank))
if not os.path.exists(tb_log_dir):
os.makedirs(tb_log_dir)
writer = SummaryWriter(log_dir=tb_log_dir)
train_loader, train_sampler = make_dataloader(config,
mode='train',
distributed=True,
num_replicas=world_size,
rank=rank,
expose_sampler=True)
val_loader = make_dataloader(config,
mode='val',
distributed=True,
num_replicas=world_size,
rank=rank)
batch_size = world_size * (sum(config.TRAIN.BATCH_IMAGES)
if isinstance(config.TRAIN.BATCH_IMAGES, list)
else config.TRAIN.BATCH_IMAGES)
if config.TRAIN.GRAD_ACCUMULATE_STEPS > 1:
batch_size = batch_size * config.TRAIN.GRAD_ACCUMULATE_STEPS
base_lr = config.TRAIN.LR * batch_size
optimizer_grouped_parameters = [{'params': [p for n, p in model.named_parameters() if _k in n],
'lr': base_lr * _lr_mult}
for _k, _lr_mult in config.TRAIN.LR_MULT]
optimizer_grouped_parameters.append({'params': [p for n, p in model.named_parameters()
if all([_k not in n for _k, _ in config.TRAIN.LR_MULT])]})
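        # Per-group learning rates: parameters whose names contain one of the keys in
        # config.TRAIN.LR_MULT use base_lr scaled by the paired multiplier; all
        # remaining parameters fall into the default group appended above.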
if config.TRAIN.OPTIMIZER == 'SGD':
optimizer = optim.SGD(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
momentum=config.TRAIN.MOMENTUM,
weight_decay=config.TRAIN.WD)
elif config.TRAIN.OPTIMIZER == 'Adam':
optimizer = optim.Adam(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
weight_decay=config.TRAIN.WD)
elif config.TRAIN.OPTIMIZER == 'AdamW':
optimizer = AdamW(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
betas=(0.9, 0.999),
eps=1e-6,
weight_decay=config.TRAIN.WD,
correct_bias=True)
else:
            raise ValueError('Unsupported optimizer {}!'.format(config.TRAIN.OPTIMIZER))
total_gpus = world_size
else:
#os.environ['CUDA_VISIBLE_DEVICES'] = config.GPUS
model = eval(config.MODULE)(config)
summary_parameters(model, logger)
shutil.copy(args.cfg, final_output_path)
shutil.copy(inspect.getfile(eval(config.MODULE)), final_output_path)
num_gpus = len(config.GPUS.split(','))
        assert num_gpus <= 1 or (not config.TRAIN.FP16), "fp16 is not supported with torch.nn.DataParallel. " \
                                                         "Please use apex.parallel.DistributedDataParallel instead."
total_gpus = num_gpus
rank = None
writer = SummaryWriter(log_dir=args.log_dir) if args.log_dir is not None else None
# model
if num_gpus > 1:
model = torch.nn.DataParallel(model, device_ids=[int(d) for d in config.GPUS.split(',')]).cuda()
else:
torch.cuda.set_device(int(config.GPUS))
model.cuda()
# loader
train_loader = make_dataloader(config, mode='train', distributed=False)
val_loader = make_dataloader(config, mode='val', distributed=False)
train_sampler = None
batch_size = num_gpus * (sum(config.TRAIN.BATCH_IMAGES) if isinstance(config.TRAIN.BATCH_IMAGES, list)
else config.TRAIN.BATCH_IMAGES)
if config.TRAIN.GRAD_ACCUMULATE_STEPS > 1:
batch_size = batch_size * config.TRAIN.GRAD_ACCUMULATE_STEPS
base_lr = config.TRAIN.LR * batch_size
optimizer_grouped_parameters = [{'params': [p for n, p in model.named_parameters() if _k in n],
'lr': base_lr * _lr_mult}
for _k, _lr_mult in config.TRAIN.LR_MULT]
optimizer_grouped_parameters.append({'params': [p for n, p in model.named_parameters()
if all([_k not in n for _k, _ in config.TRAIN.LR_MULT])]})
if config.TRAIN.OPTIMIZER == 'SGD':
optimizer = optim.SGD(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
momentum=config.TRAIN.MOMENTUM,
weight_decay=config.TRAIN.WD)
elif config.TRAIN.OPTIMIZER == 'Adam':
optimizer = optim.Adam(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
weight_decay=config.TRAIN.WD)
elif config.TRAIN.OPTIMIZER == 'AdamW':
optimizer = AdamW(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
betas=(0.9, 0.999),
eps=1e-6,
weight_decay=config.TRAIN.WD,
correct_bias=True)
else:
            raise ValueError('Unsupported optimizer {}!'.format(config.TRAIN.OPTIMIZER))
# partial load pretrain state dict
if config.NETWORK.PARTIAL_PRETRAIN != "":
pretrain_state_dict = torch.load(config.NETWORK.PARTIAL_PRETRAIN, map_location=lambda storage, loc: storage)['state_dict']
prefix_change = [prefix_change.split('->') for prefix_change in config.NETWORK.PARTIAL_PRETRAIN_PREFIX_CHANGES]
if len(prefix_change) > 0:
pretrain_state_dict_parsed = {}
for k, v in pretrain_state_dict.items():
no_match = True
for pretrain_prefix, new_prefix in prefix_change:
if k.startswith(pretrain_prefix):
k = new_prefix + k[len(pretrain_prefix):]
pretrain_state_dict_parsed[k] = v
no_match = False
break
if no_match:
pretrain_state_dict_parsed[k] = v
pretrain_state_dict = pretrain_state_dict_parsed
smart_partial_load_model_state_dict(model, pretrain_state_dict, vocab_size=config.NETWORK.VLBERT.type_vocab_size)
# pretrained classifier
if config.NETWORK.CLASSIFIER_PRETRAINED:
print('Initializing classifier weight from pretrained word embeddings...')
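        # Each answer string is tokenized into wordpieces, their BERT input word embeddings
        # are averaged, and the resulting vectors are written into the weight matrix of the
        # classifier's last linear layer (one row per answer in the answer vocabulary).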
answers_word_embed = []
for k, v in model.state_dict().items():
if 'word_embeddings.weight' in k:
word_embeddings = v.detach().clone()
break
for answer in train_loader.dataset.answer_vocab:
a_tokens = train_loader.dataset.tokenizer.tokenize(answer)
a_ids = train_loader.dataset.tokenizer.convert_tokens_to_ids(a_tokens)
a_word_embed = (torch.stack([word_embeddings[a_id] for a_id in a_ids], dim=0)).mean(dim=0)
answers_word_embed.append(a_word_embed)
answers_word_embed_tensor = torch.stack(answers_word_embed, dim=0)
for name, module in model.named_modules():
if name.endswith('final_mlp'):
module[-1].weight.data = answers_word_embed_tensor.to(device=module[-1].weight.data.device)
# metrics
train_metrics_list = [vqa_metrics.SoftAccuracy(allreduce=args.dist,
num_replicas=world_size if args.dist else 1)]
val_metrics_list = [vqa_metrics.SoftAccuracy(allreduce=args.dist,
num_replicas=world_size if args.dist else 1)]
for output_name, display_name in config.TRAIN.LOSS_LOGGERS:
train_metrics_list.append(
vqa_metrics.LossLogger(output_name, display_name=display_name, allreduce=args.dist,
num_replicas=world_size if args.dist else 1))
train_metrics = CompositeEvalMetric()
val_metrics = CompositeEvalMetric()
for child_metric in train_metrics_list:
train_metrics.add(child_metric)
for child_metric in val_metrics_list:
val_metrics.add(child_metric)
# epoch end callbacks
epoch_end_callbacks = []
if (rank is None) or (rank == 0):
epoch_end_callbacks = [Checkpoint(model_prefix, config.CHECKPOINT_FREQUENT)]
validation_monitor = ValidationMonitor(do_validation, val_loader, val_metrics,
host_metric_name='SoftAcc',
label_index_in_batch=config.DATASET.LABEL_INDEX_IN_BATCH)
# optimizer initial lr before
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
# resume/auto-resume
if rank is None or rank == 0:
smart_resume(model, optimizer, validation_monitor, config, model_prefix, logger)
if args.dist:
begin_epoch = torch.tensor(config.TRAIN.BEGIN_EPOCH).cuda()
distributed.broadcast(begin_epoch, src=0)
config.TRAIN.BEGIN_EPOCH = begin_epoch.item()
# batch end callbacks
batch_size = len(config.GPUS.split(',')) * config.TRAIN.BATCH_IMAGES
batch_end_callbacks = [Speedometer(batch_size, config.LOG_FREQUENT,
batches_per_epoch=len(train_loader),
epochs=config.TRAIN.END_EPOCH - config.TRAIN.BEGIN_EPOCH)]
# setup lr step and lr scheduler
if config.TRAIN.LR_SCHEDULE == 'plateau':
print("Warning: not support resuming on plateau lr schedule!")
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
mode='max',
factor=config.TRAIN.LR_FACTOR,
patience=1,
verbose=True,
threshold=1e-4,
threshold_mode='rel',
cooldown=2,
min_lr=0,
eps=1e-8)
elif config.TRAIN.LR_SCHEDULE == 'triangle':
lr_scheduler = WarmupLinearSchedule(optimizer,
config.TRAIN.WARMUP_STEPS if config.TRAIN.WARMUP else 0,
t_total=int(config.TRAIN.END_EPOCH * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS),
last_epoch=int(config.TRAIN.BEGIN_EPOCH * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS) - 1)
elif config.TRAIN.LR_SCHEDULE == 'step':
lr_iters = [int(epoch * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS) for epoch in config.TRAIN.LR_STEP]
lr_scheduler = WarmupMultiStepLR(optimizer, milestones=lr_iters, gamma=config.TRAIN.LR_FACTOR,
warmup_factor=config.TRAIN.WARMUP_FACTOR,
warmup_iters=config.TRAIN.WARMUP_STEPS if config.TRAIN.WARMUP else 0,
warmup_method=config.TRAIN.WARMUP_METHOD,
last_epoch=int(config.TRAIN.BEGIN_EPOCH * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS) - 1)
else:
raise ValueError("Not support lr schedule: {}.".format(config.TRAIN.LR_SCHEDULE))
# broadcast parameter and optimizer state from rank 0 before training start
if args.dist:
for v in model.state_dict().values():
distributed.broadcast(v, src=0)
# for v in optimizer.state_dict().values():
# distributed.broadcast(v, src=0)
best_epoch = torch.tensor(validation_monitor.best_epoch).cuda()
best_val = torch.tensor(validation_monitor.best_val).cuda()
distributed.broadcast(best_epoch, src=0)
distributed.broadcast(best_val, src=0)
validation_monitor.best_epoch = best_epoch.item()
validation_monitor.best_val = best_val.item()
# apex: amp fp16 mixed-precision training
if config.TRAIN.FP16:
# model.apply(bn_fp16_half_eval)
model, optimizer = amp.initialize(model, optimizer,
opt_level='O2',
keep_batchnorm_fp32=False,
loss_scale=config.TRAIN.FP16_LOSS_SCALE,
min_loss_scale=32.0)
if args.dist:
model = Apex_DDP(model, delay_allreduce=True)
train(model, optimizer, lr_scheduler, train_loader, train_sampler, train_metrics,
config.TRAIN.BEGIN_EPOCH, config.TRAIN.END_EPOCH, logger,
rank=rank, batch_end_callbacks=batch_end_callbacks, epoch_end_callbacks=epoch_end_callbacks,
writer=writer, validation_monitor=validation_monitor, fp16=config.TRAIN.FP16,
clip_grad_norm=config.TRAIN.CLIP_GRAD_NORM,
gradient_accumulate_steps=config.TRAIN.GRAD_ACCUMULATE_STEPS)
return rank, model
| 17,600 | 51.228487 | 147 | py |
VLC-BERT | VLC-BERT-master/aokvqa/modules/resnet_vlbert_for_aokvqa.py | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from external.pytorch_pretrained_bert import BertTokenizer
from external.pytorch_pretrained_bert.modeling import BertPredictionHeadTransform
from common.module import Module
from common.fast_rcnn import FastRCNN
from common.visual_linguistic_bert import VisualLinguisticBert
BERT_WEIGHTS_NAME = 'pytorch_model.bin'
class ResNetVLBERT(Module):
def __init__(self, config):
super(ResNetVLBERT, self).__init__(config)
self.enable_cnn_reg_loss = config.NETWORK.ENABLE_CNN_REG_LOSS
if not config.NETWORK.BLIND:
self.image_feature_extractor = FastRCNN(config,
average_pool=True,
final_dim=config.NETWORK.IMAGE_FINAL_DIM,
enable_cnn_reg_loss=self.enable_cnn_reg_loss)
if config.NETWORK.VLBERT.object_word_embed_mode == 1:
self.object_linguistic_embeddings = nn.Embedding(81, config.NETWORK.VLBERT.hidden_size)
elif config.NETWORK.VLBERT.object_word_embed_mode == 2:
self.object_linguistic_embeddings = nn.Embedding(1, config.NETWORK.VLBERT.hidden_size)
elif config.NETWORK.VLBERT.object_word_embed_mode == 3:
self.object_linguistic_embeddings = None
else:
raise NotImplementedError
self.image_feature_bn_eval = config.NETWORK.IMAGE_FROZEN_BN
self.use_expansions = config.DATASET.COMMONSENSE_EXP_NAME != ''
self.commonsense_exp_name = config.NETWORK.VLBERT.commonsense_emb_type
self.tokenizer = BertTokenizer.from_pretrained(config.NETWORK.BERT_MODEL_NAME)
language_pretrained_model_path = None
if config.NETWORK.BERT_PRETRAINED != '':
language_pretrained_model_path = '{}-{:04d}.model'.format(config.NETWORK.BERT_PRETRAINED,
config.NETWORK.BERT_PRETRAINED_EPOCH)
elif os.path.isdir(config.NETWORK.BERT_MODEL_NAME):
weight_path = os.path.join(config.NETWORK.BERT_MODEL_NAME, BERT_WEIGHTS_NAME)
if os.path.isfile(weight_path):
language_pretrained_model_path = weight_path
self.language_pretrained_model_path = language_pretrained_model_path
if language_pretrained_model_path is None:
print("Warning: no pretrained language model found, training from scratch!!!")
self.vlbert = VisualLinguisticBert(config.NETWORK.VLBERT,
language_pretrained_model_path=language_pretrained_model_path)
# self.hm_out = nn.Linear(config.NETWORK.VLBERT.hidden_size, config.NETWORK.VLBERT.hidden_size)
# self.hi_out = nn.Linear(config.NETWORK.VLBERT.hidden_size, config.NETWORK.VLBERT.hidden_size)
dim = config.NETWORK.VLBERT.hidden_size
if config.NETWORK.CLASSIFIER_TYPE == "2fc":
self.final_mlp = torch.nn.Sequential(
torch.nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
torch.nn.Linear(dim, config.NETWORK.CLASSIFIER_HIDDEN_SIZE),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
torch.nn.Linear(config.NETWORK.CLASSIFIER_HIDDEN_SIZE, config.DATASET.ANSWER_VOCAB_SIZE),
)
elif config.NETWORK.CLASSIFIER_TYPE == "1fc":
self.final_mlp = torch.nn.Sequential(
torch.nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
torch.nn.Linear(dim, config.DATASET.ANSWER_VOCAB_SIZE)
)
elif config.NETWORK.CLASSIFIER_TYPE == 'mlm':
transform = BertPredictionHeadTransform(config.NETWORK.VLBERT)
linear = nn.Linear(config.NETWORK.VLBERT.hidden_size, config.DATASET.ANSWER_VOCAB_SIZE)
self.final_mlp = nn.Sequential(
transform,
nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
linear
)
else:
raise ValueError("Not support classifier type: {}!".format(config.NETWORK.CLASSIFIER_TYPE))
# init weights
self.init_weight()
self.fix_params()
def init_weight(self):
# self.hm_out.weight.data.normal_(mean=0.0, std=0.02)
# self.hm_out.bias.data.zero_()
# self.hi_out.weight.data.normal_(mean=0.0, std=0.02)
# self.hi_out.bias.data.zero_()
self.image_feature_extractor.init_weight()
if self.object_linguistic_embeddings is not None:
self.object_linguistic_embeddings.weight.data.normal_(mean=0.0, std=0.02)
for m in self.final_mlp.modules():
if isinstance(m, torch.nn.Linear):
torch.nn.init.xavier_uniform_(m.weight)
torch.nn.init.constant_(m.bias, 0)
if self.config.NETWORK.CLASSIFIER_TYPE == 'mlm':
language_pretrained = torch.load(self.language_pretrained_model_path)
mlm_transform_state_dict = {}
pretrain_keys = []
for k, v in language_pretrained.items():
if k.startswith('cls.predictions.transform.'):
pretrain_keys.append(k)
k_ = k[len('cls.predictions.transform.'):]
if 'gamma' in k_:
k_ = k_.replace('gamma', 'weight')
if 'beta' in k_:
k_ = k_.replace('beta', 'bias')
mlm_transform_state_dict[k_] = v
print("loading pretrained classifier transform keys: {}.".format(pretrain_keys))
self.final_mlp[0].load_state_dict(mlm_transform_state_dict)
def train(self, mode=True):
super(ResNetVLBERT, self).train(mode)
# turn some frozen layers to eval mode
if self.image_feature_bn_eval:
self.image_feature_extractor.bn_eval()
def fix_params(self):
pass
def _collect_obj_reps(self, span_tags, object_reps):
"""
Collect span-level object representations
:param span_tags: [batch_size, ..leading_dims.., L]
:param object_reps: [batch_size, max_num_objs_per_batch, obj_dim]
:return:
"""
span_tags_fixed = torch.clamp(span_tags, min=0) # In case there were masked values here
row_id = span_tags_fixed.new_zeros(span_tags_fixed.shape)
row_id_broadcaster = torch.arange(0, row_id.shape[0], step=1, device=row_id.device)[:, None]
        # Add extra dimensions to the row broadcaster so it matches row_id
leading_dims = len(span_tags.shape) - 2
for i in range(leading_dims):
row_id_broadcaster = row_id_broadcaster[..., None]
row_id += row_id_broadcaster
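        # Shape sketch (illustrative): with span_tags of shape [B, L] and object_reps of
        # shape [B, K, D], the gather below returns a [B, L, D] tensor whose (b, l) entry
        # is object_reps[b, span_tags[b, l]]; masked tags (-1) were clamped to 0, i.e. they
        # fall back to the first box (the whole-image box when add_image_as_a_box is used).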
return object_reps[row_id.view(-1), span_tags_fixed.view(-1)].view(*span_tags_fixed.shape, -1)
def prepare_text_from_qa(self, question, question_tags, question_mask, answer, answer_tags, answer_mask):
batch_size, max_q_len = question.shape
_, max_a_len = answer.shape
max_len = (question_mask.sum(1) + answer_mask.sum(1)).max() + 3
cls_id, sep_id = self.tokenizer.convert_tokens_to_ids(['[CLS]', '[SEP]'])
q_end = 1 + question_mask.sum(1, keepdim=True)
a_end = q_end + 1 + answer_mask.sum(1, keepdim=True)
input_ids = torch.zeros((batch_size, max_len), dtype=question.dtype, device=question.device)
input_mask = torch.ones((batch_size, max_len), dtype=torch.bool, device=question.device)
input_type_ids = torch.zeros((batch_size, max_len), dtype=question.dtype, device=question.device)
text_tags = input_type_ids.new_zeros((batch_size, max_len))
grid_i, grid_j = torch.meshgrid(torch.arange(batch_size, device=question.device),
torch.arange(max_len, device=question.device))
input_mask[grid_j > a_end] = 0
input_type_ids[(grid_j > q_end) & (grid_j <= a_end)] = 1
q_input_mask = (grid_j > 0) & (grid_j < q_end)
a_input_mask = (grid_j > q_end) & (grid_j < a_end)
input_ids[:, 0] = cls_id
input_ids[grid_j == q_end] = sep_id
input_ids[grid_j == a_end] = sep_id
input_ids[q_input_mask] = question[question_mask]
input_ids[a_input_mask] = answer[answer_mask]
text_tags[q_input_mask] = question_tags[question_mask]
text_tags[a_input_mask] = answer_tags[answer_mask]
return input_ids, input_type_ids, text_tags, input_mask, (a_end - 1).squeeze(1)
def prepare_text_from_qea(self, question, question_tags, question_mask, expansions, expansions_tags, expansions_mask, answer, answer_tags, answer_mask):
batch_size, max_q_len = question.shape
_, max_e_len = expansions.shape
_, max_a_len = answer.shape
max_len = (question_mask.sum(1) + expansions_mask.sum(1) + answer_mask.sum(1)).max() + 4
cls_id, sep_id = self.tokenizer.convert_tokens_to_ids(['[CLS]', '[SEP]'])
q_end = 1 + question_mask.sum(1, keepdim=True)
e_end = q_end + 1 + expansions_mask.sum(1, keepdim=True)
a_end = e_end + 1 + answer_mask.sum(1, keepdim=True)
# Define a new input sequence
input_ids = torch.zeros((batch_size, max_len), dtype=question.dtype, device=question.device)
input_mask = torch.ones((batch_size, max_len), dtype=torch.bool, device=question.device)
input_type_ids = torch.zeros((batch_size, max_len), dtype=question.dtype, device=question.device)
text_tags = input_type_ids.new_zeros((batch_size, max_len))
grid_i, grid_j = torch.meshgrid(torch.arange(batch_size, device=question.device),
torch.arange(max_len, device=question.device))
input_mask[grid_j > a_end] = 0
input_type_ids[(grid_j > q_end) & (grid_j <= e_end)] = 3
input_type_ids[(grid_j > e_end) & (grid_j <= a_end)] = 1
q_input_mask = (grid_j > 0) & (grid_j < q_end)
e_input_mask = (grid_j > q_end) & (grid_j < e_end)
a_input_mask = (grid_j > e_end) & (grid_j < a_end)
input_ids[:, 0] = cls_id
input_ids[grid_j == q_end] = sep_id
input_ids[grid_j == e_end] = sep_id
input_ids[grid_j == a_end] = sep_id
input_ids[q_input_mask] = question[question_mask]
input_ids[e_input_mask] = expansions[expansions_mask]
input_ids[a_input_mask] = answer[answer_mask]
text_tags[q_input_mask] = question_tags[question_mask]
text_tags[e_input_mask] = expansions_tags[expansions_mask]
text_tags[a_input_mask] = answer_tags[answer_mask]
#print('Inputs: ', input_ids, input_type_ids, text_tags, input_mask)
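        # Layout sketch (illustrative): the packed sequence is
        #   [CLS] question [SEP] expansions [SEP] [MASK] [SEP]
        # with token_type ids 0 / 3 / 1 for the three spans; the returned position
        # (a_end - 1) points at the [MASK] answer slot whose hidden state later feeds
        # the classifier.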
return input_ids, input_type_ids, text_tags, input_mask, (a_end - 1).squeeze(1)
def train_forward(self,
image,
boxes,
im_info,
question,
expansions,
commonsense_emb,
label
):
###########################################
# visual feature extraction
images = image
box_mask = (boxes[:, :, 0] > - 1.5)
max_len = int(box_mask.sum(1).max().item())
box_mask = box_mask[:, :max_len]
boxes = boxes[:, :max_len]
obj_reps = self.image_feature_extractor(images=images,
boxes=boxes,
box_mask=box_mask,
im_info=im_info,
classes=None,
segms=None)
question_ids = question
question_tags = question.new_zeros(question_ids.shape)
question_mask = (question > 0.5)
expansions_ids = expansions
expansions_tags = expansions.new_zeros(expansions_ids.shape)
expansions_mask = (expansions > 0.5)
answer_ids = question_ids.new_zeros((question_ids.shape[0], 1)).fill_(
self.tokenizer.convert_tokens_to_ids(['[MASK]'])[0])
answer_mask = question_mask.new_zeros(answer_ids.shape).fill_(1)
answer_tags = question_tags.new_zeros(answer_ids.shape)
############################################
# prepare text
if self.use_expansions:
text_input_ids, text_token_type_ids, text_tags, text_mask, ans_pos = self.prepare_text_from_qea(question_ids,
question_tags,
question_mask,
expansions_ids,
expansions_tags,
expansions_mask,
answer_ids,
answer_tags,
answer_mask)
else:
text_input_ids, text_token_type_ids, text_tags, text_mask, ans_pos = self.prepare_text_from_qa(question_ids,
question_tags,
question_mask,
answer_ids,
answer_tags,
answer_mask)
if self.config.NETWORK.NO_GROUNDING:
obj_rep_zeroed = obj_reps['obj_reps'].new_zeros(obj_reps['obj_reps'].shape)
text_tags.zero_()
text_visual_embeddings = self._collect_obj_reps(text_tags, obj_rep_zeroed)
else:
text_visual_embeddings = self._collect_obj_reps(text_tags, obj_reps['obj_reps'])
assert self.config.NETWORK.VLBERT.object_word_embed_mode == 2
object_linguistic_embeddings = self.object_linguistic_embeddings(
boxes.new_zeros((boxes.shape[0], boxes.shape[1])).long()
)
object_vl_embeddings = torch.cat((obj_reps['obj_reps'], object_linguistic_embeddings), -1)
###########################################
# Visual Linguistic BERT
hidden_states, hc, attn_weights = self.vlbert(text_input_ids,
text_token_type_ids,
text_visual_embeddings,
text_mask,
object_vl_embeddings,
box_mask,
commonsense_embeddings=commonsense_emb,
output_all_encoded_layers=False,
output_commonsense_attn_weights=True)
_batch_inds = torch.arange(question.shape[0], device=question.device)
hm = hidden_states[_batch_inds, ans_pos]
# hm = F.tanh(self.hm_out(hidden_states[_batch_inds, ans_pos]))
# hi = F.tanh(self.hi_out(hidden_states[_batch_inds, ans_pos + 2]))
###########################################
outputs = {}
# classifier
# logits = self.final_mlp(hc * hm * hi)
# logits = self.final_mlp(hc)
logits = self.final_mlp(hm)
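        # When WEAK_ATTN_LOSS is enabled, the dataloader appends MAX_COMMONSENSE_LEN + 1
        # attention targets to `label`; they are split off below and used to supervise the
        # head-averaged commonsense attention weights with an extra BCE term.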
if self.config.NETWORK.WEAK_ATTN_LOSS:
            max_c_len = -(self.config.DATASET.MAX_COMMONSENSE_LEN + 1)
attn_label = label[:, max_c_len:]
label = label[:, :max_c_len]
attn_weights = torch.mean(attn_weights, dim=1)
# loss
ans_loss = F.binary_cross_entropy_with_logits(logits, label) * label.size(1)
if self.config.NETWORK.WEAK_ATTN_LOSS:
loss_mask = attn_label.sum(1) > 0
attn_weights = attn_weights[loss_mask, :]
attn_label = attn_label[loss_mask, :]
if attn_label.sum() > 0:
attn_loss = F.binary_cross_entropy_with_logits(attn_weights, attn_label) * attn_label.size(1)
else:
attn_loss = 0
ans_loss = ans_loss + attn_loss
outputs.update({'label_logits': logits,
'label': label,
'ans_loss': ans_loss})
loss = ans_loss.mean()
return outputs, loss
def inference_forward(self,
image,
boxes,
im_info,
question,
expansions,
commonsense_emb):
###########################################
# visual feature extraction
images = image
box_mask = (boxes[:, :, 0] > - 1.5)
max_len = int(box_mask.sum(1).max().item())
box_mask = box_mask[:, :max_len]
boxes = boxes[:, :max_len]
obj_reps = self.image_feature_extractor(images=images,
boxes=boxes,
box_mask=box_mask,
im_info=im_info,
classes=None,
segms=None)
question_ids = question
question_tags = question.new_zeros(question_ids.shape)
question_mask = (question > 0.5)
expansions_ids = expansions
expansions_tags = expansions.new_zeros(expansions_ids.shape)
expansions_mask = (expansions > 0.5)
answer_ids = question_ids.new_zeros((question_ids.shape[0], 1)).fill_(
self.tokenizer.convert_tokens_to_ids(['[MASK]'])[0])
answer_mask = question_mask.new_zeros(answer_ids.shape).fill_(1)
answer_tags = question_tags.new_zeros(answer_ids.shape)
############################################
# prepare text
if self.use_expansions:
text_input_ids, text_token_type_ids, text_tags, text_mask, ans_pos = self.prepare_text_from_qea(question_ids,
question_tags,
question_mask,
expansions_ids,
expansions_tags,
expansions_mask,
answer_ids,
answer_tags,
answer_mask)
else:
text_input_ids, text_token_type_ids, text_tags, text_mask, ans_pos = self.prepare_text_from_qa(question_ids,
question_tags,
question_mask,
answer_ids,
answer_tags,
answer_mask)
if self.config.NETWORK.NO_GROUNDING:
obj_rep_zeroed = obj_reps['obj_reps'].new_zeros(obj_reps['obj_reps'].shape)
text_tags.zero_()
text_visual_embeddings = self._collect_obj_reps(text_tags, obj_rep_zeroed)
else:
text_visual_embeddings = self._collect_obj_reps(text_tags, obj_reps['obj_reps'])
assert self.config.NETWORK.VLBERT.object_word_embed_mode == 2
object_linguistic_embeddings = self.object_linguistic_embeddings(
boxes.new_zeros((boxes.shape[0], boxes.shape[1])).long()
)
object_vl_embeddings = torch.cat((obj_reps['obj_reps'], object_linguistic_embeddings), -1)
###########################################
# Visual Linguistic BERT
hidden_states, hc, attn_weights = self.vlbert(text_input_ids,
text_token_type_ids,
text_visual_embeddings,
text_mask,
object_vl_embeddings,
box_mask,
commonsense_embeddings=commonsense_emb,
output_all_encoded_layers=False,
output_commonsense_attn_weights=True)
_batch_inds = torch.arange(question.shape[0], device=question.device)
hm = hidden_states[_batch_inds, ans_pos]
# hm = F.tanh(self.hm_out(hidden_states[_batch_inds, ans_pos]))
# hi = F.tanh(self.hi_out(hidden_states[_batch_inds, ans_pos + 2]))
###########################################
outputs = {}
# classifier
# logits = self.final_mlp(hc * hm * hi)
# logits = self.final_mlp(hc)
logits = self.final_mlp(hm)
outputs.update({'label_logits': logits, 'attn_weights': attn_weights})
return outputs
| 22,529 | 50.674312 | 156 | py |
VLC-BERT | VLC-BERT-master/aokvqa/data/collate_batch.py | import torch
from common.utils.clip_pad import *
class BatchCollator(object):
def __init__(self, dataset, append_ind=False):
self.dataset = dataset
self.test_mode = self.dataset.test_mode
self.data_names = self.dataset.data_names
self.append_ind = append_ind
def __call__(self, batch):
if not isinstance(batch, list):
batch = list(batch)
if batch[0][self.data_names.index('image')] is not None:
max_shape = tuple(max(s) for s in zip(*[data[self.data_names.index('image')].shape for data in batch]))
image_none = False
else:
image_none = True
max_boxes = max([data[self.data_names.index('boxes')].shape[0] for data in batch])
max_question_length = max([len(data[self.data_names.index('question')]) for data in batch])
max_expansions_length = max([len(data[self.data_names.index('expansions')]) for data in batch])
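        # Padding conventions (kept in sync with the model): boxes are padded with -2 so
        # that `boxes[:, :, 0] > -1.5` recovers the valid-box mask downstream, while
        # question / expansion token ids are padded with 0 so that `ids > 0.5` recovers
        # the text masks.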
for i, ibatch in enumerate(batch):
out = {}
if image_none:
out['image'] = None
else:
image = ibatch[self.data_names.index('image')]
out['image'] = clip_pad_images(image, max_shape, pad=0)
boxes = ibatch[self.data_names.index('boxes')]
out['boxes'] = clip_pad_boxes(boxes, max_boxes, pad=-2)
question = ibatch[self.data_names.index('question')]
out['question'] = clip_pad_1d(question, max_question_length, pad=0)
expansions = ibatch[self.data_names.index('expansions')]
out['expansions'] = clip_pad_1d(expansions, max_expansions_length, pad=0)
other_names = [data_name for data_name in self.data_names if data_name not in out]
for name in other_names:
out[name] = torch.as_tensor(ibatch[self.data_names.index(name)])
batch[i] = tuple(out[data_name] for data_name in self.data_names)
if self.append_ind:
batch[i] += (torch.tensor(i, dtype=torch.int64),)
out_tuple = ()
for items in zip(*batch):
if items[0] is None:
out_tuple += (None,)
else:
out_tuple += (torch.stack(tuple(items), dim=0), )
return out_tuple
| 2,295 | 37.266667 | 115 | py |
VLC-BERT | VLC-BERT-master/aokvqa/data/build.py | import torch.utils.data
from .datasets import *
from . import samplers
from .transforms.build import build_transforms
from .collate_batch import BatchCollator
import pprint
DATASET_CATALOGS = {'aokvqa': AOKVQA}
def build_dataset(dataset_name, *args, **kwargs):
assert dataset_name in DATASET_CATALOGS, "dataset not in catalogs"
return DATASET_CATALOGS[dataset_name](*args, **kwargs)
def make_data_sampler(dataset, shuffle, distributed, num_replicas, rank):
if distributed:
return samplers.DistributedSampler(dataset, shuffle=shuffle, num_replicas=num_replicas, rank=rank)
if shuffle:
sampler = torch.utils.data.sampler.RandomSampler(dataset)
else:
sampler = torch.utils.data.sampler.SequentialSampler(dataset)
return sampler
def make_batch_data_sampler(dataset, sampler, aspect_grouping, batch_size):
if aspect_grouping:
group_ids = dataset.group_ids
batch_sampler = samplers.GroupedBatchSampler(
sampler, group_ids, batch_size, drop_uneven=False
)
else:
batch_sampler = torch.utils.data.sampler.BatchSampler(
sampler, batch_size, drop_last=False
)
return batch_sampler
def make_dataloader(cfg, dataset=None, mode='train', distributed=False, num_replicas=None, rank=None,
expose_sampler=False):
assert mode in ['train', 'val', 'test']
if mode == 'train':
ann_file = cfg.DATASET.TRAIN_ANNOTATION_FILE
image_set = cfg.DATASET.TRAIN_IMAGE_SET
aspect_grouping = cfg.TRAIN.ASPECT_GROUPING
num_gpu = len(cfg.GPUS.split(','))
batch_size = cfg.TRAIN.BATCH_IMAGES * num_gpu
shuffle = cfg.TRAIN.SHUFFLE
num_workers = cfg.NUM_WORKERS_PER_GPU * num_gpu
elif mode == 'val':
ann_file = cfg.DATASET.VAL_ANNOTATION_FILE
image_set = cfg.DATASET.VAL_IMAGE_SET
aspect_grouping = False
num_gpu = len(cfg.GPUS.split(','))
batch_size = cfg.VAL.BATCH_IMAGES * num_gpu
shuffle = cfg.VAL.SHUFFLE
num_workers = cfg.NUM_WORKERS_PER_GPU * num_gpu
else:
ann_file = cfg.DATASET.TEST_ANNOTATION_FILE
image_set = cfg.DATASET.TEST_IMAGE_SET
aspect_grouping = False
num_gpu = len(cfg.GPUS.split(','))
batch_size = cfg.TEST.BATCH_IMAGES * num_gpu
shuffle = cfg.TEST.SHUFFLE
num_workers = cfg.NUM_WORKERS_PER_GPU * num_gpu
transform = build_transforms(cfg, mode)
if dataset is None:
dataset = build_dataset(dataset_name=cfg.DATASET.DATASET, ann_file=ann_file, image_set=image_set,
use_imdb=cfg.DATASET.USE_IMDB,
with_precomputed_visual_feat=cfg.NETWORK.IMAGE_FEAT_PRECOMPUTED,
boxes=cfg.DATASET.BOXES,
answer_vocab_file=cfg.DATASET.ANSWER_VOCAB_FILE,
root_path=cfg.DATASET.ROOT_PATH, data_path=cfg.DATASET.DATASET_PATH,
test_mode=(mode == 'test'), transform=transform,
zip_mode=cfg.DATASET.ZIP_MODE, cache_mode=cfg.DATASET.CACHE_MODE,
cache_db=True if (rank is None or rank == 0) else False,
ignore_db_cache=cfg.DATASET.IGNORE_DB_CACHE,
add_image_as_a_box=cfg.DATASET.ADD_IMAGE_AS_A_BOX,
aspect_grouping=aspect_grouping,
mask_size=(cfg.DATASET.MASK_SIZE, cfg.DATASET.MASK_SIZE),
pretrained_model_name=cfg.NETWORK.BERT_MODEL_NAME,
use_sbert = cfg.DATASET.USE_SBERT,
commonsense_exp_name = cfg.DATASET.COMMONSENSE_EXP_NAME,
max_commonsense_len = cfg.DATASET.MAX_COMMONSENSE_LEN,
commonsense_emb_type = cfg.NETWORK.VLBERT.commonsense_emb_type,
learn_attn= cfg.NETWORK.WEAK_ATTN_LOSS)
sampler = make_data_sampler(dataset, shuffle, distributed, num_replicas, rank)
batch_sampler = make_batch_data_sampler(dataset, sampler, aspect_grouping, batch_size)
collator = BatchCollator(dataset=dataset, append_ind=cfg.DATASET.APPEND_INDEX)
dataloader = torch.utils.data.DataLoader(dataset=dataset,
batch_sampler=batch_sampler,
num_workers=num_workers,
pin_memory=False,
collate_fn=collator)
if expose_sampler:
return dataloader, sampler
return dataloader
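# Illustrative usage (mirrors the training script): something like
#   train_loader = make_dataloader(cfg, mode='train', distributed=False)
#   val_loader = make_dataloader(cfg, mode='val', distributed=False)
# builds the A-OKVQA loaders from the experiment config; pass expose_sampler=True to
# also get the sampler back (e.g. for DistributedSampler.set_epoch).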
| 4,753 | 44.27619 | 106 | py |
VLC-BERT | VLC-BERT-master/aokvqa/data/datasets/aokvqa.py | import os
import json
import _pickle as cPickle
from PIL import Image
import re
import base64
import numpy as np
import csv
import sys
import time
import logging
import pickle5 as pickle
import torch
from torch.utils.data import Dataset
from external.pytorch_pretrained_bert import BertTokenizer
from common.utils.zipreader import ZipReader
from common.utils.create_logger import makedirsExist
from pycocotools.coco import COCO
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ['image_id', 'image_w', 'image_h', 'num_boxes', 'boxes', 'features']
class AOKVQA(Dataset):
def __init__(self, image_set, root_path, data_path, answer_vocab_file, use_imdb=True,
with_precomputed_visual_feat=False, boxes="36",
transform=None, test_mode=False,
zip_mode=False, cache_mode=False, cache_db=True, ignore_db_cache=True,
tokenizer=None, pretrained_model_name=None,
add_image_as_a_box=False, mask_size=(14, 14),
aspect_grouping=False, use_sbert=False, commonsense_exp_name='', max_commonsense_len=5,
commonsense_emb_type='', learn_attn=False, **kwargs):
"""
Visual Question Answering Dataset
:param image_set: image folder name
:param root_path: root path to cache database loaded from annotation file
        :param data_path: path to the A-OKVQA (COCO) dataset
:param transform: transform
:param test_mode: test mode means no labels available
:param zip_mode: reading images and metadata in zip archive
:param cache_mode: cache whole dataset to RAM first, then __getitem__ read them from RAM
:param ignore_db_cache: ignore previous cached database, reload it from annotation file
:param tokenizer: default is BertTokenizer from pytorch_pretrained_bert
:param add_image_as_a_box: add whole image as a box
:param mask_size: size of instance mask of each object
:param aspect_grouping: whether to group images via their aspect
:param kwargs:
"""
super(AOKVQA, self).__init__()
        assert not cache_mode, 'cache mode is currently not supported!'
aokvqa_question = {
"train2017": "aokvqa/aokvqa_v1p0_train.json",
"val2017": "aokvqa/aokvqa_v1p0_val.json",
"test2017": "aokvqa/aokvqa_v1p0_test.json",
}
if boxes == "36":
precomputed_boxes = {
'train2017': ("vgbua_res101_precomputed", "trainval_resnet101_faster_rcnn_genome_36"),
'val2017': ("vgbua_res101_precomputed", "trainval_resnet101_faster_rcnn_genome_36"),
'test2017': ("vgbua_res101_precomputed", "test2015_resnet101_faster_rcnn_genome_36"),
}
elif boxes == "10-100ada":
precomputed_boxes = {
'train2017': ("vgbua_res101_precomputed", "trainval2014_resnet101_faster_rcnn_genome"),
'val2017': ("vgbua_res101_precomputed", "trainval2014_resnet101_faster_rcnn_genome"),
'test2017': ("vgbua_res101_precomputed", "test2015_resnet101_faster_rcnn_genome"),
}
else:
raise ValueError("Not support boxes: {}!".format(boxes))
coco_dataset = {
"train2017": ("train2017", "annotations/instances_train2017.json"),
"val2017": ("val2017", "annotations/instances_val2017.json"),
"test2017": ("test2017", "annotations/image_info_test2017.json"),
}
commonsense_path = "data/coco/aokvqa/commonsense/"
self.experiment_name = commonsense_exp_name
self.use_sbert = use_sbert
self.max_commonsense_len = max_commonsense_len
self.commonsense_emb_type = commonsense_emb_type
self.learn_attn = learn_attn
if self.experiment_name == 'semqo':
aokvqa_expansions = {
'train2017': commonsense_path+'expansions/semq.o_aokvqa_train.json',
'val2017': commonsense_path+'expansions/semq.o_aokvqa_val.json',
'test2017': commonsense_path+'expansions/semq.o_aokvqa_test.json',
}
self.periodStrip = re.compile("(?!<=\d)(\.)(?!\d)")
self.commaStrip = re.compile("(\d)(\,)(\d)")
self.punct = [';', r"/", '[', ']', '"', '{', '}',
'(', ')', '=', '+', '\\', '_', '-',
'>', '<', '@', '`', ',', '?', '!']
print("Loading OK-VQA dataset: ", image_set)
self.boxes = boxes
self.test_mode = test_mode
self.with_precomputed_visual_feat = with_precomputed_visual_feat
self.data_path = data_path
self.root_path = root_path
with open(answer_vocab_file, 'r', encoding='utf8') as f:
self.answer_vocab = [w.lower().strip().strip('\r').strip('\n').strip('\r') for w in f.readlines()]
self.answer_vocab = list(filter(lambda x: x != '', self.answer_vocab))
self.answer_vocab = [self.processPunctuation(w) for w in self.answer_vocab]
self.image_sets = [iset.strip() for iset in image_set.split('+')]
self.q_files = [os.path.join(data_path, aokvqa_question[iset]) for iset in self.image_sets]
self.expansion_files = [aokvqa_expansions[iset] for iset in self.image_sets] \
if (self.experiment_name != '') else [None for iset in self.image_sets]
self.precomputed_box_files = [
os.path.join(data_path, precomputed_boxes[iset][0],
'{0}.zip@/{0}'.format(precomputed_boxes[iset][1])
if zip_mode else precomputed_boxes[iset][1])
for iset in self.image_sets]
self.box_bank = {}
self.coco_datasets = [(os.path.join(data_path,
coco_dataset[iset][0],
'{{:012d}}.jpg'.format(coco_dataset[iset][0]))
if not zip_mode else
os.path.join(data_path,
coco_dataset[iset][0] + '.zip@/' + coco_dataset[iset][0],
'{{:012d}}.jpg'.format(coco_dataset[iset][0])),
os.path.join(data_path, coco_dataset[iset][1]))
for iset in self.image_sets]
self.transform = transform
self.zip_mode = zip_mode
self.cache_mode = cache_mode
self.cache_db = cache_db
self.ignore_db_cache = ignore_db_cache
self.aspect_grouping = aspect_grouping
self.cache_dir = os.path.join(root_path, 'cache')
self.add_image_as_a_box = add_image_as_a_box
self.mask_size = mask_size
if not os.path.exists(self.cache_dir):
makedirsExist(self.cache_dir)
self.tokenizer = tokenizer if tokenizer is not None \
else BertTokenizer.from_pretrained(
'bert-base-uncased' if pretrained_model_name is None else pretrained_model_name,
cache_dir=self.cache_dir)
if zip_mode:
self.zipreader = ZipReader()
self.database = self.load_annotations()
if self.aspect_grouping:
self.group_ids = self.group_aspect(self.database)
self.attn_gt = None
if self.learn_attn and not self.test_mode:
self.attn_gt = self._load_json('data/coco/aokvqa/'+self.experiment_name+'_aokvqa_train_attn_annot_'+str(self.max_commonsense_len)+'.json')
@property
def data_names(self):
if self.test_mode:
return ['image', 'boxes', 'im_info', 'question', 'expansions', 'c_emb']
else:
return ['image', 'boxes', 'im_info', 'question', 'expansions', 'c_emb', 'label']
def __getitem__(self, index):
idb = self.database[index]
# image, boxes, im_info
boxes_data = self._load_json(idb['box_fn'])
if self.with_precomputed_visual_feat:
image = None
w0, h0 = idb['width'], idb['height']
boxes_features = torch.tensor(
np.frombuffer(self.b64_decode(boxes_data['features']), dtype=np.float32).reshape((boxes_data['num_boxes'], -1))
)
else:
image = self._load_image(idb['image_fn'])
w0, h0 = image.size
boxes = torch.tensor(
np.frombuffer(self.b64_decode(boxes_data['boxes']), dtype=np.float32).reshape(
(boxes_data['num_boxes'], -1))
)
if self.add_image_as_a_box:
image_box = torch.as_tensor([[0.0, 0.0, w0 - 1, h0 - 1]])
boxes = torch.cat((image_box, boxes), dim=0)
if self.with_precomputed_visual_feat:
if 'image_box_feature' in boxes_data:
image_box_feature = torch.as_tensor(
np.frombuffer(
self.b64_decode(boxes_data['image_box_feature']), dtype=np.float32
).reshape((1, -1))
)
else:
image_box_feature = boxes_features.mean(0, keepdim=True)
boxes_features = torch.cat((image_box_feature, boxes_features), dim=0)
im_info = torch.tensor([w0, h0, 1.0, 1.0])
flipped = False
if self.transform is not None:
image, boxes, _, im_info, flipped = self.transform(image, boxes, None, im_info, flipped)
# clamp boxes
w = im_info[0].item()
h = im_info[1].item()
boxes[:, [0, 2]] = boxes[:, [0, 2]].clamp(min=0, max=w - 1)
boxes[:, [1, 3]] = boxes[:, [1, 3]].clamp(min=0, max=h - 1)
# flip: 'left' -> 'right', 'right' -> 'left'
q_tokens = self.tokenizer.tokenize(idb['question'])
if flipped:
q_tokens = self.flip_tokens(q_tokens, verbose=False)
if not self.test_mode:
answers = idb['answers']
if flipped:
answers_tokens = [a.split(' ') for a in answers]
answers_tokens = [self.flip_tokens(a_toks, verbose=False) for a_toks in answers_tokens]
answers = [' '.join(a_toks) for a_toks in answers_tokens]
label = self.get_soft_target(answers)
# question
q_retokens = q_tokens
q_ids = self.tokenizer.convert_tokens_to_ids(q_retokens)
# commonsense
exp_ids = []
commonsense_embeddings = torch.tensor([0])
if self.experiment_name != '':
            # If we use SBERT, add [MASK] tokens to exp_ids and load the embeddings into commonsense_embeddings
if self.use_sbert:
if self.commonsense_emb_type == 'fusion':
commonsense_embeddings = self.get_cached_expansion_emb(idb['image_fn'].split('/')[-1], idb['question_id'], custom_tag='_ques')
else:
commonsense_embeddings = self.get_cached_expansion_emb(idb['image_fn'].split('/')[-1], idb['question_id'])
# Now that we have commonsense embeddings, we add the [MASK] tokens that will be replaced by the commonsense embeddings in training code
if self.commonsense_emb_type == 'fusion':
m_tokens = ['[MASK]']
else:
m_tokens = ['[MASK]']*self.max_commonsense_len
m_ids = self.tokenizer.convert_tokens_to_ids(m_tokens)
exp_ids += m_ids
# If not SBERT, clean the picked expansions and add them to exp_ids
else:
                # We use picked expansions from the knowledge selection process
picked_exp = idb['picked_exp']
if isinstance(picked_exp, list):
picked_exp = picked_exp[0]
picked_exp = picked_exp.split('.')
picked_exp = [sentence.strip() for sentence in picked_exp]
picked_exp = [sentence+'.' for sentence in picked_exp if sentence != '']
if len(picked_exp) >= self.max_commonsense_len:
picked_exp = picked_exp[:self.max_commonsense_len]
else:
picked_exp = picked_exp + [''] * (self.max_commonsense_len - len(picked_exp))
picked_exp = ' '.join(picked_exp)
picked_exp_tokens = self.tokenizer.tokenize(picked_exp)
exp_ids += self.tokenizer.convert_tokens_to_ids(picked_exp_tokens)
# concat box feature to box
if self.with_precomputed_visual_feat:
boxes = torch.cat((boxes, boxes_features), dim=-1)
if self.attn_gt is not None:
if str(idb['image_id']) in self.attn_gt and idb['question_id'] in self.attn_gt[str(idb['image_id'])]:
attn_weight_label = torch.tensor(self.attn_gt[str(idb['image_id'])][idb['question_id']])
else:
attn_weight_label = torch.zeros(self.max_commonsense_len+1)
label = torch.cat((label, attn_weight_label), dim=0)
if self.test_mode:
return image, boxes, im_info, q_ids, exp_ids, commonsense_embeddings
else:
return image, boxes, im_info, q_ids, exp_ids, commonsense_embeddings, label
@staticmethod
def flip_tokens(tokens, verbose=True):
changed = False
tokens_new = [tok for tok in tokens]
for i, tok in enumerate(tokens):
if tok == 'left':
tokens_new[i] = 'right'
changed = True
elif tok == 'right':
tokens_new[i] = 'left'
changed = True
if verbose and changed:
logging.info('[Tokens Flip] {} -> {}'.format(tokens, tokens_new))
return tokens_new
@staticmethod
def b64_decode(string):
return base64.decodebytes(string.encode())
def answer_to_ind(self, answer):
if answer in self.answer_vocab:
return self.answer_vocab.index(answer)
else:
return self.answer_vocab.index('<unk>')
def get_soft_target(self, answers):
soft_target = torch.zeros(len(self.answer_vocab), dtype=torch.float)
answer_indices = [self.answer_to_ind(answer) for answer in answers]
gt_answers = list(enumerate(answer_indices))
unique_answers = set(answer_indices)
for answer in unique_answers:
accs = []
for gt_answer in gt_answers:
other_answers = [item for item in gt_answers if item != gt_answer]
matching_answers = [item for item in other_answers if item[1] == answer]
acc = min(1, float(len(matching_answers)) / 3)
accs.append(acc)
avg_acc = sum(accs) / len(accs)
if answer != self.answer_vocab.index('<unk>'):
soft_target[answer] = avg_acc
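        # Worked example (illustrative): with 10 direct answers, 8 x "dog" and 2 x "cat",
        # the leave-one-out rule above yields a soft target of 1.0 for "dog" and
        # (8 * min(1, 2/3) + 2 * min(1, 1/3)) / 10 = 0.6 for "cat", mirroring the VQA
        # accuracy metric.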
return soft_target
def processPunctuation(self, inText):
if inText == '<unk>':
return inText
outText = inText
for p in self.punct:
if (p + ' ' in inText or ' ' + p in inText) or (re.search(self.commaStrip, inText) != None):
outText = outText.replace(p, '')
else:
outText = outText.replace(p, ' ')
outText = self.periodStrip.sub("",
outText,
re.UNICODE)
return outText
def load_annotations(self):
tic = time.time()
database = []
db_cache_name = 'aokvqa_boxes{}_{}'.format(self.boxes, '+'.join(self.image_sets))
if self.with_precomputed_visual_feat:
db_cache_name += 'visualprecomp'
if self.zip_mode:
db_cache_name = db_cache_name + '_zipmode'
if self.test_mode:
db_cache_name = db_cache_name + '_testmode'
if self.experiment_name != '':
db_cache_name = db_cache_name + '_' + self.experiment_name
db_cache_root = os.path.join(self.root_path, 'cache')
db_cache_path = os.path.join(db_cache_root, '{}.pkl'.format(db_cache_name))
if os.path.exists(db_cache_path):
if not self.ignore_db_cache:
# reading cached database
print('cached database found in {}.'.format(db_cache_path))
with open(db_cache_path, 'rb') as f:
print('loading cached database from {}...'.format(db_cache_path))
tic = time.time()
database = cPickle.load(f)
print('Done (t={:.2f}s)'.format(time.time() - tic))
return database
else:
print('cached database ignored.')
# ignore or not find cached database, reload it from annotation file
print('loading database of split {}...'.format('+'.join(self.image_sets)))
tic = time.time()
for q_file, expansion_file, (coco_path, coco_annot), box_file \
in zip(self.q_files, self.expansion_files, self.coco_datasets, self.precomputed_box_files):
qs = self._load_json(q_file)
expansion_data = self._load_json(expansion_file)
coco = COCO(coco_annot)
for q in qs:
idb = {'image_id': q['image_id'],
'image_fn': coco_path.format(q['image_id']),
'width': coco.imgs[q['image_id']]['width'],
'height': coco.imgs[q['image_id']]['height'],
'box_fn': os.path.join(box_file, '{}.json'.format(q['image_id'])),
'question_id': q['question_id'],
'question': q['question'],
"picked_exp": expansion_data[str(coco_path.format(q['image_id']).split('/')[-1])][str(q['question_id'])] if (self.experiment_name != '') else None,
"rationales": q['rationales'] if self.experiment_name == 'rationales' else None,
'answers': q['direct_answers'] if not self.test_mode else None,
"question_type": "other" if not self.test_mode else None,
"answer_type": "other" if not self.test_mode else None,
}
database.append(idb)
print('Done (t={:.2f}s)'.format(time.time() - tic))
# cache database via cPickle
if self.cache_db:
print('caching database to {}...'.format(db_cache_path))
tic = time.time()
if not os.path.exists(db_cache_root):
makedirsExist(db_cache_root)
with open(db_cache_path, 'wb') as f:
cPickle.dump(database, f)
print('Done (t={:.2f}s)'.format(time.time() - tic))
return database
@staticmethod
def group_aspect(database):
print('grouping aspect...')
t = time.time()
# get shape of all images
widths = torch.as_tensor([idb['width'] for idb in database])
heights = torch.as_tensor([idb['height'] for idb in database])
# group
group_ids = torch.zeros(len(database))
horz = widths >= heights
        vert = widths < heights  # avoids bool-tensor subtraction; equivalent to "not horz"
group_ids[horz] = 0
group_ids[vert] = 1
print('Done (t={:.2f}s)'.format(time.time() - t))
return group_ids
def load_precomputed_boxes(self, box_file):
if box_file in self.box_bank:
return self.box_bank[box_file]
else:
in_data = {}
with open(box_file, "r") as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames=FIELDNAMES)
for item in reader:
item['image_id'] = int(item['image_id'])
item['image_h'] = int(item['image_h'])
item['image_w'] = int(item['image_w'])
item['num_boxes'] = int(item['num_boxes'])
for field in (['boxes', 'features'] if self.with_precomputed_visual_feat else ['boxes']):
item[field] = np.frombuffer(base64.decodebytes(item[field].encode()),
dtype=np.float32).reshape((item['num_boxes'], -1))
in_data[item['image_id']] = item
self.box_bank[box_file] = in_data
return in_data
def get_cached_expansion_emb(self, image_id, question_id, custom_tag=''):
commonsense_embeddings = None
for subset in self.image_sets:
savepath = 'data/coco/sbert/aokvqa/'+self.experiment_name+'/'+str(self.max_commonsense_len)+custom_tag+'/'+subset
image_id = str(image_id)
question_id = str(question_id)
if not os.path.exists(savepath+'/'+image_id+'.pkl'):
continue
with open(savepath+'/'+image_id+'.pkl', 'rb') as handle:
unserialized_data = pickle.load(handle)
commonsense_embeddings = torch.tensor(unserialized_data[question_id])
assert commonsense_embeddings is not None, 'No expansion embedding found at {}'.format(savepath+'/'+image_id+'.pkl')
return commonsense_embeddings
def __len__(self):
return len(self.database)
def _load_image(self, path):
if '.zip@' in path:
return self.zipreader.imread(path).convert('RGB')
else:
return Image.open(path).convert('RGB')
def _load_json(self, path):
        if path is None:
return None
elif '.zip@' in path:
f = self.zipreader.read(path)
return json.loads(f.decode())
else:
with open(path, 'r') as f:
return json.load(f)
| 21,774 | 42.812877 | 171 | py |
VLC-BERT | VLC-BERT-master/aokvqa/data/samplers/grouped_batch_sampler.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import itertools
import torch
from torch.utils.data.sampler import BatchSampler
from torch.utils.data.sampler import Sampler
class GroupedBatchSampler(BatchSampler):
"""
Wraps another sampler to yield a mini-batch of indices.
It enforces that elements from the same group should appear in groups of batch_size.
It also tries to provide mini-batches which follows an ordering which is
as close as possible to the ordering from the original sampler.
Arguments:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_uneven (bool): If ``True``, the sampler will drop the batches whose
size is less than ``batch_size``
"""
def __init__(self, sampler, group_ids, batch_size, drop_uneven=False):
if not isinstance(sampler, Sampler):
raise ValueError(
"sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}".format(sampler)
)
self.sampler = sampler
self.group_ids = torch.as_tensor(group_ids)
assert self.group_ids.dim() == 1
self.batch_size = batch_size
self.drop_uneven = drop_uneven
self.groups = torch.unique(self.group_ids).sort(0)[0]
self._can_reuse_batches = False
def _prepare_batches(self):
dataset_size = len(self.group_ids)
# get the sampled indices from the sampler
sampled_ids = torch.as_tensor(list(self.sampler))
# potentially not all elements of the dataset were sampled
# by the sampler (e.g., DistributedSampler).
# construct a tensor which contains -1 if the element was
# not sampled, and a non-negative number indicating the
# order where the element was sampled.
# for example. if sampled_ids = [3, 1] and dataset_size = 5,
# the order is [-1, 1, -1, 0, -1]
order = torch.full((dataset_size,), -1, dtype=torch.int64)
order[sampled_ids] = torch.arange(len(sampled_ids))
# get a mask with the elements that were sampled
mask = order >= 0
# find the elements that belong to each individual cluster
clusters = [(self.group_ids == i) & mask for i in self.groups]
# get relative order of the elements inside each cluster
# that follows the order from the sampler
relative_order = [order[cluster] for cluster in clusters]
# with the relative order, find the absolute order in the
# sampled space
permutation_ids = [s[s.sort()[1]] for s in relative_order]
# permute each cluster so that they follow the order from
# the sampler
permuted_clusters = [sampled_ids[idx] for idx in permutation_ids]
# splits each cluster in batch_size, and merge as a list of tensors
splits = [c.split(self.batch_size) for c in permuted_clusters]
merged = tuple(itertools.chain.from_iterable(splits))
# now each batch internally has the right order, but
# they are grouped by clusters. Find the permutation between
# different batches that brings them as close as possible to
# the order that we have in the sampler. For that, we will consider the
# ordering as coming from the first element of each batch, and sort
# correspondingly
first_element_of_batch = [t[0].item() for t in merged]
# get and inverse mapping from sampled indices and the position where
# they occur (as returned by the sampler)
inv_sampled_ids_map = {v: k for k, v in enumerate(sampled_ids.tolist())}
# from the first element in each batch, get a relative ordering
first_index_of_batch = torch.as_tensor(
[inv_sampled_ids_map[s] for s in first_element_of_batch]
)
# permute the batches so that they approximately follow the order
# from the sampler
permutation_order = first_index_of_batch.sort(0)[1].tolist()
# finally, permute the batches
batches = [merged[i].tolist() for i in permutation_order]
if self.drop_uneven:
kept = []
for batch in batches:
if len(batch) == self.batch_size:
kept.append(batch)
batches = kept
return batches
def __iter__(self):
if self._can_reuse_batches:
batches = self._batches
self._can_reuse_batches = False
else:
batches = self._prepare_batches()
self._batches = batches
return iter(batches)
def __len__(self):
if not hasattr(self, "_batches"):
self._batches = self._prepare_batches()
self._can_reuse_batches = True
return len(self._batches)
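# Illustrative sketch (not part of the original file): how the sampler batches a toy
# dataset whose aspect-ratio group ids are made up for demonstration.
def _grouped_batch_sampler_demo():
    from torch.utils.data.sampler import SequentialSampler
    group_ids = [0, 1, 0, 1, 0, 1]  # e.g. 0 = landscape, 1 = portrait
    sampler = SequentialSampler(range(len(group_ids)))
    batches = list(GroupedBatchSampler(sampler, group_ids, batch_size=2))
    # Each batch draws from a single group: [[0, 2], [1, 3], [4], [5]]
    return batches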
| 4,846 | 40.42735 | 88 | py |
VLC-BERT | VLC-BERT-master/aokvqa/data/samplers/distributed.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Code is copy-pasted exactly as in torch.utils.data.distributed.
# FIXME remove this once c10d fixes the bug it has
import math
import torch
import torch.distributed as dist
from torch.utils.data.sampler import Sampler
class DistributedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
"""
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
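        # Numeric sketch (illustrative): with len(dataset) == 10 and num_replicas == 4,
        # num_samples == 3 and total_size == 12; __iter__ then pads the index list with
        # its first two entries so every rank reads a disjoint slice of length 3.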
def __iter__(self):
if self.shuffle:
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
indices += indices[: (self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
offset = self.num_samples * self.rank
indices = indices[offset : offset + self.num_samples]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch | 2,568 | 37.924242 | 86 | py |
VLC-BERT | VLC-BERT-master/aokvqa/data/transforms/transforms.py | import random
import numpy as np
import torch
import torchvision
from torchvision.transforms import functional as F
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, boxes, masks, im_info, flipped):
for t in self.transforms:
image, boxes, masks, im_info, flipped = t(image, boxes, masks, im_info, flipped)
return image, boxes, masks, im_info, flipped
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
class Resize(object):
def __init__(self, min_size, max_size):
self.min_size = min_size
self.max_size = max_size
# modified from torchvision to add support for max size
def get_size(self, image_size):
w, h = image_size
size = self.min_size
max_size = self.max_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(max_size * min_original_size / max_original_size)
if (w <= h and w == size) or (h <= w and h == size):
return (w, h)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (ow, oh)
def __call__(self, image, boxes, masks, im_info, flipped):
origin_size = im_info[:2]
size = self.get_size(origin_size)
if image is not None:
image = F.resize(image, (size[1], size[0]))
ratios = [size[0] * 1.0 / origin_size[0], size[1] * 1.0 / origin_size[1]]
if boxes is not None:
boxes[:, [0, 2]] *= ratios[0]
boxes[:, [1, 3]] *= ratios[1]
im_info[0], im_info[1] = size
im_info[2], im_info[3] = ratios
return image, boxes, masks, im_info, flipped
class RandomHorizontalFlip(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, boxes, masks, im_info, flipped):
if random.random() < self.prob:
w, h = im_info[:2]
if image is not None:
image = F.hflip(image)
if boxes is not None:
boxes[:, [0, 2]] = w - 1 - boxes[:, [2, 0]]
if masks is not None:
masks = torch.as_tensor(masks.numpy()[:, :, ::-1].tolist())
flipped = not flipped
return image, boxes, masks, im_info, flipped
class ToTensor(object):
def __call__(self, image, boxes, masks, im_info, flipped):
return F.to_tensor(image) if image is not None else image, boxes, masks, im_info, flipped
class Normalize(object):
def __init__(self, mean, std, to_bgr255=True):
self.mean = mean
self.std = std
self.to_bgr255 = to_bgr255
def __call__(self, image, boxes, masks, im_info, flipped):
if image is not None:
if self.to_bgr255:
image = image[[2, 1, 0]] * 255
image = F.normalize(image, mean=self.mean, std=self.std)
return image, boxes, masks, im_info, flipped
class FixPadding(object):
def __init__(self, min_size, max_size, pad=0):
self.min_size = min_size
self.max_size = max_size
self.pad = pad
def __call__(self, image, boxes, masks, im_info, flipped):
if image is not None:
# padding to fixed size for determinacy
c, h, w = image.shape
if h <= w:
h1 = self.min_size
w1 = self.max_size
else:
h1 = self.max_size
w1 = self.min_size
padded_image = image.new_zeros((c, h1, w1)).fill_(self.pad)
padded_image[:, :h, :w] = image
image = padded_image
return image, boxes, masks, im_info, flipped
| 4,104 | 30.821705 | 97 | py |
VLC-BERT | VLC-BERT-master/external/pytorch_pretrained_bert/optimization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
def warmup_cosine(x, warmup=0.002):
if x < warmup:
return x/warmup
    return 0.5 * (1.0 + math.cos(math.pi * x))  # math.cos: x is a Python float, not a tensor
def warmup_constant(x, warmup=0.002):
if x < warmup:
return x/warmup
return 1.0
def warmup_linear(x, warmup=0.002):
if x < warmup:
return x/warmup
return 1.0 - x
SCHEDULES = {
'warmup_cosine':warmup_cosine,
'warmup_constant':warmup_constant,
'warmup_linear':warmup_linear,
}
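# Illustrative values (not part of the original file): the schedules map training
# progress x = global_step / t_total to an LR multiplier.  With warmup=0.1,
# warmup_linear gives 0.5 at x=0.05 (ramp-up), 0.5 at x=0.5 (linear decay) and
# 0.0 at x=1.0.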
class BertAdam(Optimizer):
"""Implements BERT version of Adam algorithm with weight decay fix.
Params:
lr: learning rate
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate. Default: -1
schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
b1: Adams b1. Default: 0.9
b2: Adams b2. Default: 0.999
e: Adams epsilon. Default: 1e-6
weight_decay: Weight decay. Default: 0.01
max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
"""
def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,
max_grad_norm=1.0):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
b1=b1, b2=b2, e=e, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(BertAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
next_m.mul_(beta1).add_(1 - beta1, grad)
next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)
update = next_m / (next_v.sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
# step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
# No bias correction
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
return loss
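# Minimal usage sketch (not part of the original file). It assumes `model` is an
# nn.Module whose parameters are known by name, and it follows the common
# convention of exempting biases and LayerNorm weights from weight decay; the
# hyper-parameter values are illustrative.
def _example_bert_adam_setup(model, num_train_steps):
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0},
    ]
    return BertAdam(grouped_parameters, lr=5e-5, warmup=0.1, t_total=num_train_steps)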
| 6,803 | 40.742331 | 116 | py |
VLC-BERT | VLC-BERT-master/external/pytorch_pretrained_bert/optimization_openai.py | # coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for OpenAI GPT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
def warmup_cosine(x, warmup=0.002):
s = 1 if x <= warmup else 0
    return s*(x/warmup) + (1-s)*(0.5 * (1 + math.cos(math.pi * x)))
def warmup_constant(x, warmup=0.002):
s = 1 if x <= warmup else 0
return s*(x/warmup) + (1-s)*1
def warmup_linear(x, warmup=0.002):
s = 1 if x <= warmup else 0
return (s*(x/warmup) + (1-s))*(1-x)
SCHEDULES = {
'warmup_cosine':warmup_cosine,
'warmup_constant':warmup_constant,
'warmup_linear':warmup_linear,
}
class OpenAIAdam(Optimizer):
"""Implements Open AI version of Adam algorithm with weight decay fix.
"""
def __init__(self, params, lr=required, schedule='warmup_linear', warmup=-1, t_total=-1,
b1=0.9, b2=0.999, e=1e-8, weight_decay=0,
vector_l2=False, max_grad_norm=-1, **kwargs):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {}".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {}".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {}".format(e))
defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
b1=b1, b2=b2, e=e, weight_decay=weight_decay, vector_l2=vector_l2,
max_grad_norm=max_grad_norm)
super(OpenAIAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['b1'], group['b2']
state['step'] += 1
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
denom = exp_avg_sq.sqrt().add_(group['e'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(-step_size, exp_avg, denom)
# Add weight decay at the end (fixed version)
if (len(p.size()) > 1 or group['vector_l2']) and group['weight_decay'] > 0:
p.data.add_(-lr_scheduled * group['weight_decay'], p.data)
return loss
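# Minimal usage sketch (not part of the original file); the learning rate and
# warmup fraction are illustrative values, not defaults prescribed by this class.
def _example_openai_adam_setup(model, num_train_steps):
    return OpenAIAdam(model.parameters(), lr=6.25e-5, warmup=0.002,
                      t_total=num_train_steps, weight_decay=0.01,
                      max_grad_norm=1.0)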
| 5,661 | 39.156028 | 116 | py |
VLC-BERT | VLC-BERT-master/external/pytorch_pretrained_bert/__main__.py | # coding: utf8
def main():
import sys
if (len(sys.argv) != 4 and len(sys.argv) != 5) or sys.argv[1] not in [
"convert_tf_checkpoint_to_pytorch",
"convert_openai_checkpoint",
"convert_transfo_xl_checkpoint",
"convert_gpt2_checkpoint",
]:
print(
"Should be used as one of: \n"
">> `pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`, \n"
">> `pytorch_pretrained_bert convert_openai_checkpoint OPENAI_GPT_CHECKPOINT_FOLDER_PATH PYTORCH_DUMP_OUTPUT [OPENAI_GPT_CONFIG]`, \n"
">> `pytorch_pretrained_bert convert_transfo_xl_checkpoint TF_CHECKPOINT_OR_DATASET PYTORCH_DUMP_OUTPUT [TF_CONFIG]` or \n"
">> `pytorch_pretrained_bert convert_gpt2_checkpoint TF_CHECKPOINT PYTORCH_DUMP_OUTPUT [GPT2_CONFIG]`")
else:
if sys.argv[1] == "convert_tf_checkpoint_to_pytorch":
try:
from .convert_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
if len(sys.argv) != 5:
# pylint: disable=line-too-long
print("Should be used as `pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`")
else:
PYTORCH_DUMP_OUTPUT = sys.argv.pop()
TF_CONFIG = sys.argv.pop()
TF_CHECKPOINT = sys.argv.pop()
convert_tf_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT)
elif sys.argv[1] == "convert_openai_checkpoint":
from .convert_openai_checkpoint_to_pytorch import convert_openai_checkpoint_to_pytorch
OPENAI_GPT_CHECKPOINT_FOLDER_PATH = sys.argv[2]
PYTORCH_DUMP_OUTPUT = sys.argv[3]
if len(sys.argv) == 5:
OPENAI_GPT_CONFIG = sys.argv[4]
else:
OPENAI_GPT_CONFIG = ""
convert_openai_checkpoint_to_pytorch(OPENAI_GPT_CHECKPOINT_FOLDER_PATH,
OPENAI_GPT_CONFIG,
PYTORCH_DUMP_OUTPUT)
elif sys.argv[1] == "convert_transfo_xl_checkpoint":
try:
from .convert_transfo_xl_checkpoint_to_pytorch import convert_transfo_xl_checkpoint_to_pytorch
except ImportError:
print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
if 'ckpt' in sys.argv[2].lower():
TF_CHECKPOINT = sys.argv[2]
TF_DATASET_FILE = ""
else:
TF_DATASET_FILE = sys.argv[2]
TF_CHECKPOINT = ""
PYTORCH_DUMP_OUTPUT = sys.argv[3]
if len(sys.argv) == 5:
TF_CONFIG = sys.argv[4]
else:
TF_CONFIG = ""
convert_transfo_xl_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT, TF_DATASET_FILE)
else:
try:
from .convert_gpt2_checkpoint_to_pytorch import convert_gpt2_checkpoint_to_pytorch
except ImportError:
print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
TF_CHECKPOINT = sys.argv[2]
PYTORCH_DUMP_OUTPUT = sys.argv[3]
if len(sys.argv) == 5:
TF_CONFIG = sys.argv[4]
else:
TF_CONFIG = ""
convert_gpt2_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT)
if __name__ == '__main__':
main()
| 4,393 | 51.309524 | 145 | py |
VLC-BERT | VLC-BERT-master/external/pytorch_pretrained_bert/convert_gpt2_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert OpenAI GPT checkpoint."""
from __future__ import absolute_import, division, print_function
import argparse
from io import open
import torch
from external.pytorch_pretrained_bert.modeling_gpt2 import (CONFIG_NAME, WEIGHTS_NAME,
GPT2Config,
GPT2Model,
load_tf_weights_in_gpt2)
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
# Construct model
if gpt2_config_file == "":
config = GPT2Config()
else:
config = GPT2Config(gpt2_config_file)
model = GPT2Model(config)
# Load weights from numpy
load_tf_weights_in_gpt2(model, gpt2_checkpoint_path)
# Save pytorch-model
pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
print("Save PyTorch model to {}".format(pytorch_weights_dump_path))
torch.save(model.state_dict(), pytorch_weights_dump_path)
print("Save configuration file to {}".format(pytorch_config_dump_path))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--gpt2_checkpoint_path",
default = None,
type = str,
required = True,
help = "Path the TensorFlow checkpoint path.")
parser.add_argument("--pytorch_dump_folder_path",
default = None,
type = str,
required = True,
help = "Path to the output PyTorch model.")
parser.add_argument("--gpt2_config_file",
default = "",
type = str,
help = "An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture.")
args = parser.parse_args()
convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path,
args.gpt2_config_file,
args.pytorch_dump_folder_path)
| 3,046 | 40.739726 | 111 | py |
VLC-BERT | VLC-BERT-master/external/pytorch_pretrained_bert/convert_openai_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert OpenAI GPT checkpoint."""
from __future__ import absolute_import, division, print_function
import argparse
from io import open
import torch
from external.pytorch_pretrained_bert.modeling_openai import (CONFIG_NAME, WEIGHTS_NAME,
OpenAIGPTConfig,
OpenAIGPTModel,
load_tf_weights_in_openai_gpt)
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
# Construct model
if openai_config_file == "":
config = OpenAIGPTConfig()
else:
config = OpenAIGPTConfig(openai_config_file)
model = OpenAIGPTModel(config)
# Load weights from numpy
load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path)
# Save pytorch-model
pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
print("Save PyTorch model to {}".format(pytorch_weights_dump_path))
torch.save(model.state_dict(), pytorch_weights_dump_path)
print("Save configuration file to {}".format(pytorch_config_dump_path))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--openai_checkpoint_folder_path",
default = None,
type = str,
required = True,
help = "Path the TensorFlow checkpoint path.")
parser.add_argument("--pytorch_dump_folder_path",
default = None,
type = str,
required = True,
help = "Path to the output PyTorch model.")
parser.add_argument("--openai_config_file",
default = "",
type = str,
help = "An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture.")
args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(args.openai_checkpoint_folder_path,
args.openai_config_file,
args.pytorch_dump_folder_path)
| 3,141 | 42.041096 | 118 | py |
VLC-BERT | VLC-BERT-master/external/pytorch_pretrained_bert/modeling.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from .file_utils import cached_path
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
}
CONFIG_NAME = 'bert_config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
TF_WEIGHTS_NAME = 'model.ckpt'
def load_tf_weights_in_bert(model, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(tf_checkpoint_path)
print("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split('/')
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(n in ["adam_v", "adam_m"] for n in name):
print("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
l = re.split(r'_(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'kernel' or l[0] == 'gamma':
pointer = getattr(pointer, 'weight')
elif l[0] == 'output_bias' or l[0] == 'beta':
pointer = getattr(pointer, 'bias')
elif l[0] == 'output_weights':
pointer = getattr(pointer, 'weight')
else:
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
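# Hedged usage sketch (not part of the original file): the paths below are
# placeholders for a TensorFlow BERT release that ships `bert_config.json`
# and `model.ckpt` files.
#   config = BertConfig.from_json_file('/path/to/bert_config.json')
#   model = BertForPreTraining(config)
#   model = load_tf_weights_in_bert(model, '/path/to/model.ckpt')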
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
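# Quick numerical check (illustrative, not part of the original file): gelu(x)
# equals x * Phi(x) with Phi the standard normal CDF, so gelu(torch.zeros(1)) is 0
# and gelu(torch.tensor(1.0)) is approximately 0.8413.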
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertConfig(object):
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
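# Minimal construction sketch (not part of the original file): the hidden sizes
# restate the class defaults, while 30522 is assumed here as the vocabulary size
# of the English BERT releases; the helper only demonstrates the JSON round-trip
# supported by the methods above.
def _example_bert_config_roundtrip():
    config = BertConfig(vocab_size_or_config_json_file=30522, hidden_size=768,
                        num_hidden_layers=12, num_attention_heads=12,
                        intermediate_size=3072)
    return BertConfig.from_dict(json.loads(config.to_json_string()))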
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask, output_attention_probs=False):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
if output_attention_probs:
return context_layer, attention_probs
else:
return context_layer
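# Shape walkthrough (illustrative, not part of the original file), writing
# B = batch_size, S = sequence_length, H = hidden_size, A = num_attention_heads:
#   hidden_states [B, S, H] -> query/key/value layers [B, A, S, H/A]
#   attention_scores = Q @ K^T / sqrt(H/A) -> [B, A, S, S]
#   attention_probs @ V -> [B, A, S, H/A]
#   permute + view back to context_layer -> [B, S, H]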
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask, output_attention_probs=False):
self_output = self.self(input_tensor, attention_mask, output_attention_probs=output_attention_probs)
if output_attention_probs:
self_output, attention_probs = self_output
attention_output = self.output(self_output, input_tensor)
if output_attention_probs:
return attention_output, attention_probs
return attention_output
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask, output_attention_probs=False):
attention_output = self.attention(hidden_states, attention_mask, output_attention_probs=output_attention_probs)
if output_attention_probs:
attention_output, attention_probs = attention_output
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
if output_attention_probs:
return layer_output, attention_probs
else:
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
layer = BertLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, output_attention_probs=False):
all_encoder_layers = []
all_attention_probs = []
for layer_module in self.layer:
hidden_states = layer_module(hidden_states, attention_mask, output_attention_probs=output_attention_probs)
if output_attention_probs:
hidden_states, attention_probs = hidden_states
all_attention_probs.append(attention_probs)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if output_attention_probs:
return all_encoder_layers, all_attention_probs
else:
return all_encoder_layers
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(BertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(BertPreTrainedModel, self).__init__()
if not isinstance(config, BertConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None,
from_tf=False, *inputs, **kwargs):
"""
Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-large-cased`
. `bert-base-multilingual-uncased`
. `bert-base-multilingual-cased`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
                    . `model.ckpt` a TensorFlow checkpoint
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
archive_file = pretrained_model_name_or_path
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
archive_file))
return None
if resolved_archive_file == archive_file:
logger.info("loading archive file {}".format(archive_file))
else:
logger.info("loading archive file {} from cache at {}".format(
archive_file, resolved_archive_file))
tempdir = None
if os.path.isdir(resolved_archive_file) or from_tf:
serialization_dir = resolved_archive_file
else:
# Extract archive to temp dir
tempdir = tempfile.mkdtemp()
logger.info("extracting archive file {} to temp dir {}".format(
resolved_archive_file, tempdir))
with tarfile.open(resolved_archive_file, 'r:gz') as archive:
archive.extractall(tempdir)
serialization_dir = tempdir
# Load config
config_file = os.path.join(serialization_dir, CONFIG_NAME)
config = BertConfig.from_json_file(config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
state_dict = torch.load(weights_path, map_location='cpu' if not torch.cuda.is_available() else None)
if tempdir:
# Clean up temp dir
shutil.rmtree(tempdir)
if from_tf:
# Directly load from a TensorFlow checkpoint
weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME)
return load_tf_weights_in_bert(model, weights_path)
# Load from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
start_prefix = 'bert.'
load(model, prefix=start_prefix)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
model.__class__.__name__, "\n\t".join(error_msgs)))
return model
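# Minimal usage sketch (not part of the original file); the cache directory is a
# placeholder and 'bert-base-uncased' is one of the keys listed in
# PRETRAINED_MODEL_ARCHIVE_MAP above.
#   model = BertModel.from_pretrained('bert-base-uncased', cache_dir='/tmp/bert_cache')
#   model.eval()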
class BertModel(BertPreTrainedModel):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated with the first token of the
            input (`[CLS]`) to train on the Next-Sentence task (see BERT's paper).
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is simpler than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(input_ids, token_type_ids)
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output
class BertForPreTraining(BertPreTrainedModel):
"""BERT model with pre-training heads.
This module comprises the BERT model followed by the two pre-training heads:
- the masked language modeling head, and
- the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
`next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
if `masked_lm_labels` and `next_sentence_label` are not `None`:
Outputs the total_loss which is the sum of the masked language modeling loss and the next
sentence classification loss.
if `masked_lm_labels` or `next_sentence_label` is `None`:
Outputs a tuple comprising
- the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
- the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForPreTraining(config)
masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForPreTraining, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None):
sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False)
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
if masked_lm_labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
return total_loss
else:
return prediction_scores, seq_relationship_score
class BertForMaskedLM(BertPreTrainedModel):
"""BERT model with the masked language modeling head.
This module comprises the BERT model followed by the masked language modeling head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
Outputs:
if `masked_lm_labels` is not `None`:
Outputs the masked language modeling loss.
if `masked_lm_labels` is `None`:
Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForMaskedLM(config)
masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForMaskedLM, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False)
prediction_scores = self.cls(sequence_output)
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
return masked_lm_loss
else:
return prediction_scores
class BertForNextSentencePrediction(BertPreTrainedModel):
"""BERT model with next sentence prediction head.
This module comprises the BERT model followed by the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
if `next_sentence_label` is not `None`:
            Outputs the next sentence classification loss.
if `next_sentence_label` is `None`:
Outputs the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForNextSentencePrediction(config)
seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForNextSentencePrediction, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None):
_, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False)
        seq_relationship_score = self.cls(pooled_output)
if next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
return next_sentence_loss
else:
return seq_relationship_score
class BertForSequenceClassification(BertPreTrainedModel):
"""BERT model for classification.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_labels].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForSequenceClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels):
super(BertForSequenceClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
_, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss
else:
return logits
class BertForMultipleChoice(BertPreTrainedModel):
"""BERT model for multiple choice tasks.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_choices`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]
with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_choices].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, num_choices].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])
input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]])
token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_choices = 2
model = BertForMultipleChoice(config, num_choices)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_choices):
super(BertForMultipleChoice, self).__init__(config)
self.num_choices = num_choices
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
_, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, self.num_choices)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
return loss
else:
return reshaped_logits
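# Added shape sketch (illustrative; not in the original file). BertForMultipleChoice encodes each
# choice independently by flattening [batch, num_choices, seq_len] to [batch * num_choices, seq_len],
# then reshapes the per-choice scores back so CrossEntropyLoss selects one choice per example.
# Token ids and labels below are placeholders.
def _example_multiple_choice():
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = BertForMultipleChoice(config, num_choices=2)
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])
    input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]], [[1, 1, 0], [1, 0, 0]]])
    token_type_ids = torch.zeros_like(input_ids)
    labels = torch.LongTensor([0, 1])  # index of the correct choice for each example
    loss = model(input_ids, token_type_ids, input_mask, labels)  # scalar training loss
    logits = model(input_ids, token_type_ids, input_mask)        # [batch_size, num_choices]
    return loss, logits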
class BertForTokenClassification(BertPreTrainedModel):
"""BERT model for token-level classification.
This module is composed of the BERT model with a linear layer on top of
the full hidden state of the last layer.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [0, ..., num_labels].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, sequence_length, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForTokenClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels):
super(BertForTokenClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss
else:
return logits
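# Added masked-loss sketch (illustrative; not in the original file). Only positions where
# attention_mask == 1 contribute to the token-level loss computed above; padded positions are
# excluded via the `active_loss` selection. All ids and labels below are placeholders.
def _example_token_classification():
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = BertForTokenClassification(config, num_labels=2)
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])  # last token of the second example is padding
    labels = torch.LongTensor([[0, 1, 0], [1, 0, 0]])      # the label at the padded position is ignored
    loss = model(input_ids, attention_mask=input_mask, labels=labels)
    logits = model(input_ids, attention_mask=input_mask)   # [batch_size, seq_len, num_labels]
    return loss, logits.argmax(dim=-1)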
class BertForQuestionAnswering(BertPreTrainedModel):
"""BERT model for Question Answering (span extraction).
This module is composed of the BERT model with a linear layer on top of
the sequence output that computes start_logits and end_logits
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence and positions outside of the sequence are not taken
into account for computing the loss.
`end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence and positions outside of the sequence are not taken
into account for computing the loss.
Outputs:
if `start_positions` and `end_positions` are not `None`:
Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.
if `start_positions` or `end_positions` is `None`:
Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
position tokens of shape [batch_size, sequence_length].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForQuestionAnswering(config)
start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForQuestionAnswering, self).__init__(config)
self.bert = BertModel(config)
# TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, splitting adds a dimension; squeeze it
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return total_loss
else:
return start_logits, end_logits
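# Added span-decoding sketch (illustrative; not in the original file). One minimal way to turn the
# start/end logits produced by BertForQuestionAnswering into a predicted answer span; the real
# SQuAD decoding in `run_squad.py` additionally handles n-best lists and a maximum answer length.
def _example_decode_answer_span(start_logits, end_logits):
    start_idx = start_logits.argmax(dim=-1)   # [batch_size]
    end_idx = end_logits.argmax(dim=-1)       # [batch_size]
    end_idx = torch.max(end_idx, start_idx)   # keep the toy span well-formed (end >= start)
    return start_idx, end_idx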
| 60,198 | 48.18219 | 139 | py |
VLC-BERT | VLC-BERT-master/external/pytorch_pretrained_bert/modeling_gpt2.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT-2 model."""
import collections
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .file_utils import cached_path
from .modeling import BertLayerNorm as LayerNorm
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-pytorch_model.bin"}
PRETRAINED_CONFIG_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-config.json"}
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"
def load_tf_weights_in_gpt2(model, gpt2_checkpoint_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(gpt2_checkpoint_path)
print("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array.squeeze())
for name, array in zip(names, arrays):
name = name[6:] # skip "model/"
name = name.split('/')
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+\d+', m_name):
l = re.split(r'(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'w' or l[0] == 'g':
pointer = getattr(pointer, 'weight')
elif l[0] == 'b':
pointer = getattr(pointer, 'bias')
elif l[0] == 'wpe' or l[0] == 'wte':
pointer = getattr(pointer, l[0])
pointer = getattr(pointer, 'weight')
else:
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
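# Added note (not in the original file): the expression above is the tanh approximation of GELU,
# gelu(x) ~ x * Phi(x) with Phi the standard normal CDF. A quick, illustrative sanity check
# against the exact erf-based form (helper name is arbitrary):
def _check_gelu_approximation():
    x = torch.linspace(-3.0, 3.0, steps=7)
    exact = x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    # The approximation matches the exact form to within roughly 1e-3 on this range.
    return torch.max(torch.abs(gelu(x) - exact))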
class GPT2Config(object):
"""Configuration class to store the configuration of a `GPT2Model`.
"""
def __init__(
self,
vocab_size_or_config_json_file=50257,
n_positions=1024,
n_ctx=1024,
n_embd=768,
n_layer=12,
n_head=12,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
):
"""Constructs GPT2Config.
Args:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `GPT2Model` or a configuration json file.
n_positions: Number of positional embeddings.
n_ctx: Size of the causal mask (usually same as n_positions).
n_embd: Dimensionality of the embeddings and hidden states.
n_layer: Number of hidden layers in the Transformer encoder.
n_head: Number of attention heads for each attention layer in
the Transformer encoder.
layer_norm_epsilon: epsilon to use in the layer norm layers
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.n_ctx = n_ctx
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
else:
raise ValueError(
"First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)"
)
@classmethod
def from_dict(cls, json_object):
"""Constructs a `GPT2Config` from a Python dictionary of parameters."""
config = GPT2Config(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `GPT2Config` from a json file of parameters."""
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class Conv1D(nn.Module):
def __init__(self, nf, nx):
super(Conv1D, self).__init__()
self.nf = nf
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = Parameter(w)
self.bias = Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(*size_out)
return x
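# Added clarification sketch (not in the original file): Conv1D(nf, nx) is simply a linear projection
# from nx to nf features whose weight is stored transposed ([nx, nf]) relative to nn.Linear, matching
# the layout of the OpenAI/TF checkpoints. The helper below is illustrative only.
def _conv1d_as_linear(conv1d_layer):
    nx, nf = conv1d_layer.weight.shape
    linear = nn.Linear(nx, nf)
    linear.weight.data = conv1d_layer.weight.data.t()  # transpose back to nn.Linear layout
    linear.bias.data = conv1d_layer.bias.data
    return linear  # linear(x) now produces the same output as conv1d_layer(x)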
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False):
super(Attention, self).__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, nx)
self.c_proj = Conv1D(n_state, nx)
def _attn(self, q, k, v):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
nd, ns = w.size(-2), w.size(-1)
b = self.bias[:, :, ns-nd:ns, :ns]
w = w * b - 1e10 * (1 - b)
w = nn.Softmax(dim=-1)(w)
return torch.matmul(w, v)
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)
else:
return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
def forward(self, x, layer_past=None):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
if layer_past is not None:
past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1] # transpose back cf below
key = torch.cat((past_key, key), dim=-1)
value = torch.cat((past_value, value), dim=-2)
present = torch.stack((key.transpose(-2, -1), value)) # transpose to have same shapes for stacking
a = self._attn(query, key, value)
a = self.merge_heads(a)
a = self.c_proj(a)
return a, present
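# Added shape walkthrough (illustrative; not in the original file). `present` stacks the key
# (transposed back to [batch, head, seq, head_features]) and value tensors so that a later forward
# pass can concatenate the cached keys/values with those of new tokens via `layer_past`.
def _attention_cache_shapes(config, batch_size=2, seq_len=5):
    attn = Attention(config.n_embd, config.n_ctx, config, scale=True)
    x = torch.randn(batch_size, seq_len, config.n_embd)
    out, present = attn(x)
    # out: [batch, seq, n_embd]; present: [2, batch, n_head, seq, n_embd // n_head]
    return out.shape, present.shape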
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super(MLP, self).__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = gelu
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return h2
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False):
super(Block, self).__init__()
nx = config.n_embd
self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)
self.attn = Attention(nx, n_ctx, config, scale)
self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)
self.mlp = MLP(4 * nx, config)
def forward(self, x, layer_past=None):
a, present = self.attn(self.ln_1(x), layer_past=layer_past)
x = x + a
m = self.mlp(self.ln_2(x))
x = x + m
return x, present
class GPT2LMHead(nn.Module):
""" Language Model Head for the transformer """
def __init__(self, model_embeddings_weights, config):
super(GPT2LMHead, self).__init__()
self.n_embd = config.n_embd
self.set_embeddings_weights(model_embeddings_weights)
def set_embeddings_weights(self, model_embeddings_weights):
embed_shape = model_embeddings_weights.shape
self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
self.decoder.weight = model_embeddings_weights # Tied weights
def forward(self, hidden_state):
# Truncated Language modeling logits (we remove the last token)
# h_trunc = h[:, :-1].contiguous().view(-1, self.n_embd)
lm_logits = self.decoder(hidden_state)
return lm_logits
class GPT2MultipleChoiceHead(nn.Module):
""" Classifier Head for the transformer """
def __init__(self, config):
super(GPT2MultipleChoiceHead, self).__init__()
self.n_embd = config.n_embd
self.linear = nn.Linear(config.n_embd, 1)
nn.init.normal_(self.linear.weight, std=0.02)
nn.init.normal_(self.linear.bias, 0)
def forward(self, hidden_states, mc_token_ids):
# Classification logits
# hidden_state (bsz, num_choices, seq_length, hidden_size)
# mc_token_ids (bsz, num_choices)
mc_token_ids = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden_states.size(-1))
# (bsz, num_choices, 1, hidden_size)
multiple_choice_h = hidden_states.gather(2, mc_token_ids).squeeze(2)
# (bsz, num_choices, hidden_size)
multiple_choice_logits = self.linear(multiple_choice_h).squeeze(-1)
# (bsz, num_choices)
return multiple_choice_logits
class GPT2PreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(GPT2PreTrainedModel, self).__init__()
if not isinstance(config, GPT2Config):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `GPT2Config`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
)
)
self.config = config
def set_tied(self):
pass
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(
cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None, from_tf=False, *inputs, **kwargs
):
"""
Instantiate a GPT2PreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
                    . `gpt2`
                - a path or url to a pretrained model archive containing:
                    . `config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a GPT2Model instance
                - a path or url to a pretrained model archive containing:
                    . `config.json` a configuration file for the model
. a TensorFlow checkpoint with trained weights
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
            *inputs, **kwargs: additional inputs for the specific GPT-2 model class
"""
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} and {} "
"at this path or url.".format(
pretrained_model_name_or_path, ", ".join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), pretrained_model_name_or_path,
archive_file, config_file
)
)
return None
if resolved_archive_file == archive_file and resolved_config_file == config_file:
logger.info("loading weights file {}".format(archive_file))
logger.info("loading configuration file {}".format(config_file))
else:
logger.info("loading weights file {} from cache at {}".format(
archive_file, resolved_archive_file))
logger.info("loading configuration file {} from cache at {}".format(
config_file, resolved_config_file))
# Load config
config = GPT2Config.from_json_file(resolved_config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
state_dict = torch.load(resolved_archive_file, map_location='cpu' if not torch.cuda.is_available() else None)
if from_tf:
# Directly load from a TensorFlow checkpoint (stored as NumPy array)
return load_tf_weights_in_gpt2(model, resolved_archive_file)
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if key.endswith(".g"):
new_key = key[:-2] + ".weight"
elif key.endswith(".b"):
new_key = key[:-2] + ".bias"
elif key.endswith(".w"):
new_key = key[:-2] + ".weight"
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=""):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
start_model = model
if hasattr(model, "transformer") and all(not s.startswith('transformer.') for s in state_dict.keys()):
start_model = model.transformer
load(start_model, prefix="")
if len(missing_keys) > 0:
logger.info(
"Weights of {} not initialized from pretrained model: {}".format(model.__class__.__name__, missing_keys)
)
if len(unexpected_keys) > 0:
logger.info(
"Weights from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys)
)
if len(error_msgs) > 0:
raise RuntimeError(
"Error(s) in loading state_dict for {}:\n\t{}".format(model.__class__.__name__, "\n\t".join(error_msgs))
)
# Make sure we are still sharing the output and input embeddings after loading weights
model.set_tied()
return model
class GPT2Model(GPT2PreTrainedModel):
"""OpenAI GPT-2 model ("Language Models are Unsupervised Multitask Learners").
Params:
config: a GPT2Config class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, config.vocab_size[
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
with the position indices (selected in the range [0, config.n_positions - 1[.
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
            self-attention block.
        `past`: an optional list of torch.FloatTensor (one per layer), as returned as `presents` by a previous
            forward pass; when provided, the cached key/value states are reused so that only the new tokens
            need to be passed in `input_ids`.
    Outputs: a tuple of
        `hidden_states`: the encoded-hidden-states at the top of the model
            as a torch.FloatTensor of size [batch_size, sequence_length, hidden_size]
            (or more generally [d_1, ..., d_n, hidden_size] where d_1 ... d_n are the dimensions of input_ids)
        `presents`: a list of torch.FloatTensor with the pre-computed key/value attention states of each block,
            to be reused as the `past` input of a later forward pass for faster sequential decoding
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
config = modeling_gpt2.GPT2Config()
model = modeling_gpt2.GPT2Model(config)
        hidden_states, presents = model(input_ids)
```
"""
def __init__(self, config):
super(GPT2Model, self).__init__(config)
self.wte = nn.Embedding(config.vocab_size, config.n_embd)
self.wpe = nn.Embedding(config.n_positions, config.n_embd)
block = Block(config.n_ctx, config, scale=True)
self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(config.n_layer)])
self.ln_f = LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.apply(self.init_weights)
def forward(self, input_ids, position_ids=None, token_type_ids=None, past=None):
if past is None:
past_length = 0
past = [None] * len(self.h)
else:
past_length = past[0][0].size(-2)
if position_ids is None:
position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_ids.size(-1))
position_ids = position_ids.view(-1, position_ids.size(-1))
inputs_embeds = self.wte(input_ids)
position_embeds = self.wpe(position_ids)
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
token_type_embeds = self.wte(token_type_ids)
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
presents = []
for block, layer_past in zip(self.h, past):
hidden_states, present = block(hidden_states, layer_past)
presents.append(present)
hidden_states = self.ln_f(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
return hidden_states.view(*output_shape), presents
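# Added usage sketch (illustrative; not in the original file). The `presents` returned by
# GPT2Model.forward can be fed back as `past`, so only the newly generated token needs to be run
# through the network on the next step. Token ids below are placeholders and the randomly
# initialised model is for shape illustration only.
def _example_incremental_forward():
    config = GPT2Config()
    model = GPT2Model(config)
    model.eval()
    with torch.no_grad():
        context = torch.LongTensor([[31, 51, 99]])
        hidden, past = model(context)                    # full pass over the context
        next_token = torch.LongTensor([[42]])
        hidden_new, past = model(next_token, past=past)  # only the new token is processed
    return hidden_new.shape                              # [1, 1, config.n_embd]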
class GPT2LMHeadModel(GPT2PreTrainedModel):
"""OpenAI GPT-2 model with a Language Modeling head ("Language Models are Unsupervised Multitask Learners").
Params:
config: a GPT2Config class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, config.vocab_size[
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
with the position indices (selected in the range [0, config.n_positions - 1[.
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
self-attention block.
`lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
Outputs:
if `lm_labels` is not `None`:
Outputs the language modeling loss.
        else: a tuple of
            `lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, sequence_length, config.vocab_size]
                (or more generally [d_1, ..., d_n, config.vocab_size] where d_1 ... d_n are the dimensions of input_ids)
            `presents`: a list of torch.FloatTensor with the pre-computed key/value attention states of each block,
                which can be passed back as the `past` input of a later forward pass to speed up sequential decoding
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
config = modeling_gpt2.GPT2Config()
model = modeling_gpt2.GPT2LMHeadModel(config)
        lm_logits, presents = model(input_ids)
```
"""
def __init__(self, config):
super(GPT2LMHeadModel, self).__init__(config)
self.transformer = GPT2Model(config)
self.lm_head = GPT2LMHead(self.transformer.wte.weight, config)
self.apply(self.init_weights)
def set_tied(self):
""" Make sure we are sharing the embeddings
"""
self.lm_head.set_embeddings_weights(self.transformer.wte.weight)
def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None, past=None):
hidden_states, presents = self.transformer(input_ids, position_ids, token_type_ids, past)
lm_logits = self.lm_head(hidden_states)
if lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), lm_labels.view(-1))
return loss
return lm_logits, presents
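# Added greedy-decoding sketch (illustrative; not in the original file). Assumes a trained
# GPT2LMHeadModel and placeholder input ids; shows how `lm_logits` and `presents` combine for
# simple autoregressive generation using the key/value cache.
def _greedy_generate(model, input_ids, steps=5):
    model.eval()
    generated, past = input_ids, None
    with torch.no_grad():
        for _ in range(steps):
            logits, past = model(generated if past is None else generated[:, -1:], past=past)
            next_token = logits[:, -1, :].argmax(dim=-1, keepdim=True)
            generated = torch.cat([generated, next_token], dim=1)
    return generated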
class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
"""OpenAI GPT-2 model with a Language Modeling and a Multiple Choice head ("Language Models are Unsupervised Multitask Learners").
Params:
config: a GPT2Config class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the BPE token
indices selected in the range [0, config.vocab_size[
`mc_token_ids`: a torch.LongTensor of shape [batch_size, num_choices] with the index of the token from
which we should take the hidden state to feed the multiple choice classifier (usually last token of the sequence)
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
with the position indices (selected in the range [0, config.n_positions - 1[.
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
self-attention block.
`lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, num_choices, sequence_length]
with indices selected in [-1, 0, ..., config.vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., config.vocab_size]
        `mc_labels`: optional multiple choice labels: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_choices].
    Outputs:
        if `lm_labels` and `mc_labels` are not `None`:
            Outputs a list of losses with the language modeling loss and the multiple choice loss.
        else: a tuple with
            `lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, num_choices, sequence_length, config.vocab_size]
            `multiple_choice_logits`: the multiple choice logits as a torch.FloatTensor of size [batch_size, num_choices]
            `presents`: a list of torch.FloatTensor with the pre-computed key/value attention states of each block
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]]) # (bsz, number of choice, seq length)
        mc_token_ids = torch.LongTensor([[2, 1]])  # (bsz, number of choice)
        config = modeling_gpt2.GPT2Config()
        model = modeling_gpt2.GPT2DoubleHeadsModel(config)
        lm_logits, multiple_choice_logits, presents = model(input_ids, mc_token_ids)
```
"""
def __init__(self, config):
super(GPT2DoubleHeadsModel, self).__init__(config)
self.transformer = GPT2Model(config)
self.lm_head = GPT2LMHead(self.transformer.wte.weight, config)
self.multiple_choice_head = GPT2MultipleChoiceHead(config)
self.apply(self.init_weights)
def set_tied(self):
""" Make sure we are sharing the embeddings
"""
self.lm_head.set_embeddings_weights(self.transformer.wte.weight)
def forward(self, input_ids, mc_token_ids, lm_labels=None, mc_labels=None, token_type_ids=None, position_ids=None, past=None):
hidden_states, presents = self.transformer(input_ids, position_ids, token_type_ids, past)
lm_logits = self.lm_head(hidden_states)
mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids)
losses = []
if lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
losses.append(loss_fct(lm_logits.view(-1, lm_logits.size(-1)), lm_labels.view(-1)))
if mc_labels is not None:
loss_fct = CrossEntropyLoss()
losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
if losses:
return losses
return lm_logits, mc_logits, presents
| 29,887 | 42.632117 | 146 | py |
VLC-BERT | VLC-BERT-master/external/pytorch_pretrained_bert/modeling_openai.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT model."""
import collections
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .file_utils import cached_path
from .modeling import BertLayerNorm as LayerNorm
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-pytorch_model.bin"}
PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-config.json"}
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"
def load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path):
""" Load tf pre-trained weights in a pytorch model (from NumPy arrays here)
"""
import re
import numpy as np
print("Loading weights...")
names = json.load(open(openai_checkpoint_folder_path + '/parameters_names.json', "r", encoding='utf-8'))
shapes = json.load(open(openai_checkpoint_folder_path + '/params_shapes.json', "r", encoding='utf-8'))
offsets = np.cumsum([np.prod(shape) for shape in shapes])
init_params = [np.load(openai_checkpoint_folder_path + '/params_{}.npy'.format(n)) for n in range(10)]
init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
# This was used when we had a single embedding matrix for positions and tokens
# init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)
# del init_params[1]
init_params = [arr.squeeze() for arr in init_params]
try:
assert model.tokens_embed.weight.shape == init_params[1].shape
assert model.positions_embed.weight.shape == init_params[0].shape
except AssertionError as e:
e.args += (model.tokens_embed.weight.shape, init_params[1].shape)
e.args += (model.positions_embed.weight.shape, init_params[0].shape)
raise
model.tokens_embed.weight.data = torch.from_numpy(init_params[1])
model.positions_embed.weight.data = torch.from_numpy(init_params[0])
names.pop(0)
# Pop position and token embedding arrays
init_params.pop(0)
init_params.pop(0)
for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]):
name = name[6:] # skip "model/"
assert name[-2:] == ":0"
name = name[:-2]
name = name.split('/')
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+\d+', m_name):
l = re.split(r'(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'g':
pointer = getattr(pointer, 'weight')
elif l[0] == 'b':
pointer = getattr(pointer, 'bias')
elif l[0] == 'w':
pointer = getattr(pointer, 'weight')
else:
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
def swish(x):
return x * torch.sigmoid(x)
ACT_FNS = {"relu": nn.ReLU, "swish": swish, "gelu": gelu}
class OpenAIGPTConfig(object):
"""Configuration class to store the configuration of a `OpenAIGPTModel`.
"""
def __init__(
self,
vocab_size_or_config_json_file=40478,
n_special=0,
n_positions=512,
n_ctx=512,
n_embd=768,
n_layer=12,
n_head=12,
afn="gelu",
resid_pdrop=0.1,
embd_pdrop=0.1,
attn_pdrop=0.1,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
):
"""Constructs OpenAIGPTConfig.
Args:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `OpenAIGPTModel` or a configuration json file.
n_special: The number of special tokens to learn during fine-tuning ('[SEP]', '[CLF]', ...)
n_positions: Number of positional embeddings.
n_ctx: Size of the causal mask (usually same as n_positions).
n_embd: Dimensionality of the embeddings and hidden states.
n_layer: Number of hidden layers in the Transformer encoder.
n_head: Number of attention heads for each attention layer in
the Transformer encoder.
afn: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            resid_pdrop: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attn_pdrop: The dropout ratio for the attention
probabilities.
embd_pdrop: The dropout ratio for the embeddings.
layer_norm_epsilon: epsilon to use in the layer norm layers
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.n_special = n_special
self.n_ctx = n_ctx
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.afn = afn
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
else:
raise ValueError(
"First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)"
)
@property
def total_tokens_embeddings(self):
return self.vocab_size + self.n_special
@classmethod
def from_dict(cls, json_object):
"""Constructs a `OpenAIGPTConfig` from a Python dictionary of parameters."""
config = OpenAIGPTConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `OpenAIGPTConfig` from a json file of parameters."""
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
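# Added round-trip sketch (illustrative; not in the original file). OpenAIGPTConfig can be
# serialized with to_json_string()/to_dict() and rebuilt with from_dict()/from_json_file(), which
# is how from_pretrained() below restores the configuration from `config.json`.
def _example_config_roundtrip():
    config = OpenAIGPTConfig(vocab_size_or_config_json_file=40478, n_special=2)
    rebuilt = OpenAIGPTConfig.from_dict(json.loads(config.to_json_string()))
    assert rebuilt.total_tokens_embeddings == config.vocab_size + config.n_special
    return rebuilt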
class Conv1D(nn.Module):
def __init__(self, nf, rf, nx):
super(Conv1D, self).__init__()
self.rf = rf
self.nf = nf
if rf == 1: # faster 1x1 conv
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = Parameter(w)
self.bias = Parameter(torch.zeros(nf))
else: # was used to train LM
raise NotImplementedError
def forward(self, x):
if self.rf == 1:
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(*size_out)
else:
raise NotImplementedError
return x
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False):
super(Attention, self).__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, 1, nx)
self.c_proj = Conv1D(n_state, 1, nx)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
def _attn(self, q, k, v):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
# w = w * self.bias + -1e9 * (1 - self.bias) # TF implem method: mask_attn_weights
# XD: self.b may be larger than w, so we need to crop it
b = self.bias[:, :, : w.size(-2), : w.size(-1)]
w = w * b + -1e9 * (1 - b)
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
return torch.matmul(w, v)
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1)
else:
return x.permute(0, 2, 1, 3)
def forward(self, x):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
a = self._attn(query, key, value)
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
return a
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super(MLP, self).__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, 1, nx)
self.c_proj = Conv1D(nx, 1, n_state)
self.act = ACT_FNS[config.afn]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False):
super(Block, self).__init__()
nx = config.n_embd
self.attn = Attention(nx, n_ctx, config, scale)
self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)
self.mlp = MLP(4 * nx, config)
self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)
def forward(self, x):
a = self.attn(x)
n = self.ln_1(x + a)
m = self.mlp(n)
h = self.ln_2(n + m)
return h
class OpenAIGPTLMHead(nn.Module):
""" Language Model Head for the transformer """
def __init__(self, model_embeddings_weights, config):
super(OpenAIGPTLMHead, self).__init__()
self.n_embd = config.n_embd
self.set_embeddings_weights(model_embeddings_weights)
def set_embeddings_weights(self, model_embeddings_weights):
embed_shape = model_embeddings_weights.shape
self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
self.decoder.weight = model_embeddings_weights # Tied weights
def forward(self, hidden_state):
# Truncated Language modeling logits (we remove the last token)
# h_trunc = h[:, :-1].contiguous().view(-1, self.n_embd)
lm_logits = self.decoder(hidden_state)
return lm_logits
class OpenAIGPTMultipleChoiceHead(nn.Module):
""" Classifier Head for the transformer """
def __init__(self, config):
super(OpenAIGPTMultipleChoiceHead, self).__init__()
self.n_embd = config.n_embd
# self.multiple_choice_token = multiple_choice_token
self.dropout = nn.Dropout2d(config.resid_pdrop) # To reproduce the noise_shape parameter of TF implementation
self.linear = nn.Linear(config.n_embd, 1)
nn.init.normal_(self.linear.weight, std=0.02)
nn.init.normal_(self.linear.bias, 0)
def forward(self, hidden_states, mc_token_ids):
# Classification logits
# hidden_state (bsz, num_choices, seq_length, hidden_size)
# mc_token_ids (bsz, num_choices)
mc_token_ids = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden_states.size(-1))
# (bsz, num_choices, 1, hidden_size)
multiple_choice_h = hidden_states.gather(2, mc_token_ids).squeeze(2)
# (bsz, num_choices, hidden_size)
multiple_choice_logits = self.linear(multiple_choice_h).squeeze(-1)
# (bsz, num_choices)
return multiple_choice_logits
class OpenAIGPTPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(OpenAIGPTPreTrainedModel, self).__init__()
if not isinstance(config, OpenAIGPTConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `OpenAIGPTConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
)
)
self.config = config
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def set_num_special_tokens(self, num_special_tokens):
pass
@classmethod
def from_pretrained(
cls, pretrained_model_name_or_path, num_special_tokens=None, state_dict=None, cache_dir=None, from_tf=False, *inputs, **kwargs
):
"""
Instantiate a OpenAIGPTPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `openai-gpt`
- a path or url to a pretrained model archive containing:
                    . `config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a OpenAIGPTModel instance
                - a path or url to a pretrained model archive containing:
                    . `config.json` a configuration file for the model
. a series of NumPy files containing OpenAI TensorFlow trained weights
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
            *inputs, **kwargs: additional inputs for the specific OpenAI GPT model class
"""
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} and {} "
"at this path or url.".format(
pretrained_model_name_or_path, ", ".join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), pretrained_model_name_or_path,
archive_file, config_file
)
)
return None
if resolved_archive_file == archive_file and resolved_config_file == config_file:
logger.info("loading weights file {}".format(archive_file))
logger.info("loading configuration file {}".format(config_file))
else:
logger.info("loading weights file {} from cache at {}".format(
archive_file, resolved_archive_file))
logger.info("loading configuration file {} from cache at {}".format(
config_file, resolved_config_file))
# Load config
config = OpenAIGPTConfig.from_json_file(resolved_config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
state_dict = torch.load(resolved_archive_file, map_location='cpu' if not torch.cuda.is_available() else None)
if from_tf:
# Directly load from a TensorFlow checkpoint (stored as NumPy array)
return load_tf_weights_in_openai_gpt(model, resolved_archive_file)
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if key.endswith(".g"):
new_key = key[:-2] + ".weight"
elif key.endswith(".b"):
new_key = key[:-2] + ".bias"
elif key.endswith(".w"):
new_key = key[:-2] + ".weight"
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=""):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
start_model = model
if hasattr(model, "transformer") and all(not s.startswith('transformer.') for s in state_dict.keys()):
start_model = model.transformer
load(start_model, prefix="")
if len(missing_keys) > 0:
logger.info(
"Weights of {} not initialized from pretrained model: {}".format(model.__class__.__name__, missing_keys)
)
if len(unexpected_keys) > 0:
logger.info(
"Weights from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys)
)
if len(error_msgs) > 0:
raise RuntimeError(
"Error(s) in loading state_dict for {}:\n\t{}".format(model.__class__.__name__, "\n\t".join(error_msgs))
)
# Add additional embeddings for special tokens if needed
# This step also make sure we are still sharing the output and input embeddings after loading weights
model.set_num_special_tokens(num_special_tokens if num_special_tokens is not None else config.n_special)
return model
class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
"""OpenAI GPT model ("Improving Language Understanding by Generative Pre-Training").
    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
    Special token embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during the fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.
    The embeddings are ordered as follows in the token embeddings matrix:
[0, ----------------------
... -> word embeddings
config.vocab_size - 1, ______________________
config.vocab_size,
... -> special embeddings
config.vocab_size + config.n_special - 1] ______________________
where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.
Params:
config: a OpenAIGPTConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, total_tokens_embeddings[
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
with the position indices (selected in the range [0, config.n_positions - 1[.
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
self-attention block.
Outputs:
`hidden_states`: the encoded-hidden-states at the top of the model
as a torch.FloatTensor of size [batch_size, sequence_length, hidden_size]
            (or more generally [d_1, ..., d_n, hidden_size] where d_1 ... d_n are the dimensions of input_ids)
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
config = modeling_openai.OpenAIGPTConfig()
model = modeling_openai.OpenAIGPTModel(config)
hidden_states = model(input_ids)
```
"""
def __init__(self, config):
super(OpenAIGPTModel, self).__init__(config)
num_tokens = config.vocab_size + config.n_special
self.tokens_embed = nn.Embedding(num_tokens, config.n_embd)
self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
block = Block(config.n_ctx, config, scale=True)
self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(config.n_layer)])
self.apply(self.init_weights)
# nn.init.normal_(self.embed.weight, std=0.02)
def set_num_special_tokens(self, num_special_tokens):
" Update input embeddings with new embedding matrice if needed "
if self.config.n_special == num_special_tokens:
return
# Update config
self.config.n_special = num_special_tokens
# # Build new embeddings and initialize
old_embed = self.tokens_embed
self.tokens_embed = nn.Embedding(self.config.total_tokens_embeddings, self.config.n_embd)
# Initialize all new embeddings (in particular the special tokens)
self.init_weights(self.tokens_embed)
# Copy word and positional embeddings from the previous weights
self.tokens_embed.weight.data[: self.config.vocab_size, :] = old_embed.weight.data[: self.config.vocab_size, :]
self.tokens_embed.weight.data[-self.config.n_positions :, :] = old_embed.weight.data[-self.config.n_positions :, :]
def forward(self, input_ids, position_ids=None, token_type_ids=None):
if position_ids is None:
            # This was used when we had a single embedding matrix for position and token embeddings
# start = self.config.vocab_size + self.config.n_special
# end = start + input_ids.size(-1)
# position_ids = torch.arange(start, end, dtype=torch.long, device=input_ids.device)
position_ids = torch.arange(input_ids.size(-1), dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_ids.size(-1))
position_ids = position_ids.view(-1, position_ids.size(-1))
inputs_embeds = self.tokens_embed(input_ids)
position_embeds = self.positions_embed(position_ids)
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
token_type_embeds = self.tokens_embed(token_type_ids)
else:
token_type_embeds = 0
# Add the position information to the input embeddings
# h = e.sum(dim=2)
hidden_states = inputs_embeds + position_embeds + token_type_embeds
for block in self.h:
hidden_states = block(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
return hidden_states.view(*output_shape)
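# Added usage sketch (illustrative; not in the original file). Task-specific special tokens
# ([SEP], [CLS], ...) take the ids vocab_size, vocab_size + 1, ...; the embedding matrix has to be
# resized with set_num_special_tokens before those ids can be fed to the model.
def _example_add_special_tokens(num_special_tokens=3):
    config = OpenAIGPTConfig()
    model = OpenAIGPTModel(config)
    model.set_num_special_tokens(num_special_tokens)
    assert model.tokens_embed.num_embeddings == config.vocab_size + num_special_tokens
    return model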
class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel):
"""OpenAI GPT model with a Language Modeling head ("Improving Language Understanding by Generative Pre-Training").
    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
    Special token embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during the fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.
    The embeddings are ordered as follows in the token embeddings matrix:
[0, ----------------------
... -> word embeddings
config.vocab_size - 1, ______________________
config.vocab_size,
... -> special embeddings
config.vocab_size + config.n_special - 1] ______________________
where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.
Params:
config: a OpenAIGPTConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, total_tokens_embeddings[
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1]).
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
self-attention block.
`lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size - 1]. All labels set to -1 are ignored (masked), the loss
            is only computed for the labels set in [0, ..., vocab_size - 1]
Outputs:
if `lm_labels` is not `None`:
Outputs the language modeling loss.
else:
`lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, sequence_length, total_tokens_embeddings]
            (or more generally [d_1, ..., d_n, total_tokens_embeddings] where d_1 ... d_n are the dimensions of input_ids)
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
config = modeling_openai.OpenAIGPTConfig()
model = modeling_openai.OpenAIGPTLMHeadModel(config)
lm_logits = model(input_ids)
```
"""
def __init__(self, config):
super(OpenAIGPTLMHeadModel, self).__init__(config)
self.transformer = OpenAIGPTModel(config)
self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
self.apply(self.init_weights)
def set_num_special_tokens(self, num_special_tokens):
""" Update input and output embeddings with new embedding matrice
Make sure we are sharing the embeddings
"""
self.transformer.set_num_special_tokens(num_special_tokens)
self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight)
def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None):
hidden_states = self.transformer(input_ids, position_ids, token_type_ids)
lm_logits = self.lm_head(hidden_states)
if lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), lm_labels.view(-1))
return loss
return lm_logits
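# Added illustrative sketch (not part of the original module): growing the embedding
# matrix with special tokens and computing the LM loss. Note that in this version the
# labels are compared position-by-position with the logits; shifting the targets by one
# token is left to the caller.
def _openai_gpt_lm_head_usage_sketch():
    config = OpenAIGPTConfig()
    model = OpenAIGPTLMHeadModel(config)
    model.set_num_special_tokens(2) # e.g. one id each for [SEP] and [CLS]
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    lm_labels = input_ids.clone() # entries set to -1 would be ignored by the loss
    loss = model(input_ids, lm_labels=lm_labels)
    return loss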
class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
"""OpenAI GPT model with a Language Modeling and a Multiple Choice head ("Improving Language Understanding by Generative Pre-Training").
    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
    Special token embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during the fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.
    The embeddings are ordered as follows in the token embedding matrix:
[0, ----------------------
... -> word embeddings
config.vocab_size - 1, ______________________
config.vocab_size,
... -> special embeddings
config.vocab_size + config.n_special - 1] ______________________
where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.
Params:
config: a OpenAIGPTConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the BPE token
indices selected in the range [0, total_tokens_embeddings[
`mc_token_ids`: a torch.LongTensor of shape [batch_size, num_choices] with the index of the token from
which we should take the hidden state to feed the multiple choice classifier (usually last token of the sequence)
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1]).
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
self-attention block.
`lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with indices selected in [-1, 0, ..., total_tokens_embeddings - 1]. All labels set to -1 are ignored (masked), the loss
            is only computed for the labels set in [0, ..., total_tokens_embeddings - 1]
`multiple_choice_labels`: optional multiple choice labels: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_choices - 1].
Outputs:
if `lm_labels` and `multiple_choice_labels` are not `None`:
Outputs a tuple of losses with the language modeling loss and the multiple choice loss.
else: a tuple with
`lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, num_choices, sequence_length, total_tokens_embeddings]
`multiple_choice_logits`: the multiple choice logits as a torch.FloatTensor of size [batch_size, num_choices]
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]]) # (bsz, number of choice, seq length)
    mc_token_ids = torch.LongTensor([[2, 1]])  # (bsz, number of choice)
config = modeling_openai.OpenAIGPTConfig()
    model = modeling_openai.OpenAIGPTDoubleHeadsModel(config)
lm_logits, multiple_choice_logits = model(input_ids, mc_token_ids)
```
"""
def __init__(self, config):
super(OpenAIGPTDoubleHeadsModel, self).__init__(config)
self.transformer = OpenAIGPTModel(config)
self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
self.multiple_choice_head = OpenAIGPTMultipleChoiceHead(config)
self.apply(self.init_weights)
def set_num_special_tokens(self, num_special_tokens):
""" Update input and output embeddings with new embedding matrice
Make sure we are sharing the embeddings
"""
self.transformer.set_num_special_tokens(num_special_tokens)
self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight)
def forward(self, input_ids, mc_token_ids, lm_labels=None, mc_labels=None, token_type_ids=None, position_ids=None):
hidden_states = self.transformer(input_ids, position_ids, token_type_ids)
lm_logits = self.lm_head(hidden_states)
mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids)
losses = []
if lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
losses.append(loss_fct(lm_logits.view(-1, lm_logits.size(-1)), lm_labels.view(-1)))
if mc_labels is not None:
loss_fct = CrossEntropyLoss()
losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
if losses:
return losses
return lm_logits, mc_logits
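# Added illustrative sketch (not part of the original module): expected shapes for the
# double-heads model. input_ids is (bsz, num_choices, seq_len) and mc_token_ids is
# (bsz, num_choices), holding for each choice the position whose hidden state feeds
# the multiple-choice classifier.
def _openai_gpt_double_heads_usage_sketch():
    config = OpenAIGPTConfig()
    model = OpenAIGPTDoubleHeadsModel(config)
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]]) # (1, 2, 3)
    mc_token_ids = torch.LongTensor([[2, 1]]) # (1, 2)
    lm_logits, mc_logits = model(input_ids, mc_token_ids)
    return lm_logits, mc_logits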
| 37,647 | 45.421702 | 152 | py |
VLC-BERT | VLC-BERT-master/external/pytorch_pretrained_bert/convert_transfo_xl_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Transformer XL checkpoint and datasets."""
from __future__ import absolute_import, division, print_function
import argparse
import os
import sys
from io import open
import torch
import external.pytorch_pretrained_bert.tokenization_transfo_xl as data_utils
from external.pytorch_pretrained_bert import (CONFIG_NAME,
WEIGHTS_NAME,
TransfoXLConfig,
TransfoXLLMHeadModel,
load_tf_weights_in_transfo_xl)
from external.pytorch_pretrained_bert.tokenization_transfo_xl import (CORPUS_NAME,
VOCAB_NAME)
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules['data_utils'] = data_utils
sys.modules['vocabulary'] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(tf_checkpoint_path,
transfo_xl_config_file,
pytorch_dump_folder_path,
transfo_xl_dataset_file):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(transfo_xl_dataset_file, "rb") as fp:
corpus = pickle.load(fp, encoding="latin1")
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_NAME
print("Save vocabulary to {}".format(pytorch_vocab_dump_path))
corpus_vocab_dict = corpus.vocab.__dict__
torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
corpus_dict_no_vocab = corpus.__dict__
corpus_dict_no_vocab.pop('vocab', None)
pytorch_dataset_dump_path = pytorch_dump_folder_path + '/' + CORPUS_NAME
print("Save dataset to {}".format(pytorch_dataset_dump_path))
torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
config_path = os.path.abspath(transfo_xl_config_file)
tf_path = os.path.abspath(tf_checkpoint_path)
print("Converting Transformer XL checkpoint from {} with config at {}".format(tf_path, config_path))
# Initialise PyTorch model
if transfo_xl_config_file == "":
config = TransfoXLConfig()
else:
config = TransfoXLConfig(transfo_xl_config_file)
print("Building PyTorch model from configuration: {}".format(str(config)))
model = TransfoXLLMHeadModel(config)
model = load_tf_weights_in_transfo_xl(model, config, tf_path)
# Save pytorch-model
pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
print("Save PyTorch model to {}".format(os.path.abspath(pytorch_weights_dump_path)))
torch.save(model.state_dict(), pytorch_weights_dump_path)
print("Save configuration file to {}".format(os.path.abspath(pytorch_config_dump_path)))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path",
default = None,
type = str,
required = True,
help = "Path to the folder to store the PyTorch model or dataset/vocab.")
parser.add_argument("--tf_checkpoint_path",
default = "",
type = str,
help = "An optional path to a TensorFlow checkpoint path to be converted.")
parser.add_argument("--transfo_xl_config_file",
default = "",
type = str,
help = "An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture.")
parser.add_argument("--transfo_xl_dataset_file",
default = "",
type = str,
help = "An optional dataset file to be converted in a vocabulary.")
args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file)
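# Example invocation (added for illustration; all paths below are placeholders):
#
#   python convert_transfo_xl_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/transfo_xl_checkpoint \
#       --transfo_xl_config_file /path/to/transfo_xl_config.json \
#       --pytorch_dump_folder_path /path/to/output_folder
#
# Passing only --transfo_xl_dataset_file converts a pre-processed corpus pickle instead.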
| 5,642 | 47.230769 | 121 | py |
VLC-BERT | VLC-BERT-master/external/pytorch_pretrained_bert/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import json
import logging
import os
import shutil
import tempfile
from functools import wraps
from hashlib import sha256
import sys
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
except AttributeError:
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
os.path.join(os.path.expanduser("~"), '.pytorch_pretrained_bert'))
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
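# Added illustrative note: the cache filename is the sha256 hex digest of the URL,
# optionally followed by '.' and the digest of the ETag (values below are hypothetical):
#   url_to_filename("https://example.com/model.bin", etag='"abc"')
#   -> "<sha256 of url>.<sha256 of etag>"
# filename_to_url below reverses the mapping by reading the '<filename>.json' metadata
# side-car written by get_from_cache.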
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file):
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
raise IOError("HEAD request failed for url {} with status code {}"
.format(url, response.status_code))
etag = response.headers.get("ETag")
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w', encoding="utf-8") as meta_file:
json.dump(meta, meta_file)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename):
'''
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
'''
collection = set()
with open(filename, 'r', encoding='utf-8') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path, dot=True, lower=True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
| 8,280 | 32.124 | 112 | py |
VLC-BERT | VLC-BERT-master/external/pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BERT checkpoint."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import torch
from external.pytorch_pretrained_bert.modeling import BertConfig, BertForPreTraining, load_tf_weights_in_bert
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
# Initialise PyTorch model
config = BertConfig.from_json_file(bert_config_file)
print("Building PyTorch model from configuration: {}".format(str(config)))
model = BertForPreTraining(config)
# Load weights from tf checkpoint
load_tf_weights_in_bert(model, tf_checkpoint_path)
# Save pytorch-model
print("Save PyTorch model to {}".format(pytorch_dump_path))
torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--tf_checkpoint_path",
default = None,
type = str,
required = True,
help = "Path the TensorFlow checkpoint path.")
parser.add_argument("--bert_config_file",
default = None,
type = str,
required = True,
help = "The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture.")
parser.add_argument("--pytorch_dump_path",
default = None,
type = str,
required = True,
help = "Path to the output PyTorch model.")
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path,
args.bert_config_file,
args.pytorch_dump_path)
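# Example invocation (added for illustration; all paths below are placeholders):
#
#   python convert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/bert_model.ckpt \
#       --bert_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin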
| 2,538 | 39.301587 | 109 | py |
VLC-BERT | VLC-BERT-master/external/pytorch_pretrained_bert/modeling_transfo_xl.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Transformer XL model.
Adapted from https://github.com/kimiyoung/transformer-xl.
In particular https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py
"""
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import collections
import sys
from io import open
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .modeling import BertLayerNorm as LayerNorm
from .modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax, sample_logits
from .file_utils import cached_path
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-pytorch_model.bin",
}
PRETRAINED_CONFIG_ARCHIVE_MAP = {
'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-config.json",
}
CONFIG_NAME = 'config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
TF_WEIGHTS_NAME = 'model.ckpt'
def build_tf_to_pytorch_map(model, config):
""" A map of modules from TF to PyTorch.
This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible.
"""
tf_to_pt_map = {}
if hasattr(model, 'transformer'):
# We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax
tf_to_pt_map.update({
"transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight,
"transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias})
for i, (out_l, proj_l, tie_proj) in enumerate(zip(
model.crit.out_layers,
model.crit.out_projs,
config.tie_projs)):
layer_str = "transformer/adaptive_softmax/cutoff_%d/" % i
if config.tie_weight:
tf_to_pt_map.update({
layer_str + 'b': out_l.bias})
else:
raise NotImplementedError
# I don't think this is implemented in the TF code
tf_to_pt_map.update({
layer_str + 'lookup_table': out_l.weight,
layer_str + 'b': out_l.bias})
if not tie_proj:
tf_to_pt_map.update({
layer_str + 'proj': proj_l
})
# Now load the rest of the transformer
model = model.transformer
# Embeddings
for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)):
layer_str = "transformer/adaptive_embed/cutoff_%d/" % i
tf_to_pt_map.update({
layer_str + 'lookup_table': embed_l.weight,
layer_str + 'proj_W': proj_l
})
# Transformer blocks
for i, b in enumerate(model.layers):
layer_str = "transformer/layer_%d/" % i
tf_to_pt_map.update({
layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight,
layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias,
layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight,
layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight,
layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight,
layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight,
layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias,
layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight,
layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias,
layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight,
layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias,
})
# Relative positioning biases
if config.untie_r:
r_r_list = []
r_w_list = []
for b in model.layers:
r_r_list.append(b.dec_attn.r_r_bias)
r_w_list.append(b.dec_attn.r_w_bias)
else:
r_r_list = [model.r_r_bias]
r_w_list = [model.r_w_bias]
tf_to_pt_map.update({
'transformer/r_r_bias': r_r_list,
'transformer/r_w_bias': r_w_list})
return tf_to_pt_map
def load_tf_weights_in_transfo_xl(model, config, tf_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
# Build TF to PyTorch weights loading map
tf_to_pt_map = build_tf_to_pytorch_map(model, config)
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
tf_weights = {}
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
tf_weights[name] = array
for name, pointer in tf_to_pt_map.items():
assert name in tf_weights
array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required when using the pretrained model
if 'kernel' in name or 'proj' in name:
array = np.transpose(array)
if ('r_r_bias' in name or 'r_w_bias' in name) and len(pointer) > 1:
            # Here we will split the TF weights
assert len(pointer) == array.shape[0]
for i, p_i in enumerate(pointer):
arr_i = array[i, ...]
try:
assert p_i.shape == arr_i.shape
except AssertionError as e:
e.args += (p_i.shape, arr_i.shape)
raise
print("Initialize PyTorch weight {} for layer {}".format(name, i))
p_i.data = torch.from_numpy(arr_i)
else:
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
tf_weights.pop(name, None)
tf_weights.pop(name + '/Adam', None)
tf_weights.pop(name + '/Adam_1', None)
print("Weights not copied to PyTorch model: {}".format(', '.join(tf_weights.keys())))
return model
class TransfoXLConfig(object):
"""Configuration class to store the configuration of a `TransfoXLModel`.
"""
def __init__(self,
vocab_size_or_config_json_file=267735,
cutoffs=[20000, 40000, 200000],
d_model=1024,
d_embed=1024,
n_head=16,
d_head=64,
d_inner=4096,
div_val=4,
pre_lnorm=False,
n_layer=18,
tgt_len=128,
ext_len=0,
mem_len=1600,
clamp_len=1000,
same_length=True,
proj_share_all_but_first=True,
attn_type=0,
sample_softmax=-1,
adaptive=True,
tie_weight=True,
dropout=0.1,
dropatt=0.0,
untie_r=True,
init="normal",
init_range=0.01,
proj_init_std=0.01,
init_std=0.02):
"""Constructs TransfoXLConfig.
Args:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `TransfoXLModel` or a configuration json file.
cutoffs: cutoffs for the adaptive softmax
d_model: Dimensionality of the model's hidden states.
d_embed: Dimensionality of the embeddings
d_head: Dimensionality of the model's heads.
            div_val: divisor value for adaptive input and softmax
pre_lnorm: apply LayerNorm to the input instead of the output
d_inner: Inner dimension in FF
n_layer: Number of hidden layers in the Transformer encoder.
n_head: Number of attention heads for each attention layer in
the Transformer encoder.
tgt_len: number of tokens to predict
ext_len: length of the extended context
mem_len: length of the retained previous heads
same_length: use the same attn length for all tokens
proj_share_all_but_first: True to share all but first projs, False not to share.
attn_type: attention type. 0 for Transformer-XL, 1 for Shaw et al, 2 for Vaswani et al, 3 for Al Rfou et al.
clamp_len: use the same pos embeddings after clamp_len
sample_softmax: number of samples in sampled softmax
adaptive: use adaptive softmax
tie_weight: tie the word embedding and softmax weights
            dropout: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
dropatt: The dropout ratio for the attention probabilities.
untie_r: untie relative position biases
init: parameter initializer to use
init_range: parameters initialized by U(-init_range, init_range).
            proj_init_std: parameters initialized by N(0, proj_init_std)
init_std: parameters initialized by N(0, init_std)
"""
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.n_token = vocab_size_or_config_json_file
self.cutoffs = []
self.cutoffs.extend(cutoffs)
self.tie_weight = tie_weight
if proj_share_all_but_first:
self.tie_projs = [False] + [True] * len(self.cutoffs)
else:
self.tie_projs = [False] + [False] * len(self.cutoffs)
self.d_model = d_model
self.d_embed = d_embed
self.d_head = d_head
self.d_inner = d_inner
self.div_val = div_val
self.pre_lnorm = pre_lnorm
self.n_layer = n_layer
self.n_head = n_head
self.tgt_len = tgt_len
self.ext_len = ext_len
self.mem_len = mem_len
self.same_length = same_length
self.attn_type = attn_type
self.clamp_len = clamp_len
self.sample_softmax = sample_softmax
self.adaptive = adaptive
self.dropout = dropout
self.dropatt = dropatt
self.untie_r = untie_r
self.init = init
self.init_range = init_range
self.proj_init_std = proj_init_std
self.init_std = init_std
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `TransfoXLConfig` from a Python dictionary of parameters."""
config = TransfoXLConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `TransfoXLConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
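# Added illustrative sketch (not part of the original module): building a configuration
# from keyword arguments and round-tripping it through JSON. The assertion reflects the
# documented defaults (cutoffs=[20000, 40000, 200000], proj_share_all_but_first=True).
def _transfo_xl_config_sketch():
    config = TransfoXLConfig(vocab_size_or_config_json_file=267735)
    assert config.tie_projs == [False, True, True, True]
    restored = TransfoXLConfig.from_dict(json.loads(config.to_json_string()))
    return restored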
class PositionalEmbedding(nn.Module):
def __init__(self, demb):
super(PositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq, bsz=None):
sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
if bsz is not None:
return pos_emb[:,None,:].expand(-1, bsz, -1)
else:
return pos_emb[:,None,:]
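# Added explanatory comment: PositionalEmbedding concatenates sin and cos of
# pos_seq * inv_freq, so the feature dimension equals `demb` (demb/2 sine features
# followed by demb/2 cosine features); the output has shape [len(pos_seq), 1, demb],
# or [len(pos_seq), bsz, demb] when `bsz` is passed.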
class PositionwiseFF(nn.Module):
def __init__(self, d_model, d_inner, dropout, pre_lnorm=False):
super(PositionwiseFF, self).__init__()
self.d_model = d_model
self.d_inner = d_inner
self.dropout = dropout
self.CoreNet = nn.Sequential(
nn.Linear(d_model, d_inner), nn.ReLU(inplace=True),
nn.Dropout(dropout),
nn.Linear(d_inner, d_model),
nn.Dropout(dropout),
)
self.layer_norm = LayerNorm(d_model)
self.pre_lnorm = pre_lnorm
def forward(self, inp):
if self.pre_lnorm:
##### layer normalization + positionwise feed-forward
core_out = self.CoreNet(self.layer_norm(inp))
##### residual connection
output = core_out + inp
else:
##### positionwise feed-forward
core_out = self.CoreNet(inp)
##### residual connection + layer normalization
output = self.layer_norm(inp + core_out)
return output
class MultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
pre_lnorm=False, r_r_bias=None, r_w_bias=None):
super(MultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.q_net = nn.Linear(d_model, n_head * d_head, bias=False)
self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = LayerNorm(d_model)
self.scale = 1 / (d_head ** 0.5)
self.pre_lnorm = pre_lnorm
if r_r_bias is None or r_w_bias is None: # Biases are not shared
self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
else:
self.r_r_bias = r_r_bias
self.r_w_bias = r_w_bias
def forward(self, h, attn_mask=None, mems=None):
##### multihead attention
# [hlen x bsz x n_head x d_head]
if mems is not None:
c = torch.cat([mems, h], 0)
else:
c = h
if self.pre_lnorm:
##### layer normalization
c = self.layer_norm(c)
head_q = self.q_net(h)
head_k, head_v = torch.chunk(self.kv_net(c), 2, -1)
head_q = head_q.view(h.size(0), h.size(1), self.n_head, self.d_head)
head_k = head_k.view(c.size(0), c.size(1), self.n_head, self.d_head)
head_v = head_v.view(c.size(0), c.size(1), self.n_head, self.d_head)
# [qlen x klen x bsz x n_head]
attn_score = torch.einsum('ibnd,jbnd->ijbn', (head_q, head_k))
attn_score.mul_(self.scale)
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score.masked_fill_(attn_mask[None,:,:,None], -float('inf'))
elif attn_mask.dim() == 3:
attn_score.masked_fill_(attn_mask[:,:,:,None], -float('inf'))
# [qlen x klen x bsz x n_head]
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
# [qlen x klen x bsz x n_head] + [klen x bsz x n_head x d_head] -> [qlen x bsz x n_head x d_head]
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, head_v))
attn_vec = attn_vec.contiguous().view(
attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
##### linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
##### residual connection
output = h + attn_out
else:
##### residual connection + layer normalization
output = self.layer_norm(h + attn_out)
return output
class RelMultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
tgt_len=None, ext_len=None, mem_len=None, pre_lnorm=False,
r_r_bias=None, r_w_bias=None):
super(RelMultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = LayerNorm(d_model)
self.scale = 1 / (d_head ** 0.5)
self.pre_lnorm = pre_lnorm
if r_r_bias is None or r_w_bias is None: # Biases are not shared
self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
else:
self.r_r_bias = r_r_bias
self.r_w_bias = r_w_bias
def _parallelogram_mask(self, h, w, left=False):
mask = torch.ones((h, w)).byte()
m = min(h, w)
mask[:m,:m] = torch.triu(mask[:m,:m])
mask[-m:,-m:] = torch.tril(mask[-m:,-m:])
if left:
return mask
else:
return mask.flip(0)
def _shift(self, x, qlen, klen, mask, left=False):
if qlen > 1:
zero_pad = torch.zeros((x.size(0), qlen-1, x.size(2), x.size(3)),
device=x.device, dtype=x.dtype)
else:
zero_pad = torch.zeros(0, device=x.device, dtype=x.dtype)
if left:
mask = mask.flip(1)
x_padded = torch.cat([zero_pad, x], dim=1).expand(qlen, -1, -1, -1)
else:
x_padded = torch.cat([x, zero_pad], dim=1).expand(qlen, -1, -1, -1)
x = x_padded.masked_select(mask[:,:,None,None]) \
.view(qlen, klen, x.size(2), x.size(3))
return x
def _rel_shift(self, x, zero_triu=False):
zero_pad_shape = (x.size(0), 1) + x.size()[2:]
zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=1)
x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:]
x_padded = x_padded.view(*x_padded_shape)
x = x_padded[1:].view_as(x)
if zero_triu:
ones = torch.ones((x.size(0), x.size(1)))
x = x * torch.tril(ones, x.size(1) - x.size(0))[:,:,None,None]
return x
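    # Added explanatory comment: _rel_shift is the Transformer-XL "relative shift"
    # trick. Prepending a zero column along the key dimension, viewing the padded
    # tensor with the key dimension first, dropping the first row and viewing it back
    # realigns the scores computed against the reversed relative-position embeddings,
    # so that entry (i, j) holds the score for the relative offset between query i and
    # key j without an explicit gather.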
def forward(self, w, r, attn_mask=None, mems=None):
raise NotImplementedError
class RelPartialLearnableMultiHeadAttn(RelMultiHeadAttn):
def __init__(self, *args, **kwargs):
super(RelPartialLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)
def forward(self, w, r, attn_mask=None, mems=None):
qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
if mems is not None:
cat = torch.cat([mems, w], 0)
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(cat))
else:
w_heads = self.qkv_net(cat)
r_head_k = self.r_net(r)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
w_head_q = w_head_q[-qlen:]
else:
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(w))
else:
w_heads = self.qkv_net(w)
r_head_k = self.r_net(r)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
klen = w_head_k.size(0)
w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
r_head_k = r_head_k.view(rlen, self.n_head, self.d_head) # qlen x n_head x d_head
#### compute attention score
rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head
AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head
rr_head_q = w_head_q + self.r_r_bias
BD = torch.einsum('ibnd,jnd->ijbn', (rr_head_q, r_head_k)) # qlen x klen x bsz x n_head
BD = self._rel_shift(BD)
# [qlen x klen x bsz x n_head]
attn_score = AC + BD
attn_score.mul_(self.scale)
#### compute attention probability
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score = attn_score.float().masked_fill(
attn_mask[None,:,:,None], -1e30).type_as(attn_score)
elif attn_mask.dim() == 3:
attn_score = attn_score.float().masked_fill(
attn_mask[:,:,:,None], -1e30).type_as(attn_score)
# [qlen x klen x bsz x n_head]
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
#### compute attention vector
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
# [qlen x bsz x n_head x d_head]
attn_vec = attn_vec.contiguous().view(
attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
##### linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
##### residual connection
output = w + attn_out
else:
##### residual connection + layer normalization
output = self.layer_norm(w + attn_out)
return output
class RelLearnableMultiHeadAttn(RelMultiHeadAttn):
def __init__(self, *args, **kwargs):
super(RelLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
def forward(self, w, r_emb, r_w_bias, r_bias, attn_mask=None, mems=None):
# r_emb: [klen, n_head, d_head], used for term B
# r_w_bias: [n_head, d_head], used for term C
# r_bias: [klen, n_head], used for term D
qlen, bsz = w.size(0), w.size(1)
if mems is not None:
cat = torch.cat([mems, w], 0)
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(cat))
else:
w_heads = self.qkv_net(cat)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
w_head_q = w_head_q[-qlen:]
else:
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(w))
else:
w_heads = self.qkv_net(w)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
klen = w_head_k.size(0)
w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)
w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)
w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)
if klen > r_emb.size(0):
r_emb_pad = r_emb[0:1].expand(klen-r_emb.size(0), -1, -1)
r_emb = torch.cat([r_emb_pad, r_emb], 0)
r_bias_pad = r_bias[0:1].expand(klen-r_bias.size(0), -1)
r_bias = torch.cat([r_bias_pad, r_bias], 0)
else:
r_emb = r_emb[-klen:]
r_bias = r_bias[-klen:]
#### compute attention score
rw_head_q = w_head_q + r_w_bias[None] # qlen x bsz x n_head x d_head
AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head
B_ = torch.einsum('ibnd,jnd->ijbn', (w_head_q, r_emb)) # qlen x klen x bsz x n_head
D_ = r_bias[None, :, None] # 1 x klen x 1 x n_head
BD = self._rel_shift(B_ + D_)
# [qlen x klen x bsz x n_head]
attn_score = AC + BD
attn_score.mul_(self.scale)
#### compute attention probability
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score.masked_fill_(attn_mask[None,:,:,None], -float('inf'))
elif attn_mask.dim() == 3:
attn_score.masked_fill_(attn_mask[:,:,:,None], -float('inf'))
# [qlen x klen x bsz x n_head]
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
#### compute attention vector
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
# [qlen x bsz x n_head x d_head]
attn_vec = attn_vec.contiguous().view(
attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
##### linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
##### residual connection
output = w + attn_out
else:
##### residual connection + layer normalization
output = self.layer_norm(w + attn_out)
return output
class DecoderLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs):
super(DecoderLayer, self).__init__()
self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs)
self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
pre_lnorm=kwargs.get('pre_lnorm'))
def forward(self, dec_inp, dec_attn_mask=None, mems=None):
output = self.dec_attn(dec_inp, attn_mask=dec_attn_mask,
mems=mems)
output = self.pos_ff(output)
return output
class RelLearnableDecoderLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout,
**kwargs):
super(RelLearnableDecoderLayer, self).__init__()
self.dec_attn = RelLearnableMultiHeadAttn(n_head, d_model, d_head, dropout,
**kwargs)
self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
pre_lnorm=kwargs.get('pre_lnorm'))
def forward(self, dec_inp, r_emb, r_w_bias, r_bias, dec_attn_mask=None, mems=None):
output = self.dec_attn(dec_inp, r_emb, r_w_bias, r_bias,
attn_mask=dec_attn_mask,
mems=mems)
output = self.pos_ff(output)
return output
class RelPartialLearnableDecoderLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout,
**kwargs):
super(RelPartialLearnableDecoderLayer, self).__init__()
self.dec_attn = RelPartialLearnableMultiHeadAttn(n_head, d_model,
d_head, dropout, **kwargs)
self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
pre_lnorm=kwargs.get('pre_lnorm'))
def forward(self, dec_inp, r, dec_attn_mask=None, mems=None):
output = self.dec_attn(dec_inp, r,
attn_mask=dec_attn_mask,
mems=mems)
output = self.pos_ff(output)
return output
class AdaptiveEmbedding(nn.Module):
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
sample_softmax=False):
super(AdaptiveEmbedding, self).__init__()
self.n_token = n_token
self.d_embed = d_embed
self.cutoffs = cutoffs + [n_token]
self.div_val = div_val
self.d_proj = d_proj
self.emb_scale = d_proj ** 0.5
self.cutoff_ends = [0] + self.cutoffs
self.emb_layers = nn.ModuleList()
self.emb_projs = nn.ParameterList()
if div_val == 1:
self.emb_layers.append(
nn.Embedding(n_token, d_embed, sparse=sample_softmax>0)
)
if d_proj != d_embed:
self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_embed)))
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
d_emb_i = d_embed // (div_val ** i)
self.emb_layers.append(nn.Embedding(r_idx-l_idx, d_emb_i))
self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_emb_i)))
def forward(self, inp):
if self.div_val == 1:
embed = self.emb_layers[0](inp)
if self.d_proj != self.d_embed:
embed = F.linear(embed, self.emb_projs[0])
else:
param = next(self.parameters())
inp_flat = inp.view(-1)
emb_flat = torch.zeros([inp_flat.size(0), self.d_proj],
dtype=param.dtype, device=param.device)
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
indices_i = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
inp_i = inp_flat.index_select(0, indices_i) - l_idx
emb_i = self.emb_layers[i](inp_i)
emb_i = F.linear(emb_i, self.emb_projs[i])
emb_flat.index_copy_(0, indices_i, emb_i)
embed_shape = inp.size() + (self.d_proj,)
embed = emb_flat.view(embed_shape)
embed.mul_(self.emb_scale)
return embed
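# Added explanatory comment: with div_val > 1 the adaptive embedding gives smaller
# vectors to rarer token clusters. For the wt103 defaults (d_embed=1024,
# cutoffs=[20000, 40000, 200000], div_val=4) the per-cluster embedding sizes are
# 1024, 256, 64 and 16, each projected back to d_proj via emb_projs[i] before the
# final scaling by emb_scale.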
class TransfoXLPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(TransfoXLPreTrainedModel, self).__init__()
if not isinstance(config, TransfoXLConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `TransfoXLConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_weight(self, weight):
if self.config.init == 'uniform':
nn.init.uniform_(weight, -self.config.init_range, self.config.init_range)
elif self.config.init == 'normal':
nn.init.normal_(weight, 0.0, self.config.init_std)
def init_bias(self, bias):
nn.init.constant_(bias, 0.0)
def init_weights(self, m):
""" Initialize the weights.
"""
classname = m.__class__.__name__
if classname.find('Linear') != -1:
if hasattr(m, 'weight') and m.weight is not None:
self.init_weight(m.weight)
if hasattr(m, 'bias') and m.bias is not None:
self.init_bias(m.bias)
elif classname.find('AdaptiveEmbedding') != -1:
if hasattr(m, 'emb_projs'):
for i in range(len(m.emb_projs)):
if m.emb_projs[i] is not None:
nn.init.normal_(m.emb_projs[i], 0.0, self.config.proj_init_std)
elif classname.find('Embedding') != -1:
if hasattr(m, 'weight'):
self.init_weight(m.weight)
elif classname.find('ProjectedAdaptiveLogSoftmax') != -1:
if hasattr(m, 'cluster_weight') and m.cluster_weight is not None:
self.init_weight(m.cluster_weight)
if hasattr(m, 'cluster_bias') and m.cluster_bias is not None:
self.init_bias(m.cluster_bias)
if hasattr(m, 'out_projs'):
for i in range(len(m.out_projs)):
if m.out_projs[i] is not None:
nn.init.normal_(m.out_projs[i], 0.0, self.config.proj_init_std)
elif classname.find('LayerNorm') != -1:
if hasattr(m, 'weight'):
nn.init.normal_(m.weight, 1.0, self.config.init_std)
if hasattr(m, 'bias') and m.bias is not None:
self.init_bias(m.bias)
elif classname.find('TransformerLM') != -1:
if hasattr(m, 'r_emb'):
self.init_weight(m.r_emb)
if hasattr(m, 'r_w_bias'):
self.init_weight(m.r_w_bias)
if hasattr(m, 'r_r_bias'):
self.init_weight(m.r_r_bias)
if hasattr(m, 'r_bias'):
self.init_bias(m.r_bias)
def set_num_special_tokens(self, num_special_tokens):
pass
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None,
from_tf=False, *inputs, **kwargs):
"""
Instantiate a TransfoXLPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
                    . `transfo-xl-wt103`
- a path or url to a pretrained model archive containing:
                    . `config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a TransfoXLModel instance
- a path or url to a pretrained model archive containing:
                    . `config.json` a configuration file for the model
                    . `model.ckpt` a TensorFlow checkpoint
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
            *inputs, **kwargs: additional inputs for the specific Transformer-XL subclass
"""
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} and {} "
"at this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
pretrained_model_name_or_path,
archive_file, config_file))
return None
if resolved_archive_file == archive_file and resolved_config_file == config_file:
logger.info("loading weights file {}".format(archive_file))
logger.info("loading configuration file {}".format(config_file))
else:
logger.info("loading weights file {} from cache at {}".format(
archive_file, resolved_archive_file))
logger.info("loading configuration file {} from cache at {}".format(
config_file, resolved_config_file))
# Load config
config = TransfoXLConfig.from_json_file(resolved_config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
state_dict = torch.load(resolved_archive_file, map_location='cpu' if not torch.cuda.is_available() else None)
if from_tf:
# Directly load from a TensorFlow checkpoint
return load_tf_weights_in_transfo_xl(model, config, pretrained_model_name_or_path)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'transformer') and any(s.startswith('transformer.') for s in state_dict.keys()):
start_prefix = 'transformer.'
load(model, prefix=start_prefix)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
model.__class__.__name__, "\n\t".join(error_msgs)))
# Make sure we are still sharing the input and output embeddings
if hasattr(model, 'tie_weights'):
model.tie_weights()
return model
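# Added illustrative sketch (not part of the original module; TransfoXLModel is defined
# just below): loading the published checkpoint via the shortcut name registered in
# PRETRAINED_MODEL_ARCHIVE_MAP. Weights are downloaded and cached on first use.
def _transfo_xl_from_pretrained_sketch():
    model = TransfoXLModel.from_pretrained('transfo-xl-wt103')
    model.reset_length(tgt_len=128, ext_len=0, mem_len=1600)
    return model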
class TransfoXLModel(TransfoXLPreTrainedModel):
"""Transformer XL model ("Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context").
    Transformer XL uses relative positioning (with sinusoidal patterns) and adaptive softmax inputs, which means that:
    - you don't need to specify position embedding indices
    - the tokens in the vocabulary have to be sorted in decreasing frequency.
Params:
config: a TransfoXLConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the token indices selected in the range [0, self.config.n_token[
        `mems`: optional memory of hidden states from previous forward passes
            as a list (num layers) of hidden states at the entry of each layer
            each hidden state has shape [self.config.mem_len, bsz, self.config.d_model]
            Note that the first two dimensions are transposed in `mems` with regard to `input_ids` and `target`
Outputs:
A tuple of (last_hidden_state, new_mems)
`last_hidden_state`: the encoded-hidden-states at the top of the model
as a torch.FloatTensor of size [batch_size, sequence_length, self.config.d_model]
`new_mems`: list (num layers) of updated mem states at the entry of each layer
each mem state is a torch.FloatTensor of size [self.config.mem_len, batch_size, self.config.d_model]
            Note that the first two dimensions are transposed in `mems` with regard to `input_ids` and `target`
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_ids_next = torch.LongTensor([[53, 21, 1], [64, 23, 100]])
config = TransfoXLConfig()
model = TransfoXLModel(config)
last_hidden_state, new_mems = model(input_ids)
# Another time on input_ids_next using the memory:
last_hidden_state, new_mems = model(input_ids_next, new_mems)
```
"""
def __init__(self, config):
super(TransfoXLModel, self).__init__(config)
self.n_token = config.n_token
self.d_embed = config.d_embed
self.d_model = config.d_model
self.n_head = config.n_head
self.d_head = config.d_head
self.word_emb = AdaptiveEmbedding(config.n_token, config.d_embed, config.d_model, config.cutoffs,
div_val=config.div_val)
self.drop = nn.Dropout(config.dropout)
self.n_layer = config.n_layer
self.tgt_len = config.tgt_len
self.mem_len = config.mem_len
self.ext_len = config.ext_len
self.max_klen = config.tgt_len + config.ext_len + config.mem_len
self.attn_type = config.attn_type
if not config.untie_r:
self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.layers = nn.ModuleList()
if config.attn_type == 0: # the default attention
for i in range(config.n_layer):
self.layers.append(
RelPartialLearnableDecoderLayer(
config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout,
tgt_len=config.tgt_len, ext_len=config.ext_len, mem_len=config.mem_len,
dropatt=config.dropatt, pre_lnorm=config.pre_lnorm,
r_w_bias=None if config.untie_r else self.r_w_bias,
r_r_bias=None if config.untie_r else self.r_r_bias)
)
elif config.attn_type == 1: # learnable embeddings
for i in range(config.n_layer):
self.layers.append(
RelLearnableDecoderLayer(
config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout,
tgt_len=config.tgt_len, ext_len=config.ext_len, mem_len=config.mem_len,
dropatt=config.dropatt, pre_lnorm=config.pre_lnorm,
r_w_bias=None if config.untie_r else self.r_w_bias,
r_r_bias=None if config.untie_r else self.r_r_bias)
)
elif config.attn_type in [2, 3]: # absolute embeddings
for i in range(config.n_layer):
self.layers.append(
DecoderLayer(
config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout,
dropatt=config.dropatt, pre_lnorm=config.pre_lnorm,
r_w_bias=None if config.untie_r else self.r_w_bias,
r_r_bias=None if config.untie_r else self.r_r_bias)
)
self.same_length = config.same_length
self.clamp_len = config.clamp_len
if self.attn_type == 0: # default attention
self.pos_emb = PositionalEmbedding(self.d_model)
elif self.attn_type == 1: # learnable
self.r_emb = nn.Parameter(torch.Tensor(
self.n_layer, self.max_klen, self.n_head, self.d_head))
self.r_bias = nn.Parameter(torch.Tensor(
self.n_layer, self.max_klen, self.n_head))
elif self.attn_type == 2: # absolute standard
self.pos_emb = PositionalEmbedding(self.d_model)
elif self.attn_type == 3: # absolute deeper SA
self.r_emb = nn.Parameter(torch.Tensor(
self.n_layer, self.max_klen, self.n_head, self.d_head))
self.apply(self.init_weights)
def backward_compatible(self):
self.sample_softmax = -1
def reset_length(self, tgt_len, ext_len, mem_len):
self.tgt_len = tgt_len
self.mem_len = mem_len
self.ext_len = ext_len
def init_mems(self, data):
if self.mem_len > 0:
mems = []
param = next(self.parameters())
for i in range(self.n_layer):
empty = torch.zeros(self.mem_len, data.size(1), self.config.d_model,
dtype=param.dtype, device=param.device)
mems.append(empty)
return mems
else:
return None
def _update_mems(self, hids, mems, qlen, mlen):
# does not deal with None
if mems is None: return None
# mems is not None
assert len(hids) == len(mems), 'len(hids) != len(mems)'
# There are `mlen + qlen` steps that can be cached into mems
# For the next step, the last `ext_len` of the `qlen` tokens
# will be used as the extended context. Hence, we only cache
# the tokens from `mlen + qlen - self.ext_len - self.mem_len`
# to `mlen + qlen - self.ext_len`.
with torch.no_grad():
new_mems = []
end_idx = mlen + max(0, qlen - 0 - self.ext_len)
beg_idx = max(0, end_idx - self.mem_len)
for i in range(len(hids)):
cat = torch.cat([mems[i], hids[i]], dim=0)
new_mems.append(cat[beg_idx:end_idx].detach())
return new_mems
def _forward(self, dec_inp, mems=None):
qlen, bsz = dec_inp.size()
word_emb = self.word_emb(dec_inp)
mlen = mems[0].size(0) if mems is not None else 0
klen = mlen + qlen
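        # build the causal attention mask over klen = mlen + qlen key positions:
        # each query token attends to all memory slots and to query positions up to
        # (and including) itself; with `same_length`, older keys are additionally
        # masked out so that every token sees the same total context length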
if self.same_length:
all_ones = word_emb.new_ones(qlen, klen)
mask_len = klen - self.mem_len
if mask_len > 0:
mask_shift_len = qlen - mask_len
else:
mask_shift_len = qlen
dec_attn_mask = (torch.triu(all_ones, 1+mlen)
+ torch.tril(all_ones, -mask_shift_len)).byte()[:, :, None] # -1
else:
dec_attn_mask = torch.triu(
word_emb.new_ones(qlen, klen), diagonal=1+mlen).byte()[:,:,None]
hids = []
if self.attn_type == 0: # default
pos_seq = torch.arange(klen-1, -1, -1.0, device=word_emb.device,
dtype=word_emb.dtype)
if self.clamp_len > 0:
pos_seq.clamp_(max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq)
core_out = self.drop(word_emb)
pos_emb = self.drop(pos_emb)
for i, layer in enumerate(self.layers):
hids.append(core_out)
mems_i = None if mems is None else mems[i]
core_out = layer(core_out, pos_emb, dec_attn_mask=dec_attn_mask, mems=mems_i)
elif self.attn_type == 1: # learnable
core_out = self.drop(word_emb)
for i, layer in enumerate(self.layers):
hids.append(core_out)
if self.clamp_len > 0:
r_emb = self.r_emb[i][-self.clamp_len :]
r_bias = self.r_bias[i][-self.clamp_len :]
else:
r_emb, r_bias = self.r_emb[i], self.r_bias[i]
mems_i = None if mems is None else mems[i]
core_out = layer(core_out, r_emb, self.r_w_bias[i],
r_bias, dec_attn_mask=dec_attn_mask, mems=mems_i)
elif self.attn_type == 2: # absolute
pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device,
dtype=word_emb.dtype)
if self.clamp_len > 0:
pos_seq.clamp_(max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq)
core_out = self.drop(word_emb + pos_emb[-qlen:])
for i, layer in enumerate(self.layers):
hids.append(core_out)
mems_i = None if mems is None else mems[i]
if mems_i is not None and i == 0:
mems_i += pos_emb[:mlen]
core_out = layer(core_out, dec_attn_mask=dec_attn_mask,
mems=mems_i)
elif self.attn_type == 3:
core_out = self.drop(word_emb)
for i, layer in enumerate(self.layers):
hids.append(core_out)
mems_i = None if mems is None else mems[i]
if mems_i is not None and mlen > 0:
cur_emb = self.r_emb[i][:-qlen]
cur_size = cur_emb.size(0)
if cur_size < mlen:
cur_emb_pad = cur_emb[0:1].expand(mlen-cur_size, -1, -1)
cur_emb = torch.cat([cur_emb_pad, cur_emb], 0)
else:
cur_emb = cur_emb[-mlen:]
mems_i += cur_emb.view(mlen, 1, -1)
core_out += self.r_emb[i][-qlen:].view(qlen, 1, -1)
core_out = layer(core_out, dec_attn_mask=dec_attn_mask,
mems=mems_i)
core_out = self.drop(core_out)
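        # note: `_update_mems` is declared as (hids, mems, qlen, mlen) but called with
        # (mlen, qlen); with the default ext_len == 0 both orderings cache the same
        # window, since end_idx reduces to mlen + qlen either way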
new_mems = self._update_mems(hids, mems, mlen, qlen)
return core_out, new_mems
def forward(self, input_ids, mems=None):
""" Params:
input_ids :: [bsz, len]
                mems :: optional mems from previous forward passes (or init_mems)
list (num layers) of mem states at the entry of each layer
shape :: [self.config.mem_len, bsz, self.config.d_model]
Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
Returns:
tuple (last_hidden, new_mems) where:
new_mems: list (num layers) of mem states at the entry of each layer
shape :: [self.config.mem_len, bsz, self.config.d_model]
last_hidden: output of the last layer:
shape :: [bsz, len, self.config.d_model]
"""
# the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
# so we transpose here from shape [bsz, len] to shape [len, bsz]
input_ids = input_ids.transpose(0, 1).contiguous()
if mems is None:
mems = self.init_mems(input_ids)
last_hidden, new_mems = self._forward(input_ids, mems=mems)
# We transpose back here to shape [bsz, len, hidden_dim]
last_hidden = last_hidden.transpose(0, 1).contiguous()
return (last_hidden, new_mems)
class TransfoXLLMHeadModel(TransfoXLPreTrainedModel):
"""Transformer XL model ("Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context").
    This model adds an (adaptive) softmax head on top of the TransfoXLModel.
    Transformer XL uses relative positioning (with sinusoidal patterns) and adaptive softmax inputs, which means that:
    - you don't need to specify position embedding indices
    - the tokens in the vocabulary have to be sorted by decreasing frequency.
Call self.tie_weights() if you update/load the weights of the transformer to keep the weights tied.
Params:
config: a TransfoXLConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the token indices selected in the range [0, self.config.n_token[
`target`: an optional torch.LongTensor of shape [batch_size, sequence_length]
with the target token indices selected in the range [0, self.config.n_token[
`mems`: an optional memory of hidden states from previous forward passes
as a list (num layers) of hidden states at the entry of each layer
            each hidden state has shape [self.config.mem_len, bsz, self.config.d_model]
Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
Outputs:
        A tuple of (softmax_output, new_mems)
        `softmax_output`: output of the (adaptive) softmax:
            if target is None:
                log probabilities of tokens, shape [batch_size, sequence_length, n_tokens]
            else:
                Negative log likelihood of shape [batch_size, sequence_length]
`new_mems`: list (num layers) of updated mem states at the entry of each layer
each mem state is a torch.FloatTensor of size [self.config.mem_len, batch_size, self.config.d_model]
Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_ids_next = torch.LongTensor([[53, 21, 1], [64, 23, 100]])
config = TransfoXLConfig()
    model = TransfoXLLMHeadModel(config)
    log_probs, new_mems = model(input_ids)
    # Another time on input_ids_next using the memory:
    log_probs, new_mems = model(input_ids_next, mems=new_mems)
```
"""
def __init__(self, config):
super(TransfoXLLMHeadModel, self).__init__(config)
self.transformer = TransfoXLModel(config)
self.sample_softmax = config.sample_softmax
# use sampled softmax
if config.sample_softmax > 0:
self.out_layer = nn.Linear(config.d_model, config.n_token)
self.sampler = LogUniformSampler(config.n_token, config.sample_softmax)
# use adaptive softmax (including standard softmax)
else:
self.crit = ProjectedAdaptiveLogSoftmax(config.n_token, config.d_embed, config.d_model,
config.cutoffs, div_val=config.div_val)
self.apply(self.init_weights)
self.tie_weights()
def tie_weights(self):
""" Run this to be sure output and input (adaptive) softmax weights are tied """
# sampled softmax
if self.sample_softmax > 0:
if self.config.tie_weight:
self.out_layer.weight = self.transformer.word_emb.weight
# adaptive softmax (including standard softmax)
else:
if self.config.tie_weight:
for i in range(len(self.crit.out_layers)):
self.crit.out_layers[i].weight = self.transformer.word_emb.emb_layers[i].weight
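            # optionally also tie each tail cluster's output projection to the
            # corresponding input embedding projection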
if self.config.tie_projs:
for i, tie_proj in enumerate(self.config.tie_projs):
if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
elif tie_proj and self.config.div_val != 1:
self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
def reset_length(self, tgt_len, ext_len, mem_len):
self.transformer.reset_length(tgt_len, ext_len, mem_len)
def init_mems(self, data):
return self.transformer.init_mems(data)
def forward(self, input_ids, target=None, mems=None):
""" Params:
input_ids :: [bsz, len]
target :: [bsz, len]
Returns:
tuple(softmax_output, new_mems) where:
new_mems: list (num layers) of hidden states at the entry of each layer
shape :: [mem_len, bsz, self.config.d_model] :: Warning: shapes are transposed here w. regards to input_ids
                    softmax_output: output of the (adaptive) softmax:
                        if target is None:
                            log probabilities of tokens, shape :: [bsz, len, n_tokens]
                        else:
                            Negative log likelihood of shape :: [bsz, len]
"""
bsz = input_ids.size(0)
tgt_len = input_ids.size(1)
last_hidden, new_mems = self.transformer(input_ids, mems)
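        # only the hidden states of the current segment (the last tgt_len positions)
        # are scored by the (adaptive) softmax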
pred_hid = last_hidden[:, -tgt_len:]
if self.sample_softmax > 0 and self.training:
assert self.config.tie_weight
logit = sample_logits(self.transformer.word_emb, self.out_layer.bias, target, pred_hid, self.sampler)
softmax_output = -F.log_softmax(logit, -1)[:, :, 0]
else:
softmax_output = self.crit(pred_hid.view(-1, pred_hid.size(-1)), target)
if target is None:
softmax_output = softmax_output.view(bsz, tgt_len, -1)
else:
softmax_output = softmax_output.view(bsz, tgt_len)
# We transpose back
return (softmax_output, new_mems)
| 58,702 | 41.476845 | 131 | py |
VLC-BERT | VLC-BERT-master/external/pytorch_pretrained_bert/tokenization_transfo_xl.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization classes for Transformer XL model.
Adapted from https://github.com/kimiyoung/transformer-xl.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import glob
import logging
import os
import sys
from collections import Counter, OrderedDict
from io import open
import unicodedata
import torch
import numpy as np
from .file_utils import cached_path
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-vocab.bin",
}
VOCAB_NAME = 'vocab.bin'
PRETRAINED_CORPUS_ARCHIVE_MAP = {
'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-corpus.bin",
}
CORPUS_NAME = 'corpus.bin'
class TransfoXLTokenizer(object):
"""
Transformer-XL tokenizer adapted from Vocab class in https://github.com/kimiyoung/transformer-xl
"""
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
        Instantiate a TransfoXLTokenizer from a pre-trained model name or from a
        directory containing a saved vocabulary file (vocab.bin).
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
vocab_file = os.path.join(pretrained_model_name_or_path, VOCAB_NAME)
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} "
"at this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
pretrained_model_name_or_path,
vocab_file))
return None
if resolved_vocab_file == vocab_file:
logger.info("loading vocabulary file {}".format(vocab_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
# Instantiate tokenizer.
tokenizer = cls(*inputs, **kwargs)
vocab_dict = torch.load(resolved_vocab_file)
for key, value in vocab_dict.items():
tokenizer.__dict__[key] = value
return tokenizer
def __init__(self, special=[], min_freq=0, max_size=None, lower_case=False,
delimiter=None, vocab_file=None, never_split=("<unk>", "<eos>", "<formula>")):
self.counter = Counter()
self.special = special
self.min_freq = min_freq
self.max_size = max_size
self.lower_case = lower_case
self.delimiter = delimiter
self.vocab_file = vocab_file
self.never_split = never_split
def count_file(self, path, verbose=False, add_eos=False):
if verbose: print('counting file {} ...'.format(path))
assert os.path.exists(path)
sents = []
with open(path, 'r', encoding='utf-8') as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
symbols = self.tokenize(line, add_eos=add_eos)
self.counter.update(symbols)
sents.append(symbols)
return sents
def count_sents(self, sents, verbose=False):
"""
sents : a list of sentences, each a list of tokenized symbols
"""
if verbose: print('counting {} sents ...'.format(len(sents)))
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
self.counter.update(symbols)
def _build_from_file(self, vocab_file):
self.idx2sym = []
self.sym2idx = OrderedDict()
with open(vocab_file, 'r', encoding='utf-8') as f:
for line in f:
symb = line.strip().split()[0]
self.add_symbol(symb)
if '<UNK>' in self.sym2idx:
self.unk_idx = self.sym2idx['<UNK>']
elif '<unk>' in self.sym2idx:
self.unk_idx = self.sym2idx['<unk>']
else:
            raise ValueError('No <unk> token in vocabulary')
def build_vocab(self):
if self.vocab_file:
print('building vocab from {}'.format(self.vocab_file))
self._build_from_file(self.vocab_file)
print('final vocab size {}'.format(len(self)))
else:
print('building vocab with min_freq={}, max_size={}'.format(
self.min_freq, self.max_size))
self.idx2sym = []
self.sym2idx = OrderedDict()
for sym in self.special:
self.add_special(sym)
for sym, cnt in self.counter.most_common(self.max_size):
if cnt < self.min_freq: break
self.add_symbol(sym)
print('final vocab size {} from {} unique tokens'.format(
len(self), len(self.counter)))
def encode_file(self, path, ordered=False, verbose=False, add_eos=True,
add_double_eos=False):
if verbose: print('encoding file {} ...'.format(path))
assert os.path.exists(path)
encoded = []
with open(path, 'r', encoding='utf-8') as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
symbols = self.tokenize(line, add_eos=add_eos,
add_double_eos=add_double_eos)
encoded.append(self.convert_to_tensor(symbols))
if ordered:
encoded = torch.cat(encoded)
return encoded
def encode_sents(self, sents, ordered=False, verbose=False):
if verbose: print('encoding {} sents ...'.format(len(sents)))
encoded = []
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
encoded.append(self.convert_to_tensor(symbols))
if ordered:
encoded = torch.cat(encoded)
return encoded
def add_special(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym])
def add_symbol(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
def get_sym(self, idx):
assert 0 <= idx < len(self), 'Index {} out of vocabulary range'.format(idx)
return self.idx2sym[idx]
def get_idx(self, sym):
if sym in self.sym2idx:
return self.sym2idx[sym]
else:
# print('encounter unk {}'.format(sym))
# assert '<eos>' not in sym
if hasattr(self, 'unk_idx'):
return self.sym2idx.get(sym, self.unk_idx)
# Backward compatibility with pre-trained models
elif '<unk>' in self.sym2idx:
return self.sym2idx['<unk>']
elif '<UNK>' in self.sym2idx:
return self.sym2idx['<UNK>']
else:
raise ValueError('Token not in vocabulary and no <unk> token in vocabulary for replacement')
def convert_ids_to_tokens(self, indices):
"""Converts a sequence of indices in symbols using the vocab."""
return [self.get_sym(idx) for idx in indices]
def convert_tokens_to_ids(self, symbols):
"""Converts a sequence of symbols into ids using the vocab."""
return [self.get_idx(sym) for sym in symbols]
def convert_to_tensor(self, symbols):
return torch.LongTensor(self.convert_tokens_to_ids(symbols))
def decode(self, indices, exclude=None):
"""Converts a sequence of indices in a string."""
if exclude is None:
return ' '.join([self.get_sym(idx) for idx in indices])
else:
return ' '.join([self.get_sym(idx) for idx in indices if idx not in exclude])
def __len__(self):
return len(self.idx2sym)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
if text in self.never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
def whitespace_tokenize(self, text):
"""Runs basic whitespace cleaning and splitting on a peice of text."""
text = text.strip()
if not text:
return []
if self.delimiter == '':
tokens = text
else:
tokens = text.split(self.delimiter)
return tokens
def tokenize(self, line, add_eos=False, add_double_eos=False):
line = self._clean_text(line)
line = line.strip()
symbols = self.whitespace_tokenize(line)
split_symbols = []
for symbol in symbols:
if self.lower_case and symbol not in self.never_split:
symbol = symbol.lower()
symbol = self._run_strip_accents(symbol)
split_symbols.extend(self._run_split_on_punc(symbol))
if add_double_eos: # lm1b
return ['<S>'] + split_symbols + ['<S>']
elif add_eos:
return split_symbols + ['<eos>']
else:
return split_symbols
class LMOrderedIterator(object):
def __init__(self, data, bsz, bptt, device='cpu', ext_len=None):
"""
data -- LongTensor -- the LongTensor is strictly ordered
"""
self.bsz = bsz
self.bptt = bptt
self.ext_len = ext_len if ext_len is not None else 0
self.device = device
# Work out how cleanly we can divide the dataset into bsz parts.
self.n_step = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, self.n_step * bsz)
# Evenly divide the data across the bsz batches.
self.data = data.view(bsz, -1).t().contiguous().to(device)
# Number of mini-batches
self.n_batch = (self.n_step + self.bptt - 1) // self.bptt
def get_batch(self, i, bptt=None):
if bptt is None: bptt = self.bptt
seq_len = min(bptt, self.data.size(0) - 1 - i)
end_idx = i + seq_len
beg_idx = max(0, i - self.ext_len)
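        # `data` spans positions [beg_idx, i + seq_len); `target` holds the
        # next-token labels for the last seq_len of those positions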
data = self.data[beg_idx:end_idx]
target = self.data[i+1:i+1+seq_len]
data_out = data.transpose(0, 1).contiguous().to(self.device)
target_out = target.transpose(0, 1).contiguous().to(self.device)
return data_out, target_out, seq_len
def get_fixlen_iter(self, start=0):
for i in range(start, self.data.size(0) - 1, self.bptt):
yield self.get_batch(i)
def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3):
max_len = self.bptt + max_deviation * std
i = start
while True:
bptt = self.bptt if np.random.random() < 0.95 else self.bptt / 2.
bptt = min(max_len, max(min_len, int(np.random.normal(bptt, std))))
data, target, seq_len = self.get_batch(i, bptt)
i += seq_len
yield data, target, seq_len
if i >= self.data.size(0) - 2:
break
def __iter__(self):
return self.get_fixlen_iter()
class LMShuffledIterator(object):
def __init__(self, data, bsz, bptt, device='cpu', ext_len=None, shuffle=False):
"""
data -- list[LongTensor] -- there is no order among the LongTensors
"""
self.data = data
self.bsz = bsz
self.bptt = bptt
self.ext_len = ext_len if ext_len is not None else 0
self.device = device
self.shuffle = shuffle
def get_sent_stream(self):
# index iterator
epoch_indices = np.random.permutation(len(self.data)) if self.shuffle \
else np.array(range(len(self.data)))
# sentence iterator
for idx in epoch_indices:
yield self.data[idx]
def stream_iterator(self, sent_stream):
# streams for each data in the batch
streams = [None] * self.bsz
data = torch.LongTensor(self.bptt, self.bsz)
target = torch.LongTensor(self.bptt, self.bsz)
n_retain = 0
while True:
# data : [n_retain+bptt x bsz]
# target : [bptt x bsz]
data[n_retain:].fill_(-1)
target.fill_(-1)
valid_batch = True
for i in range(self.bsz):
n_filled = 0
try:
while n_filled < self.bptt:
if streams[i] is None or len(streams[i]) <= 1:
streams[i] = next(sent_stream)
# number of new tokens to fill in
n_new = min(len(streams[i]) - 1, self.bptt - n_filled)
# first n_retain tokens are retained from last batch
data[n_retain+n_filled:n_retain+n_filled+n_new, i] = \
streams[i][:n_new]
target[n_filled:n_filled+n_new, i] = \
streams[i][1:n_new+1]
streams[i] = streams[i][n_new:]
n_filled += n_new
except StopIteration:
valid_batch = False
break
if not valid_batch:
return
data_out = data.transpose(0, 1).contiguous().to(self.device)
target_out = target.transpose(0, 1).contiguous().to(self.device)
yield data_out, target_out, self.bptt
n_retain = min(data.size(0), self.ext_len)
if n_retain > 0:
data[:n_retain] = data[-n_retain:]
data.resize_(n_retain + self.bptt, data.size(1))
def __iter__(self):
# sent_stream is an iterator
sent_stream = self.get_sent_stream()
for batch in self.stream_iterator(sent_stream):
yield batch
class LMMultiFileIterator(LMShuffledIterator):
def __init__(self, paths, vocab, bsz, bptt, device='cpu', ext_len=None,
shuffle=False):
self.paths = paths
self.vocab = vocab
self.bsz = bsz
self.bptt = bptt
self.ext_len = ext_len if ext_len is not None else 0
self.device = device
self.shuffle = shuffle
def get_sent_stream(self, path):
sents = self.vocab.encode_file(path, add_double_eos=True)
if self.shuffle:
np.random.shuffle(sents)
sent_stream = iter(sents)
return sent_stream
def __iter__(self):
if self.shuffle:
np.random.shuffle(self.paths)
for path in self.paths:
# sent_stream is an iterator
sent_stream = self.get_sent_stream(path)
for batch in self.stream_iterator(sent_stream):
yield batch
class TransfoXLCorpus(object):
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a pre-processed corpus.
"""
vocab = TransfoXLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
if pretrained_model_name_or_path in PRETRAINED_CORPUS_ARCHIVE_MAP:
corpus_file = PRETRAINED_CORPUS_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
corpus_file = os.path.join(pretrained_model_name_or_path, CORPUS_NAME)
# redirect to the cache, if necessary
try:
resolved_corpus_file = cached_path(corpus_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Corpus '{}' was not found in corpus list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} "
"at this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
pretrained_model_name_or_path,
corpus_file))
return None
if resolved_corpus_file == corpus_file:
logger.info("loading corpus file {}".format(corpus_file))
else:
logger.info("loading corpus file {} from cache at {}".format(
corpus_file, resolved_corpus_file))
        # Instantiate corpus.
corpus = cls(*inputs, **kwargs)
corpus_dict = torch.load(resolved_corpus_file)
for key, value in corpus_dict.items():
corpus.__dict__[key] = value
corpus.vocab = vocab
if corpus.train is not None:
corpus.train = torch.tensor(corpus.train, dtype=torch.long)
if corpus.valid is not None:
corpus.valid = torch.tensor(corpus.valid, dtype=torch.long)
if corpus.test is not None:
corpus.test = torch.tensor(corpus.test, dtype=torch.long)
return corpus
def __init__(self, *args, **kwargs):
self.vocab = TransfoXLTokenizer(*args, **kwargs)
self.dataset = None
self.train = None
self.valid = None
self.test = None
def build_corpus(self, path, dataset):
self.dataset = dataset
if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8']:
self.vocab.count_file(os.path.join(path, 'train.txt'))
self.vocab.count_file(os.path.join(path, 'valid.txt'))
self.vocab.count_file(os.path.join(path, 'test.txt'))
elif self.dataset == 'wt103':
self.vocab.count_file(os.path.join(path, 'train.txt'))
elif self.dataset == 'lm1b':
train_path_pattern = os.path.join(
path, '1-billion-word-language-modeling-benchmark-r13output',
'training-monolingual.tokenized.shuffled', 'news.en-*')
train_paths = glob.glob(train_path_pattern)
# the vocab will load from file when build_vocab() is called
self.vocab.build_vocab()
if self.dataset in ['ptb', 'wt2', 'wt103']:
self.train = self.vocab.encode_file(
os.path.join(path, 'train.txt'), ordered=True)
self.valid = self.vocab.encode_file(
os.path.join(path, 'valid.txt'), ordered=True)
self.test = self.vocab.encode_file(
os.path.join(path, 'test.txt'), ordered=True)
elif self.dataset in ['enwik8', 'text8']:
self.train = self.vocab.encode_file(
os.path.join(path, 'train.txt'), ordered=True, add_eos=False)
self.valid = self.vocab.encode_file(
os.path.join(path, 'valid.txt'), ordered=True, add_eos=False)
self.test = self.vocab.encode_file(
os.path.join(path, 'test.txt'), ordered=True, add_eos=False)
elif self.dataset == 'lm1b':
self.train = train_paths
self.valid = self.vocab.encode_file(
os.path.join(path, 'valid.txt'), ordered=False, add_double_eos=True)
self.test = self.vocab.encode_file(
os.path.join(path, 'test.txt'), ordered=False, add_double_eos=True)
def get_iterator(self, split, *args, **kwargs):
if split == 'train':
if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']:
data_iter = LMOrderedIterator(self.train, *args, **kwargs)
elif self.dataset == 'lm1b':
kwargs['shuffle'] = True
data_iter = LMMultiFileIterator(self.train, self.vocab, *args, **kwargs)
elif split in ['valid', 'test']:
data = self.valid if split == 'valid' else self.test
if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']:
data_iter = LMOrderedIterator(data, *args, **kwargs)
elif self.dataset == 'lm1b':
data_iter = LMShuffledIterator(data, *args, **kwargs)
return data_iter
def get_lm_corpus(datadir, dataset):
fn = os.path.join(datadir, 'cache.pt')
fn_pickle = os.path.join(datadir, 'cache.pkl')
    if os.path.exists(fn):
        print('Loading cached dataset...')
        corpus = torch.load(fn)
    elif os.path.exists(fn_pickle):
        print('Loading cached dataset from pickle...')
        with open(fn_pickle, "rb") as fp:
            corpus = pickle.load(fp)
else:
print('Producing dataset {}...'.format(dataset))
kwargs = {}
if dataset in ['wt103', 'wt2']:
kwargs['special'] = ['<eos>']
kwargs['lower_case'] = False
elif dataset == 'ptb':
kwargs['special'] = ['<eos>']
kwargs['lower_case'] = True
elif dataset == 'lm1b':
kwargs['special'] = []
kwargs['lower_case'] = False
kwargs['vocab_file'] = os.path.join(datadir, '1b_word_vocab.txt')
elif dataset in ['enwik8', 'text8']:
pass
corpus = TransfoXLCorpus(datadir, dataset, **kwargs)
torch.save(corpus, fn)
return corpus
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
| 24,851 | 35.927192 | 109 | py |
VLC-BERT | VLC-BERT-master/external/pytorch_pretrained_bert/modeling_transfo_xl_utilities.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Utilities for PyTorch Transformer XL model.
Directly adapted from https://github.com/kimiyoung/transformer-xl.
"""
from collections import defaultdict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# CUDA_MAJOR = int(torch.version.cuda.split('.')[0])
# CUDA_MINOR = int(torch.version.cuda.split('.')[1])
class ProjectedAdaptiveLogSoftmax(nn.Module):
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
keep_order=False):
super(ProjectedAdaptiveLogSoftmax, self).__init__()
self.n_token = n_token
self.d_embed = d_embed
self.d_proj = d_proj
self.cutoffs = cutoffs + [n_token]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
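        # `cutoffs` split the frequency-sorted vocabulary into a head shortlist and
        # `n_clusters` tail clusters; the head softmax predicts one extra logit per
        # tail cluster, hence head_size = shortlist_size + n_clusters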
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
self.out_layers = nn.ModuleList()
self.out_projs = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs)):
if d_proj != d_embed:
self.out_projs.append(
nn.Parameter(torch.Tensor(d_proj, d_embed))
)
else:
self.out_projs.append(None)
self.out_layers.append(nn.Linear(d_embed, n_token))
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
d_emb_i = d_embed // (div_val ** i)
self.out_projs.append(
nn.Parameter(torch.Tensor(d_proj, d_emb_i))
)
self.out_layers.append(nn.Linear(d_emb_i, r_idx-l_idx))
self.keep_order = keep_order
def _compute_logit(self, hidden, weight, bias, proj):
if proj is None:
logit = F.linear(hidden, weight, bias=bias)
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
proj_hid = F.linear(hidden, proj.t().contiguous())
logit = F.linear(proj_hid, weight, bias=bias)
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def forward(self, hidden, target=None, keep_order=False):
'''
Params:
hidden :: [len*bsz x d_proj]
target :: [len*bsz]
Return:
                if target is None:
                    out :: [len*bsz x n_tokens] log probabilities of tokens over the vocabulary
                else:
                    out :: [len*bsz] Negative log likelihood
            We could replace this implementation by the native PyTorch one
            if theirs had an option to set bias on all clusters in the native one.
            here: https://github.com/pytorch/pytorch/blob/dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da/torch/nn/modules/adaptive.py#L138
'''
if target is not None:
target = target.view(-1)
if hidden.size(0) != target.size(0):
raise RuntimeError('Input and target should have the same size '
'in the batch dimension.')
if self.n_clusters == 0:
logit = self._compute_logit(hidden, self.out_layers[0].weight,
self.out_layers[0].bias, self.out_projs[0])
if target is not None:
output = -F.log_softmax(logit, dim=-1) \
.gather(1, target.unsqueeze(1)).squeeze(1)
else:
output = F.log_softmax(logit, dim=-1)
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers[0].weight[l_idx:r_idx]
bias_i = self.out_layers[0].bias[l_idx:r_idx]
else:
weight_i = self.out_layers[i].weight
bias_i = self.out_layers[i].bias
if i == 0:
weight_i = torch.cat(
[weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat(
[bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
head_logprob = F.log_softmax(head_logit, dim=1)
if target is None:
out = hidden.new_empty((head_logit.size(0), self.n_token))
else:
out = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device)
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
if target is not None:
mask_i = (target >= l_idx) & (target < r_idx)
indices_i = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
target_i = target.index_select(0, indices_i) - l_idx
head_logprob_i = head_logprob.index_select(0, indices_i)
hidden_i = hidden.index_select(0, indices_i)
else:
hidden_i = hidden
if i == 0:
if target is not None:
logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]]
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
cluster_prob_idx = self.cutoffs[0] + i - 1 # No probability for the head cluster
if target is not None:
logprob_i = head_logprob_i[:, cluster_prob_idx] \
+ tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
out[:, l_idx:r_idx] = logprob_i
if target is not None:
if (hasattr(self, 'keep_order') and self.keep_order) or keep_order:
out.index_copy_(0, indices_i, -logprob_i)
else:
out[offset:offset+logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return out
def log_prob(self, hidden):
r""" Computes log probabilities for all :math:`n\_classes`
From: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/adaptive.py
Args:
hidden (Tensor): a minibatch of examples
Returns:
log-probabilities of for each class :math:`c`
in range :math:`0 <= c <= n\_classes`, where :math:`n\_classes` is a
parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor.
Shape:
- Input: :math:`(N, in\_features)`
- Output: :math:`(N, n\_classes)`
"""
if self.n_clusters == 0:
logit = self._compute_logit(hidden, self.out_layers[0].weight,
self.out_layers[0].bias, self.out_projs[0])
return F.log_softmax(logit, dim=-1)
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers[0].weight[l_idx:r_idx]
bias_i = self.out_layers[0].bias[l_idx:r_idx]
else:
weight_i = self.out_layers[i].weight
bias_i = self.out_layers[i].bias
if i == 0:
weight_i = torch.cat(
[weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat(
[bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
out = hidden.new_empty((head_logit.size(0), self.n_token))
head_logprob = F.log_softmax(head_logit, dim=1)
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]]
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
                    # mirror forward(): the log prob of the i-th tail cluster lives at
                    # head position cutoffs[0] + i - 1, and the result fills the slice
                    logprob_i = head_logprob[:, self.cutoffs[0] + i - 1, None] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
return out
class LogUniformSampler(object):
def __init__(self, range_max, n_sample):
"""
Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py
`P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`
expected count can be approximated by 1 - (1 - p)^n
and we use a numerically stable version -expm1(num_tries * log1p(-p))
Our implementation fixes num_tries at 2 * n_sample, and the actual #samples will vary from run to run
"""
with torch.no_grad():
self.range_max = range_max
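            # log-uniform (Zipf-like) proposal: P(class=i) = (log(i + 2) - log(i + 1)) / log(range_max + 1);
            # log_q stores the log of the expected count of each class among the drawn samples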
log_indices = torch.arange(1., range_max+2., 1.).log_()
self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]
# print('P', self.dist.numpy().tolist()[-30:])
self.log_q = (- (-self.dist.double().log1p_() * 2 * n_sample).expm1_()).log_().float()
self.n_sample = n_sample
def sample(self, labels):
"""
labels: [b1, b2]
Return
true_log_probs: [b1, b2]
samp_log_probs: [n_sample]
neg_samples: [n_sample]
"""
# neg_samples = torch.empty(0).long()
n_sample = self.n_sample
n_tries = 2 * n_sample
with torch.no_grad():
neg_samples = torch.multinomial(self.dist, n_tries, replacement=True).unique()
device = labels.device
neg_samples = neg_samples.to(device)
true_log_probs = self.log_q[labels].to(device)
samp_log_probs = self.log_q[neg_samples].to(device)
return true_log_probs, samp_log_probs, neg_samples
def sample_logits(embedding, bias, labels, inputs, sampler):
"""
embedding: an nn.Embedding layer
bias: [n_vocab]
labels: [b1, b2]
inputs: [b1, b2, n_emb]
sampler: you may use a LogUniformSampler
Return
logits: [b1, b2, 1 + n_sample]
"""
true_log_probs, samp_log_probs, neg_samples = sampler.sample(labels)
n_sample = neg_samples.size(0)
b1, b2 = labels.size(0), labels.size(1)
all_ids = torch.cat([labels.view(-1), neg_samples])
all_w = embedding(all_ids)
true_w = all_w[: -n_sample].view(b1, b2, -1)
sample_w = all_w[- n_sample:].view(n_sample, -1)
all_b = bias[all_ids]
true_b = all_b[: -n_sample].view(b1, b2)
sample_b = all_b[- n_sample:]
hit = (labels[:, :, None] == neg_samples).detach()
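    # mask out sampled negatives that coincide with the true label so each class is
    # scored only once, then put the true-class logit in column 0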
true_logits = torch.einsum('ijk,ijk->ij',
[true_w, inputs]) + true_b - true_log_probs
sample_logits = torch.einsum('lk,ijk->ijl',
[sample_w, inputs]) + sample_b - samp_log_probs
sample_logits.masked_fill_(hit, -1e30)
logits = torch.cat([true_logits[:, :, None], sample_logits], -1)
return logits
# class LogUniformSampler(object):
# def __init__(self, range_max, unique=False):
# """
# Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py
# `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`
# """
# self.range_max = range_max
# log_indices = torch.arange(1., range_max+2., 1.).log_()
# self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]
# self.unique = unique
# if self.unique:
# self.exclude_mask = torch.ByteTensor(range_max).fill_(0)
# def sample(self, n_sample, labels):
# pos_sample, new_labels = labels.unique(return_inverse=True)
# n_pos_sample = pos_sample.size(0)
# n_neg_sample = n_sample - n_pos_sample
# if self.unique:
# self.exclude_mask.index_fill_(0, pos_sample, 1)
# sample_dist = self.dist.clone().masked_fill_(self.exclude_mask, 0)
# self.exclude_mask.index_fill_(0, pos_sample, 0)
# else:
# sample_dist = self.dist
# neg_sample = torch.multinomial(sample_dist, n_neg_sample)
# sample = torch.cat([pos_sample, neg_sample])
# sample_prob = self.dist[sample]
# return new_labels, sample, sample_prob
if __name__ == '__main__':
S, B = 3, 4
n_vocab = 10000
n_sample = 5
H = 32
labels = torch.LongTensor(S, B).random_(0, n_vocab)
# sampler = LogUniformSampler(n_vocab, unique=False)
# new_labels, sample, sample_prob = sampler.sample(n_sample, labels)
sampler = LogUniformSampler(n_vocab, n_sample)#, unique=True)
# true_probs, samp_probs, neg_samples = sampler.sample(n_sample, labels)
# print('true_probs', true_probs.numpy().tolist())
# print('samp_probs', samp_probs.numpy().tolist())
# print('neg_samples', neg_samples.numpy().tolist())
# print('sum', torch.sum(sampler.dist).item())
# assert torch.all(torch.sort(sample.unique())[0].eq(torch.sort(sample)[0])).item()
embedding = nn.Embedding(n_vocab, H)
bias = torch.zeros(n_vocab)
inputs = torch.Tensor(S, B, H).normal_()
    logits = sample_logits(embedding, bias, labels, inputs, sampler)
    print('logits', logits.detach().numpy().tolist())
    print('logits shape', logits.size())
| 16,113 | 38.985112 | 132 | py |
VLC-BERT | VLC-BERT-master/common/lr_scheduler.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from bisect import bisect_right
import torch
# FIXME ideally this would be achieved with a CombinedLRScheduler,
# separating MultiStepLR with WarmupLR
# but the current LRScheduler design doesn't allow it
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer,
milestones,
gamma=0.1,
warmup_factor=1.0 / 3,
warmup_iters=500,
warmup_method="linear",
last_epoch=-1,
):
if not list(milestones) == sorted(milestones):
raise ValueError(
"Milestones should be a list of" " increasing integers. Got {}",
milestones,
)
if warmup_method not in ("constant", "linear"):
raise ValueError(
"Only 'constant' or 'linear' warmup_method accepted"
"got {}".format(warmup_method)
)
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
warmup_factor = 1
if self.last_epoch < self.warmup_iters:
if self.warmup_method == "constant":
warmup_factor = self.warmup_factor
elif self.warmup_method == "linear":
alpha = self.last_epoch / self.warmup_iters
warmup_factor = self.warmup_factor * (1 - alpha) + alpha
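        # effective lr = base_lr * warmup_factor * gamma ** (number of milestones already passed)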
return [
base_lr
* warmup_factor
* self.gamma ** bisect_right(self.milestones, self.last_epoch)
for base_lr in self.base_lrs
]
| 1,810 | 33.169811 | 80 | py |
VLC-BERT | VLC-BERT-master/common/fast_rcnn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from common.backbone.resnet.resnet import *
from common.backbone.resnet.resnet import Bottleneck, BasicBlock
from common.backbone.resnet.resnet import model_urls
from common.lib.roi_pooling.roi_pool import ROIPool
from common.lib.roi_pooling.roi_align import ROIAlign
from common.utils.flatten import Flattener
from common.utils.pad_sequence import pad_sequence
from common.utils.bbox import coordinate_embeddings
class FastRCNN(nn.Module):
def __init__(self, config, average_pool=True, final_dim=768, enable_cnn_reg_loss=False):
"""
        :param config: experiment config; the NETWORK.* image options control the backbone setup
        :param average_pool: whether or not to average pool the representations
        :param final_dim: dimension of the object representations returned by `obj_downsample`
        :param enable_cnn_reg_loss: whether to compute the auxiliary object classification (regularization) loss
"""
super(FastRCNN, self).__init__()
self.average_pool = average_pool
self.enable_cnn_reg_loss = enable_cnn_reg_loss
self.final_dim = final_dim
self.image_feat_precomputed = config.NETWORK.IMAGE_FEAT_PRECOMPUTED
if self.image_feat_precomputed:
if config.NETWORK.IMAGE_SEMANTIC:
self.object_embed = torch.nn.Embedding(num_embeddings=81, embedding_dim=128)
else:
self.object_embed = None
else:
self.stride_in_1x1 = config.NETWORK.IMAGE_STRIDE_IN_1x1
self.c5_dilated = config.NETWORK.IMAGE_C5_DILATED
self.num_layers = config.NETWORK.IMAGE_NUM_LAYERS
self.pretrained_model_path = '{}-{:04d}.model'.format(config.NETWORK.IMAGE_PRETRAINED,
config.NETWORK.IMAGE_PRETRAINED_EPOCH) if config.NETWORK.IMAGE_PRETRAINED != '' else None
self.output_conv5 = config.NETWORK.OUTPUT_CONV5
if self.num_layers == 18:
self.backbone = resnet18(pretrained=True, pretrained_model_path=self.pretrained_model_path,
expose_stages=[4])
block = BasicBlock
elif self.num_layers == 34:
self.backbone = resnet34(pretrained=True, pretrained_model_path=self.pretrained_model_path,
expose_stages=[4])
block = BasicBlock
elif self.num_layers == 50:
self.backbone = resnet50(pretrained=True, pretrained_model_path=self.pretrained_model_path,
expose_stages=[4], stride_in_1x1=self.stride_in_1x1)
block = Bottleneck
elif self.num_layers == 101:
self.backbone = resnet101(pretrained=True, pretrained_model_path=self.pretrained_model_path,
expose_stages=[4], stride_in_1x1=self.stride_in_1x1)
block = Bottleneck
elif self.num_layers == 152:
self.backbone = resnet152(pretrained=True, pretrained_model_path=self.pretrained_model_path,
expose_stages=[4], stride_in_1x1=self.stride_in_1x1)
block = Bottleneck
else:
                raise NotImplementedError
output_size = (14, 14)
self.roi_align = ROIAlign(output_size=output_size, spatial_scale=1.0 / 16)
if config.NETWORK.IMAGE_SEMANTIC:
self.object_embed = torch.nn.Embedding(num_embeddings=81, embedding_dim=128)
else:
self.object_embed = None
self.mask_upsample = None
self.roi_head_feature_extractor = self.backbone._make_layer(block=block, planes=512, blocks=3,
stride=2 if not self.c5_dilated else 1,
dilation=1 if not self.c5_dilated else 2,
stride_in_1x1=self.stride_in_1x1)
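            # RoIAlign yields 14x14 feature maps; the C5 head halves this to 7x7 unless it
            # is dilated (stride 1), which is why the AvgPool2d kernel below matches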
if average_pool:
self.head = torch.nn.Sequential(
self.roi_head_feature_extractor,
nn.AvgPool2d(7 if not self.c5_dilated else 14, stride=1),
Flattener()
)
else:
self.head = self.roi_head_feature_extractor
if config.NETWORK.IMAGE_FROZEN_BN:
for module in self.roi_head_feature_extractor.modules():
if isinstance(module, nn.BatchNorm2d):
for param in module.parameters():
param.requires_grad = False
frozen_stages = config.NETWORK.IMAGE_FROZEN_BACKBONE_STAGES
if 5 in frozen_stages:
for p in self.roi_head_feature_extractor.parameters():
p.requires_grad = False
frozen_stages = [stage for stage in frozen_stages if stage != 5]
self.backbone.frozen_parameters(frozen_stages=frozen_stages,
frozen_bn=config.NETWORK.IMAGE_FROZEN_BN)
if self.enable_cnn_reg_loss:
self.regularizing_predictor = torch.nn.Linear(2048, 81)
self.obj_downsample = torch.nn.Sequential(
torch.nn.Dropout(p=0.1),
torch.nn.Linear(2 * 2048 + (128 if config.NETWORK.IMAGE_SEMANTIC else 0), final_dim),
torch.nn.ReLU(inplace=True),
)
def init_weight(self):
if not self.image_feat_precomputed:
if self.pretrained_model_path is None:
pretrained_model = model_zoo.load_url(model_urls['resnet{}'.format(self.num_layers)])
else:
pretrained_model = torch.load(self.pretrained_model_path, map_location=lambda storage, loc: storage)
roi_head_feat_dict = {k[len('layer4.'):]: v for k, v in pretrained_model.items() if k.startswith('layer4.')}
self.roi_head_feature_extractor.load_state_dict(roi_head_feat_dict)
if self.output_conv5:
self.conv5.load_state_dict(roi_head_feat_dict)
def bn_eval(self):
if not self.image_feat_precomputed:
for module in self.modules():
if isinstance(module, nn.BatchNorm2d):
module.eval()
def forward(self, images, boxes, box_mask, im_info, classes=None, segms=None, mvrc_ops=None, mask_visual_embed=None):
"""
:param images: [batch_size, 3, im_height, im_width]
:param boxes: [batch_size, max_num_objects, 4] Padded boxes
:param box_mask: [batch_size, max_num_objects] Mask for whether or not each box is OK
:return: object reps [batch_size, max_num_objects, dim]
"""
box_inds = box_mask.nonzero()
obj_labels = classes[box_inds[:, 0], box_inds[:, 1]].type(torch.long) if classes is not None else None
assert box_inds.shape[0] > 0
if self.image_feat_precomputed:
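            # precomputed features are packed into `boxes`: columns 0-3 are the box
            # coordinates, the remaining columns the per-box visual feature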
post_roialign = boxes[box_inds[:, 0], box_inds[:, 1]][:, 4:]
boxes = boxes[:, :, :4]
else:
img_feats = self.backbone(images)
rois = torch.cat((
box_inds[:, 0, None].type(boxes.dtype),
boxes[box_inds[:, 0], box_inds[:, 1]],
), 1)
roi_align_res = self.roi_align(img_feats['body4'], rois).type(images.dtype)
if segms is not None:
pool_layers = self.head[1:]
post_roialign = self.roi_head_feature_extractor(roi_align_res)
post_roialign = post_roialign * segms[box_inds[:, 0], None, box_inds[:, 1]].to(dtype=post_roialign.dtype)
for _layer in pool_layers:
post_roialign = _layer(post_roialign)
else:
post_roialign = self.head(roi_align_res)
# Add some regularization, encouraging the model to keep giving decent enough predictions
if self.enable_cnn_reg_loss:
obj_logits = self.regularizing_predictor(post_roialign)
cnn_regularization = F.cross_entropy(obj_logits, obj_labels)[None]
feats_to_downsample = post_roialign if (self.object_embed is None or obj_labels is None) else \
torch.cat((post_roialign, self.object_embed(obj_labels)), -1)
if mvrc_ops is not None and mask_visual_embed is not None:
_to_masked = (mvrc_ops == 1)[box_inds[:, 0], box_inds[:, 1]]
feats_to_downsample[_to_masked] = mask_visual_embed
coord_embed = coordinate_embeddings(
torch.cat((boxes[box_inds[:, 0], box_inds[:, 1]], im_info[box_inds[:, 0], :2]), 1),
256
)
feats_to_downsample = torch.cat((coord_embed.view((coord_embed.shape[0], -1)), feats_to_downsample), -1)
final_feats = self.obj_downsample(feats_to_downsample)
# Reshape into a padded sequence - this is expensive and annoying but easier to implement and debug...
obj_reps = pad_sequence(final_feats, box_mask.sum(1).tolist())
post_roialign = pad_sequence(post_roialign, box_mask.sum(1).tolist())
# DataParallel compatibility
obj_reps_padded = obj_reps.new_zeros((obj_reps.shape[0], boxes.shape[1], obj_reps.shape[2]))
obj_reps_padded[:, :obj_reps.shape[1]] = obj_reps
obj_reps = obj_reps_padded
post_roialign_padded = post_roialign.new_zeros((post_roialign.shape[0], boxes.shape[1], post_roialign.shape[2]))
post_roialign_padded[:, :post_roialign.shape[1]] = post_roialign
post_roialign = post_roialign_padded
# Output
output_dict = {
'obj_reps_raw': post_roialign,
'obj_reps': obj_reps,
}
if (not self.image_feat_precomputed) and self.enable_cnn_reg_loss:
output_dict.update({'obj_logits': obj_logits,
'obj_labels': obj_labels,
'cnn_regularization_loss': cnn_regularization})
if (not self.image_feat_precomputed) and self.output_conv5:
image_feature = self.img_head(img_feats['body4'])
output_dict['image_feature'] = image_feature
return output_dict
| 10,223 | 49.117647 | 155 | py |
VLC-BERT | VLC-BERT-master/common/visual_linguistic_bert.py | import torch
import torch.nn as nn
from easydict import EasyDict as edict
from external.pytorch_pretrained_bert.modeling import BertLayerNorm, BertEncoder, BertPooler, ACT2FN, BertOnlyMLMHead
from common.commonsense_fusion import SimpleFusionLayer
# todo: add this to config
NUM_SPECIAL_WORDS = 1000
class BaseModel(nn.Module):
def __init__(self, config, **kwargs):
self.config = config
super(BaseModel, self).__init__()
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def forward(self, *args, **kwargs):
        raise NotImplementedError
class VisualLinguisticBert(BaseModel):
def __init__(self, config, language_pretrained_model_path=None):
super(VisualLinguisticBert, self).__init__(config)
self.config = config
# embeddings
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.end_embedding = nn.Embedding(1, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.embedding_LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.embedding_dropout = nn.Dropout(config.hidden_dropout_prob)
if config.commonsense_emb_type == 'linear':
self.commonsense_linear = nn.Linear(config.hidden_size, config.hidden_size)
elif config.commonsense_emb_type == 'fusion':
            commonsense_conf = edict({'num_heads': 3, 'hidden_size': 768, 'reduce_attention_output': False, 'attend_ques': True})
self.commonsense_fusion = SimpleFusionLayer(commonsense_conf)
# for compatibility of roberta
self.position_padding_idx = config.position_padding_idx
# visual transform
self.visual_1x1_text = None
self.visual_1x1_object = None
if config.visual_size != config.hidden_size:
self.visual_1x1_text = nn.Linear(config.visual_size, config.hidden_size)
self.visual_1x1_object = nn.Linear(config.visual_size, config.hidden_size)
if config.visual_ln:
self.visual_ln_text = BertLayerNorm(config.hidden_size, eps=1e-12)
self.visual_ln_object = BertLayerNorm(config.hidden_size, eps=1e-12)
else:
visual_scale_text = nn.Parameter(torch.as_tensor(self.config.visual_scale_text_init, dtype=torch.float),
requires_grad=True)
self.register_parameter('visual_scale_text', visual_scale_text)
visual_scale_object = nn.Parameter(torch.as_tensor(self.config.visual_scale_object_init, dtype=torch.float),
requires_grad=True)
self.register_parameter('visual_scale_object', visual_scale_object)
self.encoder = BertEncoder(config)
if self.config.with_pooler:
self.pooler = BertPooler(config)
# init weights
self.apply(self.init_weights)
if config.visual_ln:
self.visual_ln_text.weight.data.fill_(self.config.visual_scale_text_init)
self.visual_ln_object.weight.data.fill_(self.config.visual_scale_object_init)
# load language pretrained model
if language_pretrained_model_path is not None:
self.load_language_pretrained_model(language_pretrained_model_path)
if config.word_embedding_frozen:
for p in self.word_embeddings.parameters():
p.requires_grad = False
self.special_word_embeddings = nn.Embedding(NUM_SPECIAL_WORDS, config.hidden_size)
self.special_word_embeddings.weight.data.copy_(self.word_embeddings.weight.data[:NUM_SPECIAL_WORDS])
def word_embeddings_wrapper(self, input_ids, commonsense_embeddings):
word_embeddings = self.word_embeddings(input_ids)
if self.config.word_embedding_frozen:
word_embeddings[input_ids < NUM_SPECIAL_WORDS] \
= self.special_word_embeddings(input_ids[input_ids < NUM_SPECIAL_WORDS])
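        # note: `commonsense_embeddings` is assumed to be a tensor here; its batch size is
        # read before the None check below, so callers should pass a (possibly empty) tensor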
attn_weights = torch.zeros(commonsense_embeddings.size(0), 1, dtype=torch.float, device=commonsense_embeddings.device)
if commonsense_embeddings is not None and len(commonsense_embeddings.shape) > 2:
if self.config.commonsense_emb_type == 'linear':
commonsense_emb = self.commonsense_linear(commonsense_embeddings)
elif self.config.commonsense_emb_type == 'fusion':
commonsense_emb, attn_weights = self.commonsense_fusion(commonsense_embeddings)
else:
commonsense_emb = commonsense_embeddings
# Replace word embeddings for [MASK] with loaded commonsense embeddings
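            # (token id 102 is BERT's [SEP]; the commonsense span ends at the third [SEP]
            # when more than three [SEP] tokens are present, otherwise at the second)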
for i in range(commonsense_emb.shape[0]):
sep_idxs = (input_ids[i] == 102).nonzero(as_tuple=True)[0]
end_idx = sep_idxs[2] if len(sep_idxs) > 3 else sep_idxs[1]
start_idx = end_idx - commonsense_emb.shape[1]
for idx in range(start_idx, end_idx):
word_embeddings[i, idx] = commonsense_emb[i, idx-start_idx]
return word_embeddings, attn_weights
def forward(self,
text_input_ids,
text_token_type_ids,
text_visual_embeddings,
text_mask,
object_vl_embeddings,
object_mask,
commonsense_embeddings=None, # [batch_size, 768]
output_all_encoded_layers=True,
output_text_and_object_separately=False,
output_attention_probs=False,
output_commonsense_attn_weights=False):
# get seamless concatenate embeddings and mask
embedding_output, attention_mask, text_mask_new, object_mask_new, attn_weights = self.embedding(text_input_ids,
text_token_type_ids,
commonsense_embeddings,
text_visual_embeddings,
text_mask,
object_vl_embeddings,
object_mask)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
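        # Illustrative sketch (not executed): for a row mask [1, 1, 0] the extended mask
        # becomes [0.0, 0.0, -10000.0], broadcast over heads and query positions, so the
        # softmax assigns (near-)zero probability to the padded key position.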
# extended_attention_mask = 1.0 - extended_attention_mask
# extended_attention_mask[extended_attention_mask != 0] = float('-inf')
if output_attention_probs:
encoded_layers, attention_probs = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
output_attention_probs=output_attention_probs)
else:
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
output_attention_probs=output_attention_probs)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output) if self.config.with_pooler else None
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
if output_text_and_object_separately:
if not output_all_encoded_layers:
encoded_layers = [encoded_layers]
encoded_layers_text = []
encoded_layers_object = []
for encoded_layer in encoded_layers:
max_text_len = text_input_ids.shape[1]
max_object_len = object_vl_embeddings.shape[1]
encoded_layer_text = encoded_layer[:, :max_text_len]
encoded_layer_object = encoded_layer.new_zeros(
(encoded_layer.shape[0], max_object_len, encoded_layer.shape[2]))
encoded_layer_object[object_mask] = encoded_layer[object_mask_new]
encoded_layers_text.append(encoded_layer_text)
encoded_layers_object.append(encoded_layer_object)
if not output_all_encoded_layers:
encoded_layers_text = encoded_layers_text[0]
encoded_layers_object = encoded_layers_object[0]
if output_attention_probs:
return encoded_layers_text, encoded_layers_object, pooled_output, attention_probs
else:
return encoded_layers_text, encoded_layers_object, pooled_output
else:
if output_attention_probs:
return encoded_layers, pooled_output, attention_probs
elif output_commonsense_attn_weights:
return encoded_layers, pooled_output, attn_weights
else:
return encoded_layers, pooled_output
def embedding(self,
text_input_ids,
text_token_type_ids,
commonsense_embeddings,
text_visual_embeddings,
text_mask,
object_vl_embeddings,
object_mask):
text_linguistic_embedding, attn_weights = self.word_embeddings_wrapper(text_input_ids, commonsense_embeddings)
if self.visual_1x1_text is not None:
text_visual_embeddings = self.visual_1x1_text(text_visual_embeddings)
if self.config.visual_ln:
text_visual_embeddings = self.visual_ln_text(text_visual_embeddings)
else:
text_visual_embeddings *= self.visual_scale_text
text_vl_embeddings = text_linguistic_embedding + text_visual_embeddings
object_visual_embeddings = object_vl_embeddings[:, :, :self.config.visual_size]
if self.visual_1x1_object is not None:
object_visual_embeddings = self.visual_1x1_object(object_visual_embeddings)
if self.config.visual_ln:
object_visual_embeddings = self.visual_ln_object(object_visual_embeddings)
else:
object_visual_embeddings *= self.visual_scale_object
object_linguistic_embeddings = object_vl_embeddings[:, :, self.config.visual_size:]
object_vl_embeddings = object_linguistic_embeddings + object_visual_embeddings
bs = text_vl_embeddings.size(0)
vl_embed_size = text_vl_embeddings.size(-1)
max_length = (text_mask.sum(1) + object_mask.sum(1)).max() + 1
grid_ind, grid_pos = torch.meshgrid(torch.arange(bs, dtype=torch.long, device=text_vl_embeddings.device),
torch.arange(max_length, dtype=torch.long, device=text_vl_embeddings.device))
text_end = text_mask.sum(1, keepdim=True)
object_end = text_end + object_mask.sum(1, keepdim=True)
# seamlessly concatenate visual linguistic embeddings of text and object
_zero_id = torch.zeros((bs, ), dtype=torch.long, device=text_vl_embeddings.device)
vl_embeddings = text_vl_embeddings.new_zeros((bs, max_length, vl_embed_size))
vl_embeddings[grid_pos < text_end] = text_vl_embeddings[text_mask]
vl_embeddings[(grid_pos >= text_end) & (grid_pos < object_end)] = object_vl_embeddings[object_mask]
vl_embeddings[grid_pos == object_end] = self.end_embedding(_zero_id)
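        # Resulting layout per example (lengths vary across the batch):
        # [text tokens][object tokens][END][zero padding up to max_length]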
# token type embeddings/ segment embeddings
token_type_ids = text_token_type_ids.new_zeros((bs, max_length))
token_type_ids[grid_pos < text_end] = text_token_type_ids[text_mask]
token_type_ids[(grid_pos >= text_end) & (grid_pos <= object_end)] = 2
token_type_embeddings = self.token_type_embeddings(token_type_ids)
# position embeddings
position_ids = grid_pos + self.position_padding_idx + 1
if self.config.obj_pos_id_relative:
position_ids[(grid_pos >= text_end) & (grid_pos < object_end)] \
= text_end.expand((bs, max_length))[(grid_pos >= text_end) & (grid_pos < object_end)] \
+ self.position_padding_idx + 1
position_ids[grid_pos == object_end] = (text_end + 1).squeeze(1) + self.position_padding_idx + 1
else:
assert False, "Don't use position id 510/511 for objects and [END]!!!"
position_ids[(grid_pos >= text_end) & (grid_pos < object_end)] = self.config.max_position_embeddings - 2
position_ids[grid_pos == object_end] = self.config.max_position_embeddings - 1
position_embeddings = self.position_embeddings(position_ids)
mask = text_mask.new_zeros((bs, max_length))
mask[grid_pos <= object_end] = 1
embeddings = vl_embeddings + position_embeddings + token_type_embeddings
embeddings = self.embedding_LayerNorm(embeddings)
embeddings = self.embedding_dropout(embeddings)
return embeddings, mask, grid_pos < text_end, (grid_pos >= text_end) & (grid_pos < object_end), attn_weights
def load_language_pretrained_model(self, language_pretrained_model_path):
pretrained_state_dict = torch.load(language_pretrained_model_path, map_location=lambda storage, loc: storage)
encoder_pretrained_state_dict = {}
pooler_pretrained_state_dict = {}
embedding_ln_pretrained_state_dict = {}
unexpected_keys = []
for k, v in pretrained_state_dict.items():
if k.startswith('bert.'):
k = k[len('bert.'):]
elif k.startswith('roberta.'):
k = k[len('roberta.'):]
else:
unexpected_keys.append(k)
continue
if 'gamma' in k:
k = k.replace('gamma', 'weight')
if 'beta' in k:
k = k.replace('beta', 'bias')
if k.startswith('encoder.'):
k_ = k[len('encoder.'):]
if k_ in self.encoder.state_dict():
encoder_pretrained_state_dict[k_] = v
else:
unexpected_keys.append(k)
elif k.startswith('embeddings.'):
k_ = k[len('embeddings.'):]
if k_ == 'word_embeddings.weight':
self.word_embeddings.weight.data = v.to(dtype=self.word_embeddings.weight.data.dtype,
device=self.word_embeddings.weight.data.device)
elif k_ == 'position_embeddings.weight':
self.position_embeddings.weight.data = v.to(dtype=self.position_embeddings.weight.data.dtype,
device=self.position_embeddings.weight.data.device)
elif k_ == 'token_type_embeddings.weight':
self.token_type_embeddings.weight.data[:v.size(0)] = v.to(
dtype=self.token_type_embeddings.weight.data.dtype,
device=self.token_type_embeddings.weight.data.device)
if v.size(0) == 1:
# Todo: roberta token type embedding
self.token_type_embeddings.weight.data[1] = v[0].clone().to(
dtype=self.token_type_embeddings.weight.data.dtype,
device=self.token_type_embeddings.weight.data.device)
self.token_type_embeddings.weight.data[2] = v[0].clone().to(
dtype=self.token_type_embeddings.weight.data.dtype,
device=self.token_type_embeddings.weight.data.device)
elif k_.startswith('LayerNorm.'):
k__ = k_[len('LayerNorm.'):]
if k__ in self.embedding_LayerNorm.state_dict():
embedding_ln_pretrained_state_dict[k__] = v
else:
unexpected_keys.append(k)
else:
unexpected_keys.append(k)
elif self.config.with_pooler and k.startswith('pooler.'):
k_ = k[len('pooler.'):]
if k_ in self.pooler.state_dict():
pooler_pretrained_state_dict[k_] = v
else:
unexpected_keys.append(k)
else:
unexpected_keys.append(k)
if len(unexpected_keys) > 0:
print("Warnings: Unexpected keys: {}.".format(unexpected_keys))
self.embedding_LayerNorm.load_state_dict(embedding_ln_pretrained_state_dict)
self.encoder.load_state_dict(encoder_pretrained_state_dict)
if self.config.with_pooler and len(pooler_pretrained_state_dict) > 0:
self.pooler.load_state_dict(pooler_pretrained_state_dict)
class VisualLinguisticBertForPretraining(VisualLinguisticBert):
def __init__(self, config, language_pretrained_model_path=None,
with_rel_head=True, with_mlm_head=True, with_mvrc_head=True):
super(VisualLinguisticBertForPretraining, self).__init__(config, language_pretrained_model_path=None)
self.with_rel_head = with_rel_head
self.with_mlm_head = with_mlm_head
self.with_mvrc_head = with_mvrc_head
if with_rel_head:
self.relationsip_head = VisualLinguisticBertRelationshipPredictionHead(config)
if with_mlm_head:
self.mlm_head = BertOnlyMLMHead(config, self.word_embeddings.weight)
if with_mvrc_head:
self.mvrc_head = VisualLinguisticBertMVRCHead(config)
# init weights
self.apply(self.init_weights)
if config.visual_ln:
self.visual_ln_text.weight.data.fill_(self.config.visual_scale_text_init)
self.visual_ln_object.weight.data.fill_(self.config.visual_scale_object_init)
# load language pretrained model
if language_pretrained_model_path is not None:
self.load_language_pretrained_model(language_pretrained_model_path)
if config.word_embedding_frozen:
for p in self.word_embeddings.parameters():
p.requires_grad = False
if config.pos_embedding_frozen:
for p in self.position_embeddings.parameters():
p.requires_grad = False
def forward(self,
text_input_ids,
text_token_type_ids,
text_visual_embeddings,
text_mask,
object_vl_embeddings,
object_mask,
output_all_encoded_layers=True,
output_text_and_object_separately=False):
text_out, object_out, pooled_rep = super(VisualLinguisticBertForPretraining, self).forward(
text_input_ids,
text_token_type_ids,
text_visual_embeddings,
text_mask,
object_vl_embeddings,
object_mask,
output_all_encoded_layers=False,
output_text_and_object_separately=True
)
if self.with_rel_head:
relationship_logits = self.relationsip_head(pooled_rep)
else:
relationship_logits = None
if self.with_mlm_head:
mlm_logits = self.mlm_head(text_out)
else:
mlm_logits = None
if self.with_mvrc_head:
mvrc_logits = self.mvrc_head(object_out)
else:
mvrc_logits = None
return relationship_logits, mlm_logits, mvrc_logits
def load_language_pretrained_model(self, language_pretrained_model_path):
pretrained_state_dict = torch.load(language_pretrained_model_path, map_location=lambda storage, loc: storage)
encoder_pretrained_state_dict = {}
pooler_pretrained_state_dict = {}
embedding_ln_pretrained_state_dict = {}
relationship_head_pretrained_state_dict = {}
mlm_head_pretrained_state_dict = {}
unexpected_keys = []
for _k, v in pretrained_state_dict.items():
if _k.startswith('bert.') or _k.startswith('roberta.'):
k = _k[len('bert.'):] if _k.startswith('bert.') else _k[len('roberta.'):]
if 'gamma' in k:
k = k.replace('gamma', 'weight')
if 'beta' in k:
k = k.replace('beta', 'bias')
if k.startswith('encoder.'):
k_ = k[len('encoder.'):]
if k_ in self.encoder.state_dict():
encoder_pretrained_state_dict[k_] = v
else:
unexpected_keys.append(_k)
elif k.startswith('embeddings.'):
k_ = k[len('embeddings.'):]
if k_ == 'word_embeddings.weight':
self.word_embeddings.weight.data = v.to(dtype=self.word_embeddings.weight.data.dtype,
device=self.word_embeddings.weight.data.device)
elif k_ == 'position_embeddings.weight':
self.position_embeddings.weight.data = v.to(dtype=self.position_embeddings.weight.data.dtype,
device=self.position_embeddings.weight.data.device)
elif k_ == 'token_type_embeddings.weight':
self.token_type_embeddings.weight.data[:v.size(0)] = v.to(
dtype=self.token_type_embeddings.weight.data.dtype,
device=self.token_type_embeddings.weight.data.device)
if v.size(0) == 1:
# Todo: roberta token type embedding
self.token_type_embeddings.weight.data[1] = v[0].to(
dtype=self.token_type_embeddings.weight.data.dtype,
device=self.token_type_embeddings.weight.data.device)
elif k_.startswith('LayerNorm.'):
k__ = k_[len('LayerNorm.'):]
if k__ in self.embedding_LayerNorm.state_dict():
embedding_ln_pretrained_state_dict[k__] = v
else:
unexpected_keys.append(_k)
else:
unexpected_keys.append(_k)
elif self.config.with_pooler and k.startswith('pooler.'):
k_ = k[len('pooler.'):]
if k_ in self.pooler.state_dict():
pooler_pretrained_state_dict[k_] = v
else:
unexpected_keys.append(_k)
elif _k.startswith('cls.seq_relationship.') and self.with_rel_head:
k_ = _k[len('cls.seq_relationship.'):]
if 'gamma' in k_:
k_ = k_.replace('gamma', 'weight')
if 'beta' in k_:
k_ = k_.replace('beta', 'bias')
if k_ in self.relationsip_head.caption_image_relationship.state_dict():
relationship_head_pretrained_state_dict[k_] = v
else:
unexpected_keys.append(_k)
elif (_k.startswith('cls.predictions.') or _k.startswith('lm_head.')) and self.with_mlm_head:
k_ = _k[len('cls.predictions.'):] if _k.startswith('cls.predictions.') else _k[len('lm_head.'):]
if _k.startswith('lm_head.'):
if 'dense' in k_ or 'layer_norm' in k_:
k_ = 'transform.' + k_
if 'layer_norm' in k_:
k_ = k_.replace('layer_norm', 'LayerNorm')
if 'gamma' in k_:
k_ = k_.replace('gamma', 'weight')
if 'beta' in k_:
k_ = k_.replace('beta', 'bias')
if k_ in self.mlm_head.predictions.state_dict():
mlm_head_pretrained_state_dict[k_] = v
else:
unexpected_keys.append(_k)
else:
unexpected_keys.append(_k)
if len(unexpected_keys) > 0:
print("Warnings: Unexpected keys: {}.".format(unexpected_keys))
self.embedding_LayerNorm.load_state_dict(embedding_ln_pretrained_state_dict)
self.encoder.load_state_dict(encoder_pretrained_state_dict)
if self.config.with_pooler and len(pooler_pretrained_state_dict) > 0:
self.pooler.load_state_dict(pooler_pretrained_state_dict)
if self.with_rel_head and len(relationship_head_pretrained_state_dict) > 0:
self.relationsip_head.caption_image_relationship.load_state_dict(relationship_head_pretrained_state_dict)
if self.with_mlm_head:
self.mlm_head.predictions.load_state_dict(mlm_head_pretrained_state_dict)
class VisualLinguisticBertMVRCHeadTransform(BaseModel):
def __init__(self, config):
super(VisualLinguisticBertMVRCHeadTransform, self).__init__(config)
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.act = ACT2FN[config.hidden_act]
self.apply(self.init_weights)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.act(hidden_states)
return hidden_states
class VisualLinguisticBertMVRCHead(BaseModel):
def __init__(self, config):
super(VisualLinguisticBertMVRCHead, self).__init__(config)
self.transform = VisualLinguisticBertMVRCHeadTransform(config)
self.region_cls_pred = nn.Linear(config.hidden_size, config.visual_region_classes)
self.apply(self.init_weights)
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
logits = self.region_cls_pred(hidden_states)
return logits
class VisualLinguisticBertRelationshipPredictionHead(BaseModel):
def __init__(self, config):
super(VisualLinguisticBertRelationshipPredictionHead, self).__init__(config)
self.caption_image_relationship = nn.Linear(config.hidden_size, 2)
self.apply(self.init_weights)
def forward(self, pooled_rep):
relationship_logits = self.caption_image_relationship(pooled_rep)
return relationship_logits
| 28,112 | 49.56295 | 128 | py |
VLC-BERT | VLC-BERT-master/common/commonsense_fusion.py | import torch.nn as nn
def init_weights(m):
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.uniform_(m.bias)
def prepare_mask(key_mask, query_mask):
len_k = key_mask.size(1)
len_q = query_mask.size(1)
padding_mask1 = query_mask.unsqueeze(1).expand(-1, len_k, -1).transpose(1,2)
padding_mask2 = key_mask.unsqueeze(1).expand(-1, len_q, -1)
return padding_mask1*padding_mask2
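# Illustrative sketch (not executed): for key_mask of shape (N, len_k) and query_mask of
# shape (N, len_q), prepare_mask returns an (N, len_q, len_k) tensor whose (i, q, k) entry
# is 1 only when both query position q and key position k are valid for example i.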
class SimpleFusionLayer(nn.Module):
def __init__(self, config):
super(SimpleFusionLayer, self).__init__()
self.num_heads = config.num_heads
# this is the dimension of the feature vector for both query and key/value
self.embed_dim = config.hidden_size
self.reduce_attention_output = config.reduce_attention_output
self.attend_ques = config.attend_ques
if self.reduce_attention_output:
self.dim_layer = nn.Linear(self.embed_dim, self.embed_dim)
self.norm = nn.LayerNorm(self.embed_dim, eps=1e-5)
self.fusion = nn.MultiheadAttention(self.embed_dim, self.num_heads, dropout=0.1, batch_first=True)
self.fusion.apply(init_weights)
def forward(self, embeddings, image_fusion=False):
"""
If specified, a mask of shape (N, S) indicating which elements within key to ignore for the purpose of attention (i.e. treat as "padding").
embeddings: tensor of shape (N, S, D) where the last item in the sequence is the query, eg. S = 6 when first 5 are commonsense embeddings and last 1 is question
"""
        # self.fusion is constructed with batch_first=True, so inputs are shaped
        # (batch size, sequence length, embedding dimension).
if image_fusion:
first = embeddings[:, -2:, :]
else:
first = embeddings[:, -1, :].unsqueeze(1)
if not self.attend_ques:
            embeddings = embeddings[:, :-1, :]
# Normalize the input
first = self.norm(first)
second = self.norm(embeddings)
query = first
key = second
value = second
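        # With batch_first=True: query is (N, 1, D) (or (N, 2, D) when image_fusion=True),
        # key/value are (N, S, D); attn_output matches the query shape and
        # attn_output_weights is (N, query_len, S).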
attn_output, attn_output_weights = self.fusion(query, key, value)
if self.reduce_attention_output:
# reduce the attention output to 1024 dimensions
attn_output = self.dim_layer(attn_output)
return attn_output, attn_output_weights | 2,409 | 35.515152 | 169 | py |
VLC-BERT | VLC-BERT-master/common/module.py | from collections import namedtuple
from typing import Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
class Module(nn.Module):
def __init__(self, config):
super(Module, self).__init__()
self.config = config
def init_weight(self):
raise NotImplementedError()
def fix_params(self):
raise NotImplementedError()
def forward(self, *inputs, **kwargs):
inputs, kwargs = self.preprocess(*inputs, **kwargs)
if self.training:
return self.train_forward(*inputs, **kwargs)
else:
return self.inference_forward(*inputs, **kwargs)
def train_forward(self, *inputs, **kwargs):
"""
def train_forward(self, data, label, **kwargs):
# this is a toy example for 1 output, 2 loss function
output = None
loss1 = torch.tensor(0.0)
loss2 = torch.tensor(0.0)
outputs = {'output': output,
'loss1': loss1,
'loss2': loss2}
loss = loss1 + loss2
return outputs, loss
"""
        raise NotImplementedError
def inference_forward(self, *inputs, **kwargs):
"""
def inference_forward(self, data, **kwargs):
output = None
outputs = {'output': output}
return outputs
"""
        raise NotImplementedError
def preprocess(self, *inputs, **kwargs):
if self.training:
return self.train_preprocess(*inputs, **kwargs)
else:
return self.inference_preprocess(*inputs, **kwargs)
def train_preprocess(self, *inputs, **kwargs):
return inputs, kwargs
def inference_preprocess(self, *inputs, **kwargs):
return inputs, kwargs
| 1,786 | 26.921875 | 65 | py |
VLC-BERT | VLC-BERT-master/common/trainer.py | import os
import time
from collections import namedtuple
import torch
try:
from apex import amp
from apex.amp import _amp_state
except ImportError:
pass
#raise ImportError("Please install apex from https://www.github.com/nvidia/apex if you want to use fp16.")
# Parameter to pass to batch_end_callback
BatchEndParam = namedtuple('BatchEndParams',
['epoch',
'nbatch',
'rank',
'add_step',
'data_in_time',
'data_transfer_time',
'forward_time',
'backward_time',
'optimizer_time',
'metric_time',
'eval_metric',
'locals'])
def _multiple_callbacks(callbacks, *args, **kwargs):
"""Sends args and kwargs to any configured callbacks.
This handles the cases where the 'callbacks' variable
is ``None``, a single function, or a list.
"""
if isinstance(callbacks, list):
for cb in callbacks:
cb(*args, **kwargs)
return
if callbacks:
callbacks(*args, **kwargs)
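# Usage sketch (hypothetical callback): both a single callable and a list are accepted, e.g.
# _multiple_callbacks([lambda p: print(p.nbatch)], batch_end_params) invokes every entry in turn.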
def to_cuda(batch):
batch = list(batch)
for i in range(len(batch)):
if isinstance(batch[i], torch.Tensor):
batch[i] = batch[i].cuda(non_blocking=True)
        elif isinstance(batch[i], list):
            for j, o in enumerate(batch[i]):
                # check the element itself, not the enclosing list
                if isinstance(o, torch.Tensor):
                    batch[i][j] = o.cuda(non_blocking=True)
return batch
def train(net,
optimizer,
lr_scheduler,
train_loader,
train_sampler,
metrics,
begin_epoch,
end_epoch,
logger,
rank=None,
batch_end_callbacks=None,
epoch_end_callbacks=None,
writer=None,
validation_monitor=None,
fp16=False,
clip_grad_norm=-1,
gradient_accumulate_steps=1):
assert isinstance(gradient_accumulate_steps, int) and gradient_accumulate_steps >= 1
for epoch in range(begin_epoch, end_epoch):
print('PROGRESS: %.2f%%' % (100.0 * epoch / end_epoch))
        # set the epoch as the random seed of the sampler during distributed training
if train_sampler is not None and hasattr(train_sampler, 'set_epoch'):
train_sampler.set_epoch(epoch)
# reset metrics
metrics.reset()
# set net to train mode
net.train()
        # clear the parameter gradients
# optimizer.zero_grad()
# init end time
end_time = time.time()
if isinstance(lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
name, value = validation_monitor.metrics.get()
val = value[name.index(validation_monitor.host_metric_name)]
lr_scheduler.step(val, epoch)
# training
for nbatch, batch in enumerate(train_loader):
global_steps = len(train_loader) * epoch + nbatch
os.environ['global_steps'] = str(global_steps)
# record time
data_in_time = time.time() - end_time
# transfer data to GPU
data_transfer_time = time.time()
batch = to_cuda(batch)
data_transfer_time = time.time() - data_transfer_time
# forward
forward_time = time.time()
outputs, loss = net(*batch)
loss = loss.mean()
if gradient_accumulate_steps > 1:
loss = loss / gradient_accumulate_steps
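                # e.g. with gradient_accumulate_steps=2 the loss is halved here and
                # optimizer.step() below only runs every second batch, which roughly
                # doubles the effective batch size without extra memory.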
forward_time = time.time() - forward_time
# backward
backward_time = time.time()
if fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
backward_time = time.time() - backward_time
optimizer_time = time.time()
if (global_steps + 1) % gradient_accumulate_steps == 0:
# clip gradient
if clip_grad_norm > 0:
if fp16:
total_norm = torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer),
clip_grad_norm)
else:
total_norm = torch.nn.utils.clip_grad_norm_(net.parameters(),
clip_grad_norm)
if writer is not None:
writer.add_scalar(tag='grad-para/Total-Norm',
scalar_value=float(total_norm),
global_step=global_steps)
optimizer.step()
# step LR scheduler
if lr_scheduler is not None and not isinstance(lr_scheduler,
torch.optim.lr_scheduler.ReduceLROnPlateau):
lr_scheduler.step()
# clear the parameter gradients
optimizer.zero_grad()
optimizer_time = time.time() - optimizer_time
# update metric
metric_time = time.time()
metrics.update(outputs)
if writer is not None:
with torch.no_grad():
for group_i, param_group in enumerate(optimizer.param_groups):
writer.add_scalar(tag='Initial-LR/Group_{}'.format(group_i),
scalar_value=param_group['initial_lr'],
global_step=global_steps)
writer.add_scalar(tag='LR/Group_{}'.format(group_i),
scalar_value=param_group['lr'],
global_step=global_steps)
writer.add_scalar(tag='Train-Loss',
scalar_value=float(loss.item()),
global_step=global_steps)
name, value = metrics.get()
for n, v in zip(name, value):
writer.add_scalar(tag='Train-' + n,
scalar_value=v,
global_step=global_steps)
metric_time = time.time() - metric_time
# execute batch_end_callbacks
if batch_end_callbacks is not None:
batch_end_params = BatchEndParam(epoch=epoch, nbatch=nbatch, add_step=True, rank=rank,
data_in_time=data_in_time, data_transfer_time=data_transfer_time,
forward_time=forward_time, backward_time=backward_time,
optimizer_time=optimizer_time, metric_time=metric_time,
eval_metric=metrics, locals=locals())
_multiple_callbacks(batch_end_callbacks, batch_end_params)
# update end time
end_time = time.time()
        # execute epoch_end_callbacks
if validation_monitor is not None:
validation_monitor(epoch, net, optimizer, writer)
if epoch_end_callbacks is not None:
_multiple_callbacks(epoch_end_callbacks, epoch, net, optimizer, writer, validation_monitor=validation_monitor)
| 7,611 | 37.251256 | 122 | py |
VLC-BERT | VLC-BERT-master/common/backbone/resnet/resnet.py | """
Modified from torchvision, but exposes features from different stages
"""
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch
import warnings
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
model_layers = {
'resnet18': [2, 2, 2, 2],
'resnet34': [3, 4, 6, 3],
'resnet50': [3, 4, 6, 3],
'resnet101': [3, 4, 23, 3],
'resnet152': [3, 8, 36, 3],
}
def conv3x3(in_planes, out_planes, stride=1, dilation=1, padding=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, dilation=dilation,
padding=padding, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1, **kwargs):
super(BasicBlock, self).__init__()
# if dilation == 1:
# self.conv1 = conv3x3(inplanes, planes, stride, dilation)
# elif dilation == 2:
# self.conv1 = conv3x3(inplanes, planes, stride, dilation, padding=2)
# else:
# raise ValueError('dilation must be 1 or 2!')
self.conv1 = conv3x3(inplanes, planes, stride, dilation, padding=dilation)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1, stride_in_1x1=False):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1 if not stride_in_1x1 else stride, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
# if dilation == 1:
# self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride if not stride_in_1x1 else 1,
# dilation=dilation, padding=1, bias=False)
# elif dilation == 2:
# self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride if not stride_in_1x1 else 1,
# dilation=dilation, padding=2, bias=False)
# else:
# raise ValueError('dilation must be 1 or 2!')
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride if not stride_in_1x1 else 1,
dilation=dilation, padding=dilation, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=None, expose_stages=None, dilations=None, stride_in_1x1=False):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
layers_planes = [64, 128, 256, 512]
layers_strides = [1, 2, 2, 2]
layers_dilations = dilations if dilations is not None else [1, 1, 1, 1]
for i, dilation in enumerate(layers_dilations):
if dilation == 2:
layers_strides[i] = 1
layers_planes = layers_planes[:len(layers)]
layers_strides = layers_strides[:len(layers)]
layers_dilations = layers_dilations[:len(layers)]
for i, (planes, blocks, stride, dilation) in enumerate(zip(layers_planes, layers, layers_strides, layers_dilations)):
layer = self._make_layer(block, planes, blocks, stride=stride, dilation=dilation, stride_in_1x1=stride_in_1x1)
self.__setattr__('layer{}'.format(i + 1), layer)
self.num_layers = i + 1
        self.has_fc_head = expose_stages is not None and 6 in expose_stages
self.expose_stages = expose_stages
if self.has_fc_head:
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
self.expose_stages.remove(6)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilation=1, stride_in_1x1=False):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, dilation, stride_in_1x1=stride_in_1x1))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation))
return nn.Sequential(*layers)
def forward(self, x):
expose_feats = {}
feats = {}
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
feats['body1'] = x
for i in range(self.num_layers):
x = self.__getattr__("layer{}".format(i + 1))(x)
feats['body{}'.format(i + 2)] = x
if self.has_fc_head:
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
expose_feats['cls_score'] = x
if self.expose_stages is not None:
for expose_stage in self.expose_stages:
feat_name = 'body{}'.format(expose_stage)
expose_feats[feat_name] = feats[feat_name]
return expose_feats
def load_pretrained_state_dict(self, state_dict):
"""Load state dict of pretrained model
Args:
state_dict (dict): state dict to load
"""
new_state_dict = self.state_dict()
miss_keys = []
for k in new_state_dict.keys():
if k in state_dict.keys():
new_state_dict[k] = state_dict[k]
else:
miss_keys.append(k)
if len(miss_keys) > 0:
warnings.warn('miss keys: {}'.format(miss_keys))
self.load_state_dict(new_state_dict)
def frozen_parameters(self, frozen_stages=None, frozen_bn=False):
if frozen_bn:
for module in self.modules():
if isinstance(module, nn.BatchNorm2d):
for param in module.parameters():
param.requires_grad = False
if frozen_stages is not None:
for stage in frozen_stages:
assert (stage >= 1) and (stage <= 6)
if stage == 1:
for param in self.conv1.parameters():
param.requires_grad = False
for param in self.bn1.parameters():
param.requires_grad = False
elif stage < 6:
for param in self.__getattr__("layer{}".format(stage - 1)).parameters():
param.requires_grad = False
else:
for param in self.fc.parameters():
param.requires_grad = False
def bn_eval(self):
for module in self.modules():
if isinstance(module, nn.BatchNorm2d):
module.eval()
def resnet18(pretrained=False, pretrained_model_path=None, num_classes=None, expose_stages=None, dilations=None, **kwargs):
"""Constructs a ResNet-18 model
Args:
pretrained (bool): if True, load pretrained model. Default: False
pretrained_model_path (str, optional): only effective when pretrained=True,
if not specified, use pretrained model from model_zoo.
num_classes (int): number of classes for the fc output score.
expose_stages (list, optional): list of expose stages, e.g. [4, 5] means expose conv4 and conv5 stage output.
if not specified, only expose output of end_stage.
"""
if num_classes is None:
        assert expose_stages is not None, "num_classes and expose_stages are both None"
        assert 6 not in expose_stages, "can't expose the 6th stage because num_classes is None"
if expose_stages is None:
expose_stages = [6]
end_stage = max(expose_stages)
assert end_stage <= 6, "the max expose_stage is out of range"
layers = model_layers['resnet18'][:end_stage - 1]
model = ResNet(block=BasicBlock, layers=layers, num_classes=num_classes, expose_stages=expose_stages, dilations=dilations)
if pretrained:
if pretrained_model_path is not None:
state_dict = torch.load(pretrained_model_path, map_location=lambda storage, loc: storage)
else:
state_dict = model_zoo.load_url(model_urls['resnet18'])
model.load_pretrained_state_dict(state_dict)
return model
def resnet34(pretrained=False, pretrained_model_path=None, num_classes=None, expose_stages=None, dilations=None, **kwargs):
"""Constructs a ResNet-34 model
Args:
pretrained (bool): if True, load pretrained model. Default: False
pretrained_model_path (str, optional): only effective when pretrained=True,
if not specified, use pretrained model from model_zoo.
num_classes (int): number of classes for the fc output score.
expose_stages (list, optional): list of expose stages, e.g. [4, 5] means expose conv4 and conv5 stage output.
if not specified, only expose output of end_stage.
"""
if num_classes is None:
        assert expose_stages is not None, "num_classes and expose_stages are both None"
        assert 6 not in expose_stages, "can't expose the 6th stage because num_classes is None"
if expose_stages is None:
expose_stages = [6]
end_stage = max(expose_stages)
assert end_stage <= 6, "the max expose_stage is out of range"
layers = model_layers['resnet34'][:end_stage - 1]
model = ResNet(block=BasicBlock, layers=layers, num_classes=num_classes, expose_stages=expose_stages,
dilations=dilations)
if pretrained:
if pretrained_model_path is not None:
state_dict = torch.load(pretrained_model_path, map_location=lambda storage, loc: storage)
else:
state_dict = model_zoo.load_url(model_urls['resnet34'])
model.load_pretrained_state_dict(state_dict)
return model
def resnet50(pretrained=False, pretrained_model_path=None, num_classes=None, expose_stages=None, dilations=None, stride_in_1x1=False):
"""Constructs a ResNet-50 model
Args:
pretrained (bool): if True, load pretrained model. Default: False
pretrained_model_path (str, optional): only effective when pretrained=True,
if not specified, use pretrained model from model_zoo.
num_classes (int): number of classes for the fc output score.
expose_stages (list, optional): list of expose stages, e.g. [4, 5] means expose conv4 and conv5 stage output.
if not specified, only expose output of end_stage.
"""
if num_classes is None:
        assert expose_stages is not None, "num_classes and expose_stages are both None"
        assert 6 not in expose_stages, "can't expose the 6th stage because num_classes is None"
if expose_stages is None:
expose_stages = [6]
end_stage = max(expose_stages)
assert end_stage <= 6, "the max expose_stage is out of range"
layers = model_layers['resnet50'][:end_stage - 1]
model = ResNet(block=Bottleneck, layers=layers, num_classes=num_classes, expose_stages=expose_stages,
dilations=dilations, stride_in_1x1=stride_in_1x1)
if pretrained:
if pretrained_model_path is not None:
state_dict = torch.load(pretrained_model_path, map_location=lambda storage, loc: storage)
else:
state_dict = model_zoo.load_url(model_urls['resnet50'])
model.load_pretrained_state_dict(state_dict)
return model
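# Usage sketch (arguments are illustrative): resnet50(pretrained=False, expose_stages=[4, 5],
# dilations=[1, 1, 1, 2]) returns a backbone whose forward() yields a dict of feature maps
# {'body4': ..., 'body5': ...} instead of classification scores.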
def resnet101(pretrained=False, pretrained_model_path=None, num_classes=None, expose_stages=None, dilations=None, stride_in_1x1=False):
"""Constructs a ResNet-101 model
Args:
pretrained (bool): if True, load pretrained model. Default: False
pretrained_model_path (str, optional): only effective when pretrained=True,
if not specified, use pretrained model from model_zoo.
num_classes (int): number of classes for the fc output score.
expose_stages (list, optional): list of expose stages, e.g. [4, 5] means expose conv4 and conv5 stage output.
if not specified, only expose output of end_stage.
"""
if num_classes is None:
        assert expose_stages is not None, "num_classes and expose_stages are both None"
        assert 6 not in expose_stages, "can't expose the 6th stage because num_classes is None"
if expose_stages is None:
expose_stages = [6]
end_stage = max(expose_stages)
assert end_stage <= 6, "the max expose_stage is out of range"
layers = model_layers['resnet101'][:end_stage - 1]
model = ResNet(block=Bottleneck, layers=layers, num_classes=num_classes, expose_stages=expose_stages,
dilations=dilations, stride_in_1x1=stride_in_1x1)
if pretrained:
if pretrained_model_path is not None:
state_dict = torch.load(pretrained_model_path, map_location=lambda storage, loc: storage)
else:
state_dict = model_zoo.load_url(model_urls['resnet101'])
model.load_pretrained_state_dict(state_dict)
return model
def resnet152(pretrained=False, pretrained_model_path=None, num_classes=None, expose_stages=None, dilations=None, stride_in_1x1=False):
"""Constructs a ResNet-152 model
Args:
pretrained (bool): if True, load pretrained model. Default: False
pretrained_model_path (str, optional): only effective when pretrained=True,
if not specified, use pretrained model from model_zoo.
num_classes (int): number of classes for the fc output score.
expose_stages (list, optional): list of expose stages, e.g. [4, 5] means expose conv4 and conv5 stage output.
if not specified, only expose output of end_stage.
"""
if num_classes is None:
        assert expose_stages is not None, "num_classes and expose_stages are both None"
        assert 6 not in expose_stages, "can't expose the 6th stage because num_classes is None"
if expose_stages is None:
expose_stages = [6]
end_stage = max(expose_stages)
assert end_stage <= 6, "the max expose_stage is out of range"
layers = model_layers['resnet152'][:end_stage - 1]
model = ResNet(block=Bottleneck, layers=layers, num_classes=num_classes, expose_stages=expose_stages,
dilations=dilations, stride_in_1x1=stride_in_1x1)
if pretrained:
if pretrained_model_path is not None:
state_dict = torch.load(pretrained_model_path, map_location=lambda storage, loc: storage)
else:
state_dict = model_zoo.load_url(model_urls['resnet152'])
model.load_pretrained_state_dict(state_dict)
return model
| 17,247 | 40.461538 | 135 | py |
VLC-BERT | VLC-BERT-master/common/callbacks/epoch_end_callbacks/checkpoint.py | import torch
class Checkpoint(object):
def __init__(self, prefix, frequent):
super(Checkpoint, self).__init__()
self.prefix = prefix
self.frequent = frequent
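    # Usage sketch (prefix is illustrative): Checkpoint(prefix='./output/vlbert', frequent=1)
    # saves '<prefix>-latest.model' every epoch, '<prefix>-best.model' whenever the validation
    # monitor reports a new best epoch, and '<prefix>-%04d.model' every `frequent` epochs.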
def __call__(self, epoch_num, net, optimizer, writer, validation_monitor=None):
checkpoint_dict = dict()
checkpoint_dict['state_dict'] = net.state_dict()
checkpoint_dict['optimizer'] = optimizer.state_dict()
latest_param_name = '{}-latest.model'.format(self.prefix)
torch.save(checkpoint_dict, latest_param_name)
save_to_best = False
if validation_monitor is not None:
checkpoint_dict['validation_monitor'] = validation_monitor.state_dict()
if validation_monitor.best_epoch == epoch_num:
save_to_best = True
if save_to_best:
best_param_name = '{}-best.model'.format(self.prefix)
torch.save(checkpoint_dict, best_param_name)
print('Save new best model to {}.'.format(best_param_name))
if (epoch_num + 1) % self.frequent == 0:
param_name = '{}-{:04d}.model'.format(self.prefix, epoch_num)
torch.save(checkpoint_dict, param_name) | 1,212 | 36.90625 | 83 | py |
VLC-BERT | VLC-BERT-master/common/nlp/misc.py | import torch
import random
def get_align_matrix(aligned_ids, sparse=False, device=None, dtype=torch.float32):
"""
Get aligned matrix for feature alignment in sentence embedding
:param aligned_ids: list, aligned_ids[k] means original index of k-th token
:param sparse: whether to return sparse matrix
:param device: device of returned align matrix
:param dtype: dtype of returned align matrix
:return: align_matrix: torch.FloatTensor, shape: (L, L')
Example:
>> aligned_ids = [0, 0, 1, 2, 2, 2]
>> get_align_matrix(aligned_ids)
tensor([[0.5000, 0.5000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 1.0000, 0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 0.0000, 0.3333, 0.3333, 0.3333]])
"""
l0 = max(aligned_ids) + 1
l1 = len(aligned_ids)
if sparse:
raise NotImplementedError
else:
align_matrix = torch.zeros((l0, l1), dtype=dtype, device=device)
align_matrix[aligned_ids, torch.arange(l1)] = 1
align_matrix = align_matrix / align_matrix.sum(dim=1, keepdim=True)
return align_matrix
def get_all_ngrams(words):
"""
Get all n-grams of words
:param words: list of str
:return: ngrams, list of (list of str)
"""
ngrams = []
N = len(words)
for n in range(1, N + 1):
for i in range(0, N - n + 1):
ngrams.append([words[j] for j in range(i, i + n)])
return ngrams
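# Illustrative sketch (not executed):
# get_all_ngrams(['a', 'b', 'c']) -> [['a'], ['b'], ['c'], ['a', 'b'], ['b', 'c'], ['a', 'b', 'c']]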
def random_word_with_token_ids(token_ids, tokenizer):
"""
Masking some random tokens for Language Model task with probabilities as in the original BERT paper.
:param token_ids: list of int, list of token id.
    :param tokenizer: Tokenizer, object used for tokenization (we need its vocab here)
:return: (list of str, list of int), masked tokens and related labels for LM prediction
"""
output_label = []
mask_id = tokenizer.convert_tokens_to_ids(['[MASK]'])[0]
for i, token_id in enumerate(token_ids):
prob = random.random()
# mask token with 15% probability
if prob < 0.15:
prob /= 0.15
# 80% randomly change token to mask token
if prob < 0.8:
token_ids[i] = mask_id
# 10% randomly change token to random token
elif prob < 0.9:
token_ids[i] = random.choice(list(tokenizer.vocab.items()))[1]
# -> rest 10% randomly keep current token
# append current token to output (we will predict these later)
output_label.append(token_id)
else:
# no masking token (will be ignored by loss function later)
output_label.append(-1)
return token_ids, output_label
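# Expected behaviour sketch: roughly 15% of positions are selected for prediction (their
# label is the original token id); of those, ~80% are replaced by [MASK], ~10% by a random
# vocabulary token, and ~10% are left unchanged. Unselected positions get label -1.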
| 2,726 | 30.344828 | 104 | py |
VLC-BERT | VLC-BERT-master/common/nlp/time_distributed.py | """
A wrapper that unrolls the second (time) dimension of a tensor
into the first (batch) dimension, applies some other ``Module``,
and then rolls the time dimension back up.
"""
import torch
class TimeDistributed(torch.nn.Module):
"""
Given an input shaped like ``(batch_size, time_steps, [rest])`` and a ``Module`` that takes
inputs like ``(batch_size, [rest])``, ``TimeDistributed`` reshapes the input to be
``(batch_size * time_steps, [rest])``, applies the contained ``Module``, then reshapes it back.
Note that while the above gives shapes with ``batch_size`` first, this ``Module`` also works if
``batch_size`` is second - we always just combine the first two dimensions, then split them.
"""
def __init__(self, module):
super(TimeDistributed, self).__init__()
self._module = module
def forward(self, *inputs, **kwargs): # pylint: disable=arguments-differ
reshaped_inputs = []
for input_tensor in inputs:
input_size = input_tensor.size()
if len(input_size) <= 2:
raise RuntimeError("No dimension to distribute: " + str(input_size))
# Squash batch_size and time_steps into a single axis; result has shape
# (batch_size * time_steps, input_size).
squashed_shape = [-1] + [x for x in input_size[2:]]
reshaped_inputs.append(input_tensor.contiguous().view(*squashed_shape))
reshaped_outputs = self._module(*reshaped_inputs, **kwargs)
if isinstance(reshaped_outputs, torch.Tensor):
# Now get the output back into the right shape.
# (batch_size, time_steps, [hidden_size])
new_shape = [input_size[0], input_size[1]] + [x for x in reshaped_outputs.size()[1:]]
outputs = reshaped_outputs.contiguous().view(*new_shape)
elif isinstance(reshaped_outputs, tuple):
outputs = []
for output in reshaped_outputs:
new_shape = [input_size[0], input_size[1]] + [x for x in output.size()[1:]]
outputs.append(output.contiguous().view(*new_shape))
outputs = tuple(outputs)
else:
raise ValueError("Not support!")
return outputs
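# Shape sketch: wrapping a Linear(din, dout) with TimeDistributed maps an input of shape
# (batch_size, time_steps, din) to (batch_size, time_steps, dout) by flattening the first
# two dimensions before the wrapped call and restoring them afterwards.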
| 2,245 | 42.192308 | 99 | py |
VLC-BERT | VLC-BERT-master/common/nlp/encoder_base.py | from typing import Tuple, Union, Optional, Callable
import torch
from torch.nn.utils.rnn import pack_padded_sequence, PackedSequence
# We have two types here for the state, because storing the state in something
# which is Iterable (like a tuple, below), is helpful for internal manipulation
# - however, the states are consumed as either Tensors or a Tuple of Tensors, so
# returning them in this format is unhelpful.
RnnState = Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]] # pylint: disable=invalid-name
RnnStateStorage = Tuple[torch.Tensor, ...] # pylint: disable=invalid-name
def get_lengths_from_binary_sequence_mask(mask: torch.Tensor):
"""
Compute sequence lengths for each batch element in a tensor using a
binary mask.
Parameters
----------
mask : torch.Tensor, required.
A 2D binary mask of shape (batch_size, sequence_length) to
calculate the per-batch sequence lengths from.
Returns
-------
A torch.LongTensor of shape (batch_size,) representing the lengths
of the sequences in the batch.
"""
return mask.long().sum(-1)
def sort_batch_by_length(tensor: torch.Tensor, sequence_lengths: torch.Tensor):
"""
Sort a batch first tensor by some specified lengths.
Parameters
----------
tensor : torch.FloatTensor, required.
A batch first Pytorch tensor.
sequence_lengths : torch.LongTensor, required.
A tensor representing the lengths of some dimension of the tensor which
we want to sort by.
Returns
-------
sorted_tensor : torch.FloatTensor
The original tensor sorted along the batch dimension with respect to sequence_lengths.
sorted_sequence_lengths : torch.LongTensor
The original sequence_lengths sorted by decreasing size.
restoration_indices : torch.LongTensor
Indices into the sorted_tensor such that
``sorted_tensor.index_select(0, restoration_indices) == original_tensor``
    permutation_index : torch.LongTensor
The indices used to sort the tensor. This is useful if you want to sort many
tensors using the same ordering.
"""
if not isinstance(tensor, torch.Tensor) or not isinstance(sequence_lengths, torch.Tensor):
raise Exception("Both the tensor and sequence lengths must be torch.Tensors.")
sorted_sequence_lengths, permutation_index = sequence_lengths.sort(0, descending=True)
sorted_tensor = tensor.index_select(0, permutation_index)
index_range = torch.arange(0, len(sequence_lengths), device=sequence_lengths.device)
# This is the equivalent of zipping with index, sorting by the original
# sequence lengths and returning the now sorted indices.
_, reverse_mapping = permutation_index.sort(0, descending=False)
restoration_indices = index_range.index_select(0, reverse_mapping)
return sorted_tensor, sorted_sequence_lengths, restoration_indices, permutation_index
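# Illustrative sketch (not executed): for sequence_lengths = [2, 5, 3] the permutation index
# is [1, 2, 0] (longest first) and restoration_indices is [2, 0, 1], so
# sorted_tensor.index_select(0, restoration_indices) recovers the original batch order.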
class _EncoderBase(torch.nn.Module):
# pylint: disable=abstract-method
"""
    This abstract class serves as a base for the two ``Encoder`` abstractions in AllenNLP.
- :class:`~allennlp.modules.seq2seq_encoders.Seq2SeqEncoders`
- :class:`~allennlp.modules.seq2vec_encoders.Seq2VecEncoders`
Additionally, this class provides functionality for sorting sequences by length
so they can be consumed by Pytorch RNN classes, which require their inputs to be
    sorted by length. Finally, it also provides optional statefulness to all of its
    subclasses by allowing the caching and retrieving of the hidden states of RNNs.
"""
def __init__(self, stateful: bool = False) -> None:
super(_EncoderBase, self).__init__()
self.stateful = stateful
self._states: Optional[RnnStateStorage] = None
def sort_and_run_forward(self,
module: Callable[[PackedSequence, Optional[RnnState]],
Tuple[Union[PackedSequence, torch.Tensor], RnnState]],
inputs: torch.Tensor,
mask: torch.Tensor,
hidden_state: Optional[RnnState] = None):
"""
This function exists because Pytorch RNNs require that their inputs be sorted
before being passed as input. As all of our Seq2xxxEncoders use this functionality,
it is provided in a base class. This method can be called on any module which
takes as input a ``PackedSequence`` and some ``hidden_state``, which can either be a
tuple of tensors or a tensor.
As all of our Seq2xxxEncoders have different return types, we return `sorted`
outputs from the module, which is called directly. Additionally, we return the
        indices into the batch dimension required to restore the tensor to its correct,
        unsorted order and the number of valid batch elements (i.e. the number of elements
in the batch which are not completely masked). This un-sorting and re-padding
of the module outputs is left to the subclasses because their outputs have different
types and handling them smoothly here is difficult.
Parameters
----------
module : ``Callable[[PackedSequence, Optional[RnnState]],
Tuple[Union[PackedSequence, torch.Tensor], RnnState]]``, required.
A function to run on the inputs. In most cases, this is a ``torch.nn.Module``.
inputs : ``torch.Tensor``, required.
A tensor of shape ``(batch_size, sequence_length, embedding_size)`` representing
the inputs to the Encoder.
mask : ``torch.Tensor``, required.
A tensor of shape ``(batch_size, sequence_length)``, representing masked and
non-masked elements of the sequence for each element in the batch.
hidden_state : ``Optional[RnnState]``, (default = None).
A single tensor of shape (num_layers, batch_size, hidden_size) representing the
state of an RNN with or a tuple of
tensors of shapes (num_layers, batch_size, hidden_size) and
(num_layers, batch_size, memory_size), representing the hidden state and memory
state of an LSTM-like RNN.
Returns
-------
module_output : ``Union[torch.Tensor, PackedSequence]``.
A Tensor or PackedSequence representing the output of the Pytorch Module.
The batch size dimension will be equal to ``num_valid``, as sequences of zero
length are clipped off before the module is called, as Pytorch cannot handle
zero length sequences.
final_states : ``Optional[RnnState]``
A Tensor representing the hidden state of the Pytorch Module. This can either
be a single tensor of shape (num_layers, num_valid, hidden_size), for instance in
the case of a GRU, or a tuple of tensors, such as those required for an LSTM.
restoration_indices : ``torch.LongTensor``
A tensor of shape ``(batch_size,)``, describing the re-indexing required to transform
the outputs back to their original batch order.
"""
# In some circumstances you may have sequences of zero length. ``pack_padded_sequence``
# requires all sequence lengths to be > 0, so remove sequences of zero length before
# calling self._module, then fill with zeros.
# First count how many sequences are empty.
batch_size = mask.size(0)
num_valid = torch.sum(mask[:, 0]).int().item()
sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
sorted_inputs, sorted_sequence_lengths, restoration_indices, sorting_indices = \
sort_batch_by_length(inputs, sequence_lengths)
# Now create a PackedSequence with only the non-empty, sorted sequences.
packed_sequence_input = pack_padded_sequence(sorted_inputs[:num_valid, :, :],
sorted_sequence_lengths[:num_valid].data.tolist(),
batch_first=True)
# Prepare the initial states.
if not self.stateful:
if hidden_state is None:
initial_states = hidden_state
elif isinstance(hidden_state, tuple):
initial_states = [state.index_select(1, sorting_indices)[:, :num_valid, :].contiguous()
for state in hidden_state]
else:
initial_states = hidden_state.index_select(1, sorting_indices)[:, :num_valid, :].contiguous()
else:
initial_states = self._get_initial_states(batch_size, num_valid, sorting_indices)
# Actually call the module on the sorted PackedSequence.
module_output, final_states = module(packed_sequence_input, initial_states)
return module_output, final_states, restoration_indices
def _get_initial_states(self,
batch_size: int,
num_valid: int,
sorting_indices: torch.LongTensor) -> Optional[RnnState]:
"""
Returns an initial state for use in an RNN. Additionally, this method handles
the batch size changing across calls by mutating the state to append initial states
for new elements in the batch. Finally, it also handles sorting the states
with respect to the sequence lengths of elements in the batch and removing rows
which are completely padded. Importantly, this `mutates` the state if the
current batch size is larger than when it was previously called.
Parameters
----------
batch_size : ``int``, required.
The batch size can change size across calls to stateful RNNs, so we need
to know if we need to expand or shrink the states before returning them.
Expanded states will be set to zero.
num_valid : ``int``, required.
The batch may contain completely padded sequences which get removed before
the sequence is passed through the encoder. We also need to clip these off
of the state too.
sorting_indices ``torch.LongTensor``, required.
Pytorch RNNs take sequences sorted by length. When we return the states to be
used for a given call to ``module.forward``, we need the states to match up to
the sorted sequences, so before returning them, we sort the states using the
same indices used to sort the sequences.
Returns
-------
This method has a complex return type because it has to deal with the first time it
is called, when it has no state, and the fact that types of RNN have heterogeneous
states.
If it is the first time the module has been called, it returns ``None``, regardless
of the type of the ``Module``.
Otherwise, for LSTMs, it returns a tuple of ``torch.Tensors`` with shape
``(num_layers, num_valid, state_size)`` and ``(num_layers, num_valid, memory_size)``
respectively, or for GRUs, it returns a single ``torch.Tensor`` of shape
``(num_layers, num_valid, state_size)``.
"""
        # We don't know the state sizes the first time forward is called,
        # so we let the module define what its initial hidden state looks like.
if self._states is None:
return None
# Otherwise, we have some previous states.
if batch_size > self._states[0].size(1):
# This batch is larger than the all previous states.
# If so, resize the states.
num_states_to_concat = batch_size - self._states[0].size(1)
resized_states = []
# state has shape (num_layers, batch_size, hidden_size)
for state in self._states:
# This _must_ be inside the loop because some
# RNNs have states with different last dimension sizes.
zeros = state.new_zeros(state.size(0),
num_states_to_concat,
state.size(2))
resized_states.append(torch.cat([state, zeros], 1))
self._states = tuple(resized_states)
correctly_shaped_states = self._states
elif batch_size < self._states[0].size(1):
# This batch is smaller than the previous one.
correctly_shaped_states = tuple(state[:, :batch_size, :] for state in self._states)
else:
correctly_shaped_states = self._states
# At this point, our states are of shape (num_layers, batch_size, hidden_size).
# However, the encoder uses sorted sequences and additionally removes elements
# of the batch which are fully padded. We need the states to match up to these
# sorted and filtered sequences, so we do that in the next two blocks before
# returning the state/s.
if len(self._states) == 1:
# GRUs only have a single state. This `unpacks` it from the
# tuple and returns the tensor directly.
correctly_shaped_state = correctly_shaped_states[0]
sorted_state = correctly_shaped_state.index_select(1, sorting_indices)
return sorted_state[:, :num_valid, :]
else:
# LSTMs have a state tuple of (state, memory).
sorted_states = [state.index_select(1, sorting_indices)
for state in correctly_shaped_states]
return tuple(state[:, :num_valid, :] for state in sorted_states)
def _update_states(self,
final_states: RnnStateStorage,
restoration_indices: torch.LongTensor) -> None:
"""
After the RNN has run forward, the states need to be updated.
This method just sets the state to the updated new state, performing
several pieces of book-keeping along the way - namely, unsorting the
states and ensuring that the states of completely padded sequences are
not updated. Finally, it also detaches the state variable from the
computational graph, such that the graph can be garbage collected after
each batch iteration.
Parameters
----------
final_states : ``RnnStateStorage``, required.
The hidden states returned as output from the RNN.
restoration_indices : ``torch.LongTensor``, required.
The indices that invert the sorting used in ``sort_and_run_forward``
to order the states with respect to the lengths of the sequences in
the batch.
"""
# TODO(Mark): seems weird to sort here, but append zeros in the subclasses.
# which way around is best?
new_unsorted_states = [state.index_select(1, restoration_indices)
for state in final_states]
if self._states is None:
# We don't already have states, so just set the
# ones we receive to be the current state.
self._states = tuple(state.data for state in new_unsorted_states)
else:
# Now we've sorted the states back so that they correspond to the original
# indices, we need to figure out what states we need to update, because if we
# didn't use a state for a particular row, we want to preserve its state.
# Thankfully, the rows which are all zero in the state correspond exactly
# to those which aren't used, so we create masks of shape (new_batch_size,),
# denoting which states were used in the RNN computation.
current_state_batch_size = self._states[0].size(1)
new_state_batch_size = final_states[0].size(1)
            # Masks denoting which rows of the new state were actually used,
            # each of shape (1, new_batch_size, 1).
used_new_rows_mask = [(state[0, :, :].sum(-1)
!= 0.0).float().view(1, new_state_batch_size, 1)
for state in new_unsorted_states]
new_states = []
if current_state_batch_size > new_state_batch_size:
# The new state is smaller than the old one,
# so just update the indices which we used.
for old_state, new_state, used_mask in zip(self._states,
new_unsorted_states,
used_new_rows_mask):
# zero out all rows in the previous state
# which _were_ used in the current state.
masked_old_state = old_state[:, :new_state_batch_size, :] * (1 - used_mask)
# The old state is larger, so update the relevant parts of it.
old_state[:, :new_state_batch_size, :] = new_state + masked_old_state
new_states.append(old_state.detach())
else:
# The states are the same size, so we just have to
# deal with the possibility that some rows weren't used.
new_states = []
for old_state, new_state, used_mask in zip(self._states,
new_unsorted_states,
used_new_rows_mask):
# zero out all rows which _were_ used in the current state.
masked_old_state = old_state * (1 - used_mask)
                    # The states are the same size, so rows that were not used
                    # in this batch keep their previous state.
new_state += masked_old_state
new_states.append(new_state.detach())
# It looks like there should be another case handled here - when
# the current_state_batch_size < new_state_batch_size. However,
            # this never happens, because the states themselves are mutated
            # by appending zeros when calling _get_initial_states, meaning that
# the new states are either of equal size, or smaller, in the case
# that there are some unused elements (zero-length) for the RNN computation.
self._states = tuple(new_states)
def reset_states(self):
self._states = None
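# Illustrative sketch (not part of the original class above): how a state tensor of
# shape (num_layers, batch_size, hidden_size) is reordered with sorting indices and
# restored with the corresponding restoration indices. All sizes and values below
# are made up purely for demonstration.
def _demo_sort_and_restore_states():
    state = torch.arange(2 * 4 * 3, dtype=torch.float).view(2, 4, 3)
    lengths = torch.tensor([2, 5, 0, 3])
    # Pytorch RNNs want the batch sorted by decreasing length.
    sorting_indices = torch.sort(lengths, descending=True)[1]
    # The argsort of a permutation is its inverse, which restores the original order.
    restoration_indices = torch.sort(sorting_indices)[1]
    sorted_state = state.index_select(1, sorting_indices)
    restored_state = sorted_state.index_select(1, restoration_indices)
    assert torch.equal(restored_state, state)
    return sorted_state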
| 18,404 | 52.502907 | 109 | py |
VLC-BERT | VLC-BERT-master/common/nlp/bert_encoder_wrapper.py | import torch
import torch.nn as nn
from external.pytorch_pretrained_bert.modeling import BertEncoder, BertLayerNorm
class BertEncoderWrapper(nn.Module):
def __init__(self, bert_config, input_size, output_all_encoded_layers=False):
super(BertEncoderWrapper, self).__init__()
self.bert_config = bert_config
self.output_all_encoded_layers = output_all_encoded_layers
self.input_transform = nn.Linear(input_size, bert_config.hidden_size)
self.with_position_embeddings = False if 'with_position_embeddings' not in bert_config \
else bert_config.with_position_embeddings
if self.with_position_embeddings:
self.position_embedding = nn.Embedding(bert_config.max_position_embeddings, bert_config.hidden_size)
self.LayerNorm = BertLayerNorm(bert_config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(bert_config.hidden_dropout_prob)
self.bert_encoder = BertEncoder(bert_config)
self.apply(self.init_bert_weights)
def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.bert_config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def get_output_dim(self):
return self.bert_config.hidden_size
def forward(self, inputs, mask):
inputs = self.input_transform(inputs)
if self.with_position_embeddings:
seq_length = inputs.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=inputs.device)
position_ids = position_ids.unsqueeze(0).expand((inputs.shape[0], inputs.shape[1]))
position_embeddings = self.position_embedding(position_ids)
inputs = inputs + position_embeddings
inputs = self.LayerNorm(inputs)
inputs = self.dropout(inputs)
extended_attention_mask = mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
output = self.bert_encoder(inputs,
extended_attention_mask,
output_all_encoded_layers=self.output_all_encoded_layers)
if not self.output_all_encoded_layers:
output = output[0]
return output
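# Illustrative sketch (not part of the original module above): the additive attention
# mask used in `forward` turns a {1, 0} padding mask into a {0, -10000} bias that is
# added to the raw attention scores, so padded positions get (almost) zero attention
# weight after the softmax. Shapes and values are made up for demonstration only.
def _demo_extended_attention_mask():
    mask = torch.tensor([[1., 1., 0.]])                # (batch_size, seq_len); last token is padding
    extended_mask = mask.unsqueeze(1).unsqueeze(2)     # (batch_size, 1, 1, seq_len)
    extended_mask = (1.0 - extended_mask) * -10000.0
    scores = torch.zeros(1, 1, 3, 3)                   # fake raw attention scores
    probs = torch.softmax(scores + extended_mask, dim=-1)
    # Each query now attends almost exclusively to the two real tokens.
    return probs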
| 3,207 | 49.125 | 112 | py |
VLC-BERT | VLC-BERT-master/common/nlp/input_variational_dropout.py | import torch
class InputVariationalDropout(torch.nn.Dropout):
"""
Apply the dropout technique in Gal and Ghahramani, "Dropout as a Bayesian Approximation:
Representing Model Uncertainty in Deep Learning" (https://arxiv.org/abs/1506.02142) to a
3D tensor.
This module accepts a 3D tensor of shape ``(batch_size, num_timesteps, embedding_dim)``
and samples a single dropout mask of shape ``(batch_size, embedding_dim)`` and applies
it to every time step.
"""
def forward(self, input_tensor):
# pylint: disable=arguments-differ
"""
Apply dropout to input tensor.
Parameters
----------
input_tensor: ``torch.FloatTensor``
A tensor of shape ``(batch_size, num_timesteps, embedding_dim)``
Returns
-------
output: ``torch.FloatTensor``
A tensor of shape ``(batch_size, num_timesteps, embedding_dim)`` with dropout applied.
"""
ones = input_tensor.data.new_ones(input_tensor.shape[0], input_tensor.shape[-1])
dropout_mask = torch.nn.functional.dropout(ones, self.p, self.training, inplace=False)
if self.inplace:
input_tensor *= dropout_mask.unsqueeze(1)
return None
else:
return dropout_mask.unsqueeze(1) * input_tensor | 1,324 | 37.970588 | 98 | py |
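# Illustrative sketch for the InputVariationalDropout module above (not part of the
# original file): the same dropout mask is shared across all timesteps, so whole
# embedding dimensions are dropped for an entire sequence. Shapes are made up for
# demonstration only.
def _demo_input_variational_dropout():
    import torch
    dropout = InputVariationalDropout(p=0.5)
    dropout.train()
    inputs = torch.ones(2, 4, 8)          # (batch_size, num_timesteps, embedding_dim)
    outputs = dropout(inputs)
    # Every timestep of a given sequence shares the mask, so all timesteps are identical.
    assert torch.equal(outputs[:, 0, :], outputs[:, 1, :])
    return outputs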
VLC-BERT | VLC-BERT-master/common/nlp/roberta/utils.py | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
import os
try:
from functools import lru_cache
except ImportError:
# Just a dummy decorator to get the checks to run on python2
# because honestly I don't want to support a byte-level unicode BPE tokenizer on python 2 right now.
def lru_cache():
return lambda func: func
import logging
import json
import six
from io import open
from functools import wraps
import boto3
import requests
from botocore.exceptions import ClientError
import shutil
from hashlib import sha256
import fnmatch
import tempfile
from tqdm import tqdm
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(
os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
default_cache_path = os.path.join(torch_cache_home, 'pytorch_transformers')
try:
from pathlib import Path
PYTORCH_PRETRAINED_BERT_CACHE = Path(
os.getenv('PYTORCH_TRANSFORMERS_CACHE', os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)))
except (AttributeError, ImportError):
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_TRANSFORMERS_CACHE',
os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
default_cache_path))
PYTORCH_TRANSFORMERS_CACHE = PYTORCH_PRETRAINED_BERT_CACHE # Kept for backward compatibility
logger = logging.getLogger(__name__)
SPECIAL_TOKENS_MAP_FILE = 'special_tokens_map.json'
ADDED_TOKENS_FILE = 'added_tokens.json'
@lru_cache()
def bytes_to_unicode():
"""
    Returns a mapping from utf-8 bytes to unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
_chr = unichr if sys.version_info[0] == 2 else chr
bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
cs = bs[:]
n = 0
for b in range(2 ** 8):
if b not in bs:
bs.append(b)
cs.append(2 ** 8 + n)
n += 1
cs = [_chr(n) for n in cs]
return dict(zip(bs, cs))
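# Illustrative sketch (not part of the original module): the byte-to-unicode table
# covers all 256 byte values and is reversible, which is what makes the byte-level
# BPE vocabulary lossless. The sample text is made up for demonstration only.
def _demo_bytes_to_unicode():
    byte_encoder = bytes_to_unicode()
    byte_decoder = {v: k for k, v in byte_encoder.items()}
    assert len(byte_encoder) == 256
    text = u"café"
    encoded = ''.join(byte_encoder[b] for b in bytearray(text.encode('utf-8')))
    decoded = bytearray(byte_decoder[c] for c in encoded).decode('utf-8')
    assert decoded == text
    return encoded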
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
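# Illustrative sketch (not part of the original module): the symbol pairs returned by
# `get_pairs` are the merge candidates that BPE repeatedly collapses, starting from
# single characters. The example word is made up for demonstration only.
def _demo_get_pairs():
    word = ("l", "o", "w", "e", "r")
    pairs = get_pairs(word)
    assert ("l", "o") in pairs and ("e", "r") in pairs
    return pairs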
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file):
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_TRANSFORMERS_CACHE
if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
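# Illustrative sketch (not part of the original module): the cache filename is a
# deterministic hash of the URL, optionally extended with a hash of the ETag, so an
# unchanged remote file maps to the same cache entry across downloads. The URL and
# ETag below are made up for demonstration only.
def _demo_url_to_filename():
    url = "https://example.com/vocab.json"
    name_without_etag = url_to_filename(url)
    name_with_etag = url_to_filename(url, etag='"abc123"')
    assert name_with_etag.startswith(name_without_etag + '.')
    return name_with_etag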
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_TRANSFORMERS_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if sys.version_info[0] == 2 and not isinstance(cache_dir, str):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
try:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
etag = None
else:
etag = response.headers.get("ETag")
except EnvironmentError:
etag = None
if sys.version_info[0] == 2 and etag is not None:
etag = etag.decode('utf-8')
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# If we don't have a connection (etag is None) and can't identify the file
# try to get the last downloaded one
if not os.path.exists(cache_path) and etag is None:
matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*')
matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files))
if matching_files:
cache_path = os.path.join(cache_dir, matching_files[-1])
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
output_string = json.dumps(meta)
if sys.version_info[0] == 2 and isinstance(output_string, str):
output_string = unicode(output_string, 'utf-8') # The beauty of python 2
meta_file.write(output_string)
logger.info("removing temp file %s", temp_file.name)
return cache_path
class PreTrainedTokenizer(object):
""" Base class for all tokenizers.
    Handles all the shared methods for tokenization and special tokens, as well as methods for downloading/caching/loading pretrained tokenizers and for adding tokens to the vocabulary.
    This class also contains the added tokens in a unified way on top of all tokenizers so we don't have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...).
Class attributes (overridden by derived classes):
- ``vocab_files_names``: a python ``dict`` with, as keys, the ``__init__`` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).
- ``pretrained_vocab_files_map``: a python ``dict of dict`` the high-level keys being the ``__init__`` keyword name of each vocabulary file required by the model, the low-level being the `short-cut-names` (string) of the pretrained models with, as associated values, the `url` (string) to the associated pretrained vocabulary file.
- ``max_model_input_sizes``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or None if the model has no maximum input size.
Parameters:
- ``bos_token``: (`Optional`) string: a beginning of sentence token. Will be associated to ``self.bos_token``
- ``eos_token``: (`Optional`) string: an end of sentence token. Will be associated to ``self.eos_token``
- ``unk_token``: (`Optional`) string: an unknown token. Will be associated to ``self.unk_token``
- ``sep_token``: (`Optional`) string: a separation token (e.g. to separate context and query in an input sequence). Will be associated to ``self.sep_token``
- ``pad_token``: (`Optional`) string: a padding token. Will be associated to ``self.pad_token``
- ``cls_token``: (`Optional`) string: a classification token (e.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model). Will be associated to ``self.cls_token``
- ``mask_token``: (`Optional`) string: a masking token (e.g. when training a model with masked-language modeling). Will be associated to ``self.mask_token``
- ``additional_special_tokens``: (`Optional`) list: a list of additional special tokens. Adding all special tokens here ensure they won't be split by the tokenization process. Will be associated to ``self.additional_special_tokens``
"""
vocab_files_names = {}
pretrained_vocab_files_map = {}
max_model_input_sizes = {}
SPECIAL_TOKENS_ATTRIBUTES = ["bos_token", "eos_token", "unk_token", "sep_token",
"pad_token", "cls_token", "mask_token",
"additional_special_tokens"]
@property
def bos_token(self):
""" Beginning of sentence token (string). Log an error if used while not having been set. """
if self._bos_token is None:
logger.error("Using bos_token, but it is not set yet.")
return self._bos_token
@property
def eos_token(self):
""" End of sentence token (string). Log an error if used while not having been set. """
if self._eos_token is None:
logger.error("Using eos_token, but it is not set yet.")
return self._eos_token
@property
def unk_token(self):
""" Unknown token (string). Log an error if used while not having been set. """
if self._unk_token is None:
logger.error("Using unk_token, but it is not set yet.")
return self._unk_token
@property
def sep_token(self):
""" Separation token (string). E.g. separate context and query in an input sequence. Log an error if used while not having been set. """
if self._sep_token is None:
logger.error("Using sep_token, but it is not set yet.")
return self._sep_token
@property
def pad_token(self):
""" Padding token (string). Log an error if used while not having been set. """
if self._pad_token is None:
logger.error("Using pad_token, but it is not set yet.")
return self._pad_token
@property
def cls_token(self):
""" Classification token (string). E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """
if self._cls_token is None:
logger.error("Using cls_token, but it is not set yet.")
return self._cls_token
@property
def mask_token(self):
""" Mask token (string). E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """
if self._mask_token is None:
logger.error("Using mask_token, but it is not set yet.")
return self._mask_token
@property
def additional_special_tokens(self):
""" All the additional special tokens you may want to use (list of strings). Log an error if used while not having been set. """
if self._additional_special_tokens is None:
logger.error("Using additional_special_tokens, but it is not set yet.")
return self._additional_special_tokens
@bos_token.setter
def bos_token(self, value):
self._bos_token = value
@eos_token.setter
def eos_token(self, value):
self._eos_token = value
@unk_token.setter
def unk_token(self, value):
self._unk_token = value
@sep_token.setter
def sep_token(self, value):
self._sep_token = value
@pad_token.setter
def pad_token(self, value):
self._pad_token = value
@cls_token.setter
def cls_token(self, value):
self._cls_token = value
@mask_token.setter
def mask_token(self, value):
self._mask_token = value
@additional_special_tokens.setter
def additional_special_tokens(self, value):
self._additional_special_tokens = value
def __init__(self, max_len=None, **kwargs):
self._bos_token = None
self._eos_token = None
self._unk_token = None
self._sep_token = None
self._pad_token = None
self._cls_token = None
self._mask_token = None
self._additional_special_tokens = []
self.max_len = max_len if max_len is not None else int(1e12)
self.added_tokens_encoder = {}
self.added_tokens_decoder = {}
for key, value in kwargs.items():
if key in self.SPECIAL_TOKENS_ATTRIBUTES:
if key == 'additional_special_tokens':
assert isinstance(value, (list, tuple)) and all(
isinstance(t, str) or (six.PY2 and isinstance(t, unicode)) for t in value)
else:
assert isinstance(value, str) or (six.PY2 and isinstance(value, unicode))
setattr(self, key, value)
@classmethod
def from_pretrained(cls, *inputs, **kwargs):
r"""
Instantiate a :class:`~pytorch_transformers.PreTrainedTokenizer` (or a derived class) from a predefined tokenizer.
Args:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``.
- a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~pytorch_transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``./my_model_directory/``.
- (not applicable to all derived classes) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``./my_model_directory/vocab.txt``.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used.
inputs: (`optional`) positional arguments: will be passed to the Tokenizer ``__init__`` method.
kwargs: (`optional`) keyword arguments: will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the doc string of :class:`~pytorch_transformers.PreTrainedTokenizer` for details.
Examples::
# We can't instantiate directly the base class `PreTrainedTokenizer` so let's show our examples on a derived class: BertTokenizer
# Download vocabulary from S3 and cache.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`)
tokenizer = BertTokenizer.from_pretrained('./test/saved_model/')
# If the tokenizer uses a single vocabulary file, you can point directly to this file
tokenizer = BertTokenizer.from_pretrained('./test/saved_model/my_vocab.txt')
# You can link tokens to special vocabulary when instantiating
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', unk_token='<unk>')
# You should be sure '<unk>' is in the vocabulary when doing that.
# Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead)
assert tokenizer.unk_token == '<unk>'
"""
return cls._from_pretrained(*inputs, **kwargs)
@classmethod
def _from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
cache_dir = kwargs.pop('cache_dir', None)
s3_models = list(cls.max_model_input_sizes.keys())
vocab_files = {}
if pretrained_model_name_or_path in s3_models:
# Get the vocabulary from AWS S3 bucket
for file_id, map_list in cls.pretrained_vocab_files_map.items():
vocab_files[file_id] = map_list[pretrained_model_name_or_path]
else:
# Get the vocabulary from local files
logger.info(
"Model name '{}' not found in model shortcut name list ({}). "
"Assuming '{}' is a path or url to a directory containing tokenizer files.".format(
pretrained_model_name_or_path, ', '.join(s3_models),
pretrained_model_name_or_path))
# Look for the tokenizer main vocabulary files
for file_id, file_name in cls.vocab_files_names.items():
if os.path.isdir(pretrained_model_name_or_path):
# If a directory is provided we look for the standard filenames
full_file_name = os.path.join(pretrained_model_name_or_path, file_name)
else:
# If a path to a file is provided we use it (will only work for non-BPE tokenizer using a single vocabulary file)
full_file_name = pretrained_model_name_or_path
if not os.path.exists(full_file_name):
logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
full_file_name = None
vocab_files[file_id] = full_file_name
# Look for the additional tokens files
all_vocab_files_names = {'added_tokens_file': ADDED_TOKENS_FILE,
'special_tokens_map_file': SPECIAL_TOKENS_MAP_FILE}
# If a path to a file was provided, get the parent directory
saved_directory = pretrained_model_name_or_path
if os.path.exists(saved_directory) and not os.path.isdir(saved_directory):
saved_directory = os.path.dirname(saved_directory)
for file_id, file_name in all_vocab_files_names.items():
full_file_name = os.path.join(saved_directory, file_name)
if not os.path.exists(full_file_name):
logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
full_file_name = None
vocab_files[file_id] = full_file_name
if all(full_file_name is None for full_file_name in vocab_files.values()):
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find tokenizer files"
"at this path or url.".format(
pretrained_model_name_or_path, ', '.join(s3_models),
pretrained_model_name_or_path, ))
return None
# Get files from url, cache, or disk depending on the case
try:
resolved_vocab_files = {}
for file_id, file_path in vocab_files.items():
if file_path is None:
resolved_vocab_files[file_id] = None
else:
resolved_vocab_files[file_id] = cached_path(file_path, cache_dir=cache_dir)
except EnvironmentError:
if pretrained_model_name_or_path in s3_models:
logger.error("Couldn't reach server to download vocabulary.")
else:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} "
"at this path or url.".format(
pretrained_model_name_or_path, ', '.join(s3_models),
pretrained_model_name_or_path, str(vocab_files.keys())))
return None
for file_id, file_path in vocab_files.items():
if file_path == resolved_vocab_files[file_id]:
logger.info("loading file {}".format(file_path))
else:
logger.info("loading file {} from cache at {}".format(
file_path, resolved_vocab_files[file_id]))
# Set max length if needed
if pretrained_model_name_or_path in cls.max_model_input_sizes:
# if we're using a pretrained model, ensure the tokenizer
            # won't index sequences longer than the number of positional embeddings
max_len = cls.max_model_input_sizes[pretrained_model_name_or_path]
if max_len is not None and isinstance(max_len, (int, float)):
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Merge resolved_vocab_files arguments in kwargs.
added_tokens_file = resolved_vocab_files.pop('added_tokens_file', None)
special_tokens_map_file = resolved_vocab_files.pop('special_tokens_map_file', None)
for args_name, file_path in resolved_vocab_files.items():
if args_name not in kwargs:
kwargs[args_name] = file_path
if special_tokens_map_file is not None:
special_tokens_map = json.load(open(special_tokens_map_file, encoding="utf-8"))
for key, value in special_tokens_map.items():
if key not in kwargs:
kwargs[key] = value
# Instantiate tokenizer.
tokenizer = cls(*inputs, **kwargs)
# Add supplementary tokens.
if added_tokens_file is not None:
added_tok_encoder = json.load(open(added_tokens_file, encoding="utf-8"))
added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
tokenizer.added_tokens_encoder.update(added_tok_encoder)
tokenizer.added_tokens_decoder.update(added_tok_decoder)
return tokenizer
def save_pretrained(self, save_directory):
""" Save the tokenizer vocabulary files (with added tokens) and the
special-tokens-to-class-attributes-mapping to a directory.
            This method makes sure the full tokenizer can then be re-loaded using the :func:`~pytorch_transformers.PreTrainedTokenizer.from_pretrained` class method.
"""
if not os.path.isdir(save_directory):
logger.error("Saving directory ({}) should be a directory".format(save_directory))
return
special_tokens_map_file = os.path.join(save_directory, SPECIAL_TOKENS_MAP_FILE)
added_tokens_file = os.path.join(save_directory, ADDED_TOKENS_FILE)
with open(special_tokens_map_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.special_tokens_map, ensure_ascii=False))
with open(added_tokens_file, 'w', encoding='utf-8') as f:
if self.added_tokens_encoder:
out_str = json.dumps(self.added_tokens_encoder, ensure_ascii=False)
else:
out_str = u"{}"
f.write(out_str)
vocab_files = self.save_vocabulary(save_directory)
return vocab_files + (special_tokens_map_file, added_tokens_file)
def save_vocabulary(self, save_directory):
""" Save the tokenizer vocabulary to a directory. This method does *NOT* save added tokens
and special token mappings.
Please use :func:`~pytorch_transformers.PreTrainedTokenizer.save_pretrained` `()` to save the full Tokenizer state if you want to reload it using the :func:`~pytorch_transformers.PreTrainedTokenizer.from_pretrained` class method.
"""
raise NotImplementedError
def vocab_size(self):
""" Size of the base vocabulary (without the added tokens) """
raise NotImplementedError
def __len__(self):
""" Size of the full vocabulary with the added tokens """
return self.vocab_size + len(self.added_tokens_encoder)
def add_tokens(self, new_tokens):
"""
Add a list of new tokens to the tokenizer class. If the new tokens are not in the
        vocabulary, they are added to it with indices starting from the length of the current vocabulary.
Args:
            new_tokens: list of string. Each string is a token to add. Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assigns the index of the ``unk_token`` to them).
Returns:
Number of tokens added to the vocabulary.
Examples::
# Let's see how to increase the vocabulary of Bert model and tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
print('We have added', num_added_toks, 'tokens')
model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
"""
if not new_tokens:
return 0
to_add_tokens = []
for token in new_tokens:
assert isinstance(token, str) or (six.PY2 and isinstance(token, unicode))
if token != self.unk_token and \
self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token):
to_add_tokens.append(token)
logger.info("Adding %s to the vocabulary", token)
added_tok_encoder = dict((tok, len(self) + i) for i, tok in enumerate(to_add_tokens))
added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
self.added_tokens_encoder.update(added_tok_encoder)
self.added_tokens_decoder.update(added_tok_decoder)
return len(to_add_tokens)
def add_special_tokens(self, special_tokens_dict):
"""
Add a dictionary of special tokens (eos, pad, cls...) to the encoder and link them
to class attributes. If special tokens are NOT in the vocabulary, they are added
to it (indexed starting from the last index of the current vocabulary).
Args:
special_tokens_dict: dict of string. Keys should be in the list of predefined special attributes:
[``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``,
``additional_special_tokens``].
                Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assigns the index of the ``unk_token`` to them).
Returns:
Number of tokens added to the vocabulary.
Examples::
# Let's see how to add a new classification token to GPT-2
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2Model.from_pretrained('gpt2')
special_tokens_dict = {'cls_token': '<CLS>'}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
print('We have added', num_added_toks, 'tokens')
model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
assert tokenizer.cls_token == '<CLS>'
"""
if not special_tokens_dict:
return 0
added_tokens = 0
for key, value in special_tokens_dict.items():
assert key in self.SPECIAL_TOKENS_ATTRIBUTES
if key == 'additional_special_tokens':
assert isinstance(value, (list, tuple)) and all(
isinstance(t, str) or (six.PY2 and isinstance(t, unicode)) for t in value)
added_tokens += self.add_tokens(value)
else:
assert isinstance(value, str) or (six.PY2 and isinstance(value, unicode))
added_tokens += self.add_tokens([value])
logger.info("Assigning %s to the %s key of the tokenizer", value, key)
setattr(self, key, value)
return added_tokens
def tokenize(self, text, **kwargs):
""" Converts a string in a sequence of tokens (string), using the tokenizer.
Split in words for word-based vocabulary or sub-words for sub-word-based
vocabularies (BPE/SentencePieces/WordPieces).
Take care of added tokens.
"""
def split_on_tokens(tok_list, text):
if not text:
return []
if not tok_list:
return self._tokenize(text, **kwargs)
tok = tok_list[0]
split_text = text.split(tok)
return sum((split_on_tokens(tok_list[1:], sub_text.strip()) + [tok] \
for sub_text in split_text), [])[:-1]
added_tokens = list(self.added_tokens_encoder.keys()) + self.all_special_tokens
tokenized_text = split_on_tokens(added_tokens, text)
return tokenized_text
def _tokenize(self, text, **kwargs):
""" Converts a string in a sequence of tokens (string), using the tokenizer.
Split in words for word-based vocabulary or sub-words for sub-word-based
vocabularies (BPE/SentencePieces/WordPieces).
Do NOT take care of added tokens.
"""
raise NotImplementedError
def convert_tokens_to_ids(self, tokens):
""" Converts a single token, or a sequence of tokens, (str/unicode) in a single integer id
(resp. a sequence of ids), using the vocabulary.
"""
if isinstance(tokens, str) or (six.PY2 and isinstance(tokens, unicode)):
return self._convert_token_to_id_with_added_voc(tokens)
ids = []
for token in tokens:
ids.append(self._convert_token_to_id_with_added_voc(token))
if len(ids) > self.max_len:
logger.warning("Token indices sequence length is longer than the specified maximum sequence length "
"for this model ({} > {}). Running this sequence through the model will result in "
"indexing errors".format(len(ids), self.max_len))
return ids
def _convert_token_to_id_with_added_voc(self, token):
if token in self.added_tokens_encoder:
return self.added_tokens_encoder[token]
return self._convert_token_to_id(token)
def _convert_token_to_id(self, token):
raise NotImplementedError
def encode(self, text, text_pair=None, add_special_tokens=False):
"""
Converts a string in a sequence of ids (integer), using the tokenizer and vocabulary.
Same as doing ``self.convert_tokens_to_ids(self.tokenize(text))``.
Args:
text: The first sequence to be encoded.
text_pair: Optional second sequence to be encoded.
add_special_tokens: if set to ``True``, the sequences will be encoded with the special tokens relative
to their model.
"""
if text_pair is None:
if add_special_tokens:
return self.add_special_tokens_single_sentence(self.convert_tokens_to_ids(self.tokenize(text)))
else:
return self.convert_tokens_to_ids(self.tokenize(text))
first_sentence_tokens = [self._convert_token_to_id(token) for token in self.tokenize(text)]
second_sentence_tokens = [self._convert_token_to_id(token) for token in self.tokenize(text_pair)]
if add_special_tokens:
return self.add_special_tokens_sentences_pair(first_sentence_tokens, second_sentence_tokens)
else:
return first_sentence_tokens, second_sentence_tokens
def add_special_tokens_single_sentence(self, token_ids):
raise NotImplementedError
def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
raise NotImplementedError
def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
""" Converts a single index or a sequence of indices (integers) in a token "
(resp.) a sequence of tokens (str/unicode), using the vocabulary and added tokens.
Args:
skip_special_tokens: Don't decode special tokens (self.all_special_tokens). Default: False
"""
if isinstance(ids, int):
if ids in self.added_tokens_decoder:
return self.added_tokens_decoder[ids]
else:
return self._convert_id_to_token(ids)
tokens = []
for index in ids:
if index in self.all_special_ids and skip_special_tokens:
continue
if index in self.added_tokens_decoder:
tokens.append(self.added_tokens_decoder[index])
else:
tokens.append(self._convert_id_to_token(index))
return tokens
def _convert_id_to_token(self, index):
raise NotImplementedError
def convert_tokens_to_string(self, tokens):
""" Converts a sequence of tokens (string) in a single string.
The most simple way to do it is ' '.join(self.convert_ids_to_tokens(token_ids))
but we often want to remove sub-word tokenization artifacts at the same time.
"""
return ' '.join(self.convert_ids_to_tokens(tokens))
def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
"""
        Converts a sequence of ids (integer) into a string, using the tokenizer and vocabulary
with options to remove special tokens and clean up tokenization spaces.
Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``.
"""
filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
text = self.convert_tokens_to_string(filtered_tokens)
if self.sep_token is not None and self.sep_token in text:
text = text.replace(self.cls_token, self.sep_token)
split_text = list(filter(lambda sentence: len(sentence) > 0, text.split(self.sep_token)))
if clean_up_tokenization_spaces:
clean_text = [self.clean_up_tokenization(text) for text in split_text]
return clean_text
else:
return split_text
else:
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
@property
def special_tokens_map(self):
""" A dictionary mapping special token class attribute (cls_token, unk_token...) to their
values ('<unk>', '<cls>'...)
"""
set_attr = {}
for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
attr_value = getattr(self, "_" + attr)
if attr_value:
set_attr[attr] = attr_value
return set_attr
@property
def all_special_tokens(self):
""" List all the special tokens ('<unk>', '<cls>'...) mapped to class attributes
(cls_token, unk_token...).
"""
all_toks = []
set_attr = self.special_tokens_map
for attr_value in set_attr.values():
all_toks = all_toks + (attr_value if isinstance(attr_value, (list, tuple)) else [attr_value])
all_toks = list(set(all_toks))
return all_toks
@property
def all_special_ids(self):
""" List the vocabulary indices of the special tokens ('<unk>', '<cls>'...) mapped to
class attributes (cls_token, unk_token...).
"""
all_toks = self.all_special_tokens
all_ids = list(self._convert_token_to_id(t) for t in all_toks)
return all_ids
@staticmethod
def clean_up_tokenization(out_string):
""" Clean up a list of simple English tokenization artifacts like spaces before punctuations and abreviated forms.
"""
        out_string = (out_string.replace(' .', '.').replace(' ?', '?').replace(' !', '!')
                      .replace(' ,', ',').replace(" ' ", "'").replace(" n't", "n't")
                      .replace(" 'm", "'m").replace(" do not", " don't").replace(" 's", "'s")
                      .replace(" 've", "'ve").replace(" 're", "'re"))
        return out_string
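# Illustrative sketch (not part of the original module): `clean_up_tokenization`
# removes the spaces that naive detokenization leaves around punctuation and
# contractions. The sample string is made up for demonstration only.
def _demo_clean_up_tokenization():
    raw = "do n't hesitate , it 's great !"
    cleaned = PreTrainedTokenizer.clean_up_tokenization(raw)
    # -> "don't hesitate, it's great!"
    assert "don't" in cleaned and "it's" in cleaned and " ," not in cleaned
    return cleaned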
| 40,379 | 45.736111 | 380 | py |
VLC-BERT | VLC-BERT-master/common/nlp/roberta/modeling_roberta.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch RoBERTa model. """
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import logging
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
from external.pytorch_pretrained_bert.modeling import (BertConfig, BertEmbeddings,
BertLayerNorm, BertModel,
BertPreTrainedModel, gelu)
logger = logging.getLogger(__name__)
ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP = {
'roberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-pytorch_model.bin",
'roberta-large': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-pytorch_model.bin",
'roberta-large-mnli': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-pytorch_model.bin",
}
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'roberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-config.json",
'roberta-large': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-config.json",
'roberta-large-mnli': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-config.json",
}
class RobertaEmbeddings(BertEmbeddings):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
def __init__(self, config):
super(RobertaEmbeddings, self).__init__(config)
self.padding_idx = 1
def forward(self, input_ids, token_type_ids=None, position_ids=None):
seq_length = input_ids.size(1)
if position_ids is None:
# Position numbers begin at padding_idx+1. Padding symbols are ignored.
# cf. fairseq's `utils.make_positions`
position_ids = torch.arange(self.padding_idx + 1, seq_length + self.padding_idx + 1, dtype=torch.long,
device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
return super(RobertaEmbeddings, self).forward(input_ids, token_type_ids=token_type_ids,
position_ids=position_ids)
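# Illustrative sketch (not part of the original module): RoBERTa position ids start
# at padding_idx + 1 = 2 rather than 0, mirroring the computation in the `forward`
# above. The fake input ids below are made up for demonstration only.
def _demo_roberta_position_ids():
    padding_idx = 1
    input_ids = torch.tensor([[0, 31414, 232, 2]])    # a fake encoded sequence
    seq_length = input_ids.size(1)
    position_ids = torch.arange(padding_idx + 1, seq_length + padding_idx + 1, dtype=torch.long)
    position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
    assert position_ids[0, 0].item() == 2
    return position_ids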
class RobertaConfig(BertConfig):
pretrained_config_archive_map = ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP
ROBERTA_START_DOCSTRING = r""" The RoBERTa model was proposed in
`RoBERTa: A Robustly Optimized BERT Pretraining Approach`_
by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer,
Veselin Stoyanov. It is based on Google's BERT model released in 2018.
It builds on BERT and modifies key hyperparameters, removing the next-sentence pretraining
objective and training with much larger mini-batches and learning rates.
This implementation is the same as BertModel with a tiny embeddings tweak as well as a setup for Roberta pretrained
models.
This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
refer to the PyTorch documentation for all matter related to general usage and behavior.
.. _`RoBERTa: A Robustly Optimized BERT Pretraining Approach`:
https://arxiv.org/abs/1907.11692
.. _`torch.nn.Module`:
https://pytorch.org/docs/stable/nn.html#module
Parameters:
config (:class:`~pytorch_transformers.RobertaConfig`): Model configuration class with all the parameters of the
model.
"""
ROBERTA_INPUTS_DOCSTRING = r"""
Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
To match pre-training, RoBERTa input sequence should be formatted with [CLS] and [SEP] tokens as follows:
(a) For sequence pairs:
``tokens: [CLS] is this jack ##son ##ville ? [SEP][SEP] no it is not . [SEP]``
(b) For single sequences:
``tokens: [CLS] the dog is hairy . [SEP]``
Fully encoded sequences or sequence pairs can be obtained using the RobertaTokenizer.encode function with
the ``add_special_tokens`` parameter set to ``True``.
See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
:func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1[``.
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""
class RobertaModel(BertModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the output of the last layer of the model.
**pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during Bert pretraining. This output is usually *not* a good summary
of the semantic content of the input, you're often better with averaging or pooling
the sequence of hidden-states for the whole input sequence.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
model = RobertaModel.from_pretrained('roberta-base')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
config_class = RobertaConfig
pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "roberta"
def __init__(self, config):
super(RobertaModel, self).__init__(config)
self.embeddings = RobertaEmbeddings(config)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, position_ids=None, head_mask=None):
if input_ids[:, 0].sum().item() != 0:
logger.warning("A sequence with no special tokens has been passed to the RoBERTa model. "
"This model requires special tokens in order to work. "
"Please specify add_special_tokens=True in your encoding.")
return super(RobertaModel, self).forward(input_ids, token_type_ids, attention_mask, position_ids, head_mask)
class RobertaForMaskedLM(BertPreTrainedModel):
r"""
**masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the masked language modeling loss.
Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Masked language modeling loss.
**prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
model = RobertaForMaskedLM.from_pretrained('roberta-base')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
outputs = model(input_ids, masked_lm_labels=input_ids)
loss, prediction_scores = outputs[:2]
"""
config_class = RobertaConfig
pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "roberta"
def __init__(self, config):
super(RobertaForMaskedLM, self).__init__(config)
self.roberta = RobertaModel(config)
self.lm_head = RobertaLMHead(config)
self.apply(self.init_weights)
self.tie_weights()
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.lm_head.decoder, self.roberta.embeddings.word_embeddings)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, position_ids=None,
head_mask=None):
outputs = self.roberta(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
outputs = (masked_lm_loss,) + outputs
return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
class RobertaLMHead(nn.Module):
"""Roberta Head for masked language modeling."""
def __init__(self, config):
super(RobertaLMHead, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = self.decoder(x) + self.bias
return x
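# Illustrative sketch (not part of the original module): the LM head projects hidden
# states back to vocabulary logits. The tiny config below is a stand-in namespace for
# demonstration, not a real RobertaConfig.
def _demo_roberta_lm_head():
    from types import SimpleNamespace
    tiny_config = SimpleNamespace(hidden_size=16, vocab_size=32, layer_norm_eps=1e-5)
    lm_head = RobertaLMHead(tiny_config)
    features = torch.randn(2, 5, 16)                  # (batch_size, seq_len, hidden_size)
    logits = lm_head(features)
    assert logits.shape == (2, 5, 32)
    return logits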
class RobertaForSequenceClassification(BertPreTrainedModel):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the sequence classification/regression loss.
Indices should be in ``[0, ..., config.num_labels]``.
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification (or regression if config.num_labels==1) loss.
**logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
Classification (or regression if config.num_labels==1) scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
model = RobertaForSequenceClassification.from_pretrained('roberta-base')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, logits = outputs[:2]
"""
config_class = RobertaConfig
pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "roberta"
def __init__(self, config):
super(RobertaForSequenceClassification, self).__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config)
self.classifier = RobertaClassificationHead(config)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
position_ids=None, head_mask=None):
outputs = self.roberta(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[2:]
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), logits, (hidden_states), (attentions)
class RobertaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super(RobertaClassificationHead, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
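# Illustrative sketch (not part of the original module): the classification head pools
# the <s> (first) token and projects it to `num_labels` logits. The tiny config below
# is a stand-in namespace for demonstration, not a real RobertaConfig.
def _demo_roberta_classification_head():
    from types import SimpleNamespace
    tiny_config = SimpleNamespace(hidden_size=16, hidden_dropout_prob=0.1, num_labels=3)
    head = RobertaClassificationHead(tiny_config)
    features = torch.randn(2, 5, 16)                  # (batch_size, seq_len, hidden_size)
    logits = head(features)
    assert logits.shape == (2, 3)
    return logits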
| 17,448 | 53.021672 | 134 | py |
VLC-BERT | VLC-BERT-master/common/nlp/bert/optimization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import logging
import math
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
logger = logging.getLogger(__name__)
class ConstantLRSchedule(LambdaLR):
""" Constant learning rate schedule.
"""
def __init__(self, optimizer, last_epoch=-1):
super(ConstantLRSchedule, self).__init__(optimizer, lambda _: 1.0, last_epoch=last_epoch)
class WarmupConstantSchedule(LambdaLR):
""" Linear warmup and then constant.
        Linearly increases the learning rate multiplier from 0. to 1. over `warmup_steps` training steps.
        Keeps the multiplier equal to 1. after `warmup_steps`.
"""
def __init__(self, optimizer, warmup_steps, last_epoch=-1):
self.warmup_steps = warmup_steps
super(WarmupConstantSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1.0, self.warmup_steps))
return 1.
class WarmupLinearSchedule(LambdaLR):
""" Linear warmup and then linear decay.
Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps.
Linearly decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps` steps.
"""
def __init__(self, optimizer, warmup_steps, t_total, last_epoch=-1):
self.warmup_steps = warmup_steps
self.t_total = t_total
super(WarmupLinearSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1, self.warmup_steps))
return max(0.0, float(self.t_total - step) / float(max(1.0, self.t_total - self.warmup_steps)))
class WarmupCosineSchedule(LambdaLR):
""" Linear warmup and then cosine decay.
Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps.
Decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps` steps following a cosine curve.
        If `cycles` is set to a value other than the default of 0.5, the learning rate follows that many full cosine periods after warmup instead.
"""
def __init__(self, optimizer, warmup_steps, t_total, cycles=.5, last_epoch=-1):
self.warmup_steps = warmup_steps
self.t_total = t_total
self.cycles = cycles
super(WarmupCosineSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1.0, self.warmup_steps))
# progress after warmup
progress = float(step - self.warmup_steps) / float(max(1, self.t_total - self.warmup_steps))
return max(0.0, 0.5 * (1. + math.cos(math.pi * float(self.cycles) * 2.0 * progress)))
class WarmupCosineWithHardRestartsSchedule(LambdaLR):
""" Linear warmup and then cosine cycles with hard restarts.
Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps.
        After warmup, the learning rate decays from 1. to 0. following a cosine curve; this decay is repeated
        `cycles` (default=1.) times, with a hard restart at the beginning of each cycle.
"""
def __init__(self, optimizer, warmup_steps, t_total, cycles=1., last_epoch=-1):
self.warmup_steps = warmup_steps
self.t_total = t_total
self.cycles = cycles
super(WarmupCosineWithHardRestartsSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1, self.warmup_steps))
# progress after warmup
progress = float(step - self.warmup_steps) / float(max(1, self.t_total - self.warmup_steps))
if progress >= 1.0:
return 0.0
return max(0.0, 0.5 * (1. + math.cos(math.pi * ((float(self.cycles) * progress) % 1.0))))
class AdamW(Optimizer):
""" Implements Adam algorithm with weight decay fix.
Parameters:
lr (float): learning rate. Default 1e-3.
        betas (tuple of 2 floats): Adam's beta parameters (b1, b2). Default: (0.9, 0.999)
        eps (float): Adam's epsilon. Default: 1e-6
weight_decay (float): Weight decay. Default: 0.0
correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True.
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True):
if lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1]))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
correct_bias=correct_bias)
super(AdamW, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
denom = exp_avg_sq.sqrt().add_(group['eps'])
step_size = group['lr']
                if group['correct_bias']:  # BERT-style training passes correct_bias=False and skips this correction
bias_correction1 = 1.0 - beta1 ** state['step']
bias_correction2 = 1.0 - beta2 ** state['step']
step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(exp_avg, denom, value=-step_size)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
# Add weight decay at the end (fixed version)
if group['weight_decay'] > 0.0:
p.data.add_(p.data, alpha=-group['lr'] * group['weight_decay'])
return loss
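# Usage sketch (illustrative only): pairing AdamW with WarmupLinearSchedule.
# The model, batch and hyper-parameter values below are arbitrary assumptions,
# not settings taken from the original training pipeline.
def _adamw_warmup_linear_demo():
    model = torch.nn.Linear(10, 2)
    optimizer = AdamW(model.parameters(), lr=1e-4, weight_decay=0.01)
    scheduler = WarmupLinearSchedule(optimizer, warmup_steps=10, t_total=100)
    for _ in range(5):
        loss = model(torch.randn(4, 10)).pow(2).mean()
        loss.backward()
        optimizer.step()       # parameter update with decoupled weight decay
        scheduler.step()       # advance the warmup/decay multiplier
        optimizer.zero_grad()
    return optimizer.param_groups[0]['lr']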
| 8,653 | 45.031915 | 130 | py |
VLC-BERT | VLC-BERT-master/common/metrics/eval_metric.py | import torch
import torch.distributed as distributed
class EvalMetric(object):
"""Base class for all evaluation metrics.
.. note::
This is a base class that provides common metric interfaces.
One should not use this class directly, but instead create new metric
classes that extend it.
Args
name (str): Name of this metric instance for display.
"""
def __init__(self, name, allreduce=False, num_replicas=1, **kwargs):
self.name = str(name)
self.allreduce=allreduce
self.num_replicas = num_replicas
self._kwargs = kwargs
self.reset()
def __str__(self):
return "EvalMetric: {}".format(dict(self.get_name_value()))
def update(self, outputs):
"""Updates the internal evaluation result.
Args
            outputs (dict of `torch.Tensor`): Model outputs used to update the metric.
"""
raise NotImplementedError()
def reset(self):
"""Resets the internal evaluation result to initial state."""
self.num_inst = torch.tensor(0.)
self.sum_metric = torch.tensor(0.)
def get(self):
"""Returns the current evaluation result.
Returns:
names (list of str): Name of the metrics.
values (list of float): Value of the evaluations.
"""
if self.num_inst.item() == 0:
return (self.name, float('nan'))
else:
if self.allreduce:
num_inst = self.num_inst.clone().cuda()
sum_metric = self.sum_metric.clone().cuda()
distributed.all_reduce(num_inst, op=distributed.ReduceOp.SUM)
distributed.all_reduce(sum_metric, op=distributed.ReduceOp.SUM)
metric_tensor = (sum_metric / num_inst).detach().cpu()
else:
metric_tensor = (self.sum_metric / self.num_inst).detach().cpu()
return (self.name, metric_tensor.item())
def get_name_value(self):
"""Returns zipped name and value pairs.
Returns
A (list of tuples): (name, value) tuple list.
"""
name, value = self.get()
if not isinstance(name, list):
name = [name]
if not isinstance(value, list):
value = [value]
return list(zip(name, value))
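# Usage sketch (illustrative only): the update()/get() contract of EvalMetric.
# The subclass below is hypothetical and the output keys 'logits' and 'label'
# are assumptions made for this example.
class _ExampleAccuracy(EvalMetric):
    def update(self, outputs):
        with torch.no_grad():
            logits, label = outputs['logits'], outputs['label']
            self.sum_metric += float((logits.argmax(dim=1) == label).sum().item())
            self.num_inst += logits.shape[0]
def _eval_metric_demo():
    metric = _ExampleAccuracy('ExampleAcc')
    metric.update({'logits': torch.tensor([[0.1, 0.9], [0.8, 0.2]]),
                   'label': torch.tensor([1, 1])})
    return metric.get()  # ('ExampleAcc', 0.5)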
| 2,371 | 33.376812 | 80 | py |
VLC-BERT | VLC-BERT-master/common/metrics/vqa_metrics.py | import torch
from .eval_metric import EvalMetric
class LossLogger(EvalMetric):
def __init__(self, output_name, display_name=None,
allreduce=False, num_replicas=1):
self.output_name = output_name
if display_name is None:
display_name = output_name
super(LossLogger, self).__init__(display_name, allreduce, num_replicas)
def update(self, outputs):
with torch.no_grad():
if self.output_name in outputs:
self.sum_metric += float(outputs[self.output_name].mean().item())
self.num_inst += 1
class SoftAccuracy(EvalMetric):
def __init__(self, allreduce=False, num_replicas=1):
super(SoftAccuracy, self).__init__('SoftAcc', allreduce, num_replicas)
def update(self, outputs):
with torch.no_grad():
cls_logits = outputs['label_logits']
label = outputs['label']
bs, num_classes = cls_logits.shape
batch_inds = torch.arange(bs, device=cls_logits.device)
self.sum_metric += float(label[batch_inds, cls_logits.argmax(1)].sum().item())
self.num_inst += cls_logits.shape[0]
| 1,174 | 31.638889 | 90 | py |
VLC-BERT | VLC-BERT-master/common/metrics/refcoco_metrics.py | import torch
from .eval_metric import EvalMetric
class LossLogger(EvalMetric):
def __init__(self, output_name, display_name=None,
allreduce=False, num_replicas=1):
self.output_name = output_name
if display_name is None:
display_name = output_name
super(LossLogger, self).__init__(display_name, allreduce, num_replicas)
def update(self, outputs):
with torch.no_grad():
if self.output_name in outputs:
self.sum_metric += float(outputs[self.output_name].mean().item())
self.num_inst += 1
class RefAccuracy(EvalMetric):
def __init__(self, allreduce=False, num_replicas=1):
super(RefAccuracy, self).__init__('RefAcc', allreduce, num_replicas)
def update(self, outputs):
with torch.no_grad():
cls_logits = outputs['label_logits']
label = outputs['label']
bs, _ = cls_logits.shape
batch_inds = torch.arange(bs, device=cls_logits.device)
self.sum_metric += float((label[batch_inds, cls_logits.argmax(1)] > 0.5).sum().item())
self.num_inst += cls_logits.shape[0]
class ClsAccuracy(EvalMetric):
def __init__(self, allreduce=False, num_replicas=1):
super(ClsAccuracy, self).__init__('ClsAcc', allreduce, num_replicas)
def update(self, outputs):
with torch.no_grad():
cls_logits = outputs['label_logits']
cls_pred = (cls_logits > 0).long()
label = outputs['label'].long()
keep = (label >= 0)
self.sum_metric += float((cls_pred[keep] == label[keep]).sum().item())
self.num_inst += keep.sum().item()
class ClsPosAccuracy(EvalMetric):
def __init__(self, allreduce=False, num_replicas=1):
super(ClsPosAccuracy, self).__init__('ClsPosAcc', allreduce, num_replicas)
def update(self, outputs):
with torch.no_grad():
cls_logits = outputs['label_logits']
cls_pred = (cls_logits > 0).long()
label = outputs['label'].long()
keep = (label == 1)
self.sum_metric += float((cls_pred[keep] == label[keep]).sum().item())
self.num_inst += keep.sum().item()
class ClsPosFraction(EvalMetric):
def __init__(self, allreduce=False, num_replicas=1):
super(ClsPosFraction, self).__init__('ClsPosFrac', allreduce, num_replicas)
def update(self, outputs):
with torch.no_grad():
label = outputs['label'].long()
num_pos = (label == 1).sum().item()
num_valid = (label >= 0).sum().item()
self.sum_metric += float(num_pos)
self.num_inst += float(num_valid)
| 2,715 | 33.379747 | 98 | py |
VLC-BERT | VLC-BERT-master/common/metrics/pretrain_metrics.py | import torch
from .eval_metric import EvalMetric
class LossLogger(EvalMetric):
def __init__(self, output_name, display_name=None,
allreduce=False, num_replicas=1):
self.output_name = output_name
if display_name is None:
display_name = output_name
super(LossLogger, self).__init__(display_name, allreduce, num_replicas)
def update(self, outputs):
with torch.no_grad():
if self.output_name in outputs:
self.sum_metric += float(outputs[self.output_name].mean().item())
self.num_inst += 1
class RelationshipAccuracy(EvalMetric):
def __init__(self, allreduce=False, num_replicas=1):
super(RelationshipAccuracy, self).__init__('RelAcc', allreduce, num_replicas)
def update(self, outputs):
with torch.no_grad():
logits = outputs['relationship_logits']
label = outputs['relationship_label']
self.sum_metric += float((logits.argmax(dim=1) == label).sum().item())
self.num_inst += logits.shape[0]
class MLMAccuracy(EvalMetric):
def __init__(self, allreduce=False, num_replicas=1):
super(MLMAccuracy, self).__init__('MLMAcc', allreduce, num_replicas)
def update(self, outputs):
with torch.no_grad():
logits = outputs['mlm_logits']
label = outputs['mlm_label']
keep = (label != -1)
if keep.sum() > 0:
self.sum_metric += float((logits[keep].argmax(dim=1) == label[keep]).sum().item())
self.num_inst += keep.sum().item()
class MLMAccuracyWVC(EvalMetric):
def __init__(self, allreduce=False, num_replicas=1):
super(MLMAccuracyWVC, self).__init__('MLMAccWVC', allreduce, num_replicas)
def update(self, outputs):
with torch.no_grad():
logits = outputs['mlm_logits_wvc']
label = outputs['mlm_label_wvc']
keep = (label != -1)
if keep.sum() > 0:
self.sum_metric += float((logits[keep].argmax(dim=1) == label[keep]).sum().item())
self.num_inst += keep.sum().item()
class MLMAccuracyAUX(EvalMetric):
def __init__(self, allreduce=False, num_replicas=1):
super(MLMAccuracyAUX, self).__init__('MLMAccAUX', allreduce, num_replicas)
def update(self, outputs):
with torch.no_grad():
logits = outputs['mlm_logits_aux']
label = outputs['mlm_label_aux']
keep = (label != -1)
if keep.sum() > 0:
self.sum_metric += float((logits[keep].argmax(dim=1) == label[keep]).sum().item())
self.num_inst += keep.sum().item()
class MVRCAccuracy(EvalMetric):
def __init__(self, allreduce=False, num_replicas=1):
super(MVRCAccuracy, self).__init__('MVRCAccuracy', allreduce, num_replicas)
def update(self, outputs):
with torch.no_grad():
logits = outputs['mvrc_logits']
label = outputs['mvrc_label']
keep = (label.sum(2) - 1.0).abs() < 0.1
if keep.sum() > 0:
self.sum_metric += float((logits[keep].argmax(dim=1) == label[keep].argmax(dim=1)).sum().item())
self.num_inst += keep.sum().item()
| 3,263 | 35.266667 | 112 | py |
VLC-BERT | VLC-BERT-master/common/metrics/composite_eval_metric.py | import numpy as np
from .eval_metric import EvalMetric
import torch
class CompositeEvalMetric(EvalMetric):
"""Manages multiple evaluation metrics.
Args:
metrics (list of EvalMetric): List of child metrics.
name (str): Name of this metric instance for display.
"""
def __init__(self, metrics=None, name='composite'):
super(CompositeEvalMetric, self).__init__(name)
if metrics is None:
metrics = []
self.metrics = metrics
def add(self, metric):
"""Adds a child metric.
Args:
metric (EvalMetric): A metric instance.
"""
self.metrics.append(metric)
def get_metric(self, index):
"""Returns a child metric.
Args:
index (int): Index of child metric in the list of metrics.
"""
try:
return self.metrics[index]
except IndexError:
            raise ValueError("Metric index {} is out of range [0, {})".format(
                index, len(self.metrics)))
def update(self, outputs):
"""Updates the internal evaluation result.
Args:
            outputs (dict of `torch.Tensor`): Model outputs passed on to every child metric.
"""
for metric in self.metrics:
metric.update(outputs)
def reset(self):
"""Resets the internal evaluation result to initial state."""
try:
for metric in self.metrics:
metric.reset()
except AttributeError:
pass
def get(self):
"""Returns the current evaluation result.
Returns:
names (list of str): Name of the metrics.
values (list of float): Value of the evaluations.
"""
names = []
values = []
for metric in self.metrics:
name, value = metric.get()
if isinstance(name, str):
name = [name]
if isinstance(value, (float, int, np.generic,torch.Tensor)):
value = [value]
names.extend(name)
values.extend(value)
return names, values
| 2,153 | 29.771429 | 80 | py |
VLC-BERT | VLC-BERT-master/common/metrics/vcr_metrics.py | import torch
from .eval_metric import EvalMetric
class LossLogger(EvalMetric):
def __init__(self, output_name, display_name=None,
allreduce=False, num_replicas=1):
self.output_name = output_name
if display_name is None:
display_name = output_name
super(LossLogger, self).__init__(display_name, allreduce, num_replicas)
def update(self, outputs):
with torch.no_grad():
if self.output_name in outputs:
self.sum_metric += float(outputs[self.output_name].mean().item())
self.num_inst += 1
class Accuracy(EvalMetric):
def __init__(self, allreduce=False, num_replicas=1):
super(Accuracy, self).__init__('Acc', allreduce, num_replicas)
def update(self, outputs):
with torch.no_grad():
_filter = outputs['label'] != -1
cls_logits = outputs['label_logits'][_filter]
label = outputs['label'][_filter]
if cls_logits.dim() == 1:
cls_logits = cls_logits.view((-1, 4))
label = label.view((-1, 4)).argmax(1)
self.sum_metric += float((cls_logits.argmax(dim=1) == label).sum().item())
self.num_inst += cls_logits.shape[0]
class AnsLoss(EvalMetric):
def __init__(self, allreduce=False, num_replicas=1):
super(AnsLoss, self).__init__('AnsLoss', allreduce, num_replicas)
def update(self, outputs):
with torch.no_grad():
self.sum_metric += float(outputs['ans_loss'].mean().item())
self.num_inst += 1
class CNNRegLoss(EvalMetric):
def __init__(self, allreduce=False, num_replicas=1):
super(CNNRegLoss, self).__init__('CNNRegLoss', allreduce, num_replicas)
def update(self, outputs):
with torch.no_grad():
if 'cnn_regularization_loss' in outputs:
self.sum_metric += float(outputs['cnn_regularization_loss'].mean().item())
self.num_inst += 1
class PositiveFraction(EvalMetric):
def __init__(self, allreduce=False, num_replicas=1):
super(PositiveFraction, self).__init__('PosFraction', allreduce, num_replicas)
def update(self, outputs):
with torch.no_grad():
self.sum_metric += float(outputs['positive_fraction'].mean().item())
self.num_inst += 1
class JointAccuracy(EvalMetric):
def __init__(self, allreduce=False, num_replicas=1):
super(JointAccuracy, self).__init__('JointAcc', allreduce, num_replicas)
def update(self, outputs):
a_cls_logits = outputs['answer_label_logits']
a_label = outputs['answer_label']
r_cls_logits = outputs['rationale_label_logits']
r_label = outputs['rationale_label']
self.sum_metric += float(((a_cls_logits.argmax(dim=1) == a_label)
& (r_cls_logits.argmax(dim=1) == r_label)).sum().item())
self.num_inst += a_cls_logits.shape[0]
| 2,953 | 35.02439 | 90 | py |
VLC-BERT | VLC-BERT-master/common/utils/multi_task_dataloader.py | from functools import reduce
import operator
from typing import List
from torch.utils.data import DataLoader
import sys
INT_MAX = sys.maxsize
def prod(iterable):
if len(list(iterable)) > 0:
return reduce(operator.mul, iterable)
else:
return 1
class MultiTaskDataLoader(object):
"""
    Multi-task DataLoader: the first loader is the master loader and defines the
    epoch length; the remaining loaders are cycled (re-created) as they run out.
"""
def __init__(self,
loaders: List[DataLoader]):
        assert len(loaders) > 1, "Less than 2 loaders!"
self.loaders = loaders
self.iters = [iter(loader) for loader in loaders]
self.lens = [len(loader) for loader in loaders]
self.global_idx_in_cycle = 0
def __iter__(self):
if self.global_idx_in_cycle > 0:
self.iters[0] = iter(self.loaders[0])
return self
def __next__(self):
output_tuple = (*next(self.iters[0]), )
for k, (loader, _iter) in enumerate(zip(self.loaders[1:], self.iters[1:])):
if hasattr(loader.batch_sampler.sampler, 'set_epoch'):
loader.batch_sampler.sampler.set_epoch(int(self.global_idx_in_cycle / self.lens[k+1]))
try:
output_tuple += (*next(_iter), )
except StopIteration:
_iter = iter(loader)
self.iters[k+1] = _iter
output_tuple += (*next(_iter), )
if self.global_idx_in_cycle < INT_MAX - 1:
self.global_idx_in_cycle += 1
else:
self.global_idx_in_cycle = 0
return output_tuple
def __len__(self):
return self.lens[0]
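# Usage sketch (illustrative only): a shorter auxiliary loader is silently
# re-iterated alongside the master loader. The toy datasets below are
# assumptions made for this example, not project data.
if __name__ == '__main__':
    import torch
    from torch.utils.data import TensorDataset
    master = DataLoader(TensorDataset(torch.arange(8).float().view(8, 1)), batch_size=2)
    aux = DataLoader(TensorDataset(torch.arange(4).float().view(4, 1)), batch_size=2)
    for batch in MultiTaskDataLoader([master, aux]):
        print([t.shape for t in batch])  # one master batch plus one aux batch per step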
| 1,619 | 26.931034 | 102 | py |
VLC-BERT | VLC-BERT-master/common/utils/build_attn_annot_okvqa.py | import json
import random
import numpy as np
from external.pytorch_pretrained_bert import BertTokenizer
import string
from nltk.corpus import stopwords
#nltk.download('stopwords')
DATASET = 'okvqa'
EXP_NAME = 'semqo'
MAX_COMMONSENSE_LEN = 5
RANDOM_SEED = 12345
random.seed(RANDOM_SEED)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
s = set(stopwords.words('english'))
def _load_json(path):
with open(path, 'r') as f:
return json.load(f)
def filename(exp_name):
return (exp_name[:-1]+ "." + exp_name[-1]).lower()
def build_automatic():
# Load expansions
# Load answers
    # If an answer token appears in an expansion sentence, that sentence gets a
    # high pre-normalisation weight (0.8); otherwise it keeps the low default (0.05)
questions = _load_json('data/coco/okvqa/OpenEnded_mscoco_train2014_questions.json')
questions = questions['questions']
annotations = _load_json('data/coco/okvqa/mscoco_train2014_annotations.json')
annotations = annotations['annotations']
for annot in annotations:
for question in questions:
if question['question_id'] == annot['question_id']:
annot['question'] = question['question']
break
direct_answers = []
for answer in annot['answers']:
direct_answers.append(answer['answer'])
annot['direct_answers'] = direct_answers
expansions = _load_json('data/coco/okvqa/commonsense/expansions/'+filename(EXP_NAME)+'_okvqa_train.json')
annot_size = 4000
annotations_subset = random.sample(annotations, annot_size)
attn_annot = {}
good_counter = 0
total_counter = 0
bad_capacity = 500
for annot in annotations_subset:
question_id = annot['question_id']
image_id = str(annot['image_id'])
direct_answers = annot['direct_answers']
exp = expansions['COCO_train2014_{:012d}.jpg'.format(annot['image_id'])][str(annot['question_id'])][0]
print(exp)
exp = exp.split('.')
exp = [e.strip() for e in exp]
exp = [e for e in exp if e != '']
if len(exp) > MAX_COMMONSENSE_LEN:
exp = exp[:MAX_COMMONSENSE_LEN]
else:
exp = exp + ['']*(MAX_COMMONSENSE_LEN-len(exp))
weights, good = auto_annotator(exp, direct_answers)
if not good and bad_capacity <= 0:
continue
if not good:
bad_capacity -= 1
if image_id not in attn_annot:
attn_annot[image_id] = {}
attn_annot[image_id][question_id] = weights
total_counter += 1
good_counter += 1 if good else 0
with open('data/coco/okvqa/'+EXP_NAME+'_okvqa_train_attn_annot_'+str(MAX_COMMONSENSE_LEN)+'.json', 'w') as f:
json.dump(attn_annot, f)
print('Good: {}'.format(good_counter))
print('Total: {}'.format(total_counter))
def auto_annotator(expansion_list, ans_list):
ans_text = ' '.join(ans_list)
ans_text = ans_text.translate(str.maketrans('', '', string.punctuation))
ans_text = ans_text.lower()
ans_tokens = tokenizer.tokenize(ans_text)
ans_tokens = [t for t in ans_tokens if t not in s]
final_weights = [0.05]*len(expansion_list)
for i, expansion in enumerate(expansion_list):
exp_text = expansion.translate(str.maketrans('', '', string.punctuation))
exp_text = exp_text.lower()
exp_tokens = tokenizer.tokenize(exp_text)
exp_tokens = [t for t in exp_tokens if t not in s]
for token in ans_tokens:
if token in exp_tokens:
final_weights[i] = 0.8
break
good = False
if np.sum(final_weights) > (0.05*len(expansion_list)):
final_weights = np.array(final_weights + [0.05])
final_weights = final_weights / np.sum(final_weights)
good = True
else:
final_weights = np.array(final_weights + [0.25])
final_weights = final_weights / np.sum(final_weights)
assert len(final_weights) == MAX_COMMONSENSE_LEN+1
return final_weights.tolist(), good
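# Usage sketch (illustrative only): expansions that mention an answer token
# receive the high pre-normalisation weight. The strings below are toy
# assumptions, not data from the OK-VQA expansion files.
def _auto_annotator_demo():
    expansions = ['the dog is playing fetch', 'a park bench', '', '', '']
    answers = ['fetch', 'ball']
    weights, good = auto_annotator(expansions, answers)
    return weights, good  # weights[0] is the largest entry and good is True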
if __name__ == '__main__':
build_automatic()
| 4,128 | 28.705036 | 113 | py |
VLC-BERT | VLC-BERT-master/common/utils/misc.py | import os
import numpy as np
import torch
import torch.nn.functional as F
import logging
def block_digonal_matrix(*blocks):
"""
Construct block diagonal matrix
    :param blocks: 2-D tensors to place on the diagonal; the output uses the
        device and dtype of the first block
    :return: block diagonal matrix
"""
assert len(blocks) > 0
rows = [block.shape[0] for block in blocks]
cols = [block.shape[1] for block in blocks]
out = torch.zeros((sum(rows), sum(cols)),
device=blocks[0].device,
dtype=blocks[0].dtype)
cur_row = 0
cur_col = 0
for block, row, col in zip(blocks, rows, cols):
out[cur_row:(cur_row + row), cur_col:(cur_col + col)] = block
cur_row += row
cur_col += col
return out
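# Usage sketch (illustrative only): two toy blocks produce a (3, 5) matrix with
# zeros everywhere off the blocks. The shapes are arbitrary assumptions.
def _block_diagonal_demo():
    a = torch.ones((2, 3))
    b = 2.0 * torch.ones((1, 2))
    return block_digonal_matrix(a, b)  # rows 0-1 hold a, row 2 holds b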
def print_and_log(string, logger=None):
print(string)
if logger is None:
logging.info(string)
else:
logger.info(string)
def summary_parameters(model, logger=None):
"""
Summary Parameters of Model
:param model: torch.nn.module_name
:param logger: logger
:return: None
"""
print_and_log('>> Trainable Parameters:', logger)
trainable_paramters = [(str(n), str(v.dtype), str(tuple(v.shape)), str(v.numel()))
for n, v in model.named_parameters() if v.requires_grad]
max_lens = [max([len(item) + 4 for item in col]) for col in zip(*trainable_paramters)]
raw_format = '|' + '|'.join(['{{:{}s}}'.format(max_len) for max_len in max_lens]) + '|'
raw_split = '-' * (sum(max_lens) + len(max_lens) + 1)
print_and_log(raw_split, logger)
print_and_log(raw_format.format('Name', 'Dtype', 'Shape', '#Params'), logger)
print_and_log(raw_split, logger)
for name, dtype, shape, number in trainable_paramters:
print_and_log(raw_format.format(name, dtype, shape, number), logger)
print_and_log(raw_split, logger)
num_trainable_params = sum([v.numel() for v in model.parameters() if v.requires_grad])
total_params = sum([v.numel() for v in model.parameters()])
non_trainable_params = total_params - num_trainable_params
print_and_log('>> {:25s}\t{:.2f}\tM'.format('# TrainableParams:', num_trainable_params / (1.0 * 10 ** 6)), logger)
print_and_log('>> {:25s}\t{:.2f}\tM'.format('# NonTrainableParams:', non_trainable_params / (1.0 * 10 ** 6)), logger)
print_and_log('>> {:25s}\t{:.2f}\tM'.format('# TotalParams:', total_params / (1.0 * 10 ** 6)), logger)
def clip_grad(named_parameters, max_norm, logger=logging, std_verbose=False, log_verbose=False):
"""Clips gradient norm of an iterable of parameters.
The norm is computed over all gradients together, as if they were
concatenated into a single vector. Gradients are modified in-place.
:param named_parameters: dict, named parameters of pytorch module
:param max_norm: float or int, max norm of the gradients
:param logger: logger to write verbose info
:param std_verbose: verbose info in stdout
:param log_verbose: verbose info in log
    :return: dict mapping each parameter name to the gradient norm of that parameter
"""
max_norm = float(max_norm)
parameters = [(n, p) for n, p in named_parameters if p.grad is not None]
total_norm = 0
param_to_norm = {}
param_to_shape = {}
for n, p in parameters:
param_norm = p.grad.data.norm(2)
total_norm += param_norm ** 2
param_to_norm[n] = param_norm
param_to_shape[n] = tuple(p.size())
if np.isnan(param_norm.item()):
raise ValueError("the param {} was null.".format(n))
total_norm = total_norm ** (1. / 2)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef.item() < 1:
logger.info('---Clip grad! Total norm: {:.3f}, clip coef: {:.3f}.'.format(total_norm, clip_coef))
for n, p in parameters:
p.grad.data.mul_(clip_coef)
if std_verbose:
print('---Total norm {:.3f} clip coef {:.3f}-----------------'.format(total_norm, clip_coef))
for name, norm in sorted(param_to_norm.items(), key=lambda x: -x[1]):
print("{:<60s}: {:.3f}, ({}: {})".format(name, norm, np.prod(param_to_shape[name]), param_to_shape[name]))
print('-------------------------------', flush=True)
if log_verbose:
logger.info('---Total norm {:.3f} clip coef {:.3f}-----------------'.format(total_norm, clip_coef))
for name, norm in sorted(param_to_norm.items(), key=lambda x: -x[1]):
logger.info("{:<60s}: {:.3f}, ({}: {})".format(name, norm, np.prod(param_to_shape[name]), param_to_shape[name]))
logger.info('-------------------------------')
return {name: norm.item() for name, norm in param_to_norm.items()}
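# Usage sketch (illustrative only): the model and max_norm below are arbitrary
# assumptions. clip_grad rescales gradients in place when the global norm
# exceeds max_norm and returns the per-parameter gradient norms.
def _clip_grad_demo():
    model = torch.nn.Linear(4, 2)
    loss = model(torch.randn(8, 4)).pow(2).mean()
    loss.backward()
    return clip_grad(model.named_parameters(), max_norm=1.0)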
def bn_fp16_half_eval(m):
classname = str(m.__class__)
if 'BatchNorm' in classname and (not m.training):
m.half()
def soft_cross_entropy(input, target, reduction='mean'):
"""
Cross entropy loss with input logits and soft target
:param input: Tensor, size: (N, C)
:param target: Tensor, size: (N, C)
:param reduction: 'none' or 'mean' or 'sum', default: 'mean'
:return: loss
"""
eps = 1.0e-1
# debug = False
valid = (target.sum(1) - 1).abs() < eps
# if debug:
# print('valid', valid.sum().item())
# print('all', valid.numel())
# print('non valid')
# print(target[valid == 0])
if valid.sum().item() == 0:
return input.new_zeros(())
if reduction == 'mean':
return (- F.log_softmax(input[valid], 1) * target[valid]).sum(1).mean(0)
elif reduction == 'sum':
return (- F.log_softmax(input[valid], 1) * target[valid]).sum()
elif reduction == 'none':
l = input.new_zeros((input.shape[0], ))
l[valid] = (- F.log_softmax(input[valid], 1) * target[valid]).sum(1)
return l
else:
raise ValueError('Not support reduction type: {}.'.format(reduction))
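# Usage sketch (illustrative only): soft targets must sum to 1 per row (rows
# that do not are ignored). The values below are arbitrary assumptions.
def _soft_cross_entropy_demo():
    logits = torch.tensor([[2.0, 0.5, 0.1]])
    soft_target = torch.tensor([[0.7, 0.3, 0.0]])
    return soft_cross_entropy(logits, soft_target)  # scalar loss tensor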
| 5,958 | 36.71519 | 124 | py |
VLC-BERT | VLC-BERT-master/common/utils/flatten.py | import torch
class Flattener(torch.nn.Module):
def __init__(self):
"""
        Flattens all dimensions after the batch dimension, i.e. output shape is (batch_size, -1)
"""
super(Flattener, self).__init__()
def forward(self, x):
return x.view(x.size(0), -1)
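# Usage sketch (illustrative only): collapses everything after the batch
# dimension; the input shape is an arbitrary assumption.
def _flattener_demo():
    x = torch.randn(2, 3, 4, 5)
    return Flattener()(x)  # shape (2, 60)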
| 269 | 19.769231 | 65 | py |
VLC-BERT | VLC-BERT-master/common/utils/bbox.py | import torch
def nonlinear_transform(ex_rois, gt_rois):
"""
compute bounding box regression targets from ex_rois to gt_rois
:param ex_rois: [k, 4] ([x1, y1, x2, y2])
:param gt_rois: [k, 4] (corresponding gt_boxes [x1, y1, x2, y2] )
:return: bbox_targets: [k, 4]
"""
assert ex_rois.shape[0] == gt_rois.shape[0], 'inconsistent rois number'
ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
ex_ctr_x = ex_rois[:, 0] + 0.5 * (ex_widths - 1.0)
ex_ctr_y = ex_rois[:, 1] + 0.5 * (ex_heights - 1.0)
gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
gt_ctr_x = gt_rois[:, 0] + 0.5 * (gt_widths - 1.0)
gt_ctr_y = gt_rois[:, 1] + 0.5 * (gt_heights - 1.0)
targets_dx = (gt_ctr_x - ex_ctr_x) / (ex_widths + 1e-6)
targets_dy = (gt_ctr_y - ex_ctr_y) / (ex_heights + 1e-6)
targets_dw = torch.log(gt_widths / (ex_widths).clamp(min=1e-6))
targets_dh = torch.log(gt_heights / ((ex_heights).clamp(min=1e-6)))
targets = torch.cat(
(targets_dx.view(-1, 1), targets_dy.view(-1, 1), targets_dw.view(-1, 1), targets_dh.view(-1, 1)), dim=-1)
return targets
def coordinate_embeddings(boxes, dim):
"""
Coordinate embeddings of bounding boxes
:param boxes: [K, 6] ([x1, y1, x2, y2, w_image, h_image])
:param dim: sin/cos embedding dimension
:return: [K, 4, 2 * dim]
"""
num_boxes = boxes.shape[0]
w = boxes[:, 4]
h = boxes[:, 5]
# transform to (x_c, y_c, w, h) format
boxes_ = boxes.new_zeros((num_boxes, 4))
boxes_[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2
boxes_[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2
boxes_[:, 2] = boxes[:, 2] - boxes[:, 0]
boxes_[:, 3] = boxes[:, 3] - boxes[:, 1]
boxes = boxes_
# position
pos = boxes.new_zeros((num_boxes, 4))
pos[:, 0] = boxes[:, 0] / w * 100
pos[:, 1] = boxes[:, 1] / h * 100
pos[:, 2] = boxes[:, 2] / w * 100
pos[:, 3] = boxes[:, 3] / h * 100
# sin/cos embedding
dim_mat = 1000 ** (torch.arange(dim, dtype=boxes.dtype, device=boxes.device) / dim)
sin_embedding = (pos.view((num_boxes, 4, 1)) / dim_mat.view((1, 1, -1))).sin()
cos_embedding = (pos.view((num_boxes, 4, 1)) / dim_mat.view((1, 1, -1))).cos()
return torch.cat((sin_embedding, cos_embedding), dim=-1)
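# Usage sketch (illustrative only): one box in [x1, y1, x2, y2, image_w, image_h]
# format; the coordinate values are arbitrary assumptions.
def _coordinate_embeddings_demo():
    boxes = torch.tensor([[10.0, 20.0, 110.0, 220.0, 640.0, 480.0]])
    return coordinate_embeddings(boxes, dim=16)  # shape (1, 4, 32)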
def bbox_iou_py_vectorized(boxes, query_boxes):
n_ = boxes.shape[0]
k_ = query_boxes.shape[0]
n_mesh, k_mesh = torch.meshgrid([torch.arange(n_), torch.arange(k_)])
n_mesh = n_mesh.contiguous().view(-1)
k_mesh = k_mesh.contiguous().view(-1)
boxes = boxes[n_mesh]
query_boxes = query_boxes[k_mesh]
x11, y11, x12, y12 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
x21, y21, x22, y22 = query_boxes[:, 0], query_boxes[:, 1], query_boxes[:, 2], query_boxes[:, 3]
xA = torch.max(x11, x21)
yA = torch.max(y11, y21)
xB = torch.min(x12, x22)
yB = torch.min(y12, y22)
interArea = torch.clamp(xB - xA + 1, min=0) * torch.clamp(yB - yA + 1, min=0)
boxAArea = (x12 - x11 + 1) * (y12 - y11 + 1)
boxBArea = (x22 - x21 + 1) * (y22 - y21 + 1)
iou = interArea / (boxAArea + boxBArea - interArea)
return iou.view(n_, k_).to(boxes.device)
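# Usage sketch (illustrative only): pairwise IoU between a single box and two
# query boxes; the coordinates are toy assumptions.
def _bbox_iou_demo():
    boxes = torch.tensor([[0.0, 0.0, 9.0, 9.0]])
    query_boxes = torch.tensor([[0.0, 0.0, 9.0, 9.0], [5.0, 5.0, 14.0, 14.0]])
    return bbox_iou_py_vectorized(boxes, query_boxes)  # shape (1, 2); first entry is 1.0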
| 3,289 | 33.631579 | 113 | py |
VLC-BERT | VLC-BERT-master/common/utils/load.py | import torch
import os
def smart_load_model_state_dict(model, state_dict):
parsed_state_dict = {}
for k, v in state_dict.items():
if k not in model.state_dict():
if k.startswith('module.'):
k = k[len('module.'):]
else:
k = 'module.' + k
if k in model.state_dict():
parsed_state_dict[k] = v
else:
raise ValueError('failed to match key of state dict smartly!')
model.load_state_dict(parsed_state_dict)
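# Usage sketch (illustrative only): simulates loading a checkpoint whose keys
# carry a 'module.' prefix (as saved from a DataParallel/DistributedDataParallel
# wrapper) into an unwrapped model. The toy model is an assumption.
def _smart_load_demo():
    model = torch.nn.Sequential(torch.nn.Linear(4, 4))
    wrapped_state = {'module.' + k: v for k, v in model.state_dict().items()}
    smart_load_model_state_dict(model, wrapped_state)
    return model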
def smart_resume(model, optimizer, validation_monitor, config, model_prefix, logger):
if config.TRAIN.RESUME:
print(('continue training from ', config.TRAIN.BEGIN_EPOCH))
# load model
model_filename = '{}-{:04d}.model'.format(model_prefix, config.TRAIN.BEGIN_EPOCH - 1)
check_point = torch.load(model_filename, map_location=lambda storage, loc: storage)
# model.load_state_dict(check_point['state_dict'])
smart_load_model_state_dict(model, check_point['state_dict'])
optimizer.load_state_dict(check_point['optimizer'])
if 'validation_monitor' in check_point:
validation_monitor.load_state_dict(check_point['validation_monitor'])
print(
'Best Val {}: {}, Epoch: {}'.format(validation_monitor.host_metric_name,
validation_monitor.best_val,
validation_monitor.best_epoch)
)
elif config.TRAIN.AUTO_RESUME:
for epoch in range(config.TRAIN.END_EPOCH, config.TRAIN.BEGIN_EPOCH, -1):
model_filename = '{}-{:04d}.model'.format(model_prefix, epoch - 1)
if os.path.exists(model_filename):
config.TRAIN.BEGIN_EPOCH = epoch
check_point = torch.load(model_filename, map_location=lambda storage, loc: storage)
# model.load_state_dict(check_point['state_dict'])
smart_load_model_state_dict(model, check_point['state_dict'])
optimizer.load_state_dict(check_point['optimizer'])
if 'validation_monitor' in check_point:
validation_monitor.load_state_dict(check_point['validation_monitor'])
print(
'Best Val {}: {}, Epoch: {}'.format(validation_monitor.host_metric_name,
validation_monitor.best_val,
validation_monitor.best_epoch)
)
logger.info("Auto continue training from {0}".format(model_filename))
print("Auto continue training from {0}".format(model_filename))
break
def smart_partial_load_model_state_dict(model, state_dict, vocab_size=3):
parsed_state_dict = {}
non_match_keys = []
pretrained_keys = []
for k, v in state_dict.items():
if k not in model.state_dict():
if k.startswith('module.'):
k = k[len('module.'):]
else:
k = 'module.' + k
if k in model.state_dict() and not k.startswith('module.final_mlp'):
parsed_state_dict[k] = v
pretrained_keys.append(k)
else:
non_match_keys.append(k)
# raise ValueError('failed to match key of state dict smartly!')
non_pretrain_keys = [k for k in model.state_dict().keys() if k not in pretrained_keys]
print("[Partial Load] partial load state dict of keys: {}".format(parsed_state_dict.keys()))
print("[Partial Load] non matched keys: {}".format(non_match_keys))
print("[Partial Load] non pretrain keys: {}".format(non_pretrain_keys))
new_state_dict = model.state_dict()
new_state_dict.update(parsed_state_dict)
token_type_embeddings_weight = new_state_dict['module.vlbert.token_type_embeddings.weight']
if vocab_size != token_type_embeddings_weight.size(0):
print("[Load Info] Expaded token type vocab size, extending embeddings")
token_type_embeddings_weight_new = torch.zeros(vocab_size, token_type_embeddings_weight.size(1))
for i in range(vocab_size):
if i < token_type_embeddings_weight.size(0):
token_type_embeddings_weight_new[i] = token_type_embeddings_weight[i]
else:
# for all new vocab items, initialize as if it is _text_ type token
token_type_embeddings_weight_new[i] = token_type_embeddings_weight[0]
new_state_dict['module.vlbert.token_type_embeddings.weight'] = token_type_embeddings_weight_new
model.load_state_dict(new_state_dict)
| 4,708 | 47.546392 | 104 | py |
VLC-BERT | VLC-BERT-master/common/utils/masked_softmax.py | import torch
def masked_softmax(vector: torch.Tensor, mask: torch.Tensor, dim: int = -1) -> torch.Tensor:
"""
``torch.nn.functional.softmax(vector)`` does not work if some elements of ``vector`` should be
masked. This performs a softmax on just the non-masked portions of ``vector``. Passing
``None`` in for the mask is also acceptable; you'll just get a regular softmax.
``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is
broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will
unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask,
do it yourself before passing the mask into this function.
In the case that the input vector is completely masked, this function returns an array
of ``0.0``. This behavior may cause ``NaN`` if this is used as the last layer of a model
that uses categorical cross-entropy loss.
"""
if mask is None:
result = torch.nn.functional.softmax(vector, dim=dim)
else:
mask = mask.type(vector.dtype)
while mask.dim() < vector.dim():
mask = mask.unsqueeze(1)
# To limit numerical errors from large vector elements outside the mask, we zero these out.
result = torch.nn.functional.softmax(vector * mask, dim=dim)
result = result * mask
result = result / (result.sum(dim=dim, keepdim=True) + (1e-7 if vector.dtype == torch.half else 1e-13))
return result
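# Usage sketch (illustrative only): the masked position receives exactly zero
# probability. The scores and mask below are toy assumptions.
def _masked_softmax_demo():
    scores = torch.tensor([[1.0, 2.0, 3.0]])
    mask = torch.tensor([[1.0, 1.0, 0.0]])
    return masked_softmax(scores, mask)  # probability mass over the first two entries only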
| 1,533 | 50.133333 | 111 | py |
VLC-BERT | VLC-BERT-master/common/utils/pad_sequence.py | import torch
def pad_sequence(sequence, lengths):
"""
    :param sequence: [sum(b_i), .....] flattened sequence
    :param lengths: [b1, b2, b3, ...] lengths that sum to the first dimension of `sequence`
    :return: [len(lengths), max(lengths), .....] zero-padded tensor
"""
output = sequence.new_zeros(len(lengths), max(lengths), *sequence.shape[1:])
start = 0
for i, diff in enumerate(lengths):
if diff > 0:
output[i, :diff] = sequence[start:(start + diff)]
start += diff
return output
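# Usage sketch (illustrative only): re-batches a flattened sequence of 2 + 3
# items into a zero-padded (2, 3) tensor. The values are toy assumptions.
def _pad_sequence_demo():
    flat = torch.arange(5).float()
    return pad_sequence(flat, lengths=[2, 3])  # row 0 is [0, 1, 0], row 1 is [2, 3, 4]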
| 480 | 25.722222 | 80 | py |
VLC-BERT | VLC-BERT-master/common/utils/build_attn_annot_aokvqa.py | import json
import random
import numpy as np
from external.pytorch_pretrained_bert import BertTokenizer
import string
from nltk.corpus import stopwords
#nltk.download('stopwords')
DATASET = 'aokvqa'
EXP_NAME = 'semqo'
MAX_COMMONSENSE_LEN = 5
RANDOM_SEED = 12345
random.seed(RANDOM_SEED)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
s = set(stopwords.words('english'))
def _load_json(path):
with open(path, 'r') as f:
return json.load(f)
def filename(exp_name):
return (exp_name[:-1]+ "." + exp_name[-1]).lower()
def build_automatic():
# Load expansions
# Load answers
    # If an answer token appears in an expansion sentence, that sentence gets a
    # high pre-normalisation weight (0.8); otherwise it keeps the low default (0.05)
if DATASET == 'aokvqa':
annotations = _load_json('data/coco/aokvqa/aokvqa_v1p0_train.json')
expansions = _load_json('data/coco/aokvqa/commonsense/expansions/'+filename(EXP_NAME)+'_aokvqa_train.json')
annot_size = 4000
annotations_subset = random.sample(annotations, annot_size)
attn_annot = {}
good_counter = 0
total_counter = 0
bad_capacity = 500
for annot in annotations_subset:
question_id = annot['question_id']
image_id = str(annot['image_id'])
direct_answers = annot['direct_answers']
exp = expansions['{:012d}.jpg'.format(annot['image_id'])][str(annot['question_id'])][0]
exp = exp.split('.')
exp = [e.strip() for e in exp]
exp = [e for e in exp if e != '']
if len(exp) > MAX_COMMONSENSE_LEN:
exp = exp[:MAX_COMMONSENSE_LEN]
else:
exp = exp + ['']*(MAX_COMMONSENSE_LEN-len(exp))
weights, good = auto_annotator(exp, direct_answers)
if not good and bad_capacity <= 0:
continue
if not good:
bad_capacity -= 1
if image_id not in attn_annot:
attn_annot[image_id] = {}
attn_annot[image_id][question_id] = weights
total_counter += 1
good_counter += 1 if good else 0
with open('data/coco/aokvqa/'+EXP_NAME+'_aokvqa_train_attn_annot_'+str(MAX_COMMONSENSE_LEN)+'.json', 'w') as f:
json.dump(attn_annot, f)
print('Good: {}'.format(good_counter))
print('Total: {}'.format(total_counter))
def auto_annotator(expansion_list, ans_list):
ans_text = ' '.join(ans_list)
ans_text = ans_text.translate(str.maketrans('', '', string.punctuation))
ans_text = ans_text.lower()
ans_tokens = tokenizer.tokenize(ans_text)
ans_tokens = [t for t in ans_tokens if t not in s]
final_weights = [0.05]*len(expansion_list)
for i, expansion in enumerate(expansion_list):
exp_text = expansion.translate(str.maketrans('', '', string.punctuation))
exp_text = exp_text.lower()
exp_tokens = tokenizer.tokenize(exp_text)
exp_tokens = [t for t in exp_tokens if t not in s]
for token in ans_tokens:
if token in exp_tokens:
final_weights[i] = 0.8
break
good = False
if np.sum(final_weights) > (0.05*len(expansion_list)):
final_weights = np.array(final_weights + [0.05])
final_weights = final_weights / np.sum(final_weights)
good = True
else:
final_weights = np.array(final_weights + [0.25])
final_weights = final_weights / np.sum(final_weights)
assert len(final_weights) == MAX_COMMONSENSE_LEN+1
return final_weights.tolist(), good
if __name__ == '__main__':
build_automatic()
| 3,554 | 28.139344 | 115 | py |
VLC-BERT | VLC-BERT-master/common/utils/clip_pad.py | import torch
def clip_pad_images(tensor, pad_shape, pad=0):
"""
    Clip and pad an image tensor to the target spatial size.
    :param tensor: [c, H, W]
    :param pad_shape: [c, h, w] (only the spatial sizes h and w are used)
    :return: [c, h, w]
"""
if not isinstance(tensor, torch.Tensor):
tensor = torch.as_tensor(tensor)
H, W = tensor.shape[1:]
h = pad_shape[1]
w = pad_shape[2]
tensor_ret = torch.zeros((tensor.shape[0], h, w), dtype=tensor.dtype) + pad
tensor_ret[:, :min(h, H), :min(w, W)] = tensor[:, :min(h, H), :min(w, W)]
return tensor_ret
def clip_pad_boxes(tensor, pad_length, pad=0):
"""
    Clip or pad the box tensor to exactly `pad_length` rows.
    :param tensor: [k, d]
    :param pad_length: K, the target number of rows
    :return: [K, d]
"""
if not isinstance(tensor, torch.Tensor):
tensor = torch.as_tensor(tensor)
k = tensor.shape[0]
d = tensor.shape[1]
K = pad_length
tensor_ret = torch.zeros((K, d), dtype=tensor.dtype) + pad
tensor_ret[:min(k, K), :] = tensor[:min(k, K), :]
return tensor_ret
def clip_pad_1d(tensor, pad_length, pad=0):
if not isinstance(tensor, torch.Tensor):
tensor = torch.as_tensor(tensor)
tensor_ret = torch.zeros((pad_length, ), dtype=tensor.dtype) + pad
tensor_ret[:min(tensor.shape[0], pad_length)] = tensor[:min(tensor.shape[0], pad_length)]
return tensor_ret
def clip_pad_2d(tensor, pad_shape, pad=0):
if not isinstance(tensor, torch.Tensor):
tensor = torch.as_tensor(tensor)
tensor_ret = torch.zeros(*pad_shape, dtype=tensor.dtype) + pad
tensor_ret[:min(tensor.shape[0], pad_shape[0]), :min(tensor.shape[1], pad_shape[1])] \
= tensor[:min(tensor.shape[0], pad_shape[0]), :min(tensor.shape[1], pad_shape[1])]
return tensor_ret
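# Usage sketch (illustrative only): a (3, 5) tensor clipped to 4 columns and
# padded to 4 rows. The shapes and pad value are arbitrary assumptions.
def _clip_pad_2d_demo():
    t = torch.ones((3, 5))
    return clip_pad_2d(t, pad_shape=(4, 4), pad=0)  # shape (4, 4); last row is zeros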
| 1,738 | 28.982759 | 93 | py |
VLC-BERT | VLC-BERT-master/common/utils/mask.py | from skimage.draw import polygon
import torch
def generate_instance_mask(seg_polys, box, mask_size=(14, 14), dtype=torch.float32, copy=True):
"""
Generate instance mask from polygon
    :param seg_polys: list of torch.Tensor, each (N, 2), (x, y) coordinates of the N vertices of a segmented foreground polygon
    :param box: array-like, (4, ), (xmin, ymin, xmax, ymax), instance bounding box
    :param mask_size: tuple, (mask_height, mask_width)
:param dtype: data type of generated mask
:param copy: whether copy seg_polys to a new tensor first
:return: torch.Tensor, of mask_size, instance mask
"""
mask = torch.zeros(mask_size, dtype=dtype)
w_ratio = float(mask_size[0]) / (box[2] - box[0] + 1)
h_ratio = float(mask_size[1]) / (box[3] - box[1] + 1)
# import IPython
# IPython.embed()
for seg_poly in seg_polys:
if copy:
seg_poly = seg_poly.detach().clone()
seg_poly = seg_poly.type(torch.float32)
seg_poly[:, 0] = (seg_poly[:, 0] - box[0]) * w_ratio
seg_poly[:, 1] = (seg_poly[:, 1] - box[1]) * h_ratio
rr, cc = polygon(seg_poly[:, 1].clamp(min=0, max=mask_size[1] - 1),
seg_poly[:, 0].clamp(min=0, max=mask_size[0] - 1))
mask[rr, cc] = 1
return mask
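# Usage sketch (illustrative only): rasterises a toy triangle (in the same
# coordinate space as the box) into the default 14x14 mask. The polygon and box
# coordinates are arbitrary assumptions.
def _instance_mask_demo():
    triangle = torch.tensor([[0.0, 0.0], [10.0, 0.0], [10.0, 10.0]])
    return generate_instance_mask([triangle], box=[0, 0, 13, 13])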
| 1,282 | 33.675676 | 106 | py |
VLC-BERT | VLC-BERT-master/common/lib/roi_pooling/roi_pool.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from . import C_ROIPooling
class _ROIPool(Function):
@staticmethod
def forward(ctx, input, rois, output_size, spatial_scale):
ctx.output_size = _pair(output_size)
ctx.spatial_scale = spatial_scale
ctx.input_shape = input.size()
output, argmax = C_ROIPooling.roi_pool_forward(
input, rois, spatial_scale, output_size[0], output_size[1]
)
ctx.save_for_backward(input, rois, argmax)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
input, rois, argmax = ctx.saved_tensors
output_size = ctx.output_size
spatial_scale = ctx.spatial_scale
bs, ch, h, w = ctx.input_shape
grad_input = C_ROIPooling.roi_pool_backward(
grad_output,
input,
rois,
argmax,
spatial_scale,
output_size[0],
output_size[1],
bs,
ch,
h,
w,
)
return grad_input, None, None, None
roi_pool = _ROIPool.apply
class ROIPool(nn.Module):
def __init__(self, output_size, spatial_scale):
"""
:param output_size: e.g. (3,3)
:param spatial_scale: e.g. 1.0/16
"""
super(ROIPool, self).__init__()
self.output_size = output_size
self.spatial_scale = spatial_scale
def forward(self, input, rois):
"""
:param input: the input features [B C H W]
:param rois: [k, 5] : (im_index, x1, y1, x2, y2)
:return: pooled features (K C H W), K = k
"""
return roi_pool(input.float(), rois.float(), self.output_size, self.spatial_scale)
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "output_size=" + str(self.output_size)
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
tmpstr += ")"
return tmpstr
| 2,174 | 28.794521 | 90 | py |
VLC-BERT | VLC-BERT-master/common/lib/roi_pooling/roi_align.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from . import C_ROIPooling
class _ROIAlign(Function):
@staticmethod
def forward(ctx, input, rois, output_size, spatial_scale, sampling_ratio):
ctx.save_for_backward(rois)
ctx.output_size = _pair(output_size)
ctx.spatial_scale = spatial_scale
ctx.sampling_ratio = sampling_ratio
ctx.input_shape = input.size()
output = C_ROIPooling.roi_align_forward(
input, rois, spatial_scale, output_size[0], output_size[1], sampling_ratio
)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
rois, = ctx.saved_tensors
output_size = ctx.output_size
spatial_scale = ctx.spatial_scale
sampling_ratio = ctx.sampling_ratio
bs, ch, h, w = ctx.input_shape
grad_input = C_ROIPooling.roi_align_backward(
grad_output,
rois,
spatial_scale,
output_size[0],
output_size[1],
bs,
ch,
h,
w,
sampling_ratio,
)
return grad_input, None, None, None, None
roi_align = _ROIAlign.apply
class ROIAlign(nn.Module):
def __init__(self, output_size, spatial_scale, sampling_ratio=1):
"""
:param output_size: e.g. (3,3)
:param spatial_scale: e.g. 1.0/16
:param sampling_ratio: e.g. 1
"""
super(ROIAlign, self).__init__()
self.output_size = output_size
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
def forward(self, input, rois):
"""
:param input: the input features [B C H W]
:param rois: [k, 5]: (im_index, x1, y1, x2, y2)
:return: pooled features [K C H W], K = k
"""
return roi_align(
input.float(), rois.float(), self.output_size, self.spatial_scale, self.sampling_ratio
)
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "output_size=" + str(self.output_size)
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
tmpstr += ")"
return tmpstr
| 2,468 | 30.253165 | 98 | py |
VLC-BERT | VLC-BERT-master/common/lib/roi_pooling/setup.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#!/usr/bin/env python
import glob
import os
import torch
from setuptools import find_packages
from setuptools import setup
from torch.utils.cpp_extension import CUDA_HOME
from torch.utils.cpp_extension import CppExtension
from torch.utils.cpp_extension import CUDAExtension
requirements = ["torch", "torchvision"]
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = this_dir
main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
sources = main_file + source_cpu
extension = CppExtension
extra_compile_args = {"cxx": []}
define_macros = []
if torch.cuda.is_available() and CUDA_HOME is not None:
extension = CUDAExtension
sources += source_cuda
define_macros += [("WITH_CUDA", None)]
extra_compile_args["nvcc"] = [
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
]
sources = [os.path.join(extensions_dir, s) for s in sources]
include_dirs = [extensions_dir]
ext_modules = [
extension(
"C_ROIPooling",
sources,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
setup(
name="C_ROIPooling",
version="0.1",
description="ROIPooling in C++ or CUDA",
ext_modules=get_extensions(),
cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
| 1,799 | 26.272727 | 73 | py |
VLC-BERT | VLC-BERT-master/common/lib/roi_pooling/debug.py | import torch
from roi_pool import ROIPool
from roi_align import ROIAlign
align = ROIAlign(output_size=(3, 3), spatial_scale=1.0, sampling_ratio=1)
pool = ROIPool(output_size=(3, 3), spatial_scale=1.0)
device = torch.device("cuda:0")
feature = torch.arange(81*2*3).view((2,3,9,9)).float().to(device)
rois = torch.Tensor([[0,0,0,9,9],[1,0,0,9,9],[1,0,0,7,7]]).to(device)
pooled = pool(feature,rois)
aligned = align(feature,rois)
import IPython
IPython.embed()
| 463 | 24.777778 | 73 | py |
VLC-BERT | VLC-BERT-master/viz/bertviz/attention.py | import torch
from collections import defaultdict
def get_attention(model, model_type, tokenizer, sentence_a, sentence_b=None, include_queries_and_keys=False):
"""Compute representation of attention to pass to the d3 visualization
Args:
model: pytorch-transformers model
model_type: type of model. Valid values 'bert', 'gpt2', 'xlnet', 'roberta'
tokenizer: pytorch-transformers tokenizer
sentence_a: Sentence A string
sentence_b: Sentence B string
include_queries_and_keys: Indicates whether to include queries/keys in results
Returns:
Dictionary of attn representations with the structure:
{
'all': All attention (source = AB, target = AB)
'aa': Sentence A self-attention (source = A, target = A) (if sentence_b is not None)
'bb': Sentence B self-attention (source = B, target = B) (if sentence_b is not None)
'ab': Sentence A -> Sentence B attention (source = A, target = B) (if sentence_b is not None)
'ba': Sentence B -> Sentence A attention (source = B, target = A) (if sentence_b is not None)
}
where each value is a dictionary:
{
'left_text': list of source tokens, to be displayed on the left of the vis
'right_text': list of target tokens, to be displayed on the right of the vis
'attn': list of attention matrices, one for each layer. Each has shape [num_heads, source_seq_len, target_seq_len]
'queries' (optional): list of query vector arrays, one for each layer. Each has shape (num_heads, source_seq_len, vector_size)
'keys' (optional): list of key vector arrays, one for each layer. Each has shape (num_heads, target_seq_len, vector_size)
}
"""
if model_type not in ('bert', 'gpt2', 'xlnet', 'roberta'):
raise ValueError("Invalid model type:", model_type)
if not sentence_a:
raise ValueError("Sentence A is required")
is_sentence_pair = bool(sentence_b)
if is_sentence_pair and model_type not in ('bert', 'roberta', 'xlnet'):
raise ValueError(f'Model {model_type} does not support sentence pairs')
if is_sentence_pair and model_type == 'xlnet':
raise NotImplementedError("Sentence-pair inputs for XLNet not currently supported.")
# Prepare inputs to model
tokens_a = None
tokens_b = None
token_type_ids = None
if not is_sentence_pair: # Single sentence
if model_type in ('bert', 'roberta'):
tokens_a = [tokenizer.cls_token] + tokenizer.tokenize(sentence_a) + [tokenizer.sep_token]
elif model_type == 'xlnet':
tokens_a = tokenizer.tokenize(sentence_a) + [tokenizer.sep_token] + [tokenizer.cls_token]
else:
tokens_a = tokenizer.tokenize(sentence_a)
else:
if model_type == 'bert':
tokens_a = [tokenizer.cls_token] + tokenizer.tokenize(sentence_a) + [tokenizer.sep_token]
tokens_b = tokenizer.tokenize(sentence_b) + [tokenizer.sep_token]
token_type_ids = torch.LongTensor([[0] * len(tokens_a) + [1] * len(tokens_b)])
elif model_type == 'roberta':
tokens_a = [tokenizer.cls_token] + tokenizer.tokenize(sentence_a) + [tokenizer.sep_token]
tokens_b = [tokenizer.sep_token] + tokenizer.tokenize(sentence_b) + [tokenizer.sep_token]
# Roberta doesn't use token type embeddings per https://github.com/huggingface/pytorch-transformers/blob/master/pytorch_transformers/convert_roberta_checkpoint_to_pytorch.py
else:
tokens_b = tokenizer.tokenize(sentence_b)
token_ids = tokenizer.convert_tokens_to_ids(tokens_a + (tokens_b if tokens_b else []))
tokens_tensor = torch.tensor(token_ids).unsqueeze(0)
# Call model to get attention data
model.eval()
if token_type_ids is not None:
output = model(tokens_tensor, token_type_ids=token_type_ids)
else:
output = model(tokens_tensor)
attn_data_list = output[-1]
# Populate map with attn data and, optionally, query, key data
attn_dict = defaultdict(list)
if include_queries_and_keys:
queries_dict = defaultdict(list)
keys_dict = defaultdict(list)
if is_sentence_pair:
slice_a = slice(0, len(tokens_a)) # Positions corresponding to sentence A in input
slice_b = slice(len(tokens_a), len(tokens_a) + len(tokens_b)) # Position corresponding to sentence B in input
for layer, attn_data in enumerate(attn_data_list):
# Process attention
attn = attn_data['attn'][0] # assume batch_size=1; shape = [num_heads, source_seq_len, target_seq_len]
attn_dict['all'].append(attn.tolist())
if is_sentence_pair:
attn_dict['aa'].append(attn[:, slice_a, slice_a].tolist()) # Append A->A attention for layer, across all heads
attn_dict['bb'].append(attn[:, slice_b, slice_b].tolist()) # Append B->B attention for layer, across all heads
attn_dict['ab'].append(attn[:, slice_a, slice_b].tolist()) # Append A->B attention for layer, across all heads
attn_dict['ba'].append(attn[:, slice_b, slice_a].tolist()) # Append B->A attention for layer, across all heads
# Process queries and keys
if include_queries_and_keys:
queries = attn_data['queries'][0] # assume batch_size=1; shape = [num_heads, seq_len, vector_size]
keys = attn_data['keys'][0] # assume batch_size=1; shape = [num_heads, seq_len, vector_size]
queries_dict['all'].append(queries.tolist())
keys_dict['all'].append(keys.tolist())
if is_sentence_pair:
queries_dict['a'].append(queries[:, slice_a, :].tolist())
keys_dict['a'].append(keys[:, slice_a, :].tolist())
queries_dict['b'].append(queries[:, slice_b, :].tolist())
keys_dict['b'].append(keys[:, slice_b, :].tolist())
tokens_a = format_special_chars(tokens_a)
if tokens_b:
tokens_b = format_special_chars(tokens_b)
if model_type != 'gpt2':
tokens_a = format_delimiters(tokens_a, tokenizer)
if tokens_b:
tokens_b = format_delimiters(tokens_b, tokenizer)
results = {
'all': {
'attn': attn_dict['all'],
'left_text': tokens_a + (tokens_b if tokens_b else []),
'right_text': tokens_a + (tokens_b if tokens_b else [])
}
}
if is_sentence_pair:
results.update({
'aa': {
'attn': attn_dict['aa'],
'left_text': tokens_a,
'right_text': tokens_a
},
'bb': {
'attn': attn_dict['bb'],
'left_text': tokens_b,
'right_text': tokens_b
},
'ab': {
'attn': attn_dict['ab'],
'left_text': tokens_a,
'right_text': tokens_b
},
'ba': {
'attn': attn_dict['ba'],
'left_text': tokens_b,
'right_text': tokens_a
}
})
if include_queries_and_keys:
results['all'].update({
'queries': queries_dict['all'],
'keys': keys_dict['all'],
})
if is_sentence_pair:
results['aa'].update({
'queries': queries_dict['a'],
'keys': keys_dict['a'],
})
results['bb'].update({
'queries': queries_dict['b'],
'keys': keys_dict['b'],
})
results['ab'].update({
'queries': queries_dict['a'],
'keys': keys_dict['b'],
})
results['ba'].update({
'queries': queries_dict['b'],
'keys': keys_dict['a'],
})
return results
def format_special_chars(tokens):
return [t.replace('Ġ', ' ').replace('▁', ' ') for t in tokens]
def format_delimiters(tokens, tokenizer):
formatted_tokens = []
for t in tokens:
if tokenizer.sep_token:
t = t.replace(tokenizer.sep_token, '[SEP]')
if tokenizer.cls_token:
t = t.replace(tokenizer.cls_token, '[CLS]')
formatted_tokens.append(t)
return formatted_tokens | 8,283 | 43.778378 | 185 | py |
VLC-BERT | VLC-BERT-master/scripts/launch.py | r"""
`torch.distributed.launch` is a module that spawns up multiple distributed
training processes on each of the training nodes.
The utility can be used for single-node distributed training, in which one or
more processes per node will be spawned. The utility can be used for either
CPU training or GPU training. If the utility is used for GPU training,
each distributed process will be operating on a single GPU. This can achieve
well-improved single-node training performance. It can also be used in
multi-node distributed training, by spawning up multiple processes on each node
for well-improved multi-node distributed training performance as well.
This will especially be beneficial for systems with multiple Infiniband
interfaces that have direct-GPU support, since all of them can be utilized for
aggregated communication bandwidth.
In both cases of single-node distributed training or multi-node distributed
training, this utility will launch the given number of processes per node
(``--nproc_per_node``). If used for GPU training, this number needs to be less
than or equal to the number of GPUs on the current system (``nproc_per_node``),
and each process will be operating on a single GPU from *GPU 0 to
GPU (nproc_per_node - 1)*.
**How to use this module:**
1. Single-Node multi-process distributed training
::
>>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other
arguments of your training script)
2. Multi-Node multi-process distributed training: (e.g. two nodes)
Node 1: *(IP: 192.168.1.1, and has a free port: 1234)*
::
>>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
--nnodes=2 --node_rank=0 --master_addr="192.168.1.1"
--master_port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3
and all other arguments of your training script)
Node 2:
::
>>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
--nnodes=2 --node_rank=1 --master_addr="192.168.1.1"
--master_port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3
and all other arguments of your training script)
3. To look up what optional arguments this module offers:
::
>>> python -m torch.distributed.launch --help
**Important Notices:**
1. This utility and multi-process distributed (single-node or
multi-node) GPU training currently only achieves the best performance using
the NCCL distributed backend. Thus NCCL backend is the recommended backend to
use for GPU training.
2. In your training program, you must parse the command-line argument:
``--local_rank=LOCAL_PROCESS_RANK``, which the stock PyTorch launcher provides (see
notice 5 below: this modified launcher exports ``LOCAL_RANK`` instead of passing the flag).
If your training program uses GPUs, you should ensure that your code only
runs on the GPU device of LOCAL_PROCESS_RANK. This can be done by:
Parsing the local_rank argument
::
>>> import argparse
>>> parser = argparse.ArgumentParser()
>>> parser.add_argument("--local_rank", type=int)
>>> args = parser.parse_args()
Set your device to local rank using either
::
>>> torch.cuda.set_device(args.local_rank) # before your code runs
or
::
>>> with torch.cuda.device(args.local_rank):
>>> # your code to run
3. In your training program, you are supposed to call the following function
at the beginning to start the distributed backend. You need to make sure that
the init_method uses ``env://``, which is the only supported ``init_method``
by this module.
::
torch.distributed.init_process_group(backend='YOUR BACKEND',
init_method='env://')
4. In your training program, you can either use regular distributed functions
or use :func:`torch.nn.parallel.DistributedDataParallel` module. If your
training program uses GPUs for training and you would like to use
:func:`torch.nn.parallel.DistributedDataParallel` module,
here is how to configure it.
::
model = torch.nn.parallel.DistributedDataParallel(model,
device_ids=[args.local_rank],
output_device=args.local_rank)
Please ensure that ``device_ids`` argument is set to be the only GPU device id
that your code will be operating on. This is generally the local rank of the
process. In other words, the ``device_ids`` needs to be ``[args.local_rank]``,
and ``output_device`` needs to be ``args.local_rank`` in order to use this
utility
5. Another way to pass ``local_rank`` to the subprocesses is via the environment
variable ``LOCAL_RANK``. In the stock PyTorch launcher this behavior is enabled with
``--use_env=True``; this modified launcher always exports ``LOCAL_RANK`` (and ``RANK``)
and never passes ``--local_rank``, so you must adjust the subprocess example above to
replace ``args.local_rank`` with ``os.environ['LOCAL_RANK']``.
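For example, the device setup shown above becomes:
::
    >>> import os
    >>> torch.cuda.set_device(int(os.environ['LOCAL_RANK']))  # env values are strings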
.. warning::
``local_rank`` is NOT globally unique: it is only unique per process
on a machine. Thus, don't use it to decide if you should, e.g.,
write to a networked filesystem. See
https://github.com/pytorch/pytorch/issues/12042 for an example of
how things can go wrong if you don't do this correctly.
"""
import sys
import subprocess
import os
import socket
from argparse import ArgumentParser, REMAINDER
import torch
def parse_args():
"""
Helper function parsing the command line options
@retval ArgumentParser
"""
parser = ArgumentParser(description="PyTorch distributed training launch "
"helper utilty that will spawn up "
"multiple distributed processes")
# Optional arguments for the launch helper
parser.add_argument("--nnodes", type=int, default=1,
help="The number of nodes to use for distributed "
"training")
parser.add_argument("--node_rank", type=int, default=0,
help="The rank of the node for multi-node distributed "
"training")
parser.add_argument("--nproc_per_node", type=int, default=1,
help="The number of processes to launch on each node, "
"for GPU training, this is recommended to be set "
"to the number of GPUs in your system so that "
"each process can be bound to a single GPU.")
parser.add_argument("--master_addr", default="127.0.0.1", type=str,
help="Master node (rank 0)'s address, should be either "
"the IP address or the hostname of node 0, for "
"single node multi-proc training, the "
"--master_addr can simply be 127.0.0.1")
parser.add_argument("--master_port", default=29500, type=int,
help="Master node (rank 0)'s free port that needs to "
"be used for communciation during distributed "
"training")
# parser.add_argument("--use_env", default=False, action="store_true",
# help="Use environment variable to pass "
# "'local rank'. For legacy reasons, the default value is False. "
# "If set to True, the script will not pass "
# "--local_rank as argument, and will instead set LOCAL_RANK.")
# positional
parser.add_argument("training_script", type=str,
help="The full path to the single GPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script")
# rest from the training program
parser.add_argument('training_script_args', nargs=REMAINDER)
return parser.parse_args()
def main():
args = parse_args()
# world size in terms of number of processes
dist_world_size = args.nproc_per_node * args.nnodes
# set PyTorch distributed related environmental variables
current_env = os.environ.copy()
current_env["MASTER_ADDR"] = args.master_addr
current_env["MASTER_PORT"] = str(args.master_port)
current_env["WORLD_SIZE"] = str(dist_world_size)
processes = []
for local_rank in range(0, args.nproc_per_node):
# each process's rank
dist_rank = args.nproc_per_node * args.node_rank + local_rank
current_env["RANK"] = str(dist_rank)
current_env["LOCAL_RANK"] = str(local_rank)
# # spawn the processes
# if args.use_env:
# cmd = [sys.executable, "-u",
# args.training_script] + args.training_script_args
# else:
# cmd = [sys.executable,
# "-u",
# args.training_script,
# "--local_rank={}".format(local_rank)] + args.training_script_args
cmd = [sys.executable, "-u",
args.training_script] + args.training_script_args + ["--dist"]
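        # ``--dist`` is consumed by the training scripts themselves (e.g.
        # okvqa/train_end2end.py defines a --dist flag), so every spawned
        # process runs with distributed training enabled.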
process = subprocess.Popen(cmd, env=current_env)
processes.append(process)
for process in processes:
process.wait()
if process.returncode != 0:
raise subprocess.CalledProcessError(returncode=process.returncode,
cmd=process.args)
if __name__ == "__main__":
main()
| 9,500 | 46.268657 | 95 | py |
VLC-BERT | VLC-BERT-master/okvqa/train_end2end.py | import _init_paths
import os
import argparse
import torch
import subprocess
import json
from okvqa.function.config import config, update_config
from okvqa.function.train import train_net
from okvqa.function.test import test_net
from external.PythonEvaluationTools.okvqa_vqaEval import run_eval
def parse_args():
parser = argparse.ArgumentParser('Train Cognition Network')
parser.add_argument('--cfg', type=str, help='path to config file')
parser.add_argument('--model-dir', type=str, help='root path to store checkpoint')
parser.add_argument('--log-dir', type=str, help='tensorboard log dir')
parser.add_argument('--dist', help='whether to use distributed training', default=False, action='store_true')
parser.add_argument('--slurm', help='whether this is a slurm job', default=False, action='store_true')
    parser.add_argument('--do-test', help='whether to generate json results on the test set',
default=True, action='store_true')
parser.add_argument('--cudnn-off', help='disable cudnn', default=False, action='store_true')
# easy test pretrain model
parser.add_argument('--partial-pretrain', type=str)
args = parser.parse_args()
if args.cfg is not None:
update_config(args.cfg)
if args.model_dir is not None:
config.OUTPUT_PATH = os.path.join(args.model_dir, config.OUTPUT_PATH)
if args.partial_pretrain is not None:
config.NETWORK.PARTIAL_PRETRAIN = args.partial_pretrain
if args.slurm:
proc_id = int(os.environ['SLURM_PROCID'])
ntasks = int(os.environ['SLURM_NTASKS'])
node_list = os.environ['SLURM_NODELIST']
num_gpus = torch.cuda.device_count()
addr = subprocess.getoutput(
'scontrol show hostname {} | head -n1'.format(node_list))
os.environ['MASTER_PORT'] = str(29500)
os.environ['MASTER_ADDR'] = addr
os.environ['WORLD_SIZE'] = str(ntasks)
os.environ['RANK'] = str(proc_id)
os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
return args, config
def _load_json(path):
with open(path, 'r') as f:
return json.load(f)
def prune_res_file(res_file):
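    # Keep only predictions whose question_id appears in the pruned OK-VQA val
    # question file, so evaluation can also be reported on the pruned split.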
res = _load_json(res_file)
annot = _load_json('data/coco/okvqa/OpenEnded_mscoco_val2014_questions_pruned.json')
res_pruned = []
for a in annot['questions']:
qid = a['question_id']
for r in res:
if r['question_id'] == qid:
res_pruned.append(r)
break
res_pruned_path = res_file[:-5]+'_pruned.json'
with open(res_pruned_path, 'w') as f:
json.dump(res_pruned, f)
return res_pruned_path
def main():
args, config = parse_args()
rank, model = train_net(args, config)
if args.do_test and (rank is None or rank == 0):
res_path, save_path = test_net(args, config)
run_eval(res_path, save_path, pruned=False)
res_pruned_path = prune_res_file(res_path)
run_eval(res_pruned_path, save_path, pruned=True)
if __name__ == '__main__':
main()
| 3,058 | 33.370787 | 113 | py |
VLC-BERT | VLC-BERT-master/okvqa/function/val.py | from collections import namedtuple
import torch
from common.trainer import to_cuda
@torch.no_grad()
def do_validation(net, val_loader, metrics, label_index_in_batch):
net.eval()
metrics.reset()
for nbatch, batch in enumerate(val_loader):
batch = to_cuda(batch)
label = batch[label_index_in_batch]
datas = [batch[i] for i in range(len(batch)) if i != label_index_in_batch % len(batch)]
outputs = net(*datas)
outputs.update({'label': label})
metrics.update(outputs)
| 528 | 26.842105 | 95 | py |
VLC-BERT | VLC-BERT-master/okvqa/function/test.py | import os
import pprint
import shutil
import json
from tqdm import tqdm, trange
import numpy as np
import torch
import torch.nn.functional as F
from common.utils.load import smart_load_model_state_dict
from common.trainer import to_cuda
from common.utils.create_logger import create_logger
from okvqa.data.build import make_dataloader
from okvqa.modules import *
@torch.no_grad()
def test_net(args, config, ckpt_path=None, save_path=None, save_name=None):
print('test net...')
pprint.pprint(args)
pprint.pprint(config)
device_ids = [int(d) for d in config.GPUS.split(',')]
# os.environ['CUDA_VISIBLE_DEVICES'] = config.GPUS
torch.backends.cudnn.enabled = False
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if ckpt_path is None:
_, train_output_path = create_logger(config.OUTPUT_PATH, args.cfg, config.DATASET.TRAIN_IMAGE_SET,
split='train')
model_prefix = os.path.join(train_output_path, config.MODEL_PREFIX)
ckpt_path = '{}-latest.model'.format(model_prefix)
print('Use latest checkpoint {}...'.format(ckpt_path))
if save_path is None:
logger, test_output_path = create_logger(config.OUTPUT_PATH, args.cfg, config.DATASET.TEST_IMAGE_SET,
split='test')
save_path = test_output_path
if not os.path.exists(save_path):
os.makedirs(save_path)
# shutil.copy2(ckpt_path,
# os.path.join(save_path, '{}_test_ckpt_{}.model'.format(config.MODEL_PREFIX, config.DATASET.TASK)))
# get network
model = eval(config.MODULE)(config)
if len(device_ids) > 1:
model = torch.nn.DataParallel(model, device_ids=device_ids).cuda()
else:
torch.cuda.set_device(device_ids[0])
model = model.cuda()
checkpoint = torch.load(ckpt_path, map_location=lambda storage, loc: storage)
smart_load_model_state_dict(model, checkpoint['state_dict'])
# loader
test_loader = make_dataloader(config, mode='test', distributed=False)
test_dataset = test_loader.dataset
test_database = test_dataset.database
# test
q_ids = []
answer_ids = []
attn_weights = []
model.eval()
cur_id = 0
for nbatch, batch in zip(trange(len(test_loader)), test_loader):
# for nbatch, batch in tqdm(enumerate(test_loader)):
bs = test_loader.batch_sampler.batch_size if test_loader.batch_sampler is not None else test_loader.batch_size
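        # Question ids are recovered by position in the database, which assumes the
        # test loader iterates the dataset sequentially (i.e. without shuffling).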
q_ids.extend([test_database[id]['question_id'] for id in range(cur_id, min(cur_id + bs, len(test_database)))])
batch = to_cuda(batch)
output = model(*batch)
answer_ids.extend(output['label_logits'].argmax(dim=1).detach().cpu().tolist())
attn_weights.extend(output['attn_weights'].detach().cpu().tolist())
cur_id += bs
result = [{'question_id': q_id, 'answer': test_dataset.answer_vocab[a_id], 'attn_weights': attn} for q_id, a_id, attn in zip(q_ids, answer_ids, attn_weights)]
cfg_name = os.path.splitext(os.path.basename(args.cfg))[0]
result_json_path = os.path.join(save_path, '{}_okvqa_{}.json'.format(cfg_name if save_name is None else save_name,
config.DATASET.TEST_IMAGE_SET))
with open(result_json_path, 'w') as f:
json.dump(result, f)
print('result json saved to {}.'.format(result_json_path))
return result_json_path, save_path
| 3,523 | 40.458824 | 162 | py |
VLC-BERT | VLC-BERT-master/okvqa/function/train.py | import os
import pprint
import shutil
import inspect
from tensorboardX import SummaryWriter
import numpy as np
import torch
import torch.nn
import torch.optim as optim
import torch.distributed as distributed
from torch.nn.parallel import DistributedDataParallel as DDP
from common.utils.create_logger import create_logger
from common.utils.misc import summary_parameters, bn_fp16_half_eval
from common.utils.load import smart_resume, smart_partial_load_model_state_dict
from common.trainer import train
from common.metrics.composite_eval_metric import CompositeEvalMetric
from common.metrics import vqa_metrics
from common.callbacks.batch_end_callbacks.speedometer import Speedometer
from common.callbacks.epoch_end_callbacks.validation_monitor import ValidationMonitor
from common.callbacks.epoch_end_callbacks.checkpoint import Checkpoint
from common.lr_scheduler import WarmupMultiStepLR
from common.nlp.bert.optimization import AdamW, WarmupLinearSchedule
from okvqa.data.build import make_dataloader, build_dataset, build_transforms
from okvqa.modules import *
from okvqa.function.val import do_validation
try:
from apex import amp
from apex.parallel import DistributedDataParallel as Apex_DDP
except ImportError:
pass
#raise ImportError("Please install apex from https://www.github.com/nvidia/apex if you want to use fp16.")
def train_net(args, config):
# setup logger
logger, final_output_path = create_logger(config.OUTPUT_PATH, args.cfg, config.DATASET.TRAIN_IMAGE_SET,
split='train')
model_prefix = os.path.join(final_output_path, config.MODEL_PREFIX)
if args.log_dir is None:
args.log_dir = os.path.join(final_output_path, 'tensorboard_logs')
pprint.pprint(args)
logger.info('training args:{}\n'.format(args))
pprint.pprint(config)
logger.info('training config:{}\n'.format(pprint.pformat(config)))
# manually set random seed
if config.RNG_SEED > -1:
np.random.seed(config.RNG_SEED)
torch.random.manual_seed(config.RNG_SEED)
torch.cuda.manual_seed_all(config.RNG_SEED)
# cudnn
torch.backends.cudnn.benchmark = False
if args.cudnn_off:
torch.backends.cudnn.enabled = False
if args.dist:
model = eval(config.MODULE)(config)
local_rank = int(os.environ.get('LOCAL_RANK') or 0)
config.GPUS = str(local_rank)
torch.cuda.set_device(local_rank)
master_address = os.environ['MASTER_ADDR']
master_port = int(os.environ['MASTER_PORT'] or 23456)
world_size = int(os.environ['WORLD_SIZE'] or 1)
rank = int(os.environ['RANK'] or 0)
if args.slurm:
distributed.init_process_group(backend='nccl')
else:
distributed.init_process_group(
backend='nccl',
init_method='tcp://{}:{}'.format(master_address, master_port),
world_size=world_size,
rank=rank,
group_name='mtorch')
print(f'native distributed, size: {world_size}, rank: {rank}, local rank: {local_rank}')
torch.cuda.set_device(local_rank)
config.GPUS = str(local_rank)
model = model.cuda()
if not config.TRAIN.FP16:
model = DDP(model, device_ids=[local_rank], output_device=local_rank)
if rank == 0:
summary_parameters(model.module if isinstance(model, torch.nn.parallel.DistributedDataParallel) else model,
logger)
shutil.copy(args.cfg, final_output_path)
shutil.copy(inspect.getfile(eval(config.MODULE)), final_output_path)
writer = None
if args.log_dir is not None:
tb_log_dir = os.path.join(args.log_dir, 'rank{}'.format(rank))
if not os.path.exists(tb_log_dir):
os.makedirs(tb_log_dir)
writer = SummaryWriter(log_dir=tb_log_dir)
train_loader, train_sampler = make_dataloader(config,
mode='train',
distributed=True,
num_replicas=world_size,
rank=rank,
expose_sampler=True)
val_loader = make_dataloader(config,
mode='val',
distributed=True,
num_replicas=world_size,
rank=rank)
batch_size = world_size * (sum(config.TRAIN.BATCH_IMAGES)
if isinstance(config.TRAIN.BATCH_IMAGES, list)
else config.TRAIN.BATCH_IMAGES)
if config.TRAIN.GRAD_ACCUMULATE_STEPS > 1:
batch_size = batch_size * config.TRAIN.GRAD_ACCUMULATE_STEPS
base_lr = config.TRAIN.LR * batch_size
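        # The base LR is scaled linearly with the effective global batch size; parameter
        # groups whose names match an entry in TRAIN.LR_MULT additionally get a per-group
        # LR multiplier below, while all remaining parameters use the optimizer default.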
optimizer_grouped_parameters = [{'params': [p for n, p in model.named_parameters() if _k in n],
'lr': base_lr * _lr_mult}
for _k, _lr_mult in config.TRAIN.LR_MULT]
optimizer_grouped_parameters.append({'params': [p for n, p in model.named_parameters()
if all([_k not in n for _k, _ in config.TRAIN.LR_MULT])]})
if config.TRAIN.OPTIMIZER == 'SGD':
optimizer = optim.SGD(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
momentum=config.TRAIN.MOMENTUM,
weight_decay=config.TRAIN.WD)
elif config.TRAIN.OPTIMIZER == 'Adam':
optimizer = optim.Adam(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
weight_decay=config.TRAIN.WD)
elif config.TRAIN.OPTIMIZER == 'AdamW':
optimizer = AdamW(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
betas=(0.9, 0.999),
eps=1e-6,
weight_decay=config.TRAIN.WD,
correct_bias=True)
else:
raise ValueError('Not support optimizer {}!'.format(config.TRAIN.OPTIMIZER))
total_gpus = world_size
else:
#os.environ['CUDA_VISIBLE_DEVICES'] = config.GPUS
model = eval(config.MODULE)(config)
summary_parameters(model, logger)
shutil.copy(args.cfg, final_output_path)
shutil.copy(inspect.getfile(eval(config.MODULE)), final_output_path)
num_gpus = len(config.GPUS.split(','))
assert num_gpus <= 1 or (not config.TRAIN.FP16), "Not support fp16 with torch.nn.DataParallel. " \
"Please use amp.parallel.DistributedDataParallel instead."
total_gpus = num_gpus
rank = None
writer = SummaryWriter(log_dir=args.log_dir) if args.log_dir is not None else None
# model
if num_gpus > 1:
model = torch.nn.DataParallel(model, device_ids=[int(d) for d in config.GPUS.split(',')]).cuda()
else:
torch.cuda.set_device(int(config.GPUS))
model.cuda()
# loader
train_loader = make_dataloader(config, mode='train', distributed=False)
val_loader = make_dataloader(config, mode='val', distributed=False)
train_sampler = None
batch_size = num_gpus * (sum(config.TRAIN.BATCH_IMAGES) if isinstance(config.TRAIN.BATCH_IMAGES, list)
else config.TRAIN.BATCH_IMAGES)
if config.TRAIN.GRAD_ACCUMULATE_STEPS > 1:
batch_size = batch_size * config.TRAIN.GRAD_ACCUMULATE_STEPS
base_lr = config.TRAIN.LR * batch_size
optimizer_grouped_parameters = [{'params': [p for n, p in model.named_parameters() if _k in n],
'lr': base_lr * _lr_mult}
for _k, _lr_mult in config.TRAIN.LR_MULT]
optimizer_grouped_parameters.append({'params': [p for n, p in model.named_parameters()
if all([_k not in n for _k, _ in config.TRAIN.LR_MULT])]})
if config.TRAIN.OPTIMIZER == 'SGD':
optimizer = optim.SGD(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
momentum=config.TRAIN.MOMENTUM,
weight_decay=config.TRAIN.WD)
elif config.TRAIN.OPTIMIZER == 'Adam':
optimizer = optim.Adam(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
weight_decay=config.TRAIN.WD)
elif config.TRAIN.OPTIMIZER == 'AdamW':
optimizer = AdamW(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
betas=(0.9, 0.999),
eps=1e-6,
weight_decay=config.TRAIN.WD,
correct_bias=True)
else:
raise ValueError('Not support optimizer {}!'.format(config.TRAIN.OPTIMIZER))
# partial load pretrain state dict
if config.NETWORK.PARTIAL_PRETRAIN != "":
pretrain_state_dict = torch.load(config.NETWORK.PARTIAL_PRETRAIN, map_location=lambda storage, loc: storage)['state_dict']
prefix_change = [prefix_change.split('->') for prefix_change in config.NETWORK.PARTIAL_PRETRAIN_PREFIX_CHANGES]
if len(prefix_change) > 0:
pretrain_state_dict_parsed = {}
for k, v in pretrain_state_dict.items():
no_match = True
for pretrain_prefix, new_prefix in prefix_change:
if k.startswith(pretrain_prefix):
k = new_prefix + k[len(pretrain_prefix):]
pretrain_state_dict_parsed[k] = v
no_match = False
break
if no_match:
pretrain_state_dict_parsed[k] = v
pretrain_state_dict = pretrain_state_dict_parsed
smart_partial_load_model_state_dict(model, pretrain_state_dict, vocab_size=config.NETWORK.VLBERT.type_vocab_size)
# pretrained classifier
if config.NETWORK.CLASSIFIER_PRETRAINED:
print('Initializing classifier weight from pretrained word embeddings...')
answers_word_embed = []
for k, v in model.state_dict().items():
if 'word_embeddings.weight' in k:
word_embeddings = v.detach().clone()
break
for answer in train_loader.dataset.answer_vocab:
a_tokens = train_loader.dataset.tokenizer.tokenize(answer)
a_ids = train_loader.dataset.tokenizer.convert_tokens_to_ids(a_tokens)
a_word_embed = (torch.stack([word_embeddings[a_id] for a_id in a_ids], dim=0)).mean(dim=0)
answers_word_embed.append(a_word_embed)
answers_word_embed_tensor = torch.stack(answers_word_embed, dim=0)
for name, module in model.named_modules():
if name.endswith('final_mlp'):
module[-1].weight.data = answers_word_embed_tensor.to(device=module[-1].weight.data.device)
# metrics
train_metrics_list = [vqa_metrics.SoftAccuracy(allreduce=args.dist,
num_replicas=world_size if args.dist else 1)]
val_metrics_list = [vqa_metrics.SoftAccuracy(allreduce=args.dist,
num_replicas=world_size if args.dist else 1)]
for output_name, display_name in config.TRAIN.LOSS_LOGGERS:
train_metrics_list.append(
vqa_metrics.LossLogger(output_name, display_name=display_name, allreduce=args.dist,
num_replicas=world_size if args.dist else 1))
train_metrics = CompositeEvalMetric()
val_metrics = CompositeEvalMetric()
for child_metric in train_metrics_list:
train_metrics.add(child_metric)
for child_metric in val_metrics_list:
val_metrics.add(child_metric)
# epoch end callbacks
epoch_end_callbacks = []
if (rank is None) or (rank == 0):
epoch_end_callbacks = [Checkpoint(model_prefix, config.CHECKPOINT_FREQUENT)]
validation_monitor = ValidationMonitor(do_validation, val_loader, val_metrics,
host_metric_name='SoftAcc',
label_index_in_batch=config.DATASET.LABEL_INDEX_IN_BATCH)
# optimizer initial lr before
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
# resume/auto-resume
if rank is None or rank == 0:
smart_resume(model, optimizer, validation_monitor, config, model_prefix, logger)
if args.dist:
begin_epoch = torch.tensor(config.TRAIN.BEGIN_EPOCH).cuda()
distributed.broadcast(begin_epoch, src=0)
config.TRAIN.BEGIN_EPOCH = begin_epoch.item()
# batch end callbacks
batch_size = len(config.GPUS.split(',')) * config.TRAIN.BATCH_IMAGES
batch_end_callbacks = [Speedometer(batch_size, config.LOG_FREQUENT,
batches_per_epoch=len(train_loader),
epochs=config.TRAIN.END_EPOCH - config.TRAIN.BEGIN_EPOCH)]
# setup lr step and lr scheduler
if config.TRAIN.LR_SCHEDULE == 'plateau':
print("Warning: not support resuming on plateau lr schedule!")
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
mode='max',
factor=config.TRAIN.LR_FACTOR,
patience=1,
verbose=True,
threshold=1e-4,
threshold_mode='rel',
cooldown=2,
min_lr=0,
eps=1e-8)
elif config.TRAIN.LR_SCHEDULE == 'triangle':
lr_scheduler = WarmupLinearSchedule(optimizer,
config.TRAIN.WARMUP_STEPS if config.TRAIN.WARMUP else 0,
t_total=int(config.TRAIN.END_EPOCH * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS),
last_epoch=int(config.TRAIN.BEGIN_EPOCH * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS) - 1)
elif config.TRAIN.LR_SCHEDULE == 'step':
lr_iters = [int(epoch * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS) for epoch in config.TRAIN.LR_STEP]
lr_scheduler = WarmupMultiStepLR(optimizer, milestones=lr_iters, gamma=config.TRAIN.LR_FACTOR,
warmup_factor=config.TRAIN.WARMUP_FACTOR,
warmup_iters=config.TRAIN.WARMUP_STEPS if config.TRAIN.WARMUP else 0,
warmup_method=config.TRAIN.WARMUP_METHOD,
last_epoch=int(config.TRAIN.BEGIN_EPOCH * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS) - 1)
else:
raise ValueError("Not support lr schedule: {}.".format(config.TRAIN.LR_SCHEDULE))
# broadcast parameter and optimizer state from rank 0 before training start
if args.dist:
for v in model.state_dict().values():
distributed.broadcast(v, src=0)
# for v in optimizer.state_dict().values():
# distributed.broadcast(v, src=0)
best_epoch = torch.tensor(validation_monitor.best_epoch).cuda()
best_val = torch.tensor(validation_monitor.best_val).cuda()
distributed.broadcast(best_epoch, src=0)
distributed.broadcast(best_val, src=0)
validation_monitor.best_epoch = best_epoch.item()
validation_monitor.best_val = best_val.item()
# apex: amp fp16 mixed-precision training
if config.TRAIN.FP16:
# model.apply(bn_fp16_half_eval)
model, optimizer = amp.initialize(model, optimizer,
opt_level='O2',
keep_batchnorm_fp32=False,
loss_scale=config.TRAIN.FP16_LOSS_SCALE,
min_loss_scale=32.0)
if args.dist:
model = Apex_DDP(model, delay_allreduce=True)
train(model, optimizer, lr_scheduler, train_loader, train_sampler, train_metrics,
config.TRAIN.BEGIN_EPOCH, config.TRAIN.END_EPOCH, logger,
rank=rank, batch_end_callbacks=batch_end_callbacks, epoch_end_callbacks=epoch_end_callbacks,
writer=writer, validation_monitor=validation_monitor, fp16=config.TRAIN.FP16,
clip_grad_norm=config.TRAIN.CLIP_GRAD_NORM,
gradient_accumulate_steps=config.TRAIN.GRAD_ACCUMULATE_STEPS)
return rank, model
| 17,597 | 51.219585 | 147 | py |
VLC-BERT | VLC-BERT-master/okvqa/modules/resnet_vlbert_for_okvqa.py | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from external.pytorch_pretrained_bert import BertTokenizer
from external.pytorch_pretrained_bert.modeling import BertPredictionHeadTransform
from common.module import Module
from common.fast_rcnn import FastRCNN
from common.visual_linguistic_bert import VisualLinguisticBert
BERT_WEIGHTS_NAME = 'pytorch_model.bin'
class ResNetVLBERT(Module):
def __init__(self, config):
super(ResNetVLBERT, self).__init__(config)
self.enable_cnn_reg_loss = config.NETWORK.ENABLE_CNN_REG_LOSS
if not config.NETWORK.BLIND:
self.image_feature_extractor = FastRCNN(config,
average_pool=True,
final_dim=config.NETWORK.IMAGE_FINAL_DIM,
enable_cnn_reg_loss=self.enable_cnn_reg_loss)
if config.NETWORK.VLBERT.object_word_embed_mode == 1:
self.object_linguistic_embeddings = nn.Embedding(81, config.NETWORK.VLBERT.hidden_size)
elif config.NETWORK.VLBERT.object_word_embed_mode == 2:
self.object_linguistic_embeddings = nn.Embedding(1, config.NETWORK.VLBERT.hidden_size)
elif config.NETWORK.VLBERT.object_word_embed_mode == 3:
self.object_linguistic_embeddings = None
else:
raise NotImplementedError
self.image_feature_bn_eval = config.NETWORK.IMAGE_FROZEN_BN
self.use_expansions = config.DATASET.COMMONSENSE_EXP_NAME != ''
self.commonsense_exp_name = config.NETWORK.VLBERT.commonsense_emb_type
self.tokenizer = BertTokenizer.from_pretrained(config.NETWORK.BERT_MODEL_NAME)
language_pretrained_model_path = None
if config.NETWORK.BERT_PRETRAINED != '':
language_pretrained_model_path = '{}-{:04d}.model'.format(config.NETWORK.BERT_PRETRAINED,
config.NETWORK.BERT_PRETRAINED_EPOCH)
elif os.path.isdir(config.NETWORK.BERT_MODEL_NAME):
weight_path = os.path.join(config.NETWORK.BERT_MODEL_NAME, BERT_WEIGHTS_NAME)
if os.path.isfile(weight_path):
language_pretrained_model_path = weight_path
self.language_pretrained_model_path = language_pretrained_model_path
if language_pretrained_model_path is None:
print("Warning: no pretrained language model found, training from scratch!!!")
self.vlbert = VisualLinguisticBert(config.NETWORK.VLBERT,
language_pretrained_model_path=language_pretrained_model_path)
# self.hm_out = nn.Linear(config.NETWORK.VLBERT.hidden_size, config.NETWORK.VLBERT.hidden_size)
# self.hi_out = nn.Linear(config.NETWORK.VLBERT.hidden_size, config.NETWORK.VLBERT.hidden_size)
dim = config.NETWORK.VLBERT.hidden_size
if config.NETWORK.CLASSIFIER_TYPE == "2fc":
self.final_mlp = torch.nn.Sequential(
torch.nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
torch.nn.Linear(dim, config.NETWORK.CLASSIFIER_HIDDEN_SIZE),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
torch.nn.Linear(config.NETWORK.CLASSIFIER_HIDDEN_SIZE, config.DATASET.ANSWER_VOCAB_SIZE),
)
elif config.NETWORK.CLASSIFIER_TYPE == "1fc":
self.final_mlp = torch.nn.Sequential(
torch.nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
torch.nn.Linear(dim, config.DATASET.ANSWER_VOCAB_SIZE)
)
elif config.NETWORK.CLASSIFIER_TYPE == 'mlm':
transform = BertPredictionHeadTransform(config.NETWORK.VLBERT)
linear = nn.Linear(config.NETWORK.VLBERT.hidden_size, config.DATASET.ANSWER_VOCAB_SIZE)
self.final_mlp = nn.Sequential(
transform,
nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
linear
)
else:
raise ValueError("Not support classifier type: {}!".format(config.NETWORK.CLASSIFIER_TYPE))
# init weights
self.init_weight()
self.fix_params()
def init_weight(self):
# self.hm_out.weight.data.normal_(mean=0.0, std=0.02)
# self.hm_out.bias.data.zero_()
# self.hi_out.weight.data.normal_(mean=0.0, std=0.02)
# self.hi_out.bias.data.zero_()
self.image_feature_extractor.init_weight()
if self.object_linguistic_embeddings is not None:
self.object_linguistic_embeddings.weight.data.normal_(mean=0.0, std=0.02)
for m in self.final_mlp.modules():
if isinstance(m, torch.nn.Linear):
torch.nn.init.xavier_uniform_(m.weight)
torch.nn.init.constant_(m.bias, 0)
if self.config.NETWORK.CLASSIFIER_TYPE == 'mlm':
language_pretrained = torch.load(self.language_pretrained_model_path)
mlm_transform_state_dict = {}
pretrain_keys = []
for k, v in language_pretrained.items():
if k.startswith('cls.predictions.transform.'):
pretrain_keys.append(k)
k_ = k[len('cls.predictions.transform.'):]
if 'gamma' in k_:
k_ = k_.replace('gamma', 'weight')
if 'beta' in k_:
k_ = k_.replace('beta', 'bias')
mlm_transform_state_dict[k_] = v
print("loading pretrained classifier transform keys: {}.".format(pretrain_keys))
self.final_mlp[0].load_state_dict(mlm_transform_state_dict)
def train(self, mode=True):
super(ResNetVLBERT, self).train(mode)
# turn some frozen layers to eval mode
if self.image_feature_bn_eval:
self.image_feature_extractor.bn_eval()
def fix_params(self):
pass
def _collect_obj_reps(self, span_tags, object_reps):
"""
Collect span-level object representations
:param span_tags: [batch_size, ..leading_dims.., L]
:param object_reps: [batch_size, max_num_objs_per_batch, obj_dim]
:return:
"""
span_tags_fixed = torch.clamp(span_tags, min=0) # In case there were masked values here
row_id = span_tags_fixed.new_zeros(span_tags_fixed.shape)
row_id_broadcaster = torch.arange(0, row_id.shape[0], step=1, device=row_id.device)[:, None]
        # Add extra dimensions to the row broadcaster so it matches row_id
leading_dims = len(span_tags.shape) - 2
for i in range(leading_dims):
row_id_broadcaster = row_id_broadcaster[..., None]
row_id += row_id_broadcaster
return object_reps[row_id.view(-1), span_tags_fixed.view(-1)].view(*span_tags_fixed.shape, -1)
def prepare_text_from_qa(self, question, question_tags, question_mask, answer, answer_tags, answer_mask):
batch_size, max_q_len = question.shape
_, max_a_len = answer.shape
max_len = (question_mask.sum(1) + answer_mask.sum(1)).max() + 3
cls_id, sep_id = self.tokenizer.convert_tokens_to_ids(['[CLS]', '[SEP]'])
q_end = 1 + question_mask.sum(1, keepdim=True)
a_end = q_end + 1 + answer_mask.sum(1, keepdim=True)
input_ids = torch.zeros((batch_size, max_len), dtype=question.dtype, device=question.device)
input_mask = torch.ones((batch_size, max_len), dtype=torch.bool, device=question.device)
input_type_ids = torch.zeros((batch_size, max_len), dtype=question.dtype, device=question.device)
text_tags = input_type_ids.new_zeros((batch_size, max_len))
grid_i, grid_j = torch.meshgrid(torch.arange(batch_size, device=question.device),
torch.arange(max_len, device=question.device))
input_mask[grid_j > a_end] = 0
input_type_ids[(grid_j > q_end) & (grid_j <= a_end)] = 1
q_input_mask = (grid_j > 0) & (grid_j < q_end)
a_input_mask = (grid_j > q_end) & (grid_j < a_end)
input_ids[:, 0] = cls_id
input_ids[grid_j == q_end] = sep_id
input_ids[grid_j == a_end] = sep_id
input_ids[q_input_mask] = question[question_mask]
input_ids[a_input_mask] = answer[answer_mask]
text_tags[q_input_mask] = question_tags[question_mask]
text_tags[a_input_mask] = answer_tags[answer_mask]
return input_ids, input_type_ids, text_tags, input_mask, (a_end - 1).squeeze(1)
def prepare_text_from_qea(self, question, question_tags, question_mask, expansions, expansions_tags, expansions_mask, answer, answer_tags, answer_mask):
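        # Builds the joint text input [CLS] question [SEP] expansions [SEP] answer [SEP]
        # (the "answer" is a single [MASK] token here); segment ids are 0 for the
        # question, 3 for the expansion sentences and 1 for the answer slot.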
batch_size, max_q_len = question.shape
_, max_e_len = expansions.shape
_, max_a_len = answer.shape
max_len = (question_mask.sum(1) + expansions_mask.sum(1) + answer_mask.sum(1)).max() + 4
cls_id, sep_id = self.tokenizer.convert_tokens_to_ids(['[CLS]', '[SEP]'])
q_end = 1 + question_mask.sum(1, keepdim=True)
e_end = q_end + 1 + expansions_mask.sum(1, keepdim=True)
a_end = e_end + 1 + answer_mask.sum(1, keepdim=True)
# Define a new input sequence
input_ids = torch.zeros((batch_size, max_len), dtype=question.dtype, device=question.device)
input_mask = torch.ones((batch_size, max_len), dtype=torch.bool, device=question.device)
input_type_ids = torch.zeros((batch_size, max_len), dtype=question.dtype, device=question.device)
text_tags = input_type_ids.new_zeros((batch_size, max_len))
grid_i, grid_j = torch.meshgrid(torch.arange(batch_size, device=question.device),
torch.arange(max_len, device=question.device))
input_mask[grid_j > a_end] = 0
input_type_ids[(grid_j > q_end) & (grid_j <= e_end)] = 3
input_type_ids[(grid_j > e_end) & (grid_j <= a_end)] = 1
q_input_mask = (grid_j > 0) & (grid_j < q_end)
c_input_mask = (grid_j > q_end) & (grid_j < e_end)
a_input_mask = (grid_j > e_end) & (grid_j < a_end)
input_ids[:, 0] = cls_id
input_ids[grid_j == q_end] = sep_id
input_ids[grid_j == e_end] = sep_id
input_ids[grid_j == a_end] = sep_id
input_ids[q_input_mask] = question[question_mask]
input_ids[c_input_mask] = expansions[expansions_mask]
input_ids[a_input_mask] = answer[answer_mask]
text_tags[q_input_mask] = question_tags[question_mask]
text_tags[c_input_mask] = expansions_tags[expansions_mask]
text_tags[a_input_mask] = answer_tags[answer_mask]
#print('Inputs: ', input_ids, input_type_ids, text_tags, input_mask)
return input_ids, input_type_ids, text_tags, input_mask, (a_end - 1).squeeze(1)
def train_forward(self,
image,
boxes,
im_info,
question,
expansions,
commonsense_emb,
label,
):
###########################################
# visual feature extraction
images = image
box_mask = (boxes[:, :, 0] > - 1.5)
max_len = int(box_mask.sum(1).max().item())
box_mask = box_mask[:, :max_len]
boxes = boxes[:, :max_len]
obj_reps = self.image_feature_extractor(images=images,
boxes=boxes,
box_mask=box_mask,
im_info=im_info,
classes=None,
segms=None)
question_ids = question
question_tags = question.new_zeros(question_ids.shape)
question_mask = (question > 0.5)
expansions_ids = expansions
expansions_tags = expansions.new_zeros(expansions_ids.shape)
expansions_mask = (expansions > 0.5)
answer_ids = question_ids.new_zeros((question_ids.shape[0], 1)).fill_(
self.tokenizer.convert_tokens_to_ids(['[MASK]'])[0])
answer_mask = question_mask.new_zeros(answer_ids.shape).fill_(1)
answer_tags = question_tags.new_zeros(answer_ids.shape)
############################################
# prepare text
if self.use_expansions:
text_input_ids, text_token_type_ids, text_tags, text_mask, ans_pos = self.prepare_text_from_qea(question_ids,
question_tags,
question_mask,
expansions_ids,
expansions_tags,
expansions_mask,
answer_ids,
answer_tags,
answer_mask)
else:
text_input_ids, text_token_type_ids, text_tags, text_mask, ans_pos = self.prepare_text_from_qa(question_ids,
question_tags,
question_mask,
answer_ids,
answer_tags,
answer_mask)
if self.config.NETWORK.NO_GROUNDING:
obj_rep_zeroed = obj_reps['obj_reps'].new_zeros(obj_reps['obj_reps'].shape)
text_tags.zero_()
text_visual_embeddings = self._collect_obj_reps(text_tags, obj_rep_zeroed)
else:
text_visual_embeddings = self._collect_obj_reps(text_tags, obj_reps['obj_reps'])
assert self.config.NETWORK.VLBERT.object_word_embed_mode == 2
object_linguistic_embeddings = self.object_linguistic_embeddings(
boxes.new_zeros((boxes.shape[0], boxes.shape[1])).long()
)
object_vl_embeddings = torch.cat((obj_reps['obj_reps'], object_linguistic_embeddings), -1)
###########################################
# Visual Linguistic BERT
hidden_states, hc, attn_weights = self.vlbert(text_input_ids,
text_token_type_ids,
text_visual_embeddings,
text_mask,
object_vl_embeddings,
box_mask,
commonsense_embeddings=commonsense_emb,
output_all_encoded_layers=False,
output_commonsense_attn_weights=True)
_batch_inds = torch.arange(question.shape[0], device=question.device)
hm = hidden_states[_batch_inds, ans_pos]
# hm = F.tanh(self.hm_out(hidden_states[_batch_inds, ans_pos]))
# hi = F.tanh(self.hi_out(hidden_states[_batch_inds, ans_pos + 2]))
###########################################
outputs = {}
# classifier
# logits = self.final_mlp(hc * hm * hi)
# logits = self.final_mlp(hc)
logits = self.final_mlp(hm)
# loss
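        # With weak attention supervision, the trailing MAX_COMMONSENSE_LEN + 1 entries of
        # `label` are attention targets; they are split off before computing the answer loss
        # and compared against the averaged commonsense attention weights.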
if self.config.NETWORK.WEAK_ATTN_LOSS:
max_c_len = 0-(self.config.DATASET.MAX_COMMONSENSE_LEN + 1)
attn_label = label[:, max_c_len:]
label = label[:, :max_c_len]
attn_weights = torch.mean(attn_weights, dim=1)
# loss
ans_loss = F.binary_cross_entropy_with_logits(logits, label) * label.size(1)
if self.config.NETWORK.WEAK_ATTN_LOSS:
loss_mask = attn_label.sum(1) > 0
attn_weights = attn_weights[loss_mask, :]
attn_label = attn_label[loss_mask, :]
if attn_label.sum() > 0:
attn_loss = F.binary_cross_entropy_with_logits(attn_weights, attn_label) * attn_label.size(1)
else:
attn_loss = 0
ans_loss = ans_loss + attn_loss
outputs.update({'label_logits': logits,
'label': label,
'ans_loss': ans_loss})
loss = ans_loss.mean()
return outputs, loss
def inference_forward(self,
image,
boxes,
im_info,
question,
expansions,
commonsense_emb):
###########################################
# visual feature extraction
images = image
box_mask = (boxes[:, :, 0] > - 1.5)
max_len = int(box_mask.sum(1).max().item())
box_mask = box_mask[:, :max_len]
boxes = boxes[:, :max_len]
obj_reps = self.image_feature_extractor(images=images,
boxes=boxes,
box_mask=box_mask,
im_info=im_info,
classes=None,
segms=None)
question_ids = question
question_tags = question.new_zeros(question_ids.shape)
question_mask = (question > 0.5)
expansions_ids = expansions
expansions_tags = expansions.new_zeros(expansions_ids.shape)
expansions_mask = (expansions > 0.5)
answer_ids = question_ids.new_zeros((question_ids.shape[0], 1)).fill_(
self.tokenizer.convert_tokens_to_ids(['[MASK]'])[0])
answer_mask = question_mask.new_zeros(answer_ids.shape).fill_(1)
answer_tags = question_tags.new_zeros(answer_ids.shape)
############################################
# prepare text
if self.use_expansions:
text_input_ids, text_token_type_ids, text_tags, text_mask, ans_pos = self.prepare_text_from_qea(question_ids,
question_tags,
question_mask,
expansions_ids,
expansions_tags,
expansions_mask,
answer_ids,
answer_tags,
answer_mask)
else:
text_input_ids, text_token_type_ids, text_tags, text_mask, ans_pos = self.prepare_text_from_qa(question_ids,
question_tags,
question_mask,
answer_ids,
answer_tags,
answer_mask)
if self.config.NETWORK.NO_GROUNDING:
obj_rep_zeroed = obj_reps['obj_reps'].new_zeros(obj_reps['obj_reps'].shape)
text_tags.zero_()
text_visual_embeddings = self._collect_obj_reps(text_tags, obj_rep_zeroed)
else:
text_visual_embeddings = self._collect_obj_reps(text_tags, obj_reps['obj_reps'])
assert self.config.NETWORK.VLBERT.object_word_embed_mode == 2
object_linguistic_embeddings = self.object_linguistic_embeddings(
boxes.new_zeros((boxes.shape[0], boxes.shape[1])).long()
)
object_vl_embeddings = torch.cat((obj_reps['obj_reps'], object_linguistic_embeddings), -1)
###########################################
# Visual Linguistic BERT
hidden_states, hc, attn_weights = self.vlbert(text_input_ids,
text_token_type_ids,
text_visual_embeddings,
text_mask,
object_vl_embeddings,
box_mask,
commonsense_embeddings=commonsense_emb,
output_all_encoded_layers=False,
output_commonsense_attn_weights=True)
_batch_inds = torch.arange(question.shape[0], device=question.device)
hm = hidden_states[_batch_inds, ans_pos]
# hm = F.tanh(self.hm_out(hidden_states[_batch_inds, ans_pos]))
# hi = F.tanh(self.hi_out(hidden_states[_batch_inds, ans_pos + 2]))
###########################################
outputs = {}
# classifier
# logits = self.final_mlp(hc * hm * hi)
# logits = self.final_mlp(hc)
logits = self.final_mlp(hm)
outputs.update({'label_logits': logits, 'attn_weights': attn_weights})
return outputs
| 22,549 | 50.601831 | 156 | py |
VLC-BERT | VLC-BERT-master/okvqa/data/collate_batch.py | import torch
from common.utils.clip_pad import *
class BatchCollator(object):
def __init__(self, dataset, append_ind=False):
self.dataset = dataset
self.test_mode = self.dataset.test_mode
self.data_names = self.dataset.data_names
self.append_ind = append_ind
def __call__(self, batch):
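        # Pad every sample to the per-batch maxima (image size, number of boxes, question
        # and expansion lengths) and stack all fields into batched tensors.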
if not isinstance(batch, list):
batch = list(batch)
if batch[0][self.data_names.index('image')] is not None:
max_shape = tuple(max(s) for s in zip(*[data[self.data_names.index('image')].shape for data in batch]))
image_none = False
else:
image_none = True
max_boxes = max([data[self.data_names.index('boxes')].shape[0] for data in batch])
max_question_length = max([len(data[self.data_names.index('question')]) for data in batch])
max_expansions_length = max([len(data[self.data_names.index('expansions')]) for data in batch])
for i, ibatch in enumerate(batch):
out = {}
if image_none:
out['image'] = None
else:
image = ibatch[self.data_names.index('image')]
out['image'] = clip_pad_images(image, max_shape, pad=0)
boxes = ibatch[self.data_names.index('boxes')]
out['boxes'] = clip_pad_boxes(boxes, max_boxes, pad=-2)
question = ibatch[self.data_names.index('question')]
out['question'] = clip_pad_1d(question, max_question_length, pad=0)
expansions = ibatch[self.data_names.index('expansions')]
out['expansions'] = clip_pad_1d(expansions, max_expansions_length, pad=0)
other_names = [data_name for data_name in self.data_names if data_name not in out]
for name in other_names:
out[name] = torch.as_tensor(ibatch[self.data_names.index(name)])
batch[i] = tuple(out[data_name] for data_name in self.data_names)
if self.append_ind:
batch[i] += (torch.tensor(i, dtype=torch.int64),)
out_tuple = ()
for items in zip(*batch):
if items[0] is None:
out_tuple += (None,)
else:
out_tuple += (torch.stack(tuple(items), dim=0), )
return out_tuple
| 2,295 | 37.266667 | 115 | py |
VLC-BERT | VLC-BERT-master/okvqa/data/build.py | import torch.utils.data
from .datasets import *
from . import samplers
from .transforms.build import build_transforms
from .collate_batch import BatchCollator
import pprint
DATASET_CATALOGS = {'okvqa': OKVQA}
def build_dataset(dataset_name, *args, **kwargs):
assert dataset_name in DATASET_CATALOGS, "dataset not in catalogs"
return DATASET_CATALOGS[dataset_name](*args, **kwargs)
def make_data_sampler(dataset, shuffle, distributed, num_replicas, rank):
if distributed:
return samplers.DistributedSampler(dataset, shuffle=shuffle, num_replicas=num_replicas, rank=rank)
if shuffle:
sampler = torch.utils.data.sampler.RandomSampler(dataset)
else:
sampler = torch.utils.data.sampler.SequentialSampler(dataset)
return sampler
def make_batch_data_sampler(dataset, sampler, aspect_grouping, batch_size):
if aspect_grouping:
group_ids = dataset.group_ids
batch_sampler = samplers.GroupedBatchSampler(
sampler, group_ids, batch_size, drop_uneven=False
)
else:
batch_sampler = torch.utils.data.sampler.BatchSampler(
sampler, batch_size, drop_last=False
)
return batch_sampler
def make_dataloader(cfg, dataset=None, mode='train', distributed=False, num_replicas=None, rank=None,
expose_sampler=False):
assert mode in ['train', 'val', 'test']
if mode == 'train':
ann_file = cfg.DATASET.TRAIN_ANNOTATION_FILE
image_set = cfg.DATASET.TRAIN_IMAGE_SET
aspect_grouping = cfg.TRAIN.ASPECT_GROUPING
num_gpu = len(cfg.GPUS.split(','))
batch_size = cfg.TRAIN.BATCH_IMAGES * num_gpu
shuffle = cfg.TRAIN.SHUFFLE
num_workers = cfg.NUM_WORKERS_PER_GPU * num_gpu
elif mode == 'val':
ann_file = cfg.DATASET.VAL_ANNOTATION_FILE
image_set = cfg.DATASET.VAL_IMAGE_SET
aspect_grouping = False
num_gpu = len(cfg.GPUS.split(','))
batch_size = cfg.VAL.BATCH_IMAGES * num_gpu
shuffle = cfg.VAL.SHUFFLE
num_workers = cfg.NUM_WORKERS_PER_GPU * num_gpu
else:
ann_file = cfg.DATASET.TEST_ANNOTATION_FILE
image_set = cfg.DATASET.TEST_IMAGE_SET
aspect_grouping = False
num_gpu = len(cfg.GPUS.split(','))
batch_size = cfg.TEST.BATCH_IMAGES * num_gpu
shuffle = cfg.TEST.SHUFFLE
num_workers = cfg.NUM_WORKERS_PER_GPU * num_gpu
transform = build_transforms(cfg, mode)
if dataset is None:
dataset = build_dataset(dataset_name=cfg.DATASET.DATASET, ann_file=ann_file, image_set=image_set,
use_imdb=cfg.DATASET.USE_IMDB,
with_precomputed_visual_feat=cfg.NETWORK.IMAGE_FEAT_PRECOMPUTED,
boxes=cfg.DATASET.BOXES,
answer_vocab_file=cfg.DATASET.ANSWER_VOCAB_FILE,
root_path=cfg.DATASET.ROOT_PATH, data_path=cfg.DATASET.DATASET_PATH,
test_mode=(mode == 'test'), transform=transform,
zip_mode=cfg.DATASET.ZIP_MODE, cache_mode=cfg.DATASET.CACHE_MODE,
cache_db=True if (rank is None or rank == 0) else False,
ignore_db_cache=cfg.DATASET.IGNORE_DB_CACHE,
add_image_as_a_box=cfg.DATASET.ADD_IMAGE_AS_A_BOX,
aspect_grouping=aspect_grouping,
mask_size=(cfg.DATASET.MASK_SIZE, cfg.DATASET.MASK_SIZE),
pretrained_model_name=cfg.NETWORK.BERT_MODEL_NAME,
use_sbert = cfg.DATASET.USE_SBERT,
commonsense_exp_name = cfg.DATASET.COMMONSENSE_EXP_NAME,
max_commonsense_len = cfg.DATASET.MAX_COMMONSENSE_LEN,
commonsense_emb_type = cfg.NETWORK.VLBERT.commonsense_emb_type,
learn_attn=cfg.NETWORK.WEAK_ATTN_LOSS)
sampler = make_data_sampler(dataset, shuffle, distributed, num_replicas, rank)
batch_sampler = make_batch_data_sampler(dataset, sampler, aspect_grouping, batch_size)
collator = BatchCollator(dataset=dataset, append_ind=cfg.DATASET.APPEND_INDEX)
dataloader = torch.utils.data.DataLoader(dataset=dataset,
batch_sampler=batch_sampler,
num_workers=num_workers,
pin_memory=False,
collate_fn=collator)
if expose_sampler:
return dataloader, sampler
return dataloader
| 4,750 | 44.247619 | 106 | py |
VLC-BERT | VLC-BERT-master/okvqa/data/datasets/okvqa.py | import os
import json
import _pickle as cPickle
from PIL import Image
import re
import base64
import numpy as np
import csv
import sys
import time
import logging
import pickle5 as pickle
import torch
from torch.utils.data import Dataset
from external.pytorch_pretrained_bert import BertTokenizer
from common.utils.zipreader import ZipReader
from common.utils.create_logger import makedirsExist
from pycocotools.coco import COCO
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ['image_id', 'image_w', 'image_h', 'num_boxes', 'boxes', 'features']
class OKVQA(Dataset):
def __init__(self, image_set, root_path, data_path, answer_vocab_file, use_imdb=True,
with_precomputed_visual_feat=False, boxes="36",
transform=None, test_mode=False,
zip_mode=False, cache_mode=False, cache_db=True, ignore_db_cache=True,
tokenizer=None, pretrained_model_name=None,
add_image_as_a_box=False, mask_size=(14, 14),
aspect_grouping=False, use_sbert=False, commonsense_exp_name='', max_commonsense_len=5,
commonsense_emb_type='', learn_attn=False, **kwargs):
"""
Visual Question Answering Dataset
:param image_set: image folder name
:param root_path: root path to cache database loaded from annotation file
        :param data_path: path to the OK-VQA dataset
:param transform: transform
:param test_mode: test mode means no labels available
:param zip_mode: reading images and metadata in zip archive
:param cache_mode: cache whole dataset to RAM first, then __getitem__ read them from RAM
:param ignore_db_cache: ignore previous cached database, reload it from annotation file
:param tokenizer: default is BertTokenizer from pytorch_pretrained_bert
:param add_image_as_a_box: add whole image as a box
:param mask_size: size of instance mask of each object
:param aspect_grouping: whether to group images via their aspect
:param kwargs:
"""
super(OKVQA, self).__init__()
assert not cache_mode, 'currently not support cache mode!'
okvqa_question = {
"train2014": "okvqa/OpenEnded_mscoco_train2014_questions.json",
"trainval2014": "okvqa/OpenEnded_mscoco_trainval2014_questions.json",
"val2014": "okvqa/OpenEnded_mscoco_val2014_questions.json",
}
okvqa_annot = {
"train2014": "okvqa/mscoco_train2014_annotations.json",
"trainval2014": "okvqa/mscoco_train2014_annotations.json",
"val2014": "okvqa/mscoco_val2014_annotations.json",
}
if boxes == "36":
precomputed_boxes = {
'train2014': ("vgbua_res101_precomputed", "trainval_resnet101_faster_rcnn_genome_36"),
'trainval2014': ("vgbua_res101_precomputed", "trainval_resnet101_faster_rcnn_genome_36"),
'val2014': ("vgbua_res101_precomputed", "trainval_resnet101_faster_rcnn_genome_36"),
}
elif boxes == "10-100ada":
precomputed_boxes = {
'train2014': ("vgbua_res101_precomputed", "trainval2014_resnet101_faster_rcnn_genome"),
'trainval2014': ("vgbua_res101_precomputed", "trainval2014_resnet101_faster_rcnn_genome"),
'val2014': ("vgbua_res101_precomputed", "trainval2014_resnet101_faster_rcnn_genome"),
}
else:
raise ValueError("Not support boxes: {}!".format(boxes))
coco_dataset = {
"train2014": ("train2014", "annotations/instances_train2014.json"),
"trainval2014": ("train2014", "annotations/instances_train2014.json"),
"val2014": ("val2014", "annotations/instances_val2014.json"),
}
commonsense_path = "data/coco/okvqa/commonsense/"
self.experiment_name = commonsense_exp_name
self.use_sbert = use_sbert
self.max_commonsense_len = max_commonsense_len
self.commonsense_emb_type = commonsense_emb_type
self.learn_attn = learn_attn
if self.experiment_name == 'semqo':
okvqa_expansions = {
'train2014': commonsense_path+'expansions/semq.o_okvqa_train.json',
'trainval2014': commonsense_path+'expansions/semq.o_okvqa_train.json',
'val2014': commonsense_path+'expansions/semq.o_okvqa_val.json',
}
self.periodStrip = re.compile("(?!<=\d)(\.)(?!\d)")
self.commaStrip = re.compile("(\d)(\,)(\d)")
self.punct = [';', r"/", '[', ']', '"', '{', '}',
'(', ')', '=', '+', '\\', '_', '-',
'>', '<', '@', '`', ',', '?', '!']
print("Loading OK-VQA dataset: ", image_set)
self.boxes = boxes
self.test_mode = test_mode
self.with_precomputed_visual_feat = with_precomputed_visual_feat
self.data_path = data_path
self.root_path = root_path
with open(answer_vocab_file, 'r', encoding='utf8') as f:
self.answer_vocab = [w.lower().strip().strip('\r').strip('\n').strip('\r') for w in f.readlines()]
self.answer_vocab = list(filter(lambda x: x != '', self.answer_vocab))
self.answer_vocab = [self.processPunctuation(w) for w in self.answer_vocab]
self.image_sets = [iset.strip() for iset in image_set.split('+')]
self.ann_files = [os.path.join(data_path, okvqa_annot[iset]) for iset in self.image_sets] \
if not self.test_mode else [None for iset in self.image_sets]
self.q_files = [os.path.join(data_path, okvqa_question[iset]) for iset in self.image_sets]
self.expansion_files = [okvqa_expansions[iset] for iset in self.image_sets] \
if (self.experiment_name != '') else [None for iset in self.image_sets]
self.precomputed_box_files = [
os.path.join(data_path, precomputed_boxes[iset][0],
'{0}.zip@/{0}'.format(precomputed_boxes[iset][1])
if zip_mode else precomputed_boxes[iset][1])
for iset in self.image_sets]
self.box_bank = {}
self.coco_datasets = [(os.path.join(data_path,
coco_dataset[iset][0],
'COCO_{}_{{:012d}}.jpg'.format(coco_dataset[iset][0]))
if not zip_mode else
os.path.join(data_path,
coco_dataset[iset][0] + '.zip@/' + coco_dataset[iset][0],
'COCO_{}_{{:012d}}.jpg'.format(coco_dataset[iset][0])),
os.path.join(data_path, coco_dataset[iset][1]))
for iset in self.image_sets]
self.transform = transform
self.zip_mode = zip_mode
self.cache_mode = cache_mode
self.cache_db = cache_db
self.ignore_db_cache = ignore_db_cache
self.aspect_grouping = aspect_grouping
self.cache_dir = os.path.join(root_path, 'cache')
self.add_image_as_a_box = add_image_as_a_box
self.mask_size = mask_size
if not os.path.exists(self.cache_dir):
makedirsExist(self.cache_dir)
self.tokenizer = tokenizer if tokenizer is not None \
else BertTokenizer.from_pretrained(
'bert-base-uncased' if pretrained_model_name is None else pretrained_model_name,
cache_dir=self.cache_dir)
if zip_mode:
self.zipreader = ZipReader()
self.database = self.load_annotations()
if self.aspect_grouping:
self.group_ids = self.group_aspect(self.database)
self.attn_gt = None
if self.learn_attn and not self.test_mode:
self.attn_gt = self._load_json('data/coco/okvqa/'+self.experiment_name+'_okvqa_train_attn_annot_'+str(self.max_commonsense_len)+'.json')
@property
def data_names(self):
if self.test_mode:
return ['image', 'boxes', 'im_info', 'question', 'expansions', 'c_emb']
else:
return ['image', 'boxes', 'im_info', 'question', 'expansions', 'c_emb', 'label']
def __getitem__(self, index):
idb = self.database[index]
# image, boxes, im_info
boxes_data = self._load_json(idb['box_fn'])
if self.with_precomputed_visual_feat:
image = None
w0, h0 = idb['width'], idb['height']
boxes_features = torch.tensor(
np.frombuffer(self.b64_decode(boxes_data['features']), dtype=np.float32).reshape((boxes_data['num_boxes'], -1))
)
else:
image = self._load_image(idb['image_fn'])
w0, h0 = image.size
boxes = torch.tensor(
np.frombuffer(self.b64_decode(boxes_data['boxes']), dtype=np.float32).reshape(
(boxes_data['num_boxes'], -1))
)
if self.add_image_as_a_box:
image_box = torch.as_tensor([[0.0, 0.0, w0 - 1, h0 - 1]])
boxes = torch.cat((image_box, boxes), dim=0)
if self.with_precomputed_visual_feat:
if 'image_box_feature' in boxes_data:
image_box_feature = torch.as_tensor(
np.frombuffer(
self.b64_decode(boxes_data['image_box_feature']), dtype=np.float32
).reshape((1, -1))
)
else:
image_box_feature = boxes_features.mean(0, keepdim=True)
boxes_features = torch.cat((image_box_feature, boxes_features), dim=0)
im_info = torch.tensor([w0, h0, 1.0, 1.0])
flipped = False
if self.transform is not None:
image, boxes, _, im_info, flipped = self.transform(image, boxes, None, im_info, flipped)
# clamp boxes
w = im_info[0].item()
h = im_info[1].item()
boxes[:, [0, 2]] = boxes[:, [0, 2]].clamp(min=0, max=w - 1)
boxes[:, [1, 3]] = boxes[:, [1, 3]].clamp(min=0, max=h - 1)
# flip: 'left' -> 'right', 'right' -> 'left'
q_tokens = self.tokenizer.tokenize(idb['question'])
if flipped:
q_tokens = self.flip_tokens(q_tokens, verbose=False)
if not self.test_mode:
answers = idb['answers']
if flipped:
answers_tokens = [a.split(' ') for a in answers]
answers_tokens = [self.flip_tokens(a_toks, verbose=False) for a_toks in answers_tokens]
answers = [' '.join(a_toks) for a_toks in answers_tokens]
label = self.get_soft_target(answers)
# question
q_retokens = q_tokens
q_ids = self.tokenizer.convert_tokens_to_ids(q_retokens)
# commonsense
exp_ids = []
commonsense_embeddings = torch.tensor([0])
if self.experiment_name != '':
# If we use SBERT, add [MASK] tokens to exp_ids, and load the embeddings in commonsense_embeddings
if self.use_sbert:
if self.commonsense_emb_type == 'fusion':
commonsense_embeddings = self.get_cached_expansion_emb(idb['image_fn'].split('/')[-1], idb['question_id'], custom_tag='_ques')
else:
commonsense_embeddings = self.get_cached_expansion_emb(idb['image_fn'].split('/')[-1], idb['question_id'])
if self.commonsense_emb_type == 'fusion':
m_tokens = ['[MASK]']
else:
m_tokens = ['[MASK]']*self.max_commonsense_len
m_ids = self.tokenizer.convert_tokens_to_ids(m_tokens)
exp_ids += m_ids
# If not SBERT, clean the picked expansions and add them to exp_ids
else:
picked_exp = idb['picked_exp']
if isinstance(picked_exp, list):
picked_exp = picked_exp[0]
picked_exp = picked_exp.split('.')
picked_exp = [sentence.strip() for sentence in picked_exp]
picked_exp = [sentence+'.' for sentence in picked_exp if sentence != '']
if len(picked_exp) >= self.max_commonsense_len:
picked_exp = picked_exp[:self.max_commonsense_len]
else:
picked_exp = picked_exp + [''] * (self.max_commonsense_len - len(picked_exp))
picked_exp = ' '.join(picked_exp)
picked_exp_tokens = self.tokenizer.tokenize(picked_exp)
exp_ids += self.tokenizer.convert_tokens_to_ids(picked_exp_tokens)
# concat box feature to box
if self.with_precomputed_visual_feat:
boxes = torch.cat((boxes, boxes_features), dim=-1)
if self.attn_gt is not None:
if str(idb['image_id']) in self.attn_gt and str(idb['question_id']) in self.attn_gt[str(idb['image_id'])]:
attn_weight_label = torch.tensor(self.attn_gt[str(idb['image_id'])][str(idb['question_id'])])
else:
attn_weight_label = torch.zeros(self.max_commonsense_len+1)
label = torch.cat((label, attn_weight_label), dim=0)
if self.test_mode:
return image, boxes, im_info, q_ids, exp_ids, commonsense_embeddings
else:
return image, boxes, im_info, q_ids, exp_ids, commonsense_embeddings, label
@staticmethod
def flip_tokens(tokens, verbose=True):
changed = False
tokens_new = [tok for tok in tokens]
for i, tok in enumerate(tokens):
if tok == 'left':
tokens_new[i] = 'right'
changed = True
elif tok == 'right':
tokens_new[i] = 'left'
changed = True
if verbose and changed:
logging.info('[Tokens Flip] {} -> {}'.format(tokens, tokens_new))
return tokens_new
@staticmethod
def b64_decode(string):
return base64.decodebytes(string.encode())
def answer_to_ind(self, answer):
if answer in self.answer_vocab:
return self.answer_vocab.index(answer)
else:
return self.answer_vocab.index('<unk>')
def get_soft_target(self, answers):
soft_target = torch.zeros(len(self.answer_vocab), dtype=torch.float)
answer_indices = [self.answer_to_ind(answer) for answer in answers]
gt_answers = list(enumerate(answer_indices))
unique_answers = set(answer_indices)
for answer in unique_answers:
accs = []
for gt_answer in gt_answers:
other_answers = [item for item in gt_answers if item != gt_answer]
matching_answers = [item for item in other_answers if item[1] == answer]
acc = min(1, float(len(matching_answers)) / 3)
accs.append(acc)
avg_acc = sum(accs) / len(accs)
if answer != self.answer_vocab.index('<unk>'):
soft_target[answer] = avg_acc
return soft_target
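# Worked example of the soft score above: with 10 annotated answers of which 3 are "dog",
# removing a "dog" annotator leaves 2 matches -> min(1, 2/3) = 2/3 (3 such cases), while removing any
# of the other 7 annotators leaves 3 matches -> min(1, 3/3) = 1.0, so
# soft_target["dog"] = (3 * 2/3 + 7 * 1.0) / 10 = 0.9, matching the standard VQA accuracy metric.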
def processPunctuation(self, inText):
if inText == '<unk>':
return inText
outText = inText
for p in self.punct:
if (p + ' ' in inText or ' ' + p in inText) or (re.search(self.commaStrip, inText) is not None):
outText = outText.replace(p, '')
else:
outText = outText.replace(p, ' ')
outText = self.periodStrip.sub("",
outText,
re.UNICODE)
return outText
def load_annotations(self):
tic = time.time()
database = []
db_cache_name = 'okvqa_boxes{}_{}'.format(self.boxes, '+'.join(self.image_sets))
if self.with_precomputed_visual_feat:
db_cache_name += 'visualprecomp'
if self.zip_mode:
db_cache_name = db_cache_name + '_zipmode'
if self.test_mode:
db_cache_name = db_cache_name + '_testmode'
if self.experiment_name != '':
db_cache_name = db_cache_name + '_' + self.experiment_name
db_cache_root = os.path.join(self.root_path, 'cache')
db_cache_path = os.path.join(db_cache_root, '{}.pkl'.format(db_cache_name))
if os.path.exists(db_cache_path):
if not self.ignore_db_cache:
# reading cached database
print('cached database found in {}.'.format(db_cache_path))
with open(db_cache_path, 'rb') as f:
print('loading cached database from {}...'.format(db_cache_path))
tic = time.time()
database = cPickle.load(f)
print('Done (t={:.2f}s)'.format(time.time() - tic))
return database
else:
print('cached database ignored.')
# ignore or not find cached database, reload it from annotation file
print('loading database of split {}...'.format('+'.join(self.image_sets)))
tic = time.time()
for ann_file, q_file, expansion_file, (coco_path, coco_annot), box_file \
in zip(self.ann_files, self.q_files, self.expansion_files, self.coco_datasets, self.precomputed_box_files):
qs = self._load_json(q_file)['questions']
expansion_data = self._load_json(expansion_file)
anns = self._load_json(ann_file)['annotations'] if not self.test_mode else ([None] * len(qs))
coco = COCO(coco_annot)
for ann, q in zip(anns, qs):
idb = {'image_id': q['image_id'],
'image_fn': coco_path.format(q['image_id']),
'width': coco.imgs[q['image_id']]['width'],
'height': coco.imgs[q['image_id']]['height'],
'box_fn': os.path.join(box_file, '{}.json'.format(q['image_id'])),
'question_id': q['question_id'],
'question': q['question'],
"picked_exp": expansion_data[str(coco_path.format(q['image_id']).split('/')[-1])][str(q['question_id'])] if (self.experiment_name != '') else None,
'answers': [a['answer'] for a in ann['answers']] if not self.test_mode else None,
"question_type": ann['question_type'] if not self.test_mode else None,
"answer_type": ann['answer_type'] if not self.test_mode else None,
}
database.append(idb)
print('Done (t={:.2f}s)'.format(time.time() - tic))
# cache database via cPickle
if self.cache_db:
print('caching database to {}...'.format(db_cache_path))
tic = time.time()
if not os.path.exists(db_cache_root):
makedirsExist(db_cache_root)
with open(db_cache_path, 'wb') as f:
cPickle.dump(database, f)
print('Done (t={:.2f}s)'.format(time.time() - tic))
return database
@staticmethod
def group_aspect(database):
print('grouping aspect...')
t = time.time()
# get shape of all images
widths = torch.as_tensor([idb['width'] for idb in database])
heights = torch.as_tensor([idb['height'] for idb in database])
# group
group_ids = torch.zeros(len(database))
horz = widths >= heights
vert = ~horz # logical negation of the boolean mask ('-' is not supported on bool tensors)
group_ids[horz] = 0
group_ids[vert] = 1
print('Done (t={:.2f}s)'.format(time.time() - t))
return group_ids
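# The resulting 0/1 ids (landscape vs. portrait) are intended for a grouped batch sampler
# (e.g. GroupedBatchSampler in okvqa/data/samplers), so that each mini-batch contains images with
# similar aspect ratios and padding is kept small.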
def load_precomputed_boxes(self, box_file):
if box_file in self.box_bank:
return self.box_bank[box_file]
else:
in_data = {}
with open(box_file, "r") as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames=FIELDNAMES)
for item in reader:
item['image_id'] = int(item['image_id'])
item['image_h'] = int(item['image_h'])
item['image_w'] = int(item['image_w'])
item['num_boxes'] = int(item['num_boxes'])
for field in (['boxes', 'features'] if self.with_precomputed_visual_feat else ['boxes']):
item[field] = np.frombuffer(base64.decodebytes(item[field].encode()),
dtype=np.float32).reshape((item['num_boxes'], -1))
in_data[item['image_id']] = item
self.box_bank[box_file] = in_data
return in_data
def get_cached_expansion_emb(self, image_id, question_id, custom_tag=''):
subsets = []
if self.image_sets[0] == 'train2014':
subsets.append('train2014')
if self.image_sets[0] == 'val2014':
subsets.append('val2014')
if self.image_sets[0] == 'trainval2014':
subsets.append('train2014')
commonsense_embeddings = None
for subset in subsets:
savepath = 'data/coco/sbert/okvqa/'+self.experiment_name+'/'+str(self.max_commonsense_len)+custom_tag+'/'+subset
image_id = str(image_id)
question_id = str(question_id)
if not os.path.exists(savepath+'/'+image_id+'.pkl'):
continue
with open(savepath+'/'+image_id+'.pkl', 'rb') as handle:
unserialized_data = pickle.load(handle)
commonsense_embeddings = torch.tensor(unserialized_data[question_id])
assert commonsense_embeddings is not None, 'No expansion embedding found at {}'.format(savepath+'/'+image_id+'.pkl')
return commonsense_embeddings
def __len__(self):
return len(self.database)
def _load_image(self, path):
if '.zip@' in path:
return self.zipreader.imread(path).convert('RGB')
else:
return Image.open(path).convert('RGB')
def _load_json(self, path):
if path is None:
return None
elif '.zip@' in path:
f = self.zipreader.read(path)
return json.loads(f.decode())
else:
with open(path, 'r') as f:
return json.load(f)
| 22,450 | 42.935421 | 171 | py |
VLC-BERT | VLC-BERT-master/okvqa/data/samplers/grouped_batch_sampler.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import itertools
import torch
from torch.utils.data.sampler import BatchSampler
from torch.utils.data.sampler import Sampler
class GroupedBatchSampler(BatchSampler):
"""
Wraps another sampler to yield a mini-batch of indices.
It enforces that elements from the same group should appear in groups of batch_size.
It also tries to provide mini-batches which follows an ordering which is
as close as possible to the ordering from the original sampler.
Arguments:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_uneven (bool): If ``True``, the sampler will drop the batches whose
size is less than ``batch_size``
"""
def __init__(self, sampler, group_ids, batch_size, drop_uneven=False):
if not isinstance(sampler, Sampler):
raise ValueError(
"sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}".format(sampler)
)
self.sampler = sampler
self.group_ids = torch.as_tensor(group_ids)
assert self.group_ids.dim() == 1
self.batch_size = batch_size
self.drop_uneven = drop_uneven
self.groups = torch.unique(self.group_ids).sort(0)[0]
self._can_reuse_batches = False
def _prepare_batches(self):
dataset_size = len(self.group_ids)
# get the sampled indices from the sampler
sampled_ids = torch.as_tensor(list(self.sampler))
# potentially not all elements of the dataset were sampled
# by the sampler (e.g., DistributedSampler).
# construct a tensor which contains -1 if the element was
# not sampled, and a non-negative number indicating the
# order where the element was sampled.
# for example, if sampled_ids = [3, 1] and dataset_size = 5,
# the order is [-1, 1, -1, 0, -1]
order = torch.full((dataset_size,), -1, dtype=torch.int64)
order[sampled_ids] = torch.arange(len(sampled_ids))
# get a mask with the elements that were sampled
mask = order >= 0
# find the elements that belong to each individual cluster
clusters = [(self.group_ids == i) & mask for i in self.groups]
# get relative order of the elements inside each cluster
# that follows the order from the sampler
relative_order = [order[cluster] for cluster in clusters]
# with the relative order, find the absolute order in the
# sampled space
permutation_ids = [s[s.sort()[1]] for s in relative_order]
# permute each cluster so that they follow the order from
# the sampler
permuted_clusters = [sampled_ids[idx] for idx in permutation_ids]
# splits each cluster in batch_size, and merge as a list of tensors
splits = [c.split(self.batch_size) for c in permuted_clusters]
merged = tuple(itertools.chain.from_iterable(splits))
# now each batch internally has the right order, but
# they are grouped by clusters. Find the permutation between
# different batches that brings them as close as possible to
# the order that we have in the sampler. For that, we will consider the
# ordering as coming from the first element of each batch, and sort
# correspondingly
first_element_of_batch = [t[0].item() for t in merged]
# get an inverse mapping from sampled indices to the position where
# they occur (as returned by the sampler)
inv_sampled_ids_map = {v: k for k, v in enumerate(sampled_ids.tolist())}
# from the first element in each batch, get a relative ordering
first_index_of_batch = torch.as_tensor(
[inv_sampled_ids_map[s] for s in first_element_of_batch]
)
# permute the batches so that they approximately follow the order
# from the sampler
permutation_order = first_index_of_batch.sort(0)[1].tolist()
# finally, permute the batches
batches = [merged[i].tolist() for i in permutation_order]
if self.drop_uneven:
kept = []
for batch in batches:
if len(batch) == self.batch_size:
kept.append(batch)
batches = kept
return batches
def __iter__(self):
if self._can_reuse_batches:
batches = self._batches
self._can_reuse_batches = False
else:
batches = self._prepare_batches()
self._batches = batches
return iter(batches)
def __len__(self):
if not hasattr(self, "_batches"):
self._batches = self._prepare_batches()
self._can_reuse_batches = True
return len(self._batches)
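# Illustrative usage (hypothetical wiring; the actual construction lives in the data-loader code):
# sampler = torch.utils.data.RandomSampler(dataset)
# batch_sampler = GroupedBatchSampler(sampler, dataset.group_ids, batch_size=8, drop_uneven=False)
# loader = torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler)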
| 4,846 | 40.42735 | 88 | py |
VLC-BERT | VLC-BERT-master/okvqa/data/samplers/distributed.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Code is copy-pasted exactly as in torch.utils.data.distributed.
# FIXME remove this once c10d fixes the bug it has
import math
import torch
import torch.distributed as dist
from torch.utils.data.sampler import Sampler
class DistributedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
"""
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
def __iter__(self):
if self.shuffle:
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
indices += indices[: (self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
offset = self.num_samples * self.rank
indices = indices[offset : offset + self.num_samples]
assert len(indices) == self.num_samples
return iter(indices)
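# Example of the logic above: with len(dataset) = 10 and num_replicas = 4, num_samples = ceil(10/4) = 3
# and total_size = 12, so the first two shuffled indices are appended again to reach 12 entries;
# rank r then keeps indices[3*r : 3*r + 3], giving every process the same number of samples per epoch.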
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
| 2,568 | 37.924242 | 86 | py |
VLC-BERT | VLC-BERT-master/okvqa/data/transforms/transforms.py | import random
import numpy as np
import torch
import torchvision
from torchvision.transforms import functional as F
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, boxes, masks, im_info, flipped):
for t in self.transforms:
image, boxes, masks, im_info, flipped = t(image, boxes, masks, im_info, flipped)
return image, boxes, masks, im_info, flipped
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
class Resize(object):
def __init__(self, min_size, max_size):
self.min_size = min_size
self.max_size = max_size
# modified from torchvision to add support for max size
def get_size(self, image_size):
w, h = image_size
size = self.min_size
max_size = self.max_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(max_size * min_original_size / max_original_size)
if (w <= h and w == size) or (h <= w and h == size):
return (w, h)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (ow, oh)
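# Example: with min_size = 600 and max_size = 1000, a 400x1200 image would naively scale to 600x1800;
# because 1800 > 1000 the target short side is reduced to int(1000 * 400 / 1200) = 333, so the
# returned size is (333, 999): the aspect ratio is preserved and the long side stays within bounds.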
def __call__(self, image, boxes, masks, im_info, flipped):
origin_size = im_info[:2]
size = self.get_size(origin_size)
if image is not None:
image = F.resize(image, (size[1], size[0]))
ratios = [size[0] * 1.0 / origin_size[0], size[1] * 1.0 / origin_size[1]]
if boxes is not None:
boxes[:, [0, 2]] *= ratios[0]
boxes[:, [1, 3]] *= ratios[1]
im_info[0], im_info[1] = size
im_info[2], im_info[3] = ratios
return image, boxes, masks, im_info, flipped
class RandomHorizontalFlip(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, boxes, masks, im_info, flipped):
if random.random() < self.prob:
w, h = im_info[:2]
if image is not None:
image = F.hflip(image)
if boxes is not None:
boxes[:, [0, 2]] = w - 1 - boxes[:, [2, 0]]
if masks is not None:
masks = torch.as_tensor(masks.numpy()[:, :, ::-1].tolist())
flipped = not flipped
return image, boxes, masks, im_info, flipped
class ToTensor(object):
def __call__(self, image, boxes, masks, im_info, flipped):
return F.to_tensor(image) if image is not None else image, boxes, masks, im_info, flipped
class Normalize(object):
def __init__(self, mean, std, to_bgr255=True):
self.mean = mean
self.std = std
self.to_bgr255 = to_bgr255
def __call__(self, image, boxes, masks, im_info, flipped):
if image is not None:
if self.to_bgr255:
image = image[[2, 1, 0]] * 255
image = F.normalize(image, mean=self.mean, std=self.std)
return image, boxes, masks, im_info, flipped
class FixPadding(object):
def __init__(self, min_size, max_size, pad=0):
self.min_size = min_size
self.max_size = max_size
self.pad = pad
def __call__(self, image, boxes, masks, im_info, flipped):
if image is not None:
# padding to fixed size for determinacy
c, h, w = image.shape
if h <= w:
h1 = self.min_size
w1 = self.max_size
else:
h1 = self.max_size
w1 = self.min_size
padded_image = image.new_zeros((c, h1, w1)).fill_(self.pad)
padded_image[:, :h, :w] = image
image = padded_image
return image, boxes, masks, im_info, flipped
| 4,104 | 30.821705 | 97 | py |
HVAE | HVAE-master/setup.py | from distutils.core import setup
from setuptools import dist
dist.Distribution().fetch_build_eggs(['Cython', 'numpy<=1.19'])
import numpy
from Cython.Build import cythonize
required = [
"cython",
"numpy",
"torch",
"editdistance",
"scikit-learn",
"tqdm",
"pymoo"
]
setup(name='HVAE',
version='0.1',
description='Hierarchical Variational Autoencoder',
author='smeznar',
packages=['src'],
setup_requires=["numpy", "Cython"],
ext_modules=cythonize("src/cyfunc.pyx"),
include_dirs=[numpy.get_include()],
install_requires=required)
| 606 | 20.678571 | 63 | py |
HVAE | HVAE-master/src/symbolic_regression.py | import argparse
import json
import random
import time
import numpy as np
import torch
from pymoo.algorithms.soo.nonconvex.ga import GA
from pymoo.optimize import minimize
from pymoo.core.problem import ElementwiseProblem
from pymoo.core.sampling import Sampling
from pymoo.core.crossover import Crossover
from pymoo.core.mutation import Mutation
from pymoo.core.termination import Termination
from pymoo.termination.max_gen import MaximumGenerationTermination
from symbol_library import generate_symbol_library
from model import HVAE
from evaluation import RustEval
def read_eq_data(filename):
train = []
with open(filename, "r") as file:
for line in file:
train.append([float(v) for v in line.strip().split(",")])
return np.array(train)
def eval_vector(l, model, eval_obj):
try:
tree = model.decode(l)
error = eval_obj.get_error(tree.to_list(notation="postfix"))
if error is None:
error = 1e10
# else:
# error = np.sqrt(np.square(np.subtract(eval_obj.data[:, -1], y_hat)).mean())
except:
print("Recursion limit")
return 1e10, "", []
return error, str(tree), []
# return error, str(tree), constants
class SRProblem(ElementwiseProblem):
def __init__(self, model, eval_object, dim):
self.model = model
self.eval_object = eval_object
self.input_mean = torch.zeros(next(model.decoder.parameters()).size(0))
self.best_f = 9e+50
self.best_expr = None
self.models = dict()
super().__init__(n_var=dim, n_obj=1)
def _evaluate(self, x, out, *args, **kwargs):
error, expr, constants = eval_vector(torch.tensor(x[None, None, :]), self.model, self.eval_object)
if expr in self.models:
self.models[expr]["trees"] += 1
else:
constants = [float(c) for c in constants]
self.models[expr] = {"expr": expr, "error": error, "trees": 1, "const": constants}
if error < self.best_f:
self.best_f = error
self.best_expr = self.models[expr]
print(f"New best expression: {expr}, with constants [{','.join([str(c) for c in constants])}]")
out["F"] = error
class TorchNormalSampling(Sampling):
def _do(self, problem, n_samples, **kwargs):
return [torch.normal(problem.input_mean).numpy() for _ in range(n_samples)]
class BestTermination(Termination):
def __init__(self, min_f=1e-10, n_max_gen=500) -> None:
super().__init__()
self.min_f = min_f
self.max_gen = MaximumGenerationTermination(n_max_gen)
def _update(self, algorithm):
if algorithm.problem.best_f < self.min_f:
self.terminate()
return self.max_gen.update(algorithm)
class LICrossover(Crossover):
def __init__(self):
super().__init__(2, 1)
def _do(self, problem, X, **kwargs):
weights = np.random.random(X.shape[1])
return (X[0, :]*weights[:, None] + X[1, :]*(1-weights[:, None]))[None, :, :]
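# This crossover is a linear interpolation in latent space: one weight w in [0, 1) is drawn per mating
# pair and the single offspring is w * parent_a + (1 - w) * parent_b, i.e. a point on the segment
# between the two parent encodings (w = 0.5 gives their midpoint).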
class RandomMutation(Mutation):
def __init__(self):
super().__init__()
def _do(self, problem, X, **kwargs):
new = []
for i in range(X.shape[0]):
eq = problem.model.decode(torch.tensor(X[i, :])[None, None, :])
var = problem.model.encode(eq)[1][0, 0].detach().numpy()
mutation_scale = np.random.random()
std = mutation_scale * (np.exp(var / 2.0) - 1) + 1
new.append(torch.normal(torch.tensor(mutation_scale*X[i]), std=torch.tensor(std)).numpy())
return np.array(new, dtype=np.float32)
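# The mutation decodes each latent vector back to an expression, re-encodes it to obtain the encoder's
# log-variance, and then samples a new vector from a normal distribution centred on a randomly
# down-scaled copy of the original, with a standard deviation interpolated (by the same random factor)
# between 1 and the standard deviation predicted by the encoder.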
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='Nguyen benchmark', description='Run an ED benchmark')
parser.add_argument("-dataset", required=True)
parser.add_argument("-baseline", choices=['HVAE_evo'], required=True)
parser.add_argument("-symbols", nargs="+", required=True)
parser.add_argument("-num_vars", default=2, type=int)
parser.add_argument("-has_const", action="store_true")
parser.add_argument("-latent", default=32, type=int)
parser.add_argument("-params", required=True)
parser.add_argument("-success_threshold", default=1e-8)
parser.add_argument("-seed", type=int)
args = parser.parse_args()
# -----------------------------------------------------------------------------------------------------------------
#
# WORK IN PROGRESS, USE SR SCRIPTS FROM ProGED
# (https://github.com/smeznar/ProGED/blob/main/ProGED/examples/ng_bench.py)
# TO EVALUATE THE RESULTS
#
# -----------------------------------------------------------------------------------------------------------------
if args.seed is not None:
np.random.seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
# Read data
train = read_eq_data(args.dataset)
symbols = generate_symbol_library(args.num_vars, args.symbols, args.has_const)
input_dim = len(symbols)
HVAE.add_symbols(symbols)
model = torch.load(args.params)
# fe = FastEval(train, args.num_vars, symbols, has_const=args.has_const)
fe = RustEval(train)
if args.baseline == "HVAE_evo":
ga = GA(pop_size=200, sampling=TorchNormalSampling(), crossover=LICrossover(), mutation=RandomMutation(),
eliminate_duplicates=False)
problem = SRProblem(model, fe, args.latent)
res = minimize(problem, ga, BestTermination(min_f=args.success_threshold), verbose=True)
with open(f"../results/nguyen/{args.dataset.strip().split('/')[-1]}_{time.time()}.json", "w") as file:
json.dump({"best": problem.best_expr, "all": list(problem.models.values())}, file)
# if args.baseline == "HVAE_random":
# fe = FastEval(train, args.num_vars, symbols, has_const=args.has_const)
# generator = GeneratorHVAE(args.params, ["X"], universal_symbols)
# ed = EqDisco(data=train, variable_names=["X", 'Y'], generator=generator, sample_size=100000, verbosity=0)
# ed.generate_models()
# ed.fit_models()
# print(len(ed.models))
# print(ed.get_results())
# ed.write_results(f"results/hvae_random_{args.dimension}/nguyen_{args.eq_num}_{np.random.randint(0, 1000000)}.json")
| 6,382 | 37.920732 | 125 | py |
HVAE | HVAE-master/src/batch_model.py | import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
from tree import Node, BatchedNode
from symbol_library import SymType
class HVAE(nn.Module):
_symbols = None
def __init__(self, input_size, output_size, hidden_size=None):
super(HVAE, self).__init__()
if hidden_size is None:
hidden_size = output_size
self.encoder = Encoder(input_size, hidden_size, output_size)
self.decoder = Decoder(output_size, hidden_size, input_size)
def forward(self, tree):
mu, logvar = self.encoder(tree)
z = self.sample(mu, logvar)
out = self.decoder(z, tree)
return mu, logvar, out
def sample(self, mu, logvar):
eps = Variable(torch.randn(mu.size()))
std = torch.exp(logvar / 2.0)
return mu + eps * std
def encode(self, tree):
mu, logvar = self.encoder(tree)
return mu, logvar
def decode(self, z):
if HVAE._symbols is None:
raise Exception("To generate expression trees, a symbol library is needed. Please add it using the"
" HVAE.add_symbols method.")
return self.decoder.decode(z, HVAE._symbols)
@staticmethod
def add_symbols(symbols):
HVAE._symbols = symbols
Node.add_symbols(symbols)
BatchedNode.add_symbols(symbols)
class Encoder(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(Encoder, self).__init__()
self.hidden_size = hidden_size
self.gru = GRU221(input_size=input_size, hidden_size=hidden_size)
self.mu = nn.Linear(in_features=hidden_size, out_features=output_size)
self.logvar = nn.Linear(in_features=hidden_size, out_features=output_size)
torch.nn.init.xavier_uniform_(self.mu.weight)
torch.nn.init.xavier_uniform_(self.logvar.weight)
def forward(self, tree):
# Check if the tree has target vectors
if tree.target is None:
tree.add_target_vectors()
tree_encoding = self.recursive_forward(tree)
mu = self.mu(tree_encoding)
logvar = self.logvar(tree_encoding)
return mu, logvar
def recursive_forward(self, tree):
left = self.recursive_forward(tree.left) if tree.left is not None \
else torch.zeros(tree.target.size(0), 1, self.hidden_size)
right = self.recursive_forward(tree.right) if tree.right is not None \
else torch.zeros(tree.target.size(0), 1, self.hidden_size)
# left = left.mul(tree.mask[:, None, None])
# right = right.mul(tree.mask[:, None, None])
hidden = self.gru(tree.target, left, right)
hidden = hidden.mul(tree.mask[:, None, None])
return hidden
class Decoder(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(Decoder, self).__init__()
self.hidden_size = hidden_size
self.z2h = nn.Linear(input_size, hidden_size)
self.h2o = nn.Linear(hidden_size, output_size)
self.gru = GRU122(input_size=output_size, hidden_size=hidden_size)
torch.nn.init.xavier_uniform_(self.z2h.weight)
torch.nn.init.xavier_uniform_(self.h2o.weight)
# Used during training to guide the learning process
def forward(self, z, tree):
hidden = self.z2h(z)
self.recursive_forward(hidden, tree)
return tree
def recursive_forward(self, hidden, tree):
prediction = self.h2o(hidden)
symbol_probs = F.softmax(prediction, dim=2)
tree.prediction = prediction
if tree.left is not None or tree.right is not None:
left, right = self.gru(symbol_probs, hidden)
if tree.left is not None:
self.recursive_forward(left, tree.left)
if tree.right is not None:
self.recursive_forward(right, tree.right)
# Used for inference to generate expression trees from latent vectors
def decode(self, z, symbol_dict):
with torch.no_grad():
mask = torch.ones(z.size(0)).bool()
hidden = self.z2h(z)
batch = self.recursive_decode(hidden, symbol_dict, mask)
return batch.to_expr_list()
def recursive_decode(self, hidden, symbol_dict, mask):
prediction = F.softmax(self.h2o(hidden), dim=2)
# Sample symbol in a given node
symbols, left_mask, right_mask = self.sample_symbol(prediction, symbol_dict, mask)
left, right = self.gru(prediction, hidden)
if torch.any(left_mask):
l_tree = self.recursive_decode(left, symbol_dict, left_mask)
else:
l_tree = None
if torch.any(right_mask):
r_tree = self.recursive_decode(right, symbol_dict, right_mask)
else:
r_tree = None
node = BatchedNode()
node.symbols = symbols
node.left = l_tree
node.right = r_tree
return node
def sample_symbol(self, prediction, symbol_dict, mask):
sampled = F.softmax(prediction, dim=2)
# Select the symbol with the highest value ("probability")
symbols = []
left_mask = torch.clone(mask)
right_mask = torch.clone(mask)
for i in range(sampled.size(0)):
if mask[i]:
symbol = symbol_dict[torch.argmax(sampled[i, 0, :])]
symbols.append(symbol["symbol"])
if symbol["type"].value is SymType.Fun.value:
right_mask[i] = False
if symbol["type"].value is SymType.Var.value or symbol["type"].value is SymType.Const.value:
left_mask[i] = False
right_mask[i] = False
else:
symbols.append("")
return symbols, left_mask, right_mask
class GRU221(nn.Module):
def __init__(self, input_size, hidden_size):
super(GRU221, self).__init__()
self.wir = nn.Linear(in_features=input_size, out_features=hidden_size)
self.whr = nn.Linear(in_features=2*hidden_size, out_features=hidden_size)
self.wiz = nn.Linear(in_features=input_size, out_features=hidden_size)
self.whz = nn.Linear(in_features=2 * hidden_size, out_features=hidden_size)
self.win = nn.Linear(in_features=input_size, out_features=hidden_size)
self.whn = nn.Linear(in_features=2 * hidden_size, out_features=hidden_size)
torch.nn.init.xavier_uniform_(self.wir.weight)
torch.nn.init.xavier_uniform_(self.whr.weight)
torch.nn.init.xavier_uniform_(self.wiz.weight)
torch.nn.init.xavier_uniform_(self.whz.weight)
torch.nn.init.xavier_uniform_(self.win.weight)
torch.nn.init.xavier_uniform_(self.whn.weight)
def forward(self, x, h1, h2):
h = torch.cat([h1, h2], dim=2)
r = torch.sigmoid(self.wir(x) + self.whr(h))
z = torch.sigmoid(self.wiz(x) + self.whz(h))
n = torch.tanh(self.win(x) + r * self.whn(h))
return (1 - z) * n + (z / 2) * h1 + (z / 2) * h2
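# GRU221 merges the two child hidden states (h1, h2) into one parent state. With h = [h1; h2]:
#   r = sigmoid(W_ir x + W_hr h)    (reset gate)
#   z = sigmoid(W_iz x + W_hz h)    (update gate)
#   n = tanh(W_in x + r * W_hn h)   (candidate state)
#   h_out = (1 - z) * n + (z / 2) * h1 + (z / 2) * h2
# so the update gate interpolates between the candidate and the average of the two child states.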
class GRU122(nn.Module):
def __init__(self, input_size, hidden_size):
super(GRU122, self).__init__()
self.hidden_size = hidden_size
self.wir = nn.Linear(in_features=input_size, out_features=2*hidden_size)
self.whr = nn.Linear(in_features=hidden_size, out_features=2*hidden_size)
self.wiz = nn.Linear(in_features=input_size, out_features=2*hidden_size)
self.whz = nn.Linear(in_features=hidden_size, out_features=2*hidden_size)
self.win = nn.Linear(in_features=input_size, out_features=2*hidden_size)
self.whn = nn.Linear(in_features=hidden_size, out_features=2*hidden_size)
torch.nn.init.xavier_uniform_(self.wir.weight)
torch.nn.init.xavier_uniform_(self.whr.weight)
torch.nn.init.xavier_uniform_(self.wiz.weight)
torch.nn.init.xavier_uniform_(self.whz.weight)
torch.nn.init.xavier_uniform_(self.win.weight)
torch.nn.init.xavier_uniform_(self.whn.weight)
def forward(self, x, h):
r = torch.sigmoid(self.wir(x) + self.whr(h))
z = torch.sigmoid(self.wiz(x) + self.whz(h))
n = torch.tanh(self.win(x) + r * self.whn(h))
dh = h.repeat(1, 1, 2)
out = (1 - z) * n + z * dh
return torch.split(out, self.hidden_size, dim=2)
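# GRU122 is the decoder-side counterpart: from one parent state h it produces the hidden states of
# both children by computing gates of width 2 * hidden_size, mixing them with h repeated twice (dh),
# and splitting the result in half so each child receives a gated copy of the parent state.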
| 8,343 | 38.545024 | 111 | py |
HVAE | HVAE-master/src/batch_train.py | from argparse import ArgumentParser
import numpy as np
import torch
from torch.utils.data import Sampler, Dataset, DataLoader
from tqdm import tqdm
# from utils import tokens_to_tree, read_expressions
from utils import read_expressions_json
from batch_model import HVAE
from symbol_library import generate_symbol_library
from tree import BatchedNode
def collate_fn(batch):
return batch
class TreeSampler(Sampler):
def __init__(self, batch_size, num_eq):
self.batch_size = batch_size
self.num_eq = num_eq
def __iter__(self):
for i in range(len(self)):
batch = np.random.randint(low=0, high=self.num_eq, size=self.batch_size)
yield batch
def __len__(self):
return self.num_eq // self.batch_size
class TreeBatchSampler(Sampler):
def __init__(self, batch_size, num_eq):
self.batch_size = batch_size
self.num_eq = num_eq
self.permute = np.random.permutation(self.num_eq)
def __iter__(self):
for i in range(len(self)):
batch = self.permute[(i * self.batch_size):((i + 1) * self.batch_size)]
yield batch
def __len__(self):
return self.num_eq // self.batch_size
class TreeDataset(Dataset):
def __init__(self, train):
self.train = train
def __getitem__(self, idx):
return self.train[idx]
def __len__(self):
return len(self.train)
def create_batch(trees):
t = BatchedNode(trees=trees)
t.create_target()
return t
def logistic_function(iter, total_iters, supremum=0.045):
x = iter/total_iters
return supremum/(1+50*np.exp(-10*x))
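# KL-weight schedule: with x = iter / total_iters the weight is supremum / (1 + 50 * exp(-10 * x)),
# i.e. roughly 0.00088 at the start of training, about 0.034 halfway through, and close to the
# supremum of 0.045 by the final iterations (for the default supremum).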
def train_hvae(model, trees, epochs=20, batch_size=32, verbose=True):
dataset = TreeDataset(trees)
optimizer = torch.optim.Adam(model.parameters())
criterion = torch.nn.CrossEntropyLoss(ignore_index=-1, reduction="mean")
iter_counter = 0
total_iters = epochs*(len(dataset)//batch_size)
lmbda = logistic_function(iter_counter, total_iters)
midpoint = len(dataset) // (2 * batch_size)
for epoch in range(epochs):
sampler = TreeBatchSampler(batch_size, len(dataset))
bce, kl, total, num_iters = 0, 0, 0, 0
with tqdm(total=len(dataset), desc=f'Training - Epoch: {epoch + 1}/{epochs}', unit='chunks') as prog_bar:
for i, tree_ids in enumerate(sampler):
batch = create_batch([dataset[j] for j in tree_ids])
mu, logvar, outputs = model(batch)
loss, bcel, kll = outputs.loss(mu, logvar, lmbda, criterion)
bce += bcel.detach().item()
kl += kll.detach().item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
num_iters += 1
prog_bar.set_postfix(**{'run:': "HVAE",
'loss': (bce+kl) / num_iters,
'BCE': bce / num_iters,
'KLD': kl / num_iters})
prog_bar.update(batch_size)
lmbda = logistic_function(iter_counter, total_iters)
iter_counter += 1
if verbose and i == midpoint:
original_trees = batch.to_expr_list()
z = model.encode(batch)[0]
decoded_trees = model.decode(z)
for i in range(10):
print("--------------------")
print(f"O: {original_trees[i]}")
print(f"P: {decoded_trees[i]}")
a = 0
if __name__ == '__main__':
parser = ArgumentParser(prog='Train HVAE', description='A script for training the HVAE model.')
parser.add_argument("-expressions", required=True)
parser.add_argument("-symbols", nargs="+", required=True)
parser.add_argument("-batch", default=32, type=int)
parser.add_argument("-num_vars", default=2, type=int)
parser.add_argument("-has_const", action="store_true")
parser.add_argument("-latent_size", default=32, type=int)
parser.add_argument("-epochs", default=20, type=int)
parser.add_argument("-param_path", default="")
parser.add_argument("-annealing_iters", default=3000, type=int)
parser.add_argument("-verbose", action="store_true")
parser.add_argument("-seed", type=int)
args = parser.parse_args()
if args.seed is not None:
np.random.seed(args.seed)
torch.manual_seed(args.seed)
symbols = generate_symbol_library(args.num_vars, args.symbols, args.has_const)
HVAE.add_symbols(symbols)
s2t = {s["symbol"]: s for s in symbols}
trees = read_expressions_json(args.expressions)
model = HVAE(len(symbols), args.latent_size)
train_hvae(model, trees, args.epochs, args.batch, args.verbose)
if args.param_path != "":
torch.save(model, args.param_path)
| 4,883 | 31.778523 | 112 | py |
HVAE | HVAE-master/src/tree.py | import torch
from torch.autograd import Variable
class Node:
_symbols = None
_s2c = None
def __init__(self, symbol=None, right=None, left=None):
self.symbol = symbol
self.right = right
self.left = left
self.target = None
self.prediction = None
def __str__(self):
return "".join(self.to_list())
def __len__(self):
left = len(self.left) if self.left is not None else 0
right = len(self.right) if self.right is not None else 0
return 1 + left + right
def height(self):
hl = self.left.height() if self.left is not None else 0
hr = self.right.height() if self.right is not None else 0
return max(hl, hr) + 1
def to_list(self, notation="infix"):
if notation == "infix" and Node._symbols is None:
raise Exception("To generate a list of token in the infix notation, symbol library is needed. Please use"
" the Node.add_symbols methods to add them, before using the to_list method.")
left = [] if self.left is None else self.left.to_list(notation)
right = [] if self.right is None else self.right.to_list(notation)
if notation == "prefix":
return [self.symbol] + left + right
elif notation == "postfix":
return left + right + [self.symbol]
elif notation == "infix":
if len(left) > 0 and len(right) == 0 and Node.symbol_precedence(self.symbol) > 0:
return [self.symbol] + ["("] + left + [")"]
elif len(left) > 0 >= Node.symbol_precedence(self.symbol) and len(right) == 0:
return ["("] + left + [")"] + [self.symbol]
if self.left is not None \
and -1 < Node.symbol_precedence(self.left.symbol) < Node.symbol_precedence(self.symbol):
left = ["("] + left + [")"]
if self.right is not None \
and -1 < Node.symbol_precedence(self.right.symbol) < Node.symbol_precedence(self.symbol):
right = ["("] + right + [")"]
return left + [self.symbol] + right
else:
raise Exception("Invalid notation selected. Use 'infix', 'prefix', 'postfix'.")
def to_pexpr(self):
if Node._symbols is None:
raise Exception("To generate a pexpr, symbol library is needed. Please use"
" the Node.add_symbols methods to add them, before using the to_list method.")
left = [] if self.left is None else self.left.to_pexpr()
right = [] if self.right is None else self.right.to_pexpr()
return [Node._symbols[Node._s2c[self.symbol]]["psymbol"]] + left + right
def add_target_vectors(self):
if Node._symbols is None:
raise Exception("Encoding needs a symbol library to create target vectors. Please use Node.add_symbols"
" method to add a symbol library to trees before encoding.")
target = torch.zeros(len(Node._symbols)).float()
target[Node._s2c[self.symbol]] = 1.0
self.target = Variable(target[None, None, :])
if self.left is not None:
self.left.add_target_vectors()
if self.right is not None:
self.right.add_target_vectors()
def loss(self, mu, logvar, lmbda, criterion):
pred = Node.to_matrix(self, "prediction")
target = Node.to_matrix(self, "target")
BCE = criterion(pred, target)
KLD = (lmbda * -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()))
return BCE + KLD, BCE, KLD
def clear_prediction(self):
if self.left is not None:
self.left.clear_prediction()
if self.right is not None:
self.right.clear_prediction()
self.prediction = None
def to_dict(self):
d = {'s': self.symbol}
if self.left is not None:
d['l'] = self.left.to_dict()
if self.right is not None:
d['r'] = self.right.to_dict()
return d
@staticmethod
def from_dict(d):
left = None
right = None
if "l" in d:
left = Node.from_dict(d["l"])
if 'r' in d:
right = Node.from_dict(d["r"])
return Node(d["s"], right=right, left=left)
@staticmethod
def symbol_precedence(symbol):
return Node._symbols[Node._s2c[symbol]]["precedence"]
@staticmethod
def to_matrix(tree, matrix_type="prediction"):
reps = []
if tree.left is not None:
reps.append(Node.to_matrix(tree.left, matrix_type))
if matrix_type == "target":
reps.append(torch.Tensor([Node._s2c[tree.symbol]]).long())
else:
reps.append(tree.prediction[0, :, :])
if tree.right is not None:
reps.append(Node.to_matrix(tree.right, matrix_type))
return torch.cat(reps)
@staticmethod
def add_symbols(symbols):
Node._symbols = symbols
Node._s2c = {s["symbol"]: i for i, s in enumerate(symbols)}
class BatchedNode():
_symbols = None
_s2c = None
def __init__(self, size=0, trees=None):
self.symbols = ["" for _ in range(size)]
self.left = None
self.right = None
if trees is not None:
for tree in trees:
self.add_tree(tree)
@staticmethod
def add_symbols(symbols):
BatchedNode._symbols = symbols
BatchedNode._s2c = {s["symbol"]: i for i, s in enumerate(symbols)}
def add_tree(self, tree=None):
if tree is None:
self.symbols.append("")
if self.left is not None:
self.left.add_tree()
if self.right is not None:
self.right.add_tree()
else:
self.symbols.append(tree.symbol)
if self.left is not None and tree.left is not None:
self.left.add_tree(tree.left)
elif self.left is not None:
self.left.add_tree()
elif tree.left is not None:
self.left = BatchedNode(size=len(self.symbols)-1)
self.left.add_tree(tree.left)
if self.right is not None and tree.right is not None:
self.right.add_tree(tree.right)
elif self.right is not None:
self.right.add_tree()
elif tree.right is not None:
self.right = BatchedNode(size=len(self.symbols)-1)
self.right.add_tree(tree.right)
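# add_tree superimposes one expression tree onto the batch: slot i of self.symbols holds the symbol of
# tree i at this position (or "" if tree i has no node here), and child BatchedNodes are created lazily,
# back-filled with "" so that every node keeps exactly one slot per tree in the batch.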
def loss(self, mu, logvar, lmbda, criterion):
pred = BatchedNode.get_prediction(self)
pred = torch.permute(pred, [0, 2, 1])
target = BatchedNode.get_target(self)
BCE = criterion(pred, target)
KLD = (lmbda * -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()))/mu.size(0)
return BCE + KLD, BCE, KLD
def create_target(self):
if BatchedNode._symbols is None:
raise Exception("Encoding needs a symbol library to create target vectors. Please use"
" BatchedNode.add_symbols method to add a symbol library to trees before encoding.")
target = torch.zeros((len(self.symbols), 1, len(Node._symbols)))
mask = torch.ones(len(self.symbols))
for i, s in enumerate(self.symbols):
if s == "":
mask[i] = 0
else:
target[i, 0, Node._s2c[s]] = 1
self.mask = mask
self.target = Variable(target)
if self.left is not None:
self.left.create_target()
if self.right is not None:
self.right.create_target()
def to_expr_list(self):
exprs = []
for i in range(len(self.symbols)):
exprs.append(self.get_expr_at_idx(i))
return exprs
def get_expr_at_idx(self, idx):
symbol = self.symbols[idx]
if symbol == "":
return None
left = self.left.get_expr_at_idx(idx) if self.left is not None else None
right = self.right.get_expr_at_idx(idx) if self.right is not None else None
return Node(symbol, left=left, right=right)
@staticmethod
def get_prediction(tree):
reps = []
if tree.left is not None:
reps.append(BatchedNode.get_prediction(tree.left))
target = tree.prediction[:, 0, :]
reps.append(target[:, None, :])
if tree.right is not None:
reps.append(BatchedNode.get_prediction(tree.right))
return torch.cat(reps, dim=1)
@staticmethod
def get_target(tree):
reps = []
if tree.left is not None:
reps.append(BatchedNode.get_target(tree.left))
target = torch.zeros(len(tree.symbols)).long()
for i, s in enumerate(tree.symbols):
if s == "":
target[i] = -1
else:
target[i] = BatchedNode._s2c[s]
reps.append(target[:, None])
if tree.right is not None:
reps.append(BatchedNode.get_target(tree.right))
return torch.cat(reps, dim=1)
| 9,097 | 34.263566 | 117 | py |
HVAE | HVAE-master/src/model.py | import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
from tree import Node
from symbol_library import SymType
class HVAE(nn.Module):
_symbols = None
def __init__(self, input_size, output_size, hidden_size=None):
super(HVAE, self).__init__()
if hidden_size is None:
hidden_size = output_size
self.encoder = Encoder(input_size, hidden_size, output_size)
self.decoder = Decoder(output_size, hidden_size, input_size)
def forward(self, tree):
mu, logvar = self.encoder(tree)
z = self.sample(mu, logvar)
out = self.decoder(z, tree)
return mu, logvar, out
def sample(self, mu, logvar):
eps = Variable(torch.randn(mu.size()))
std = torch.exp(logvar / 2.0)
return mu + eps * std
def encode(self, tree):
mu, logvar = self.encoder(tree)
return mu, logvar
def decode(self, z):
if HVAE._symbols is None:
raise Exception("To generate expression trees, a symbol library is needed. Please add it using the"
" HVAE.add_symbols method.")
return self.decoder.decode(z, HVAE._symbols)
@staticmethod
def add_symbols(symbols):
HVAE._symbols = symbols
Node.add_symbols(symbols)
class Encoder(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(Encoder, self).__init__()
self.hidden_size = hidden_size
self.gru = GRU221(input_size=input_size, hidden_size=hidden_size)
self.mu = nn.Linear(in_features=hidden_size, out_features=output_size)
self.logvar = nn.Linear(in_features=hidden_size, out_features=output_size)
torch.nn.init.xavier_uniform_(self.mu.weight)
torch.nn.init.xavier_uniform_(self.logvar.weight)
def forward(self, tree):
# Check if the tree has target vectors
if tree.target is None:
tree.add_target_vectors()
tree_encoding = self.recursive_forward(tree)
mu = self.mu(tree_encoding)
logvar = self.logvar(tree_encoding)
return mu, logvar
def recursive_forward(self, tree):
left = self.recursive_forward(tree.left) if tree.left is not None \
else torch.zeros(tree.target.size(0), 1, self.hidden_size)
right = self.recursive_forward(tree.right) if tree.right is not None \
else torch.zeros(tree.target.size(0), 1, self.hidden_size)
hidden = self.gru(tree.target, left, right)
return hidden
class Decoder(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(Decoder, self).__init__()
self.hidden_size = hidden_size
self.z2h = nn.Linear(input_size, hidden_size)
self.h2o = nn.Linear(hidden_size, output_size)
self.gru = GRU122(input_size=output_size, hidden_size=hidden_size)
torch.nn.init.xavier_uniform_(self.z2h.weight)
torch.nn.init.xavier_uniform_(self.h2o.weight)
# Used during training to guide the learning process
def forward(self, z, tree):
hidden = self.z2h(z)
self.recursive_forward(hidden, tree)
return tree
def recursive_forward(self, hidden, tree):
prediction = self.h2o(hidden)
symbol_probs = F.softmax(prediction, dim=2)
tree.prediction = prediction
if tree.left is not None or tree.right is not None:
left, right = self.gru(symbol_probs, hidden)
if tree.left is not None:
self.recursive_forward(left, tree.left)
if tree.right is not None:
self.recursive_forward(right, tree.right)
# Used for inference to generate expression trees from latent vectors
def decode(self, z, symbols):
with torch.no_grad():
hidden = self.z2h(z)
tree = self.recursive_decode(hidden, symbols)
return tree
def recursive_decode(self, hidden, symbols):
prediction = self.h2o(hidden)
# Sample symbol in a given node
sampled, symbol, stype = self.sample_symbol(prediction, symbols)
# print(symbol)
if stype.value is SymType.Fun.value:
left, right = self.gru(sampled, hidden)
l_tree = self.recursive_decode(left, symbols)
r_tree = None
elif stype.value is SymType.Operator.value:
left, right = self.gru(sampled, hidden)
l_tree = self.recursive_decode(left, symbols)
r_tree = self.recursive_decode(right, symbols)
else:
l_tree = None
r_tree = None
return Node(symbol, right=r_tree, left=l_tree)
def sample_symbol(self, prediction, symbol_dict):
sampled = F.softmax(prediction, dim=2)
# Select the symbol with the highest value ("probability")
symbol = symbol_dict[torch.argmax(sampled).item()]
return sampled, symbol["symbol"], symbol["type"]
class GRU221(nn.Module):
def __init__(self, input_size, hidden_size):
super(GRU221, self).__init__()
self.wir = nn.Linear(in_features=input_size, out_features=hidden_size)
self.whr = nn.Linear(in_features=2*hidden_size, out_features=hidden_size)
self.wiz = nn.Linear(in_features=input_size, out_features=hidden_size)
self.whz = nn.Linear(in_features=2 * hidden_size, out_features=hidden_size)
self.win = nn.Linear(in_features=input_size, out_features=hidden_size)
self.whn = nn.Linear(in_features=2 * hidden_size, out_features=hidden_size)
torch.nn.init.xavier_uniform_(self.wir.weight)
torch.nn.init.xavier_uniform_(self.whr.weight)
torch.nn.init.xavier_uniform_(self.wiz.weight)
torch.nn.init.xavier_uniform_(self.whz.weight)
torch.nn.init.xavier_uniform_(self.win.weight)
torch.nn.init.xavier_uniform_(self.whn.weight)
def forward(self, x, h1, h2):
h = torch.cat([h1, h2], dim=2)
r = torch.sigmoid(self.wir(x) + self.whr(h))
z = torch.sigmoid(self.wiz(x) + self.whz(h))
n = torch.tanh(self.win(x) + r * self.whn(h))
return (1 - z) * n + (z / 2) * h1 + (z / 2) * h2
class GRU122(nn.Module):
def __init__(self, input_size, hidden_size):
super(GRU122, self).__init__()
self.hidden_size = hidden_size
self.wir = nn.Linear(in_features=input_size, out_features=2*hidden_size)
self.whr = nn.Linear(in_features=hidden_size, out_features=2*hidden_size)
self.wiz = nn.Linear(in_features=input_size, out_features=2*hidden_size)
self.whz = nn.Linear(in_features=hidden_size, out_features=2*hidden_size)
self.win = nn.Linear(in_features=input_size, out_features=2*hidden_size)
self.whn = nn.Linear(in_features=hidden_size, out_features=2*hidden_size)
torch.nn.init.xavier_uniform_(self.wir.weight)
torch.nn.init.xavier_uniform_(self.whr.weight)
torch.nn.init.xavier_uniform_(self.wiz.weight)
torch.nn.init.xavier_uniform_(self.whz.weight)
torch.nn.init.xavier_uniform_(self.win.weight)
torch.nn.init.xavier_uniform_(self.whn.weight)
def forward(self, x, h):
r = torch.sigmoid(self.wir(x) + self.whr(h))
z = torch.sigmoid(self.wiz(x) + self.whz(h))
n = torch.tanh(self.win(x) + r * self.whn(h))
dh = h.repeat(1, 1, 2)
out = (1 - z) * n + z * dh
return torch.split(out, self.hidden_size, dim=2)
| 7,496 | 39.090909 | 111 | py |
HVAE | HVAE-master/src/reconstruction_accuracy.py | from argparse import ArgumentParser
import numpy as np
import torch
from sklearn.model_selection import KFold
import editdistance
from utils import read_expressions, tokens_to_tree
from symbol_library import generate_symbol_library
from model import HVAE
from train import train_hvae
def one_fold(model, train, test, epochs, batch_size, annealing_iters, verbose):
train_hvae(model, train, epochs, batch_size, annealing_iters, verbose)
total_distance = []
for t in test:
latent = model.encode(t)[0]
pt = model.decode(latent)
total_distance.append(editdistance.eval(t.to_list(notation="postfix"), pt.to_list(notation="postfix")))
return total_distance
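# Reconstruction quality is the edit (Levenshtein) distance between the postfix token sequences of the
# original and the decoded expression, e.g. ['A', 'B', '+'] vs ['A', 'B', '*'] gives distance 1 and a
# perfect reconstruction contributes 0 to total_distance.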
def one_experiment(name, trees, input_dim, latent_dim, epochs, batch_size, annealing_iters, verbose, seed,
smaller_dataset=False, examples=2000):
kf = KFold()
distances = []
for i, (train_idx, test_idx) in enumerate(kf.split(trees)):
print(f"Fold {i + 1}")
if smaller_dataset:
np.random.seed(seed + i)
torch.manual_seed(seed + i)
inds = np.random.permutation(train_idx)
inds = inds[:examples]
train = [trees[i] for i in inds]
else:
train = [trees[i] for i in train_idx]
test = [trees[i] for i in test_idx]
model = HVAE(input_dim, latent_dim)
distances.append(one_fold(model, train, test, epochs, batch_size, annealing_iters, verbose))
print(f"Mean: {np.mean(distances[-1])}, Var: {np.var(distances[-1])}")
print()
fm = [np.mean(d) for d in distances]
with open("../results/hvae.txt", "a") as file:
file.write(f"{name}\t Mean: {np.mean(fm)}, Std dev: {np.std(fm)}, All: {', '.join([str(f) for f in fm])}\n")
print(f"Mean: {np.mean(fm)}, Std dev: {np.std(fm)}, All: {', '.join([str(f) for f in fm])}")
return fm
if __name__ == '__main__':
parser = ArgumentParser(prog='Train HVAE', description='A script for training the HVAE model.')
parser.add_argument("-expressions", required=True)
parser.add_argument("-symbols", nargs="+", required=True)
parser.add_argument("-batch", default=32, type=int)
parser.add_argument("-num_vars", default=2, type=int)
parser.add_argument("-has_const", action="store_true")
parser.add_argument("-latent_size", default=128, type=int)
parser.add_argument("-epochs", default=20, type=int)
parser.add_argument("-annealing_iters", default=1800, type=int)
parser.add_argument("-verbose", action="store_true")
parser.add_argument("-seed", default=18, type=int)
args = parser.parse_args()
if args.seed is not None:
np.random.seed(args.seed)
torch.manual_seed(args.seed)
equations = read_expressions(args.expressions)
symbols = generate_symbol_library(args.num_vars, args.symbols, args.has_const)
input_dim = len(symbols)
HVAE.add_symbols(symbols)
s2t = {s["symbol"]: s for s in symbols}
trees = [tokens_to_tree(eq, s2t) for eq in equations]
one_experiment(args.expressions, trees, input_dim, args.latent_size, args.epochs, args.batch, args.annealing_iters,
args.verbose, args.seed)
| 3,214 | 38.691358 | 119 | py |
HVAE | HVAE-master/src/linear_interpolation.py | import torch
from model import HVAE
from utils import tokens_to_tree
from symbol_library import generate_symbol_library
def interpolateAB(model, exprA, exprB, steps=5):
tokensA = exprA.split(" ")
tokensB = exprB.split(" ")
treeA = tokens_to_tree(tokensA, s2t)
treeB = tokens_to_tree(tokensB, s2t)
l1 = model.encode(treeA)[0]
l2 = model.encode(treeB)[0]
print(f"Expr A:\t{str(treeA)}")
print(f"a=0:\t{str(model.decode(l1))}")
for i in range(1, steps-1):
a = i/(steps-1)
la = (1-a) * l1 + a * l2
print(f"a={str(a)[:5]}:\t{str(model.decode(la))}")
print(f"a=1:\t{str(model.decode(l2))}")
print(f"Expr B:\t{str(treeB)}")
if __name__ == '__main__':
param_file = "../params/4_2k.pt"
symbols = generate_symbol_library(1, ["+", "-", "*", "/", "^"])
HVAE.add_symbols(symbols)
s2t = {s["symbol"]: s for s in symbols}
steps = 5
model = torch.load(param_file)
interpolateAB(model, "A + A / A", "A * C ^ A")
# TODO: Create reproducible results of linear interpolation and add them to the paper
| 1,089 | 28.459459 | 89 | py |
HVAE | HVAE-master/src/train.py | from argparse import ArgumentParser
import numpy as np
import torch
from torch.utils.data import Sampler, Dataset, DataLoader
from tqdm import tqdm
from utils import tokens_to_tree, read_expressions, read_json
from model import HVAE
from symbol_library import generate_symbol_library
def collate_fn(batch):
return batch
class TreeSampler(Sampler):
def __init__(self, batch_size, num_eq):
self.batch_size = batch_size
self.num_eq = num_eq
def __iter__(self):
for i in range(len(self)):
batch = np.random.randint(low=0, high=self.num_eq, size=self.batch_size)
yield batch
def __len__(self):
return self.num_eq // self.batch_size
class TreeDataset(Dataset):
def __init__(self, train):
self.train = train
def __getitem__(self, idx):
return self.train[idx]
def __len__(self):
return len(self.train)
def train_hvae(model, trees, epochs=20, batch_size=32, annealing_iters=2800, verbose=True):
dataset = TreeDataset(trees)
sampler = TreeSampler(batch_size, len(dataset))
trainloader = DataLoader(dataset, batch_sampler=sampler, collate_fn=collate_fn, num_workers=0)
optimizer = torch.optim.Adam(model.parameters())
criterion = torch.nn.CrossEntropyLoss()
iter_counter = 0
lmbda = (np.tanh(-4.5) + 1) / 2
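# KL annealing: lmbda starts at (tanh(-4.5) + 1) / 2, roughly 0.00012 (the in-loop formula evaluated at
# iteration 0), then follows (tanh((iter - 4500) / 1000) + 1) / 2, so with annealing_iters = 2800
# (the default here) it rises to about (tanh(-1.7) + 1) / 2, i.e. roughly 0.032, and is then kept fixed.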
midpoint = len(dataset) // (2 * batch_size)
for epoch in range(epochs):
bce, kl, total, num_trees = 0, 0, 0, 0
with tqdm(total=len(dataset), desc=f'Training - Epoch: {epoch + 1}/{epochs}', unit='chunks') as prog_bar:
for i, trees in enumerate(trainloader):
batch_loss = 0
for tree in trees:
mu, logvar, outputs = model(tree)
loss, bcel, kll = outputs.loss(mu, logvar, lmbda, criterion)
batch_loss += loss
total += loss.detach().item()
bce += bcel.detach().item()
kl += kll.detach().item()
num_trees += batch_size
optimizer.zero_grad()
batch_loss = batch_loss / batch_size
batch_loss.backward()
optimizer.step()
prog_bar.set_postfix(**{'run:': "HVAE",
'loss': total / num_trees,
'BCE': bce / num_trees,
'KLD': kl / num_trees})
prog_bar.update(batch_size)
if iter_counter < annealing_iters:
lmbda = (np.tanh((iter_counter - 4500) / 1000) + 1) / 2
iter_counter += 1
if verbose and i == midpoint:
z = model.encode(trees[0])[0]
decoded_tree = model.decode(z)
print("\nO: {}".format(str(trees[0])))
print("P: {}".format(str(decoded_tree)))
for t in trees:
t.clear_prediction()
if __name__ == '__main__':
parser = ArgumentParser(prog='Train HVAE', description='A script for training the HVAE model.')
parser.add_argument("-expressions", required=True)
parser.add_argument("-symbols", nargs="+", required=True)
parser.add_argument("-batch", default=32, type=int)
parser.add_argument("-num_vars", default=2, type=int)
parser.add_argument("-has_const", action="store_true")
parser.add_argument("-latent_size", default=32, type=int)
parser.add_argument("-epochs", default=20, type=int)
parser.add_argument("-param_path", default="")
parser.add_argument("-annealing_iters", default=2800, type=int)
parser.add_argument("-verbose", action="store_true")
parser.add_argument("-seed", type=int)
args = parser.parse_args()
if args.seed is not None:
np.random.seed(args.seed)
torch.manual_seed(args.seed)
symbols = generate_symbol_library(args.num_vars, args.symbols, args.has_const)
HVAE.add_symbols(symbols)
if args.expressions.strip().split(".")[-1] == "json":
trees = read_json(args.expressions)
else:
s2t = {s["symbol"]: s for s in symbols}
equations = read_expressions(args.expressions)
trees = [tokens_to_tree(eq, s2t) for eq in equations]
model = HVAE(len(symbols), args.latent_size)
train_hvae(model, trees, args.epochs, args.batch, args.annealing_iters, args.verbose)
if args.param_path != "":
torch.save(model, args.param_path)
| 4,523 | 34.904762 | 112 | py |
AutoPruner | AutoPruner-master/ResNet50/50/fine_tune_compressed_model.py | import argparse
import os
import shutil
import time
import sys
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torchvision import datasets, transforms
from src_code.lmdbdataset import lmdbDataset
from compress_model.new_model import NetworkNew_test
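# Note: this script uses the pre-0.4 PyTorch API (torch.autograd.Variable, volatile=True,
# loss.data[0], tensor.cuda(async=True)); under Python 3.7+ `async` is a reserved keyword and the
# modern equivalents are plain tensors, torch.no_grad(), loss.item() and non_blocking=True.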
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
'''parser.add_argument('data', metavar='DIR',
help='path to dataset')
'''
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
help='number of data loading workers (default: 8)')
parser.add_argument('--epochs', default=30, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.001, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--evaluate', default=False, type=bool,
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='gloo', type=str,
help='distributed backend')
parser.add_argument('--gpu_id', default='7', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--data_base', default='/data/zhangcl/ImageNet', type=str, help='the path of dataset')
parser.add_argument('--load_from_lmdb', default=True, type=bool, help='load image data from lmdb or not')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
best_prec1 = 0
print(args)
def main():
global args, best_prec1
args = parser.parse_args()
args.distributed = args.world_size > 1
if args.distributed:
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size)
# create model
# model = models.vgg16(pretrained=True)
model = NetworkNew_test('checkpoint/model.pth')
print(model)
model = torch.nn.DataParallel(model.cuda(), device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# Data loading code from lmdb
if args.load_from_lmdb:
train_loader = torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-train.lmdb'), True),
batch_size=args.batch_size,
shuffle=True,
num_workers=8,
pin_memory=True
)
print('train_loader_success!')
val_loader = torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-val.lmdb'), False),
batch_size=args.batch_size,
num_workers=8,
pin_memory=True
)
else:
traindir = os.path.join('/opt/luojh/Dataset/ImageNet/images', 'train')
valdir = os.path.join('/opt/luojh/Dataset/ImageNet/images', 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
# evaluate and train
if args.evaluate:
validate(val_loader, model, criterion)
return
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch, int(args.epochs/3.0))
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
if is_best:
folder_path = 'checkpoint/fine_tune'
if not os.path.exists(folder_path):
os.makedirs(folder_path)
torch.save(model.state_dict(), folder_path + '/model.pth')
print('best acc is %.3f' % best_prec1)
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input).cuda()
target_var = torch.autograd.Variable(target).cuda()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
sys.stdout.flush()
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, epoch_num):
"""Sets the learning rate to the initial LR decayed by 10 every 4 epochs"""
lr = args.lr * (0.1 ** (epoch // epoch_num))
print('| Learning Rate = %f' % lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
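# Example: with target = [3, 7], topk = (1, 5) and batch_size = 2, `correct` is a 5x2 matrix of
# 0/1 hits; correct[:1] counts exact top-1 matches and correct[:5] counts matches anywhere in the
# top five, each scaled by 100 / batch_size to give a percentage.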
if __name__ == '__main__':
main()
| 10,872 | 35.733108 | 106 | py |
AutoPruner | AutoPruner-master/ResNet50/50/main.py | # ************************************************************
# Author : Bumsoo Kim, 2017
# Github : https://github.com/meliketoy/fine-tuning.pytorch
#
# Korea University, Data-Mining Lab
# Deep Convolutional Network Fine tuning Implementation
#
# Description : main.py
# The main code for training classification networks.
# ***********************************************************
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import time
import os
import sys
import argparse
import numpy as np
import shutil
import math
from torchvision import models
from src_code import Network_FT
from src_code.lmdbdataset import lmdbDataset
parser = argparse.ArgumentParser(description='PyTorch Digital Mammography Training')
parser.add_argument('--lr', default=1e-3, type=float, help='learning rate')
parser.add_argument('--weight_decay', default=5e-4, type=float, help='weight decay')
parser.add_argument('--batch_size', default=256, type=int, help='batch size')
parser.add_argument('--num_epochs', default=8, type=int, help='number of training epochs')
parser.add_argument('--lr_decay_epoch', default=10, type=int, help='learning rate decay epoch')
parser.add_argument('--data_base', default='/data/zhangcl/ImageNet', type=str, help='the path of dataset')
parser.add_argument('--ft_model_path', default='/home/luojh2/.torch/models/resnet50-19c8e357.pth',
type=str, help='the path of fine tuned model')
parser.add_argument('--gpu_id', default='0,1,2,3', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--group_id', default=0, type=int, help='the id of compressed group, starting from 0')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--compression_rate', default=0.5, type=float, help='the percentage of 1 in compressed model')
parser.add_argument('--channel_index_range', default=20, type=int, help='the range to calculate channel index')
parser.add_argument('--print-freq', '-p', default=20, type=int,
                    metavar='N', help='print frequency (default: 20)')
parser.add_argument('--alpha_range', default=100, type=int, help='the iteration interval for updating the sigmoid scale factor')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
best_prec1 = -1
print(args)
resnet_channel_number = [6, 8, 12, 4]
scale_factor_list = None
alpha_index = 0
threshold = 95 * np.ones(resnet_channel_number[args.group_id])
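# scale_factor_list holds the per-layer sigmoid scale factor (alpha) schedule, ramped from 1
# towards 100 over training; threshold holds a per-layer trigger on the binary pruning rate that
# starts at 95% and is lowered in steps of 5 towards 100 * (1 - compression_rate). When a layer
# reaches its trigger, or its scores are not yet polarised towards 0/1, its alpha schedule is
# increased so the sigmoid becomes sharper.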
def main():
global args, best_prec1, scale_factor_list, resnet_channel_number
# Phase 1 : Data Upload
    print('\n[Phase 1] : Data Preparation')
train_loader = torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-train.lmdb'), True),
batch_size=args.batch_size,
shuffle=True,
num_workers=8,
pin_memory=True)
val_loader = torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-val.lmdb'), False),
batch_size=args.batch_size,
num_workers=8,
pin_memory=True)
print('data_loader_success!')
# Phase 2 : Model setup
print('\n[Phase 2] : Model setup')
if args.group_id == 0:
model_ft = models.resnet50(True).cuda()
model_ft = torch.nn.DataParallel(model_ft)
model_param = model_ft.state_dict()
torch.save(model_param, 'checkpoint/model.pth')
model_ft = Network_FT.NetworkNew(args.group_id).cuda()
model_ft = torch.nn.DataParallel(model_ft)
cudnn.benchmark = True
print("model setup success!")
# Phase 3: fine_tune model
print('\n[Phase 3] : Model fine tune')
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model_ft.parameters()), args.lr,
momentum=0.9,
weight_decay=args.weight_decay)
tmp = np.linspace(1, 100, int(args.num_epochs * len(train_loader) / args.alpha_range))
scale_factor_list = np.ones([resnet_channel_number[args.group_id], len(tmp)])
for tmp_i in range(resnet_channel_number[args.group_id]):
scale_factor_list[tmp_i, :] = tmp.copy()
reg_lambda = 10.0 * np.ones(resnet_channel_number[args.group_id])
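    # reg_lambda weights the sparsity term added to the loss in train():
    # loss += reg_lambda[i] * (||scale_vec[i]||_1 / len(scale_vec[i]) - compression_rate) ** 2,
    # which pulls the mean channel score of each AutoPruner layer towards the target compression rate.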
for epoch in range(args.start_epoch, args.num_epochs):
adjust_learning_rate(optimizer, epoch, int(args.num_epochs/2.0))
# train for one epoch
channel_index, reg_lambda = train(train_loader, model_ft, criterion, optimizer, epoch, reg_lambda)
# evaluate on validation set
prec1 = validate(val_loader, model_ft, criterion, channel_index)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
if is_best:
best_prec1 = prec1
folder_path = 'checkpoint/group_' + str(args.group_id)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
torch.save(model_ft.state_dict(), folder_path+'/model.pth')
if args.group_id == 3:
tmp = channel_index[0].copy()
tmp[:] = 1.0
channel_index.append(tmp.copy())
channel_index.append(tmp.copy())
torch.save(channel_index, folder_path+'/channel_index.pth')
def train(train_loader, model, criterion, optimizer, epoch, reg_lambda):
global resnet_channel_number, scale_factor_list, alpha_index, threshold
gpu_num = torch.cuda.device_count()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
channel_index_list = list()
channel_index_binary = list()
end = time.time()
for i, (input, target) in enumerate(train_loader):
if i % args.alpha_range == 0:
if alpha_index == scale_factor_list.shape[1]:
alpha_index = alpha_index - 1
scale_factor = scale_factor_list[:, alpha_index]
alpha_index = alpha_index + 1
model.module.set_scale_factor(scale_factor)
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input).cuda()
target_var = torch.autograd.Variable(target).cuda()
# compute output
output, scale_vec = model(input_var)
loss = criterion(output, target_var)
for vec_i in range(len(scale_vec)):
loss = loss + float(reg_lambda[vec_i]) * (
scale_vec[vec_i].norm(1) / float(scale_vec[vec_i].size(0)) - args.compression_rate) ** 2
# compute channel index
channel_index_sublist = list()
for vec_i in range(len(scale_vec)):
tmp = scale_vec[vec_i].data.cpu().numpy().reshape(gpu_num, -1).mean(0)
channel_index_sublist.append(tmp.copy())
if i == 0:
print('first 5 values in layer {0}: [{1:.6f}, {2:.6f}, {3:.6f}, {4:.6f}, {5:.6f}]'.format(int(vec_i),
tmp[0],
tmp[1],
tmp[2],
tmp[3],
tmp[4]))
channel_index_list.append(channel_index_sublist.copy())
if len(channel_index_list) == args.channel_index_range:
channel_index_binary = list()
for vec_i in range(len(scale_vec)):
tmp = list()
for tmp_i in range(len(channel_index_list)):
tmp_a = channel_index_list[tmp_i][vec_i]
tmp_a = (np.sign(tmp_a - 0.5) + 1) / 2.0 # to 0-1 binary
tmp.append(tmp_a)
tmp = np.array(tmp).sum(axis=0)
tmp = tmp / args.channel_index_range
tmp_value = channel_index_list[0][vec_i]
print(
'first 5 values in layer {0}: [{1:.6f}, {2:.6f}, {3:.6f}, {4:.6f}, {5:.6f}]'.format(int(vec_i),
tmp_value[0],
tmp_value[1],
tmp_value[2],
tmp_value[3],
tmp_value[4]))
channel_index = (np.sign(tmp - 0.5) + 1) / 2.0 # to 0-1 binary
channel_index_binary.append(channel_index.copy())
binary_pruning_rate = 100.0 * np.sum(channel_index == 0) / len(channel_index)
if binary_pruning_rate >= threshold[vec_i]:
scale_factor_list[vec_i, :] = scale_factor_list[vec_i, :] + 1
threshold[vec_i] = threshold[vec_i] - 5
if threshold[vec_i] < 100 - 100 * args.compression_rate:
threshold[vec_i] = 100 - 100 * args.compression_rate
print('threshold in layer %d is %d' % (int(vec_i), int(threshold[vec_i])))
two_side_rate = (np.sum(tmp_value > 0.8) + np.sum(tmp_value < 0.2)) / len(tmp_value)
if two_side_rate < 0.9 and alpha_index >= int(scale_factor_list.shape[1] / args.num_epochs):
scale_factor_list[vec_i, :] = scale_factor_list[vec_i, :] + 1
reg_lambda[vec_i] = 100.0 * np.abs(binary_pruning_rate/100.0 - 1 + args.compression_rate)
tmp[tmp == 0] = 1
channel_inconsistency = 100.0 * np.sum(tmp != 1) / len(tmp)
print(
"[{0}] pruning rate: {1:.4f}%, inconsistency: {2:.4f}%, reg_lambda: {3:.4f}, scale_factor: {4:.4f}, two_side: {5:.4f}".format(
int(vec_i), binary_pruning_rate, channel_inconsistency, reg_lambda[vec_i], scale_factor[vec_i], two_side_rate))
sys.stdout.flush()
channel_index_list = list()
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch[{0}]: [{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
top1=top1, top5=top5))
print('+--------------------------------------------------------------------------------------------------+')
sys.stdout.flush()
return channel_index_binary, reg_lambda
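# Channel selection: per-batch scores are binarised with (np.sign(x - 0.5) + 1) / 2, e.g. scores
# [0.9, 0.1, 0.7, 0.3] give the keep-mask [1, 0, 1, 0]; a channel is finally kept only if it is
# selected in more than half of the last channel_index_range mini-batches.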
def validate(val_loader, model, criterion, channel_index):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output, _ = model(input_var, channel_index)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, epoch_num):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // epoch_num))
print('| Learning Rate = %f' % lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == "__main__":
main()
| 14,783 | 42.354839 | 146 | py |
AutoPruner | AutoPruner-master/ResNet50/50/evaluate_network.py | import torch
import torch.backends.cudnn as cudnn
import os
import sys
import argparse
import time
from src_code.lmdbdataset import lmdbDataset
from src_code import Network_FT
parser = argparse.ArgumentParser(description='PyTorch Digital Mammography Training')
parser.add_argument('--batch_size', default=100, type=int, help='batch size')
parser.add_argument('--data_base', default='/data/zhangcl/ImageNet', type=str, help='the path of dataset')
parser.add_argument('--gpu_id', default='1', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--ft_model_path', default='/home/luojh2/.torch/models/resnet50-19c8e357.pth',
type=str, help='the path of fine tuned model')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
print(args)
# Phase 1 : Data Upload
print('\n[Phase 1] : Data Preparation')
dset_loaders = {
'train': torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-train.lmdb'), True),
batch_size=args.batch_size,
shuffle=True,
num_workers=8,
pin_memory=True),
'val': torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-val.lmdb'), False),
batch_size=args.batch_size,
num_workers=8,
pin_memory=True)
}
print('data_loader_success!')
# Phase 2 : Model setup
print('\n[Phase 2] : Model setup')
model = Network_FT.NetworkNew(0).cuda()
model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
model.module.set_scale_factor(2.0)
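# Note: as defined in src_code/Network_FT.py, forward() returns (logits, scale_vector) and
# set_scale_factor() indexes its argument per AutoPruner layer, so this script appears to target
# an earlier version of that interface which accepted a single scalar and returned logits only.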
cudnn.benchmark = True
# Phase 3: evaluation
def evaluate_net():
print("\n[Phase 3 : Inference on val")
criterion = torch.nn.CrossEntropyLoss().cuda()
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for batch_idx, (input, target) in enumerate(dset_loaders['val']): # dset_loaders['val']):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
batch_idx, len(dset_loaders['val']), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
evaluate_net()
| 4,037 | 31.564516 | 106 | py |
AutoPruner | AutoPruner-master/ResNet50/50/fine_tune_again.py | import argparse
import os
import shutil
import time
import sys
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torchvision import datasets, transforms
from src_code.lmdbdataset import lmdbDataset
from compress_model.new_model import NetworkNew_test
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
'''parser.add_argument('data', metavar='DIR',
help='path to dataset')
'''
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
help='number of data loading workers (default: 8)')
parser.add_argument('--epochs', default=12, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.001, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--evaluate', default=False, type=bool,
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='gloo', type=str,
help='distributed backend')
parser.add_argument('--gpu_id', default='7', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--data_base', default='/data/zhangcl/ImageNet', type=str, help='the path of dataset')
parser.add_argument('--load_from_lmdb', default=True, type=bool, help='load image data from lmdb or not')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
best_prec1 = 0
print(args)
def main():
global args, best_prec1
args = parser.parse_args()
args.distributed = args.world_size > 1
if args.distributed:
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size)
# create model
# model = models.vgg16(pretrained=True)
model = NetworkNew_test('checkpoint/model.pth')
print(model)
model = torch.nn.DataParallel(model.cuda(), device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# Data loading code from lmdb
if args.load_from_lmdb:
train_loader = torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-train.lmdb'), True),
batch_size=args.batch_size,
num_workers=16,
pin_memory=True
)
print('train_loader_success!')
val_loader = torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-val.lmdb'), False),
batch_size=args.batch_size,
num_workers=8,
pin_memory=True
)
else:
traindir = os.path.join('/opt/luojh/Dataset/ImageNet/images', 'train')
valdir = os.path.join('/opt/luojh/Dataset/ImageNet/images', 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
# evaluate and train
if args.evaluate:
validate(val_loader, model, criterion)
return
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
if is_best:
folder_path = 'checkpoint/fine_again'
if not os.path.exists(folder_path):
os.makedirs(folder_path)
best_prec1 = max(prec1, best_prec1)
torch.save(model.state_dict(), folder_path + '/model.pth')
print('best accuracy is %.3f' % best_prec1)
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input).cuda()
target_var = torch.autograd.Variable(target).cuda()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
sys.stdout.flush()
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 4 epochs"""
lr = args.lr * (0.1 ** (epoch // 4))
print('| Learning Rate = %f' % lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
| 10,815 | 35.789116 | 106 | py |
AutoPruner | AutoPruner-master/ResNet50/50/src_code/my_op_fc.py | import torch
from torch.autograd import Variable
import torch.nn as nn
from torch.autograd import gradcheck
import numpy as np
class MyGAP_fc(torch.autograd.Function):
'''
Global Average Pooling with batchsize: N*4096 -> 1*4096
'''
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
input = torch.mean(input, dim=0, keepdim=True)
return input
@staticmethod
def backward(ctx, grad_output):
input = ctx.saved_tensors
grad_input = input[0].clone()
for i in range(grad_input.shape[0]):
grad_input[i, :] = grad_output.data / grad_input.shape[0]
return Variable(grad_input)
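# Backward of MyGAP_fc: the forward pass is a mean over the batch dimension, so every sample
# receives grad_output / batch_size as its gradient.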
class MyScale_fc(torch.autograd.Function):
'''
input: x: 64*4096, scale:4096 ==> x[:, i]*scale[i]
'''
@staticmethod
def forward(self, input_data, scale_vec):
self.save_for_backward(input_data, scale_vec)
input_data2 = input_data.clone()
for i in range(scale_vec.shape[0]):
input_data2[:, i] = input_data[:, i] * scale_vec[i]
return input_data2
@staticmethod
def backward(self, grad_output):
input_data, scale_vec = self.saved_tensors
grad_input = input_data.clone()
for i in range(scale_vec.shape[0]):
grad_input[:, i] = grad_output.data[:, i] * scale_vec[i]
grad_vec = scale_vec.clone()
for i in range(scale_vec.shape[0]):
grad_vec[i] = torch.sum(grad_output.data[:, i]*input_data[:, i])
return Variable(grad_input), Variable(grad_vec)
class MyCS_fc(nn.Module):
def __init__(self, channels_num):
super(MyCS_fc, self).__init__()
self.layer_type = 'MyCS_fc'
self.fc = nn.Linear(channels_num, channels_num)
self.sigmoid = nn.Sigmoid()
def forward(self, x, scale_factor):
x_scale = MyGAP_fc.apply(x) # apply my GAP: N*4096 => 1*4096
x_scale = self.fc(x_scale) # 1*4096
x_scale = torch.squeeze(x_scale) # 4096
x_scale = x_scale * scale_factor # apply scale sigmoid
x_scale = self.sigmoid(x_scale)
if not self.training:
index = (np.sign(x_scale.data.cpu().numpy() - 0.5) + 1) / 2.0
x_scale.data = torch.FloatTensor(index).cuda()
x = MyScale_fc.apply(x, x_scale)
return x, x_scale
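# At inference time MyCS_fc snaps its own sigmoid scores to {0, 1} via (sign(x - 0.5) + 1) / 2,
# so channels scoring below 0.5 are zeroed out exactly rather than merely down-weighted.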
if __name__ == '__main__':
in_ = (Variable(torch.randn(3, 4).double(), requires_grad=True),
Variable(torch.randn(4).double(), requires_grad=True))
res = gradcheck(MyScale_fc.apply, in_, eps=1e-6, atol=1e-4)
# in_ = (Variable(torch.randn(4, 64).double(), requires_grad=True),)
# res = gradcheck(MyGAP_fc.apply, in_, eps=1e-6, atol=1e-4)
print(res)
| 2,729 | 31.117647 | 76 | py |
AutoPruner | AutoPruner-master/ResNet50/50/src_code/my_op.py | import torch
from torch.autograd import Variable
import torch.nn as nn
from torch.autograd import gradcheck
import numpy as np
import math
class MyGAP(torch.autograd.Function):
'''
Global Average Pooling with batchsize: N*512*14*14 -> 1*512*14*14
'''
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
input = torch.mean(input, dim=0, keepdim=True)
return input
@staticmethod
def backward(ctx, grad_output):
input = ctx.saved_tensors
grad_input = input[0].clone()
for i in range(grad_input.shape[0]):
grad_input[i, :, :, :] = grad_output.data / grad_input.shape[0]
return Variable(grad_input)
class MyScale(torch.autograd.Function):
'''
input: x: 64*512*7*7, scale:512 ==> x[:, i, :, :]*scale[i]
'''
@staticmethod
def forward(self, input_data, scale_vec):
self.save_for_backward(input_data, scale_vec)
input_data2 = input_data.clone()
for i in range(scale_vec.shape[0]):
input_data2[:, i, :, :] = input_data[:, i, :, :] * scale_vec[i]
return input_data2
@staticmethod
def backward(self, grad_output):
input_data, scale_vec = self.saved_tensors
grad_input = input_data.clone()
for i in range(scale_vec.shape[0]):
grad_input[:, i, :, :] = grad_output.data[:, i, :, :] * scale_vec[i]
grad_vec = scale_vec.clone()
for i in range(scale_vec.shape[0]):
grad_vec[i] = torch.sum(grad_output.data[:, i, :, :]*input_data[:, i, :, :])
return Variable(grad_input), Variable(grad_vec)
class MyCS(nn.Module):
def __init__(self, channels_num, activation_size=14, max_ks=2):
super(MyCS, self).__init__()
self.layer_type = 'MyCS'
self.conv = nn.Conv2d(channels_num, channels_num,
kernel_size=int(activation_size / max_ks), stride=1, padding=0)
self.map = nn.MaxPool2d(kernel_size=max_ks, stride=max_ks)
self.sigmoid = nn.Sigmoid()
n = int(activation_size / max_ks) * int(activation_size / max_ks) * channels_num
self.conv.weight.data.normal_(0, 10 * math.sqrt(2.0 / n))
def forward(self, x, scale_factor, channel_index=None):
x_scale = MyGAP.apply(x) # apply my GAP: N*512*14*14 => 1*512*14*14
x_scale = self.map(x_scale) # apply MAP: 1*512*14*14 => 1*512*7*7
x_scale = self.conv(x_scale) # 1*512*1*1
x_scale = torch.squeeze(x_scale) # 512
x_scale = x_scale * scale_factor # apply scale sigmoid
x_scale = self.sigmoid(x_scale)
if not self.training:
x_scale.data = torch.FloatTensor(channel_index).cuda()
x = MyScale.apply(x, x_scale)
return x, x_scale
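# MyCS pools the batch with MyGAP, max-pools the spatial map and applies a full-size convolution
# to produce one score per channel, scaled by scale_factor before the sigmoid; unlike MyCS_fc it
# does not binarise its own scores at eval time but applies the externally supplied channel_index.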
if __name__ == '__main__':
# in_ = (Variable(torch.randn(1, 1, 3, 3).double(), requires_grad=True),
# Variable(torch.randn(1).double(), requires_grad=True))
# res = gradcheck(MyScale.apply, in_, eps=1e-6, atol=1e-4)
in_ = (Variable(torch.randn(2, 64, 3, 3).double(), requires_grad=True),)
res = gradcheck(MyGAP.apply, in_, eps=1e-6, atol=1e-4)
print(res)
| 3,182 | 33.225806 | 93 | py |
AutoPruner | AutoPruner-master/ResNet50/50/src_code/Network_FT.py | import torch.nn as nn
import math
import torch
from . import my_op
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, number_list, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(number_list[1], number_list[0], kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(number_list[0])
self.conv2 = nn.Conv2d(number_list[3], number_list[2], kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(number_list[2])
self.conv3 = nn.Conv2d(number_list[5], number_list[4], kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(number_list[4])
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck_with_CS(nn.Module):
expansion = 4
def __init__(self, number_list, stride=1, downsample=None, ks=1, CS_id=0):
super(Bottleneck_with_CS, self).__init__()
self.conv1 = nn.Conv2d(number_list[1], number_list[0], kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(number_list[0])
self.conv2 = nn.Conv2d(number_list[3], number_list[2], kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(number_list[2])
self.conv3 = nn.Conv2d(number_list[5], number_list[4], kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(number_list[4])
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.CS_id = CS_id
self.channel_index = list()
if ks == 7:
mks = 1
else:
mks = 2
self.cs1 = my_op.MyCS(number_list[0], activation_size=ks * stride, max_ks=mks)
self.cs2 = my_op.MyCS(number_list[2], activation_size=ks, max_ks=mks)
self.vec1 = None
self.vec2 = None
self.scale_factor1 = 1.0
self.scale_factor2 = 1.0
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
if self.training:
out, self.vec1 = self.cs1(out, self.scale_factor1)
else:
out, self.vec1 = self.cs1(out, self.scale_factor1, self.channel_index[2 * self.CS_id])
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
if self.training:
out, self.vec2 = self.cs2(out, self.scale_factor2)
else:
out, self.vec2 = self.cs2(out, self.scale_factor2, self.channel_index[2 * self.CS_id + 1])
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
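# Bottleneck_with_CS inserts an AutoPruner gate (MyCS) after conv1 and conv2 of each bottleneck;
# the soft score vectors vec1/vec2 are collected for the sparsity loss during training, while at
# eval time the cached binary channel_index is applied instead.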
class ResNet(nn.Module):
def __init__(self, group_id, block, layers, num_classes=1000):
old_weight = torch.load('checkpoint/model.pth')
channel_number_list = analyse_number(old_weight)
self.kernel_size = int(56 / (2**group_id))
self.inplanes = 64
self.g_id = group_id
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(channel_number_list[0], 0, block, 64, layers[0])
self.layer2 = self._make_layer(channel_number_list[1], 1, block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(channel_number_list[2], 2, block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(channel_number_list[3], 3, block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# # m.weight.data.normal_(0, math.sqrt(2. / n))
# m.weight.data.normal_(0, math.sqrt(1.))
# # torch.nn.init.xavier_uniform(m.weight)
# elif isinstance(m, nn.BatchNorm2d):
# m.weight.data.fill_(1)
# m.bias.data.zero_()
old_weight = torch.load('checkpoint/model.pth')
my_weight = self.state_dict()
my_keys = list(my_weight.keys())
for k, v in old_weight.items():
name = ''.join(list(k)[7:])
if name in my_keys:
my_weight[name] = v
self.load_state_dict(my_weight)
def _make_layer(self, number_list, group_id, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
if group_id == self.g_id:
layers.append(Bottleneck_with_CS(number_list[0], stride, downsample, ks=self.kernel_size, CS_id=0))
else:
layers.append(block(number_list[0], stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
if group_id == self.g_id:
if self.g_id == 3 and i == blocks-1:
layers.append(block(number_list[i]))
else:
layers.append(Bottleneck_with_CS(number_list[i], ks=self.kernel_size, CS_id=i))
else:
layers.append(block(number_list[i]))
return nn.Sequential(*layers)
def forward(self, x, channel_index=None):
if not self.training:
self.set_channel_index(channel_index)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x) # 128, 64, 56, 56
x = self.layer1(x) # 128, 64, 56, 56
x = self.layer2(x) # 128, 512, 28, 28
x = self.layer3(x) # 128, 1024, 14, 14
x = self.layer4(x) # 128, 2048, 7, 7
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
scale_vector = self.get_scale_vector()
return x, scale_vector
def set_channel_index(self, channel_index):
if self.g_id == 0:
self.layer1[0].channel_index = channel_index
self.layer1[1].channel_index = channel_index
self.layer1[2].channel_index = channel_index
elif self.g_id == 1:
self.layer2[0].channel_index = channel_index
self.layer2[1].channel_index = channel_index
self.layer2[2].channel_index = channel_index
self.layer2[3].channel_index = channel_index
elif self.g_id == 2:
self.layer3[0].channel_index = channel_index
self.layer3[1].channel_index = channel_index
self.layer3[2].channel_index = channel_index
self.layer3[3].channel_index = channel_index
self.layer3[4].channel_index = channel_index
self.layer3[5].channel_index = channel_index
else:
self.layer4[0].channel_index = channel_index
self.layer4[1].channel_index = channel_index
# self.layer4[2].channel_index = channel_index
def get_scale_vector(self):
vector_list = list()
if self.g_id == 0:
vector_list.append(self.layer1[0].vec1)
vector_list.append(self.layer1[0].vec2)
vector_list.append(self.layer1[1].vec1)
vector_list.append(self.layer1[1].vec2)
vector_list.append(self.layer1[2].vec1)
vector_list.append(self.layer1[2].vec2)
elif self.g_id == 1:
vector_list.append(self.layer2[0].vec1)
vector_list.append(self.layer2[0].vec2)
vector_list.append(self.layer2[1].vec1)
vector_list.append(self.layer2[1].vec2)
vector_list.append(self.layer2[2].vec1)
vector_list.append(self.layer2[2].vec2)
vector_list.append(self.layer2[3].vec1)
vector_list.append(self.layer2[3].vec2)
elif self.g_id == 2:
vector_list.append(self.layer3[0].vec1)
vector_list.append(self.layer3[0].vec2)
vector_list.append(self.layer3[1].vec1)
vector_list.append(self.layer3[1].vec2)
vector_list.append(self.layer3[2].vec1)
vector_list.append(self.layer3[2].vec2)
vector_list.append(self.layer3[3].vec1)
vector_list.append(self.layer3[3].vec2)
vector_list.append(self.layer3[4].vec1)
vector_list.append(self.layer3[4].vec2)
vector_list.append(self.layer3[5].vec1)
vector_list.append(self.layer3[5].vec2)
else:
vector_list.append(self.layer4[0].vec1)
vector_list.append(self.layer4[0].vec2)
vector_list.append(self.layer4[1].vec1)
vector_list.append(self.layer4[1].vec2)
# vector_list.append(self.layer4[2].vec1)
# vector_list.append(self.layer4[2].vec2)
return vector_list
def set_scale_factor(self, sf):
if self.g_id == 0:
self.layer1[0].scale_factor1 = sf[0]
self.layer1[0].scale_factor2 = sf[1]
self.layer1[1].scale_factor1 = sf[2]
self.layer1[1].scale_factor2 = sf[3]
self.layer1[2].scale_factor1 = sf[4]
self.layer1[2].scale_factor2 = sf[5]
elif self.g_id == 1:
self.layer2[0].scale_factor1 = sf[0]
self.layer2[0].scale_factor2 = sf[1]
self.layer2[1].scale_factor1 = sf[2]
self.layer2[1].scale_factor2 = sf[3]
self.layer2[2].scale_factor1 = sf[4]
self.layer2[2].scale_factor2 = sf[5]
self.layer2[3].scale_factor1 = sf[6]
self.layer2[3].scale_factor2 = sf[7]
elif self.g_id == 2:
self.layer3[0].scale_factor1 = sf[0]
self.layer3[0].scale_factor2 = sf[1]
self.layer3[1].scale_factor1 = sf[2]
self.layer3[1].scale_factor2 = sf[3]
self.layer3[2].scale_factor1 = sf[4]
self.layer3[2].scale_factor2 = sf[5]
self.layer3[3].scale_factor1 = sf[6]
self.layer3[3].scale_factor2 = sf[7]
self.layer3[4].scale_factor1 = sf[8]
self.layer3[4].scale_factor2 = sf[9]
self.layer3[5].scale_factor1 = sf[10]
self.layer3[5].scale_factor2 = sf[11]
else:
self.layer4[0].scale_factor1 = sf[0]
self.layer4[0].scale_factor2 = sf[1]
self.layer4[1].scale_factor1 = sf[2]
self.layer4[1].scale_factor2 = sf[3]
# self.layer4[2].scale_factor = sf
def analyse_number(weight):
number_list = list()
group_list = list()
layer_list = list()
old_name = '1.0.'
old_group = '1'
for k, v in weight.items():
        if 'layer' in k and 'conv' in k:
current_name = k.split('layer')[1].split('conv')[0]
current_group = current_name.split('.')[0]
if current_name != old_name:
old_name = current_name
group_list.append(layer_list.copy())
layer_list = list()
if current_group != old_group:
old_group = current_group
number_list.append(group_list.copy())
group_list = list()
layer_list.append(v.size()[0])
layer_list.append(v.size()[1])
group_list.append(layer_list.copy())
number_list.append(group_list.copy())
return number_list
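# analyse_number recovers, for every bottleneck, the list [out1, in1, out2, in2, out3, in3] of
# channel counts from the shapes of the stored conv weights, grouped by residual stage.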
def NetworkNew(group_id):
model = ResNet(group_id, Bottleneck, [3, 4, 6, 3])
return model
| 12,489 | 37.549383 | 111 | py |
AutoPruner | AutoPruner-master/ResNet50/50/src_code/lmdbdataset.py | import cv2
import numpy as np
import torchvision.transforms as transforms
import lmdb
import msgpack
from torch.utils.data import Dataset
from PIL import Image
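# Each LMDB record is a msgpack-encoded (image, label) pair; images are stored compressed, decoded
# with OpenCV, converted to RGB PIL images and then passed through the torchvision transforms.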
class lmdbDataset(Dataset):
def __init__(self, location, is_train):
self.env = lmdb.open(location, subdir=False, max_readers=1, readonly=True, lock=False, readahead=False,
meminit=False)
self.txn = self.env.begin(write=False)
self.length = self.txn.stat()['entries']
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
# train data augment
if is_train:
self.transform = transforms.Compose([
transforms.Resize(256),
transforms.RandomCrop((224, 224)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
# test data augment
else:
self.transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])
'''
for key,data in self.txn.cursor():
now_data = msgpack.loads(data,raw=False)
data_img = now_data[0]
label = now_data[1]
now_arr = np.frombuffer(data_img[b'data'],dtype=np.uint8)
print(now_arr)
image_content = cv2.imdecode(now_arr, cv2.IMREAD_COLOR)
print(image_content.shape)
#print(type(_))
break
'''
def __len__(self):
return self.length - 1
def __getitem__(self, index):
new_index = str(index).encode()
data = self.txn.get(new_index)
now_data = msgpack.loads(data, raw=False)
data_img = now_data[0]
label = now_data[1]
now_arr = np.frombuffer(data_img[b'data'], dtype=np.uint8)
image_content = cv2.imdecode(now_arr, cv2.IMREAD_COLOR)
image_content = cv2.cvtColor(image_content, cv2.COLOR_BGR2RGB)
image_content = Image.fromarray(image_content)
image_content = self.transform(image_content)
return image_content, label
if __name__ == '__main__':
temp_dataset = lmdbDataset('indoor67.lmdb', True)
print(temp_dataset[0])
#print(i)
#assert temp_dataset[i][0] is not None | 2,431 | 34.246377 | 111 | py |
AutoPruner | AutoPruner-master/ResNet50/50/compress_model/new_model.py | import torch.nn as nn
import torch
import numpy as np
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, number_list, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(number_list[1], number_list[0], kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(number_list[0])
self.conv2 = nn.Conv2d(number_list[3], number_list[2], kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(number_list[2])
self.conv3 = nn.Conv2d(number_list[5], number_list[4], kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(number_list[4])
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, group_id, block, layers, num_classes=1000):
folder_path = '../checkpoint/group_' + str(group_id)
old_weight = torch.load(folder_path+'/model.pth')
channel_index = torch.load(folder_path+'/channel_index.pth')
channel_number_list = analyse_number(old_weight)
for i in range(int(len(channel_index)/2)):
new_num = np.where(channel_index[2 * i] != 0)[0]
new_num_1 = int(new_num.shape[0])
new_num = np.where(channel_index[2 * i + 1] != 0)[0]
new_num_2 = int(new_num.shape[0])
channel_number_list[group_id][i][0] = new_num_1
channel_number_list[group_id][i][2] = new_num_2
channel_number_list[group_id][i][3] = new_num_1
channel_number_list[group_id][i][5] = new_num_2
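        # Overwrite the output width of conv1/conv2 and the matching input widths of conv2/conv3
        # of every bottleneck in the compressed group with the number of channels kept by
        # channel_index, so the rebuilt layers match the pruned weights copied below.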
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(channel_number_list[0], block, 64, layers[0])
self.layer2 = self._make_layer(channel_number_list[1], block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(channel_number_list[2], block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(channel_number_list[3], block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
my_weight = self.state_dict()
ci_count = 0
ci_1 = 0
ci_2 = 0
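        # Copy weights into the pruned model: within the compressed group, conv1 keeps only the
        # rows in ci_1, conv2 keeps the rows in ci_2 and the columns in ci_1, conv3 keeps only the
        # columns in ci_2 (batch-norm parameters are sliced accordingly); all other layers are
        # copied unchanged.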
for k, v in my_weight.items():
name = 'module.' + k
if 'layer'+str(group_id+1) in name and 'downsample' not in name:
name_tmp = name.split('.')
if '1' in name_tmp[3]:
if 'conv' in name:
ci_1 = torch.cuda.LongTensor(np.where(channel_index[ci_count] != 0)[0])
ci_count += 1
my_weight[k] = old_weight[name][ci_1, :, :, :]
else:
my_weight[k] = old_weight[name][ci_1]
elif '2' in name_tmp[3]:
if 'conv' in name:
ci_2 = torch.cuda.LongTensor(np.where(channel_index[ci_count] != 0)[0])
ci_count += 1
my_weight[k] = old_weight[name][ci_2, :, :, :]
my_weight[k] = my_weight[k][:, ci_1, :, :]
else:
my_weight[k] = old_weight[name][ci_2]
elif '3' in name_tmp[3]:
if 'conv' in name:
my_weight[k] = old_weight[name][:, ci_2, :, :]
else:
my_weight[k] = old_weight[name]
else:
print('error!')
else:
my_weight[k] = old_weight[name]
self.load_state_dict(my_weight)
def _make_layer(self, number_list, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(number_list[0], stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(number_list[i]))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def analyse_number(weight):
number_list = list()
group_list = list()
layer_list = list()
old_name = '1.0.'
old_group = '1'
for k, v in weight.items():
        if 'layer' in k and 'conv' in k and 'cs' not in k:
current_name = k.split('layer')[1].split('conv')[0]
current_group = current_name.split('.')[0]
if current_name != old_name:
old_name = current_name
group_list.append(layer_list.copy())
layer_list = list()
if current_group != old_group:
old_group = current_group
number_list.append(group_list.copy())
group_list = list()
layer_list.append(v.size()[0])
layer_list.append(v.size()[1])
group_list.append(layer_list.copy())
number_list.append(group_list.copy())
return number_list
def NetworkNew(group_id):
model = ResNet(group_id, Bottleneck, [3, 4, 6, 3])
return model
class ResNet_test(nn.Module):
def __init__(self, model_path, block, layers, num_classes=1000):
old_weight = torch.load(model_path)
channel_number_list = analyse_number(old_weight)
self.inplanes = 64
super(ResNet_test, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(channel_number_list[0], block, 64, layers[0])
self.layer2 = self._make_layer(channel_number_list[1], block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(channel_number_list[2], block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(channel_number_list[3], block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
my_weight = self.state_dict()
for k, v in my_weight.items():
name = 'module.' + k
my_weight[k] = old_weight[name]
self.load_state_dict(my_weight)
def _make_layer(self, number_list, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(number_list[0], stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(number_list[i]))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def NetworkNew_test(model_path):
model = ResNet_test(model_path, Bottleneck, [3, 4, 6, 3])
return model
| 8,768 | 35.235537 | 95 | py |