version (stringclasses, 25 values) | code (stringlengths, 75-178k) | apis (sequence) | full_version (stringlengths, 1-6) | repo_name (stringlengths, 9-78) | hexsha (stringlengths, 40) |
---|---|---|---|---|---|
1.2 | import torch
from terminaltables import AsciiTable
from copy import deepcopy
import numpy as np
import torch.nn.functional as F
def get_sr_flag(epoch, sr):
# return epoch >= 5 and sr
return sr
def parse_module_defs(module_defs):
CBL_idx = []
Conv_idx = []
ignore_idx = set()
for i, module_def in enumerate(module_defs):
if module_def['type'] == 'convolutional':
if module_def['batch_normalize'] == '1':
CBL_idx.append(i)
else:
Conv_idx.append(i)
if module_defs[i+1]['type'] == 'maxpool' and module_defs[i+2]['type'] == 'route':
# do not prune the CBL right before the SPP block; this also distinguishes the tiny variant
ignore_idx.add(i)
if module_defs[i+1]['type'] == 'route' and 'groups' in module_defs[i+1]:
ignore_idx.add(i)
elif module_def['type'] == 'shortcut':
ignore_idx.add(i-1)
identity_idx = (i + int(module_def['from']))
if module_defs[identity_idx]['type'] == 'convolutional':
ignore_idx.add(identity_idx)
elif module_defs[identity_idx]['type'] == 'shortcut':
ignore_idx.add(identity_idx - 1)
elif module_def['type'] == 'upsample':
# do not prune the convolutional layer before an upsample layer
ignore_idx.add(i - 1)
prune_idx = [idx for idx in CBL_idx if idx not in ignore_idx]
return CBL_idx, Conv_idx, prune_idx
def parse_module_defs2(module_defs):
CBL_idx = []
Conv_idx = []
shortcut_idx=dict()
shortcut_all=set()
ignore_idx = set()
for i, module_def in enumerate(module_defs):
if module_def['type'] == 'convolutional':
if module_def['batch_normalize'] == '1':
CBL_idx.append(i)
else:
Conv_idx.append(i)
if module_defs[i+1]['type'] == 'maxpool' and module_defs[i+2]['type'] == 'route':
# do not prune the CBL right before the SPP block; distinguishes SPP from the tiny variant
ignore_idx.add(i)
if module_defs[i+1]['type'] == 'route' and 'groups' in module_defs[i+1]:
ignore_idx.add(i)
elif module_def['type'] == 'upsample':
# do not prune the convolutional layer before an upsample layer
ignore_idx.add(i - 1)
elif module_def['type'] == 'shortcut':
identity_idx = (i + int(module_def['from']))
if module_defs[identity_idx]['type'] == 'convolutional':
#ignore_idx.add(identity_idx)
shortcut_idx[i-1]=identity_idx
shortcut_all.add(identity_idx)
elif module_defs[identity_idx]['type'] == 'shortcut':
#ignore_idx.add(identity_idx - 1)
shortcut_idx[i-1]=identity_idx-1
shortcut_all.add(identity_idx-1)
shortcut_all.add(i-1)
prune_idx = [idx for idx in CBL_idx if idx not in ignore_idx]
return CBL_idx, Conv_idx, prune_idx,shortcut_idx,shortcut_all
def parse_module_defs4(module_defs):
CBL_idx = []
Conv_idx = []
shortcut_idx= []
for i, module_def in enumerate(module_defs):
if module_def['type'] == 'convolutional':
if module_def['batch_normalize'] == '1':
CBL_idx.append(i)
else:
Conv_idx.append(i)
elif module_def['type'] == 'shortcut':
shortcut_idx.append(i-1)
return CBL_idx, Conv_idx, shortcut_idx
def gather_bn_weights(module_list, prune_idx):
size_list = [module_list[idx][1].weight.data.shape[0] for idx in prune_idx]
bn_weights = torch.zeros(sum(size_list))
index = 0
for idx, size in zip(prune_idx, size_list):
bn_weights[index:(index + size)] = module_list[idx][1].weight.data.abs().clone()
index += size
return bn_weights
def write_cfg(cfg_file, module_defs):
with open(cfg_file, 'w') as f:
for module_def in module_defs:
f.write(f"[{module_def['type']}]\n")
for key, value in module_def.items():
if key == 'batch_normalize' and value == 0:
continue
if key != 'type':
if key == 'anchors':
value = ', '.join(','.join(str(int(i)) for i in j) for j in value)
f.write(f"{key}={value}\n")
f.write("\n")
return cfg_file
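# A minimal usage sketch for write_cfg (the module_defs below are hypothetical,
# illustration-only dicts; real ones come from parsing a darknet .cfg file):
#
#   example_defs = [
#       {'type': 'net', 'batch': '64', 'subdivisions': '16'},
#       {'type': 'convolutional', 'batch_normalize': 1, 'filters': 32,
#        'size': 3, 'stride': 1, 'pad': 1, 'activation': 'leaky'},
#   ]
#   write_cfg('pruned.cfg', example_defs)  # writes a darknet-style cfg and returns the path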
class BNOptimizer():
@staticmethod
def updateBN(sr_flag, module_list, s, prune_idx, epoch, idx2mask=None, opt=None):
if sr_flag:
# s = s if epoch <= opt.epochs * 0.5 else s * 0.01
for idx in prune_idx:
# Sequential(Conv, BN, LeakyReLU)
bn_module = module_list[idx][1]
bn_module.weight.grad.data.add_(s * torch.sign(bn_module.weight.data)) # L1
if idx2mask:
for idx in idx2mask:
bn_module = module_list[idx][1]
#bn_module.weight.grad.data.add_(0.5 * s * torch.sign(bn_module.weight.data) * (1 - idx2mask[idx].cuda()))
bn_module.weight.grad.data.sub_(0.99 * s * torch.sign(bn_module.weight.data) * idx2mask[idx].cuda())
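# Why adding s * sign(gamma) to the gradient implements L1 sparsity (network slimming):
# d/d(gamma) of s * |gamma| is s * sign(gamma), so this extra gradient term pushes the BN
# scale factors toward zero. A tiny illustrative check (values are made up):
#
#   gamma = torch.tensor([0.5, -0.2, 0.0], requires_grad=True)
#   (0.01 * gamma.abs().sum()).backward()          # s = 0.01
#   # gamma.grad -> tensor([0.0100, -0.0100, 0.0000]) == 0.01 * torch.sign(gamma)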
def obtain_quantiles(bn_weights, num_quantile=5):
sorted_bn_weights, i = torch.sort(bn_weights)
total = sorted_bn_weights.shape[0]
quantiles = sorted_bn_weights.tolist()[-1::-total//num_quantile][::-1]
print("\nBN weights quantile:")
quantile_table = [
[f'{i}/{num_quantile}' for i in range(1, num_quantile+1)],
["%.3f" % quantile for quantile in quantiles]
]
print(AsciiTable(quantile_table).table)
return quantiles
def get_input_mask(module_defs, idx, CBLidx2mask):
if idx == 0:
return np.ones(3)
if module_defs[idx - 1]['type'] == 'convolutional':
return CBLidx2mask[idx - 1]
elif module_defs[idx - 1]['type'] == 'shortcut':
return CBLidx2mask[idx - 2]
elif module_defs[idx - 1]['type'] == 'route':
route_in_idxs = []
for layer_i in module_defs[idx - 1]['layers'].split(","):
if int(layer_i) < 0:
route_in_idxs.append(idx - 1 + int(layer_i))
else:
route_in_idxs.append(int(layer_i))
if len(route_in_idxs) == 1:
mask = CBLidx2mask[route_in_idxs[0]]
if 'groups' in module_defs[idx - 1]:
return mask[(mask.shape[0]//2):]
return mask
elif len(route_in_idxs) == 2:
# return np.concatenate([CBLidx2mask[in_idx - 1] for in_idx in route_in_idxs])
if module_defs[route_in_idxs[0]]['type'] == 'upsample':
mask1 = CBLidx2mask[route_in_idxs[0] - 1]
elif module_defs[route_in_idxs[0]]['type'] == 'convolutional':
mask1 = CBLidx2mask[route_in_idxs[0]]
if module_defs[route_in_idxs[1]]['type'] == 'convolutional':
mask2 = CBLidx2mask[route_in_idxs[1]]
else:
mask2 = CBLidx2mask[route_in_idxs[1] - 1]
return np.concatenate([mask1, mask2])
elif len(route_in_idxs) == 4:
# the last route layer inside the SPP block
mask = CBLidx2mask[route_in_idxs[-1]]
return np.concatenate([mask, mask, mask, mask])
else:
print("Something wrong with route module!")
raise Exception
elif module_defs[idx - 1]['type'] == 'maxpool': #tiny
if module_defs[idx - 2]['type'] == 'route': #v4 tiny
return get_input_mask(module_defs, idx - 1, CBLidx2mask)
else:
return CBLidx2mask[idx - 2] #v3 tiny
def init_weights_from_loose_model(compact_model, loose_model, CBL_idx, Conv_idx, CBLidx2mask):
for idx in CBL_idx:
compact_CBL = compact_model.module_list[idx]
loose_CBL = loose_model.module_list[idx]
out_channel_idx = np.argwhere(CBLidx2mask[idx])[:, 0].tolist()
compact_bn, loose_bn = compact_CBL[1], loose_CBL[1]
compact_bn.weight.data = loose_bn.weight.data[out_channel_idx].clone()
compact_bn.bias.data = loose_bn.bias.data[out_channel_idx].clone()
compact_bn.running_mean.data = loose_bn.running_mean.data[out_channel_idx].clone()
compact_bn.running_var.data = loose_bn.running_var.data[out_channel_idx].clone()
input_mask = get_input_mask(loose_model.module_defs, idx, CBLidx2mask)
in_channel_idx = np.argwhere(input_mask)[:, 0].tolist()
compact_conv, loose_conv = compact_CBL[0], loose_CBL[0]
tmp = loose_conv.weight.data[:, in_channel_idx, :, :].clone()
compact_conv.weight.data = tmp[out_channel_idx, :, :, :].clone()
for idx in Conv_idx:
compact_conv = compact_model.module_list[idx][0]
loose_conv = loose_model.module_list[idx][0]
input_mask = get_input_mask(loose_model.module_defs, idx, CBLidx2mask)
in_channel_idx = np.argwhere(input_mask)[:, 0].tolist()
compact_conv.weight.data = loose_conv.weight.data[:, in_channel_idx, :, :].clone()
compact_conv.bias.data = loose_conv.bias.data.clone()
def prune_model_keep_size(model, prune_idx, CBL_idx, CBLidx2mask):
pruned_model = deepcopy(model)
for idx in prune_idx:
mask = torch.from_numpy(CBLidx2mask[idx]).cuda()
bn_module = pruned_model.module_list[idx][1]
bn_module.weight.data.mul_(mask)
activation = F.leaky_relu((1 - mask) * bn_module.bias.data, 0.1)
# convolutional layers right before the two upsample layers
next_idx_list = [idx + 1]
if idx == 79:
next_idx_list.append(84)
elif idx == 91:
next_idx_list.append(96)
for next_idx in next_idx_list:
next_conv = pruned_model.module_list[next_idx][0]
conv_sum = next_conv.weight.data.sum(dim=(2, 3))
offset = conv_sum.matmul(activation.reshape(-1, 1)).reshape(-1)
if next_idx in CBL_idx:
next_bn = pruned_model.module_list[next_idx][1]
next_bn.running_mean.data.sub_(offset)
else:
next_conv.bias.data.add_(offset)
bn_module.bias.data.mul_(mask)
return pruned_model
def obtain_bn_mask(bn_module, thre):
thre = thre.cuda()
mask = bn_module.weight.data.abs().ge(thre).float()
return mask
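# Worked example for obtain_bn_mask (illustrative values): with BN weights
# [0.50, 0.01, -0.30] and thre = 0.1, weight.abs().ge(thre) yields mask [1., 0., 1.],
# i.e. channels whose |gamma| falls below the threshold are marked for pruning (0).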
def update_activation(i, pruned_model, activation, CBL_idx):
next_idx = i + 1
if pruned_model.module_defs[next_idx]['type'] == 'convolutional':
next_conv = pruned_model.module_list[next_idx][0]
conv_sum = next_conv.weight.data.sum(dim=(2, 3))
offset = conv_sum.matmul(activation.reshape(-1, 1)).reshape(-1)
if next_idx in CBL_idx:
next_bn = pruned_model.module_list[next_idx][1]
next_bn.running_mean.data.sub_(offset)
else:
next_conv.bias.data.add_(offset)
def prune_model_keep_size2(model, prune_idx, CBL_idx, CBLidx2mask):
pruned_model = deepcopy(model)
activations = []
for i, model_def in enumerate(model.module_defs):
if model_def['type'] == 'convolutional':
activation = torch.zeros(int(model_def['filters'])).cuda()
if i in prune_idx:
mask = torch.from_numpy(CBLidx2mask[i]).cuda()
bn_module = pruned_model.module_list[i][1]
bn_module.weight.data.mul_(mask)
if model_def['activation'] == 'leaky':
activation = F.leaky_relu((1 - mask) * bn_module.bias.data, 0.1)
elif model_def['activation'] == 'mish':
activation = (1 - mask) * bn_module.bias.data.mul(F.softplus(bn_module.bias.data).tanh())
update_activation(i, pruned_model, activation, CBL_idx)
bn_module.bias.data.mul_(mask)
activations.append(activation)
elif model_def['type'] == 'shortcut':
actv1 = activations[i - 1]
from_layer = int(model_def['from'])
actv2 = activations[i + from_layer]
activation = actv1 + actv2
update_activation(i, pruned_model, activation, CBL_idx)
activations.append(activation)
elif model_def['type'] == 'route':
# SPP is not pruned; its route layers need no update and only act as placeholders
from_layers = [int(s) for s in model_def['layers'].split(',')]
activation = None
if len(from_layers) == 1:
activation = activations[i + from_layers[0] if from_layers[0] < 0 else from_layers[0]]
if 'groups' in model_def:
activation = activation[(activation.shape[0]//2):]
update_activation(i, pruned_model, activation, CBL_idx)
elif len(from_layers) == 2:
actv1 = activations[i + from_layers[0]]
actv2 = activations[i + from_layers[1] if from_layers[1] < 0 else from_layers[1]]
activation = torch.cat((actv1, actv2))
update_activation(i, pruned_model, activation, CBL_idx)
activations.append(activation)
elif model_def['type'] == 'upsample':
# activation = torch.zeros(int(model.module_defs[i - 1]['filters'])).cuda()
activations.append(activations[i-1])
elif model_def['type'] == 'yolo':
activations.append(None)
elif model_def['type'] == 'maxpool': # distinguish SPP from the tiny variant
if model.module_defs[i + 1]['type'] == 'route':
activations.append(None)
else:
activation = activations[i-1]
update_activation(i, pruned_model, activation, CBL_idx)
activations.append(activation)
return pruned_model
def get_mask(model, prune_idx, shortcut_idx):
sort_prune_idx=[idx for idx in prune_idx if idx not in shortcut_idx]
bn_weights = gather_bn_weights(model.module_list, sort_prune_idx)
sorted_bn = torch.sort(bn_weights)[0]
highest_thre = []
for idx in sort_prune_idx:
# .item() returns the scalar value stored in the tensor
highest_thre.append(model.module_list[idx][1].weight.data.abs().max().item())
highest_thre = min(highest_thre)
filters_mask = []
idx_new=dict()
# CBL_idx stores all convolutional layers that have BN (the conv layer right before a YOLO layer has no BN)
for idx in prune_idx:
bn_module = model.module_list[idx][1]
if idx not in shortcut_idx:
mask = obtain_bn_mask(bn_module, torch.tensor(highest_thre)).cpu()
idx_new[idx]=mask
else:
mask=idx_new[shortcut_idx[idx]]
idx_new[idx]=mask
filters_mask.append(mask.clone())
prune2mask = {idx: mask for idx, mask in zip(prune_idx, filters_mask)}
return prune2mask
def get_mask2(model, prune_idx, percent):
bn_weights = gather_bn_weights(model.module_list, prune_idx)
sorted_bn = torch.sort(bn_weights)[0]
thre_index = int(len(sorted_bn) * percent)
thre = sorted_bn[thre_index]
filters_mask = []
for idx in prune_idx:
bn_module = model.module_list[idx][1]
mask = obtain_bn_mask(bn_module, thre).cpu()
filters_mask.append(mask.clone())
prune2mask = {idx: mask for idx, mask in zip(prune_idx, filters_mask)}
return prune2mask
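# A minimal end-to-end sketch of how these helpers are typically chained together
# (assumptions: `model` is a darknet-style model exposing .module_defs/.module_list,
# and a 0.5 prune ratio is only an example value):
#
#   CBL_idx, Conv_idx, prune_idx = parse_module_defs(model.module_defs)
#   prune2mask = get_mask2(model, prune_idx, percent=0.5)
#   CBLidx2mask = {idx: mask.numpy() for idx, mask in prune2mask.items()}
#   pruned_model = prune_model_keep_size2(model, prune_idx, CBL_idx, CBLidx2mask)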
def merge_mask(model, CBLidx2mask, CBLidx2filters):
for i in range(len(model.module_defs) - 1, -1, -1):
mtype = model.module_defs[i]['type']
if mtype == 'shortcut':
if model.module_defs[i]['is_access']:
continue
Merge_masks = []
layer_i = i
while mtype == 'shortcut':
model.module_defs[layer_i]['is_access'] = True
if model.module_defs[layer_i-1]['type'] == 'convolutional':
bn = int(model.module_defs[layer_i-1]['batch_normalize'])
if bn:
Merge_masks.append(CBLidx2mask[layer_i-1].unsqueeze(0))
layer_i = int(model.module_defs[layer_i]['from'])+layer_i
mtype = model.module_defs[layer_i]['type']
if mtype == 'convolutional':
bn = int(model.module_defs[layer_i]['batch_normalize'])
if bn:
Merge_masks.append(CBLidx2mask[layer_i].unsqueeze(0))
if len(Merge_masks) > 1:
Merge_masks = torch.cat(Merge_masks, 0)
merge_mask = (torch.sum(Merge_masks, dim=0) > 0).float()
else:
merge_mask = Merge_masks[0].float()
layer_i = i
mtype = 'shortcut'
while mtype == 'shortcut':
if model.module_defs[layer_i-1]['type'] == 'convolutional':
bn = int(model.module_defs[layer_i-1]['batch_normalize'])
if bn:
CBLidx2mask[layer_i-1] = merge_mask
CBLidx2filters[layer_i-1] = int(torch.sum(merge_mask).item())
layer_i = int(model.module_defs[layer_i]['from'])+layer_i
mtype = model.module_defs[layer_i]['type']
if mtype == 'convolutional':
bn = int(model.module_defs[layer_i]['batch_normalize'])
if bn:
CBLidx2mask[layer_i] = merge_mask
CBLidx2filters[layer_i] = int(torch.sum(merge_mask).item())
| [
"torch.cat",
"torch.nn.functional.softplus",
"torch.sign",
"torch.from_numpy",
"torch.sum",
"torch.tensor",
"torch.sort",
"torch.nn.functional.leaky_relu"
] | 1.2 | qianyuqian-DeepLearning/YoloV3-prune-layer | 5b4a4d6346d980b36235eae5335003f2c43a28cf |
0.4 | # -*- coding: utf-8 -*-
"""
Module to handle getting data loading classes and helper functions.
"""
import json
import io
import torch
import numpy as np
from scipy.sparse import *
from collections import Counter, defaultdict
from torch.utils.data import Dataset
from .bert_utils import *
from .eval_utils import normalize_text
from . import constants as Constants
from .timer import Timer
################################################################################
# Dataset Prep #
################################################################################
def prepare_datasets(config):
train_set = None if config['trainset'] is None else QADataset(config['trainset'], config)
dev_set = None if config['devset'] is None else QADataset(config['devset'], config)
test_set = None if config['testset'] is None else QADataset(config['testset'], config)
return {'train': train_set, 'dev': dev_set, 'test': test_set}
################################################################################
# Dataset Classes #
################################################################################
class QADataset(Dataset):
"""QA dataset."""
def __init__(self, filename, config):
timer = Timer('Load %s' % filename)
self.filename = filename
self.config = config
paragraph_lens = []
question_lens = []
turn_num = []
self.paragraphs = []
self.vocab = Counter()
dataset = read_json(filename)
for paragraph in dataset['data']: # Paragraph level
paragraph_lens.append(len(paragraph['annotated_context']['word']))
turn_num.append(len(paragraph['qas']))
# Prepare paragraphs
history = []
para = {'turns': []}
for qas in paragraph['qas']: # Turn level
# Build vocab
for w in qas['annotated_question']['word'] \
+ paragraph['annotated_context']['word'] \
+ qas['annotated_answer']['word']:
self.vocab[w.lower()] += 1
temp = []
marker = []
n_history = len(history) if self.config['n_history'] < 0 else min(self.config['n_history'], len(history))
if n_history > 0:
count = sum([not config['no_pre_question'], not config['no_pre_answer']]) * len(history[-n_history:])
for q, a in history[-n_history:]:
if not config['no_pre_question']:
temp.extend(q)
marker.extend([count] * len(q))
count -= 1
if not config['no_pre_answer']:
temp.extend(a)
marker.extend([count] * len(a))
count -= 1
temp.extend(qas['annotated_question']['word'])
marker.extend([0] * len(qas['annotated_question']['word']))
history.append((qas['annotated_question']['word'], qas['annotated_answer']['word']))
qas['annotated_question']['word'] = temp
qas['annotated_question']['marker'] = marker
question_lens.append(len(qas['annotated_question']['word']))
# Prepare a question-answer pair
question = qas['annotated_question']
if config['dataset_name'] == 'coqa':
answers = [qas['answer']]
if 'additional_answers' in qas:
answers = answers + qas['additional_answers']
else:
answers = qas['additional_answers']
normalized_answer = normalize_text(qas['answer'])
sample = {'turn_id': qas['turn_id'],
'question': question,
'answers': answers,
'targets': qas['answer_span'],
'span_mask': 0}
if config['dataset_name'] == 'coqa':
if Constants.CoQA_UNK_ANSWER == normalized_answer:
sample['answer_type_targets'] = Constants.CoQA_UNK_ANSWER_LABEL
elif Constants.CoQA_YES_ANSWER == normalized_answer:
sample['answer_type_targets'] = Constants.CoQA_ANSWER_YES_LABEL
elif Constants.CoQA_NO_ANSWER == normalized_answer:
sample['answer_type_targets'] = Constants.CoQA_ANSWER_NO_LABEL
else:
sample['answer_type_targets'] = Constants.CoQA_ANSWER_SPAN_LABEL
sample['span_mask'] = 1
else:
if Constants.QuAC_UNK_ANSWER == normalized_answer:
sample['unk_answer_targets'] = 1
else:
sample['unk_answer_targets'] = 0
sample['span_mask'] = 1
if qas['yesno'] == Constants.QuAC_YESNO_YES:
sample['yesno_targets'] = Constants.QuAC_YESNO_YES_LABEL
elif qas['yesno'] == Constants.QuAC_YESNO_NO:
sample['yesno_targets'] = Constants.QuAC_YESNO_NO_LABEL
else:
sample['yesno_targets'] = Constants.QuAC_YESNO_OTHER_LABEL
if qas['followup'] == Constants.QuAC_FOLLOWUP_YES:
sample['followup_targets'] = Constants.QuAC_FOLLOWUP_YES_LABEL
elif qas['followup'] == Constants.QuAC_FOLLOWUP_NO:
sample['followup_targets'] = Constants.QuAC_FOLLOWUP_NO_LABEL
else:
sample['followup_targets'] = Constants.QuAC_FOLLOWUP_OTHER_LABEL
para['id'] = paragraph['id']
para['evidence'] = paragraph['annotated_context']
if self.config['predict_raw_text']:
para['raw_evidence'] = paragraph['context']
para['turns'].append(sample)
self.paragraphs.append(para)
print('Load {} paragraphs.'.format(len(self.paragraphs)))
print('Turn num: avg = %.1f, min = %d, max = %d' % (np.average(turn_num), np.min(turn_num), np.max(turn_num)))
print('Paragraph length: avg = %.1f, min = %d, max = %d' % (np.average(paragraph_lens), np.min(paragraph_lens), np.max(paragraph_lens)))
print('Question length: avg = %.1f, min = %d, max = %d' % (np.average(question_lens), np.min(question_lens), np.max(question_lens)))
timer.finish()
def __len__(self):
return len(self.paragraphs)
def __getitem__(self, idx):
return self.paragraphs[idx]
################################################################################
# Read & Write Helper Functions #
################################################################################
def write_json_to_file(json_object, json_file, mode='w', encoding='utf-8'):
with io.open(json_file, mode, encoding=encoding) as outfile:
json.dump(json_object, outfile, indent=4, sort_keys=True, ensure_ascii=False)
def log_json(data, filename, mode='w', encoding='utf-8'):
with io.open(filename, mode, encoding=encoding) as outfile:
outfile.write(json.dumps(data, indent=4, ensure_ascii=False))
def get_file_contents(filename, encoding='utf-8'):
with io.open(filename, encoding=encoding) as f:
content = f.read()
f.close()
return content
def read_json(filename, encoding='utf-8'):
contents = get_file_contents(filename, encoding=encoding)
return json.loads(contents)
def get_processed_file_contents(file_path, encoding="utf-8"):
contents = get_file_contents(file_path, encoding=encoding)
return contents.strip()
################################################################################
# DataLoader Helper Functions #
################################################################################
def sanitize_input(sample_batch, config, vocab, feature_dict, bert_tokenizer, training=True):
"""
Reformats sample_batch for easy vectorization.
Args:
sample_batch: the sampled batch, yet to be sanitized or vectorized.
vocab: word embedding dictionary.
feature_dict: the features we want to concatenate to our embeddings.
training: train or test?
"""
sanitized_batch = defaultdict(list)
batch_graphs = []
for paragraph in sample_batch:
if 'id' in paragraph:
sanitized_batch['id'].append(paragraph['id'])
evidence = paragraph['evidence']['word']
processed_e = [vocab[w.lower()] if w.lower() in vocab else vocab[Constants._UNK_TOKEN] for w in evidence]
sanitized_batch['evidence'].append(processed_e)
if config.get('static_graph', None):
batch_graphs.append(paragraph['evidence']['graph'])
if config['f_tf']:
sanitized_batch['evidence_tf'].append(compute_tf(evidence))
if config['predict_raw_text']:
sanitized_batch['raw_evidence_text'].append(paragraph['raw_evidence'])
sanitized_batch['offsets'].append(paragraph['evidence']['offsets'])
else:
sanitized_batch['evidence_text'].append(evidence)
para_turn_ids = []
para_ques = []
para_ques_marker = []
para_bert_ques_features = []
para_features = []
para_targets = []
para_span_mask = []
if config['dataset_name'] == 'coqa':
para_answer_type_targets = []
else:
para_unk_answer_targets = []
para_yesno_targets = []
para_followup_targets = []
para_answers = []
for ex in paragraph['turns']:
para_turn_ids.append(ex['turn_id'])
question = ex['question']['word']
processed_q = [vocab[w.lower()] if w.lower() in vocab else vocab[Constants._UNK_TOKEN] for w in question]
para_ques.append(processed_q)
para_ques_marker.append(ex['question']['marker'])
if config['use_bert']:
bert_ques_features = convert_text_to_bert_features(question, bert_tokenizer, config['bert_max_seq_len'], config['bert_doc_stride'])
para_bert_ques_features.append(bert_ques_features)
# featurize evidence document:
para_features.append(featurize(ex['question'], paragraph['evidence'], feature_dict))
para_targets.append(ex['targets'])
para_span_mask.append(ex['span_mask'])
para_answers.append(ex['answers'])
if config['dataset_name'] == 'coqa':
para_answer_type_targets.append(ex['answer_type_targets'])
else:
para_unk_answer_targets.append(ex['unk_answer_targets'])
para_yesno_targets.append(ex['yesno_targets'])
para_followup_targets.append(ex['followup_targets'])
sanitized_batch['question'].append(para_ques)
sanitized_batch['question_marker'].append(para_ques_marker)
if config['use_bert']:
bert_evidence_features = convert_text_to_bert_features(evidence, bert_tokenizer, config['bert_max_seq_len'], config['bert_doc_stride'])
sanitized_batch['bert_evidence_features'].append(bert_evidence_features)
sanitized_batch['bert_question_features'].append(para_bert_ques_features)
sanitized_batch['turn_ids'].append(para_turn_ids)
sanitized_batch['features'].append(para_features)
sanitized_batch['targets'].append(para_targets)
sanitized_batch['span_mask'].append(para_span_mask)
sanitized_batch['answers'].append(para_answers)
if config['dataset_name'] == 'coqa':
sanitized_batch['answer_type_targets'].append(para_answer_type_targets)
else:
sanitized_batch['unk_answer_targets'].append(para_unk_answer_targets)
sanitized_batch['yesno_targets'].append(para_yesno_targets)
sanitized_batch['followup_targets'].append(para_followup_targets)
if config.get('static_graph', None):
batch_graphs = cons_batch_graph(batch_graphs)
sanitized_batch['evidence_graphs'] = vectorize_batch_graph(batch_graphs)
return sanitized_batch
def vectorize_input(batch, config, bert_model, training=True, device=None):
"""
- Vectorize question and question mask
- Vectorize evidence documents, mask and features
- Vectorize target representations
"""
# Check there is at least one valid example in batch (containing targets):
if not batch:
return None
# Relevant parameters:
batch_size = len(batch['question'])
# Initialize all relevant parameters to None:
targets = None
# Part 1: Question Words
# Batch questions ( sum_bs(n_sect), len_q)
max_q_len = max([len(q) for para_q in batch['question'] for q in para_q])
max_turn_len = max([len(para_q) for para_q in batch['question']])
xq = torch.LongTensor(batch_size, max_turn_len, max_q_len).fill_(0)
xq_len = torch.LongTensor(batch_size, max_turn_len).fill_(1)
num_turn = torch.LongTensor(batch_size).fill_(0)
if config['use_ques_marker']:
xq_f = torch.LongTensor(batch_size, max_turn_len, max_q_len).fill_(0)
for i, para_q in enumerate(batch['question']):
num_turn[i] = len(para_q)
for j, q in enumerate(para_q):
xq[i, j, :len(q)].copy_(torch.LongTensor(q))
if config['use_ques_marker']:
xq_f[i, j, :len(q)].copy_(torch.LongTensor(batch['question_marker'][i][j]))
xq_len[i, j] = len(q)
# Part 2: Document Words
max_d_len = max([len(d) for d in batch['evidence']])
xd = torch.LongTensor(batch_size, max_d_len).fill_(0)
xd_len = torch.LongTensor(batch_size).fill_(1)
# 2(a): fill up DrQA section variables
if config['f_tf']:
xd_tf = torch.Tensor(batch_size, max_d_len).fill_(0)
for i, d in enumerate(batch['evidence_tf']):
xd_tf[i, :len(d)].copy_(torch.Tensor(d))
xd_f = {}
for i, d in enumerate(batch['evidence']):
xd[i, :len(d)].copy_(torch.LongTensor(d))
xd_len[i] = len(d)
# Context features
for j, para_features in enumerate(batch['features'][i]):
for feat_key, feat_val in para_features.items():
if not feat_key in xd_f:
xd_f[feat_key] = torch.zeros(batch_size, max_turn_len, max_d_len, dtype=torch.long)
xd_f[feat_key][i, j, :len(d)].copy_(feat_val)
# Part 3: Target representations
targets = torch.LongTensor(batch_size, max_turn_len, 2).fill_(-100)
for i, _target in enumerate(batch['targets']):
for j in range(len(_target)):
targets[i, j, 0] = _target[j][0]
targets[i, j, 1] = _target[j][1]
# Part 4: UNK/YES/NO answer masks
span_mask = torch.Tensor(batch_size, max_turn_len).fill_(0)
for i, _span_mask in enumerate(batch['span_mask']):
for j in range(len(_span_mask)):
span_mask[i, j] = _span_mask[j]
if config['dataset_name'] == 'coqa':
answer_type_targets = torch.LongTensor(batch_size, max_turn_len).fill_(-100)
for i, _unk_yes_no_target in enumerate(batch['answer_type_targets']):
for j in range(len(_unk_yes_no_target)):
answer_type_targets[i, j] = _unk_yes_no_target[j]
else:
unk_answer_targets = torch.Tensor(batch_size, max_turn_len).fill_(-100)
yesno_targets = torch.LongTensor(batch_size, max_turn_len).fill_(-100)
followup_targets = torch.LongTensor(batch_size, max_turn_len).fill_(-100)
for i, _unk_answer_target in enumerate(batch['unk_answer_targets']):
for j in range(len(_unk_answer_target)):
unk_answer_targets[i, j] = _unk_answer_target[j]
yesno_targets[i, j] = batch['yesno_targets'][i][j]
followup_targets[i, j] = batch['followup_targets'][i][j]
# Part 5: Previous answer markers
if config['n_history'] > 0:
if config['answer_marker_embed_dim'] != 0:
xd_answer_marker = torch.LongTensor(batch_size, max_turn_len, max_d_len, config['n_history']).fill_(0)
for i, _target in enumerate(batch['targets']):
for j in range(len(_target)):
if _target[j][0] > 0 and _target[j][1] > 0:
for prev_answer_distance in range(config['n_history']):
turn_id = j + prev_answer_distance + 1
if turn_id < len(_target):
mark_prev_answer(_target[j][0], _target[j][1], xd_answer_marker[i, turn_id, :, prev_answer_distance], prev_answer_distance)
# Part 6: Extract features from pretrained BERT models
if config['use_bert']:
with torch.set_grad_enabled(False):
# Question words
max_bert_q_num_chunks = max([len(para_bert_q) for ex_bert_q in batch['bert_question_features'] for para_bert_q in ex_bert_q])
max_bert_q_len = max([len(bert_q.input_ids) for ex_bert_q in batch['bert_question_features'] for para_bert_q in ex_bert_q for bert_q in para_bert_q])
bert_xq = torch.LongTensor(batch_size, max_turn_len, max_bert_q_num_chunks, max_bert_q_len).fill_(0)
bert_xq_mask = torch.LongTensor(batch_size, max_turn_len, max_bert_q_num_chunks, max_bert_q_len).fill_(0)
for i, ex_bert_q in enumerate(batch['bert_question_features']):
for t, para_bert_q in enumerate(ex_bert_q):
for j, bert_q in enumerate(para_bert_q):
bert_xq[i, t, j, :len(bert_q.input_ids)].copy_(torch.LongTensor(bert_q.input_ids))
bert_xq_mask[i, t, j, :len(bert_q.input_mask)].copy_(torch.LongTensor(bert_q.input_mask))
if device:
bert_xq = bert_xq.to(device)
bert_xq_mask = bert_xq_mask.to(device)
layer_indexes = list(range(config['bert_layer_indexes'][0], config['bert_layer_indexes'][1]))
all_encoder_layers, _ = bert_model(bert_xq.view(-1, bert_xq.size(-1)), token_type_ids=None, attention_mask=bert_xq_mask.view(-1, bert_xq_mask.size(-1)))
torch.cuda.empty_cache()
all_encoder_layers = torch.stack([x.view(bert_xq.shape + (-1,)) for x in all_encoder_layers], 0).detach()
all_encoder_layers = all_encoder_layers[layer_indexes]
bert_xq_f = extract_bert_ques_hidden_states(all_encoder_layers, max_q_len, batch['bert_question_features'], weighted_avg=config['use_bert_weight'])
torch.cuda.empty_cache()
# Document words
max_bert_d_num_chunks = max([len(ex_bert_d) for ex_bert_d in batch['bert_evidence_features']])
max_bert_d_len = max([len(bert_d.input_ids) for ex_bert_d in batch['bert_evidence_features'] for bert_d in ex_bert_d])
bert_xd = torch.LongTensor(batch_size, max_bert_d_num_chunks, max_bert_d_len).fill_(0)
bert_xd_mask = torch.LongTensor(batch_size, max_bert_d_num_chunks, max_bert_d_len).fill_(0)
for i, ex_bert_d in enumerate(batch['bert_evidence_features']): # Example level
for j, bert_d in enumerate(ex_bert_d): # Chunk level
bert_xd[i, j, :len(bert_d.input_ids)].copy_(torch.LongTensor(bert_d.input_ids))
bert_xd_mask[i, j, :len(bert_d.input_mask)].copy_(torch.LongTensor(bert_d.input_mask))
if device:
bert_xd = bert_xd.to(device)
bert_xd_mask = bert_xd_mask.to(device)
all_encoder_layers, _ = bert_model(bert_xd.view(-1, bert_xd.size(-1)), token_type_ids=None, attention_mask=bert_xd_mask.view(-1, bert_xd_mask.size(-1)))
torch.cuda.empty_cache()
all_encoder_layers = torch.stack([x.view(bert_xd.shape + (-1,)) for x in all_encoder_layers], 0).detach()
all_encoder_layers = all_encoder_layers[layer_indexes]
bert_xd_f = extract_bert_ctx_hidden_states(all_encoder_layers, max_d_len, batch['bert_evidence_features'], weighted_avg=config['use_bert_weight'])
torch.cuda.empty_cache()
with torch.set_grad_enabled(training):
example = {'batch_size': batch_size,
'answers': batch['answers'],
'xq': xq.to(device) if device else xq,
'xq_len': xq_len.to(device) if device else xq_len,
'xd': xd.to(device) if device else xd,
'xd_len': xd_len.to(device) if device else xd_len,
'num_turn': num_turn.to(device) if device else num_turn,
'targets': targets.to(device) if device else targets,
'span_mask': span_mask.to(device) if device else span_mask}
if config.get('static_graph', None):
example['xd_graphs'] = batch['evidence_graphs']
if config['f_tf']:
example['xd_tf'] = xd_tf.to(device) if device else xd_tf
if config['dataset_name'] == 'coqa':
example['answer_type_targets'] = answer_type_targets.to(device) if device else answer_type_targets
else:
example['unk_answer_targets'] = unk_answer_targets.to(device) if device else unk_answer_targets
example['yesno_targets'] = yesno_targets.to(device) if device else yesno_targets
example['followup_targets'] = followup_targets.to(device) if device else followup_targets
if config['predict_raw_text']:
example['raw_evidence_text'] = batch['raw_evidence_text']
example['offsets'] = batch['offsets']
else:
example['evidence_text'] = batch['evidence_text']
if config['use_bert']:
example['bert_xq_f'] = bert_xq_f
example['bert_xd_f'] = bert_xd_f
if device:
for feat_key in xd_f:
xd_f[feat_key] = xd_f[feat_key].to(device)
example['xd_f'] = xd_f
if config['n_history'] > 0:
if config['answer_marker_embed_dim'] != 0:
example['xd_answer_marker'] = xd_answer_marker.to(device) if device else xd_answer_marker
if config['use_ques_marker']:
example['xq_f'] = xq_f.to(device) if device else xq_f
return example
def featurize(question, document, feature_dict):
doc_len = len(document['word'])
features = {}
if 'f_qem' in feature_dict:
features['f_qem'] = torch.zeros(doc_len, dtype=torch.long)
if 'f_pos' in feature_dict:
features['f_pos'] = torch.zeros(doc_len, dtype=torch.long)
if 'f_ner' in feature_dict:
features['f_ner'] = torch.zeros(doc_len, dtype=torch.long)
q_uncased_words = set([w.lower() for w in question['word']])
for i in range(doc_len):
d_word = document['word'][i]
if 'f_qem' in feature_dict:
features['f_qem'][i] = feature_dict['f_qem']['yes'] if d_word.lower() in q_uncased_words else feature_dict['f_qem']['no']
if 'f_pos' in feature_dict:
assert 'pos' in document
features['f_pos'][i] = feature_dict['f_pos'][document['pos'][i]] if document['pos'][i] in feature_dict['f_pos'] \
else feature_dict['f_pos'][Constants._UNK_POS]
if 'f_ner' in feature_dict:
assert 'ner' in document
features['f_ner'][i] = feature_dict['f_ner'][document['ner'][i]] if document['ner'][i] in feature_dict['f_ner'] \
else feature_dict['f_ner'][Constants._UNK_NER]
return features
def mark_prev_answer(span_start, span_end, evidence_answer_marker, prev_answer_distance):
assert prev_answer_distance >= 0
try:
assert span_start >= 0
assert span_end >= 0
except:
raise ValueError("Previous {0:d}th answer span should have been updated!".format(prev_answer_distance))
# Modify "tags" to mark previous answer span.
if span_start == span_end:
evidence_answer_marker[span_start] = 4 * prev_answer_distance + 1
else:
evidence_answer_marker[span_start] = 4 * prev_answer_distance + 2
evidence_answer_marker[span_end] = 4 * prev_answer_distance + 3
for passage_index in range(span_start + 1, span_end):
evidence_answer_marker[passage_index] = 4 * prev_answer_distance + 4
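# Worked example for mark_prev_answer (illustrative): with span_start=2, span_end=4 and
# prev_answer_distance=0, the marker gets 2 at position 2 (span start), 3 at position 4
# (span end) and 4 at position 3 (inside the span); a single-token span would get 1 instead.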
def compute_tf(doc):
doc_len = float(len(doc))
word_count = Counter(doc)
tf = []
for word in doc:
tf.append(word_count[word] / doc_len)
return tf
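# Worked example for compute_tf: compute_tf(['a', 'b', 'a']) returns [2/3, 1/3, 2/3],
# i.e. each token's count in the document divided by the document length.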
def cons_batch_graph(graphs):
num_nodes = max([len(g['g_features']) for g in graphs])
num_edges = max([g['num_edges'] for g in graphs])
batch_edges = []
batch_node2edge = []
batch_edge2node = []
for g in graphs:
edges = {}
node2edge = lil_matrix(np.zeros((num_edges, num_nodes)), dtype=np.float32)
edge2node = lil_matrix(np.zeros((num_nodes, num_edges)), dtype=np.float32)
edge_index = 0
for node1, value in g['g_adj'].items():
node1 = int(node1)
for each in value:
node2 = int(each['node'])
if node1 == node2: # Ignore self-loops for now
continue
edges[edge_index] = each['edge']
node2edge[edge_index, node2] = 1
edge2node[node1, edge_index] = 1
edge_index += 1
batch_edges.append(edges)
batch_node2edge.append(node2edge)
batch_edge2node.append(edge2node)
batch_graphs = {'max_num_edges': num_edges,
'edge_features': batch_edges,
'node2edge': batch_node2edge,
'edge2node': batch_edge2node
}
return batch_graphs
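# Expected per-graph input format for cons_batch_graph, inferred from the accesses above
# (field names come from the code; the concrete values are illustrative only):
#
#   g = {'g_features': [f0, f1, f2],             # one entry per node (3 nodes here)
#        'num_edges': 2,
#        'g_adj': {'0': [{'node': 1, 'edge': 'dep'}],
#                  '1': [{'node': 2, 'edge': 'dep'}]}}
#
# cons_batch_graph([g]) then returns sparse node2edge / edge2node incidence matrices,
# padded to the maximum node and edge counts in the batch.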
def vectorize_batch_graph(graph, edge_vocab=None, config=None):
# # vectorize the graph
# edge_features = []
# for edges in graph['edge_features']:
# edges_v = []
# for idx in range(len(edges)):
# edges_v.append(edge_vocab.getIndex(edges[idx]))
# for _ in range(graph['max_num_edges'] - len(edges_v)):
# edges_v.append(edge_vocab.PAD)
# edge_features.append(edges_v)
# edge_features = torch.LongTensor(np.array(edge_features))
gv = {
# 'edge_features': edge_features.to(config['device']) if config['device'] else edge_features,
'node2edge': graph['node2edge'],
'edge2node': graph['edge2node']
}
return gv
| [
"torch.zeros",
"torch.cuda.empty_cache",
"torch.LongTensor",
"torch.Tensor",
"torch.set_grad_enabled"
] | 0.4.1 | joe3d1998/GraphFlow | 8a751e4fc69a1e0c06ded23b7d1096f3161931a1 |
1.6 | import unittest
import torch
import torch.nn as nn
from pytorch_ner.nn_modules.rnn import DynamicRNN
embeddings = torch.randn(10, 20, 128) # [batch_size, seq_len, emb_dim]
lengths = torch.arange(start=20, end=10, step=-1)
rnn = DynamicRNN(
rnn_unit=nn.RNN,
input_size=128,
hidden_size=256,
num_layers=3,
dropout=0,
bidirectional=False,
)
gru = DynamicRNN(
rnn_unit=nn.GRU,
input_size=128,
hidden_size=64,
num_layers=2,
dropout=0.2,
bidirectional=True,
)
lstm = DynamicRNN(
rnn_unit=nn.LSTM,
input_size=128,
hidden_size=128,
num_layers=1,
dropout=0,
bidirectional=True,
)
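# Expected output feature size for each module below follows the usual recurrent-layer rule
# out_dim = hidden_size * (2 if bidirectional else 1), assuming DynamicRNN returns padded
# outputs of shape [batch_size, seq_len, out_dim]:
#   rnn  -> 256 * 1 = 256
#   gru  ->  64 * 2 = 128
#   lstm -> 128 * 2 = 256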
class TestRNN(unittest.TestCase):
def test_rnn_shape(self):
self.assertTrue(
rnn(embeddings, lengths).size() == torch.Size([10, 20, 256]),
)
def test_gru_shape(self):
self.assertTrue(
gru(embeddings, lengths).size() == torch.Size([10, 20, 128]),
)
def test_lstm_shape(self):
self.assertTrue(
lstm(embeddings, lengths).size() == torch.Size([10, 20, 256]),
)
if __name__ == "__main__":
unittest.main()
| [
"torch.Size",
"torch.randn",
"torch.arange"
] | 1.6.0 | abdallah1097/pytorch_ner | b1729d97ccb168e5796045cf9b387b35536803eb |
1.6 | from warnings import filterwarnings
import numpy as np
import onnx
import onnxruntime
import torch
import torch.nn as nn
from pytorch_ner.utils import to_numpy
filterwarnings(action="ignore", category=UserWarning)
def _onnx_export(
model: nn.Module,
path_to_save: str,
):
"""
Export PyTorch model to ONNX.
"""
model.cpu()
model.eval()
# hardcoded [batch_size, seq_len] = [1, 1] export
tokens = torch.tensor([[0]], dtype=torch.long)
lengths = torch.tensor([1], dtype=torch.long)
with torch.no_grad():
torch.onnx.export(
model=model,
args=(tokens, lengths),
f=path_to_save,
export_params=True,
opset_version=12, # hardcoded
do_constant_folding=True, # hardcoded
input_names=["tokens", "lengths"],
output_names=["output"],
dynamic_axes={
"tokens": {0: "batch_size", 1: "seq_len"},
"lengths": {0: "batch_size"},
"output": {0: "batch_size", 1: "seq_len"},
},
)
def _onnx_check_model(path_to_load: str):
"""
Check that the IR is well formed.
"""
onnx_model = onnx.load(path_to_load) # type: ignore
onnx.checker.check_model(onnx_model) # type: ignore
def _onnx_check_inference(
model: nn.Module, path_to_load: str, tokens: torch.Tensor, lengths: torch.Tensor
):
"""
Compute ONNX Runtime output prediction and compare with PyTorch results.
"""
# pytorch inference
model.eval()
torch_out = model(tokens, lengths)
# onnx inference
ort_session = onnxruntime.InferenceSession(path_to_load)
ort_inputs = {"tokens": to_numpy(tokens), "lengths": to_numpy(lengths)}
ort_outs = ort_session.run(None, ort_inputs)
# compare
np.testing.assert_allclose(to_numpy(torch_out), ort_outs[0], rtol=1e-03, atol=1e-05)
def onnx_export_and_check(
model: nn.Module,
path_to_save: str,
):
tokens = torch.tensor([[0]], dtype=torch.long)
lengths = torch.tensor([1], dtype=torch.long)
tokens_dynamic = torch.tensor([[0, 1, 2], [3, 4, 5]], dtype=torch.long)
lengths_dynamic = torch.tensor([3, 2], dtype=torch.long)
_onnx_export(model=model, path_to_save=path_to_save)
_onnx_check_model(path_to_load=path_to_save)
_onnx_check_inference(
model=model, path_to_load=path_to_save, tokens=tokens, lengths=lengths
)
_onnx_check_inference(
model=model,
path_to_load=path_to_save,
tokens=tokens_dynamic,
lengths=lengths_dynamic,
)
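# A minimal usage sketch (TinyTagger is a hypothetical stand-in; any nn.Module taking
# (tokens, lengths) and returning [batch_size, seq_len, ...] should fit this exporter):
#
#   class TinyTagger(nn.Module):
#       def __init__(self):
#           super().__init__()
#           self.emb = nn.Embedding(10, 4)
#           self.out = nn.Linear(4, 3)
#       def forward(self, tokens, lengths):
#           h = self.out(self.emb(tokens))                 # [batch_size, seq_len, 3]
#           return h * lengths.view(-1, 1, 1).float()      # keep `lengths` as a real input
#
#   onnx_export_and_check(TinyTagger(), "tiny_tagger.onnx")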
| [
"torch.no_grad",
"torch.tensor",
"torch.onnx.export"
] | 1.6.0 | abdallah1097/pytorch_ner | b1729d97ccb168e5796045cf9b387b35536803eb |
1.10 | from onnxruntime.quantization.quantize import quantize
from transformers import Wav2Vec2ForCTC
import torch
import argparse
def convert_to_onnx(model_id_or_path, onnx_model_name):
print(f"Converting {model_id_or_path} to onnx")
model = Wav2Vec2ForCTC.from_pretrained(model_id_or_path)
audio_len = 250000
x = torch.randn(1, audio_len, requires_grad=True)
torch.onnx.export(model, # model being run
x, # model input (or a tuple for multiple inputs)
onnx_model_name, # where to save the model (can be a file or file-like object)
export_params=True, # store the trained parameter weights inside the model file
opset_version=11, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names = ['input'], # the model's input names
output_names = ['output'], # the model's output names
dynamic_axes={'input' : {1 : 'audio_len'}, # variable length axes
'output' : {1 : 'audio_len'}})
def quantize_onnx_model(onnx_model_path, quantized_model_path):
print("Starting quantization...")
from onnxruntime.quantization import quantize_dynamic, QuantType
quantize_dynamic(onnx_model_path,
quantized_model_path,
weight_type=QuantType.QUInt8)
print(f"Quantized model saved to: {quantized_model_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
type=str,
default="ccoreilly/wav2vec2-large-100k-voxpopuli-catala",
help="Model HuggingFace ID or path that will converted to ONNX",
)
parser.add_argument(
"--quantize",
action="store_true",
help="Whether to use also quantize the model or not",
)
args = parser.parse_args()
model_id_or_path = args.model
onnx_model_name = model_id_or_path.split("/")[-1] + ".onnx"
convert_to_onnx(model_id_or_path, onnx_model_name)
if (args.quantize):
quantized_model_name = model_id_or_path.split("/")[-1] + ".quant.onnx"
quantize_onnx_model(onnx_model_name, quantized_model_name) | [
"torch.randn",
"torch.onnx.export"
] | 1.10.1 | Sewanex/wav2vec2-service | fe9c5fa61bfe85f3e36c78cf71a3bcbe3959ac13 |
1.9 | from src.model_blocks import *
import torch.nn as nn
import timm
from src.activations import Swish_Module
from src.config import YAMLConfig
import typing
import gc
from src.utils import *
""" Test two pytorch models are the same """
"""
This is a testing for RANZCR. Note that the model weights will never match unless you go and seed the initializer weights in PyTorch's
kaimin_init. Therefore, if you set pretrained=True, you will realise that the weights will not match at the head level layers because
that is where wour transfer learning stopped.
"""
class TAWARAMultiHeadResNet200D(nn.Module):
def __init__(self, out_dims_head: typing.List[int] = [3, 4, 3, 1], pretrained=True):
""""""
self.base_name = "resnet200d"
self.n_heads = len(out_dims_head)
super(TAWARAMultiHeadResNet200D, self).__init__()
# # load base model
base_model = timm.create_model(
self.base_name, num_classes=sum(out_dims_head), pretrained=False
)
in_features = base_model.num_features
if pretrained:
pretrained_model_path = "./input/resnet200d_320_chestx.pth"
state_dict = dict()
for k, v in torch.load(pretrained_model_path, map_location="cpu")["model"].items():
if k[:6] == "model.":
k = k.replace("model.", "")
state_dict[k] = v
base_model.load_state_dict(state_dict)
# # remove global pooling and head classifier
base_model.reset_classifier(0, "")
# # Shared CNN Backbone
self.backbone = base_model
# # Multi Heads.
for i, out_dim in enumerate(out_dims_head):
layer_name = f"head_{i}"
layer = nn.Sequential(
SpatialAttentionBlock(in_features, [64, 32, 16, 1]),
nn.AdaptiveAvgPool2d(output_size=1),
nn.Flatten(start_dim=1),
nn.Linear(in_features, in_features),
Swish_Module(),
nn.Dropout(0.3),
nn.Linear(in_features, out_dim),
)
setattr(self, layer_name, layer)
def forward(self, x):
""""""
h = self.backbone(x)
hs = [getattr(self, f"head_{i}")(h) for i in range(self.n_heads)]
y = torch.cat(hs, axis=1)
return y
class MultiHeadResNet200D(nn.Module):
# out_dims_head [3, 4, 3, 1] means 11 classes in total across 4 heads; the first head covers the first 3 classes, etc.
def __init__(
self,
config,
out_dims_head: typing.List[int] = [3, 4, 3, 1],
pretrained=True,
custom_weights=False,
):
""""""
self.base_name = "resnet200d"
self.n_heads = len(out_dims_head)
super(MultiHeadResNet200D, self).__init__()
# # load base model
base_model = timm.create_model(
self.base_name, num_classes=sum(out_dims_head), pretrained=pretrained
)
in_features = base_model.num_features
if custom_weights is True:
print("Loading CUSTOM PRETRAINED WEIGHTS, IF YOU DID NOT CHOOSE THIS, PLEASE RESTART!")
custom_pretrained_weight_path = config.paths["custom_pretrained_weight"]
# self.model.load_state_dict(
# torch.load(custom_pretrained_weight_path)
# )
### Only for xray pretrained weights ###
state_dict = dict()
for k, v in torch.load(custom_pretrained_weight_path, map_location="cpu")[
"model"
].items():
if k[:6] == "model.":
k = k.replace("model.", "")
state_dict[k] = v
base_model.load_state_dict(state_dict)
# # remove global pooling and head classifier
base_model.reset_classifier(0, "")
# # Shared CNN Backbone
self.backbone = base_model
# # Multi Heads.
for i, out_dim in enumerate(out_dims_head):
layer_name = f"head_{i}"
layer = nn.Sequential(
SpatialAttentionBlock(in_features, [64, 32, 16, 1]),
nn.AdaptiveAvgPool2d(output_size=1),
nn.Flatten(start_dim=1),
nn.Linear(in_features, in_features),
Swish_Module(),
# nn.ReLU(inplace=True),
nn.Dropout(0.3),
nn.Linear(in_features, out_dim),
)
setattr(self, layer_name, layer)
def forward(self, x):
""""""
h = self.backbone(x)
hs = [getattr(self, f"head_{i}")(h) for i in range(self.n_heads)]
y = torch.cat(hs, axis=1)
return y
"""A module for constructing machine learning models."""
import functools
import torch
import geffnet
import timm
from typing import *
from utils import rsetattr
from activations import Swish_Module
from model_blocks import *
class CustomModel(torch.nn.Module):
"""A custom model."""
def __init__(
self,
config: type,
pretrained: bool = True,
load_weight: bool = False,
load_url: bool = False,
out_dim_heads=[],
*args,
):
"""Construct a custom model."""
super().__init__()
self.config = config
self.pretrained = pretrained
self.load_weight = load_weight
self.load_url = load_url
# TODO: To use args and kwargs for out_dim_heads as it is throwing errors.
self.args = args
self.out_dim_heads = out_dim_heads
self.out_features = None
self.activation = Swish_Module()
self.architecture = {
"backbone": None,
"bottleneck": None,
"head": None,
}
# def __setattr__(self, name, value):
# self.model.__setattr__(self, name, value)
_model_factory = (
geffnet.create_model if config.model_factory == "geffnet" else timm.create_model
)
self.model = _model_factory(
# model_weight_path_folder=config.paths["model_weight_path_folder"],
model_name=config.model_name,
pretrained=self.pretrained,
num_classes=11,
)
# timm.create_model(
# "resnet200d", num_classes=sum(self.out_dim_heads), pretrained=True
# )
# load pretrained weights that are not available in timm or geffnet; for example, when NFNet first came out, timm did not yet have pretrained weights
if self.load_weight:
# assert (
# self.pretrained == False
# ), "if you are loading custom weight, then pretrained must be set to False"
print("Loading CUSTOM PRETRAINED WEIGHTS, IF YOU DID NOT CHOOSE THIS, PLEASE RESTART!")
custom_pretrained_weight_path = config.paths["custom_pretrained_weight"]
print("Loading custom weights with custom number of classes.")
# self.model = _model_factory(
# model_name=config.model_name,
# pretrained=False,
# num_classes=self.config.num_classes,
# )
# self.model.load_state_dict(
# torch.load(custom_pretrained_weight_path)
# )
### Only for xray pretrained weights ###
state_dict = dict()
for k, v in torch.load(custom_pretrained_weight_path, map_location="cpu")[
"model"
].items():
if k[:6] == "model.":
k = k.replace("model.", "")
state_dict[k] = v
self.model.load_state_dict(state_dict)
# self.model.load_state_dict(torch.load(custom_pretrained_weight_path))
if self.load_url:
# using torch hub to load url, can be beautified. https://pytorch.org/docs/stable/hub.html
checkpoint = "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f1-fc540f82.pth"
self.model.load_state_dict(
torch.hub.load_state_dict_from_url(checkpoint, progress=True, map_location="cpu")
)
self.use_custom_layers = True
NumHeads = len(self.out_dim_heads) # redundant line
if self.use_custom_layers is False:
print("NOT USING CUSTOM LAYERS! BEWARE OF OVERFITTING...")
last_layer_attr_name, self.out_features, _ = self.get_last_layer()
last_layer_attr_name = ".".join(last_layer_attr_name)
rsetattr(
self.model,
last_layer_attr_name,
torch.torch.nn.Linear(self.out_features, config.num_classes),
)
else:
# this is our backbone base, IT IS A SHARED BACKBONE, DO NOT TOUCH!
# self.architecture['backbone'] = self.model
# get our number of nodes before the last layer
# in_features = self.architecture['backbone'].num_features
if NumHeads == 1:
print("Single Head Model")
# timm has reset classifier and get classifier
self.model.reset_classifier(num_classes=0, global_pool="avg")
# this is our backbone base, IT IS A SHARED BACKBONE, DO NOT TOUCH!
self.architecture["backbone"] = self.model
in_features = self.architecture["backbone"].num_features
self.single_head_fc = torch.torch.nn.Sequential(
torch.torch.nn.Linear(in_features, in_features),
self.activation,
torch.torch.nn.Dropout(p=0.3),
torch.torch.nn.Linear(in_features, config.num_classes),
)
self.architecture["head"] = self.single_head_fc
else:
self.num_heads = len(self.out_dim_heads)
print("We are using Multi Head Model with {} Heads".format(self.num_heads))
in_features = self.model.num_features
# remove global pooling and head classifier
self.model.reset_classifier(num_classes=0, global_pool="")
# Shared CNN Backbone
self.architecture["backbone"] = self.model
# Multi Head
for i, out_dim in enumerate(self.out_dim_heads):
layer_name = f"head_{i}"
layer = torch.nn.Sequential(
SpatialAttentionBlock(in_features, [64, 32, 16, 1]),
torch.nn.AdaptiveAvgPool2d(output_size=1),
torch.nn.Flatten(start_dim=1),
torch.nn.Linear(in_features, in_features),
self.activation,
torch.nn.Dropout(0.3),
torch.nn.Linear(in_features, out_dim),
)
setattr(self, layer_name, layer)
def forward(self, input_neurons):
"""Define the computation performed at every call."""
if self.use_custom_layers is False:
output_predictions = self.model(input_neurons)
else:
if len(self.out_dim_heads) > 1:
print("s")
output_logits_backbone = self.architecture["backbone"](input_neurons)
multi_outputs = [
getattr(self, f"head_{i}")(output_logits_backbone)
for i in range(self.num_heads)
]
output_predictions = torch.cat(multi_outputs, axis=1)
else: # only single head
output_logits_backbone = self.architecture["backbone"](input_neurons)
output_predictions = self.architecture["head"](output_logits_backbone)
return output_predictions
def get_last_layer(self):
last_layer_name = None
for name, param in self.model.named_modules():
last_layer_name = name
last_layer_attributes = last_layer_name.split(".") # + ['in_features']
linear_layer = functools.reduce(getattr, last_layer_attributes, self.model)
# reduce applies to a list recursively and reduce
in_features = functools.reduce(getattr, last_layer_attributes, self.model).in_features
return last_layer_attributes, in_features, linear_layer
## forward test
if __name__ == "__main__":
yaml_config = YAMLConfig("./config_debug.yaml")
seed_all(seed=yaml_config.seed)
import timm
HN_MODEL = CustomModel(
config=yaml_config,
pretrained=True,
load_weight=True,
load_url=False,
out_dim_heads=[3, 4, 3, 1],
)
HN_MODEL_DUPLICATE = CustomModel(
config=yaml_config,
pretrained=True,
load_weight=True,
load_url=False,
out_dim_heads=[3, 4, 3, 1],
)
# HN_MODEL = HN_MODEL.eval()
"""
Instantiate TAWARA's model TAWARAMultiHeadResNet200D, but not all at the same time. We test TAWARA_MODEL first and get a forward value of 0.0476.
Then we test TAWARA_MODEL_DUPLICATE, which is the exact same model, and expect to get 0.0476 OR 0.0348.
Then we test TAWARA_MODEL_NEW, which is a slight variation in the construction but the same architecture. Setting eval is optional, but must be consistent.
CAUTION: LEAVE THE HN_MODEL intact as it needs to run sequentially, so the GPU runs HN_MODEL first; if you comment it out
the values might change.
"""
# TAWARA_MODEL = TAWARAMultiHeadResNet200D(
# out_dims_head=[3, 4, 3, 1], pretrained=True
# )
# TAWARA_MODEL_DUPLICATE = TAWARAMultiHeadResNet200D(
# out_dims_head=[3, 4, 3, 1], pretrained=True
# )
# TAWARA_MODEL = TAWARA_MODEL.eval()
# TAWARA_MODEL_NEW = MultiHeadResNet200D(
# yaml_config, [3, 4, 3, 1], pretrained=True, custom_weights=True
# )
# TAWARA_MODEL_NEW = TAWARA_MODEL_NEW.eval()
### Find layers like batchnorm or conv2d ###
# print(find_layer(HN_MODEL))
### Get weight of each layer ###
# print(get_weight(TAWARA_MODEL, 1))
### Compare if two pretrained model are equal | if heads are changed drastically, then it will be different ###
# print(compare_models(TAWARA_MODEL_DUPLICATE, TAWARA_MODEL))
def forward_test(x, model):
y1 = model(x)
print("[forward test]")
print("input:\t{}\noutput:\t{}".format(x.shape, y1.shape))
print("output value", y1[0][0])
x = torch.rand(1, 3, 256, 256)
print(forward_test(x, model=HN_MODEL_DUPLICATE))
# RESNET_FREEZE_BN = timm.create_model("resnet50", pretrained=True)
# RESNET_UNFREEZE_BN = timm.create_model("resnet50", pretrained=True)
# # RESNET_FREEZE_BN.apply(set_bn_eval)
# # print(find_layer(RESNET_FREEZE_BN))
# x = torch.rand(1, 3, 256, 256)
# with torch.no_grad():
# y1 = RESNET_FREEZE_BN(x)
# y2 = RESNET_UNFREEZE_BN(x)
# print("[forward test]")
# print("input:\t{}\noutput:\t{}".format(x.shape, y1.shape))
# print("output value", y1[0][0])
# print("[forward test]")
# print("input:\t{}\noutput:\t{}".format(x.shape, y2.shape))
# print("output value", y2[0][0])
# del RESNET_UNFREEZE_BN, RESNET_FREEZE_BN
# del x
# del y1, y2
# gc.collect()
| [
"torch.nn.Linear",
"torch.rand",
"torch.cat",
"torch.nn.Dropout",
"torch.hub.load_state_dict_from_url",
"torch.torch.nn.Linear",
"torch.load",
"torch.nn.AdaptiveAvgPool2d",
"torch.torch.nn.Dropout",
"torch.nn.Flatten"
] | 1.9.0 | reigHns/RANZCR-CLiP---Catheter-and-Line-Position-Challenge | 80d4177bf74f9ffa5f7906687ebe648832ec84e1 |
1.0 | import os
import torch
import torch.nn.functional as F
from torch.optim import Adam, lr_scheduler
from sac_src.utils import soft_update, hard_update
from sac_src.model import GaussianPolicy, QNetwork, DeterministicPolicy
class SAC(object):
def __init__(self, num_inputs, action_space, args):
self.gamma = args.gamma
self.tau = args.tau
self.alpha = args.alpha
self.action_space = action_space
self.learning_rate = args.lr
self.policy_type = args.policy
self.target_update_interval = args.target_update_interval
self.automatic_entropy_tuning = args.automatic_entropy_tuning
self.device = torch.device("cuda" if args.cuda else "cpu")
self.critic = QNetwork(num_inputs, action_space.shape[0], args.hidden_size).to(device=self.device)
self.critic_optim = Adam(self.critic.parameters(), lr=args.lr)
self.critic_target = QNetwork(num_inputs, action_space.shape[0], args.hidden_size).to(self.device)
hard_update(self.critic_target, self.critic)
if self.policy_type == "Gaussian":
# Target Entropy = −dim(A) (e.g., -6 for HalfCheetah-v2) as given in the paper
if self.automatic_entropy_tuning == True:
self.target_entropy = -torch.prod(torch.Tensor(action_space.shape).to(self.device)).item()
self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)
self.alpha_optim = Adam([self.log_alpha], lr=args.lr)
self.policy = GaussianPolicy(num_inputs, action_space.shape[0], args.hidden_size, action_space).to(self.device)
self.policy_optim = Adam(self.policy.parameters(), lr=args.lr)
else:
self.alpha = 0
self.automatic_entropy_tuning = False
self.policy = DeterministicPolicy(num_inputs, action_space.shape[0], args.hidden_size, action_space).to(self.device)
self.policy_optim = Adam(self.policy.parameters(), lr=args.lr)
self.policy_scheduler = lr_scheduler.StepLR(self.critic_optim, step_size=args.lr_decay_steps, gamma=args.lr_decay_gamma)
def learning_rate_decay(self, decay_ratio=0.5):
self.learning_rate = self.learning_rate * decay_ratio
self.critic_optim = Adam(self.critic.parameters(), lr=self.learning_rate)
self.policy_optim = Adam(self.policy.parameters(), lr=self.learning_rate)
def select_action(self, state, eval=False):
state = torch.FloatTensor(state).to(self.device).unsqueeze(0)
if eval == False:
action, _, _ = self.policy.sample(state)
else:
_, _, action = self.policy.sample(state)
return action.detach().cpu().numpy()[0]
def update_parameters(self, memory, batch_size, updates):
# Sample a batch from memory
state_batch, action_batch, reward_batch, next_state_batch, mask_batch = memory.sample(batch_size=batch_size)
state_batch = torch.FloatTensor(state_batch).to(self.device)
next_state_batch = torch.FloatTensor(next_state_batch).to(self.device)
action_batch = torch.FloatTensor(action_batch).to(self.device)
reward_batch = torch.FloatTensor(reward_batch).to(self.device).unsqueeze(1)
mask_batch = torch.FloatTensor(mask_batch).to(self.device).unsqueeze(1)
with torch.no_grad():
next_state_action, next_state_log_pi, _ = self.policy.sample(next_state_batch)
qf1_next_target, qf2_next_target = self.critic_target(next_state_batch, next_state_action)
min_qf_next_target = torch.min(qf1_next_target, qf2_next_target) - self.alpha * next_state_log_pi
next_q_value = reward_batch + mask_batch * self.gamma * (min_qf_next_target)
qf1, qf2 = self.critic(state_batch, action_batch) # Two Q-functions to mitigate positive bias in the policy improvement step
qf1_loss = F.mse_loss(qf1, next_q_value) # JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
qf2_loss = F.mse_loss(qf2, next_q_value) # JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
pi, log_pi, _ = self.policy.sample(state_batch)
qf1_pi, qf2_pi = self.critic(state_batch, pi)
min_qf_pi = torch.min(qf1_pi, qf2_pi)
policy_loss = ((self.alpha * log_pi) - min_qf_pi).mean() # Jπ = 𝔼st∼D,εt∼N[α * logπ(f(εt;st)|st) − Q(st,f(εt;st))]
self.critic_optim.zero_grad()
qf1_loss.backward()
self.critic_optim.step()
self.critic_optim.zero_grad()
qf2_loss.backward()
self.critic_optim.step()
self.policy_optim.zero_grad()
policy_loss.backward()
self.policy_optim.step()
self.policy_scheduler.step()
if self.automatic_entropy_tuning:
alpha_loss = -(self.log_alpha * (log_pi + self.target_entropy).detach()).mean()
self.alpha_optim.zero_grad()
alpha_loss.backward()
self.alpha_optim.step()
self.alpha = self.log_alpha.exp()
alpha_tlogs = self.alpha.clone() # For TensorboardX logs
else:
alpha_loss = torch.tensor(0.).to(self.device)
alpha_tlogs = torch.tensor(self.alpha) # For TensorboardX logs
if updates % self.target_update_interval == 0:
soft_update(self.critic_target, self.critic, self.tau)
return qf1_loss.item(), qf2_loss.item(), policy_loss.item(), alpha_loss.item(), alpha_tlogs.item()
# Save model parameters
def save_model(self, actor_path, critic_path):
torch.save(self.policy.state_dict(), actor_path)
torch.save(self.critic.state_dict(), critic_path)
# Load model parameters
def load_model(self, actor_path, critic_path):
print('Loading models from {} and {}'.format(actor_path, critic_path))
if actor_path is not None:
self.policy.load_state_dict(torch.load(actor_path))
if critic_path is not None:
self.critic.load_state_dict(torch.load(critic_path))
| [
"torch.zeros",
"torch.device",
"torch.optim.lr_scheduler.StepLR",
"torch.min",
"torch.no_grad",
"torch.optim.Adam",
"torch.FloatTensor",
"torch.nn.functional.mse_loss",
"torch.tensor",
"torch.load",
"torch.Tensor"
] | 1.0.0 | ZhiningLiu1998/mesa | fd024e4754570374b1f0935e00ca1eab6b23f584 |
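# Hedged sketch (not part of the repository record above): the soft Bellman target and the
# automatic temperature update that SAC.update_parameters applies, reproduced on toy tensors so
# the shapes and signs are easy to check. All sizes and values here are invented for illustration.
import torch

batch, act_dim = 4, 2
reward = torch.randn(batch, 1)
mask = torch.ones(batch, 1)                       # 0 where the episode terminated
gamma = 0.99
log_alpha = torch.zeros(1, requires_grad=True)
alpha = log_alpha.exp()

q1_next, q2_next = torch.randn(batch, 1), torch.randn(batch, 1)
next_log_pi = torch.randn(batch, 1)

# soft target: r + gamma * (min(Q1', Q2') - alpha * log pi(a'|s')); the class computes this under torch.no_grad()
min_q_next = torch.min(q1_next, q2_next) - alpha.detach() * next_log_pi
target_q = reward + mask * gamma * min_q_next

# automatic entropy tuning: push E[-log pi] towards the target entropy -dim(A)
target_entropy = -float(act_dim)
log_pi = torch.randn(batch, 1)
alpha_loss = -(log_alpha * (log_pi + target_entropy).detach()).mean()
alpha_loss.backward()                             # gradient flows only into log_alpha
print(target_q.shape, log_alpha.grad)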
1.7 | # -*- coding: utf-8 -*-
# @Time : 2021/1/14
# @Author : Chengyuan Li
# @Email : [email protected]
r"""
NNCF
################################################
Reference:
Ting Bai et al. "A Neural Collaborative Filtering Model with Interaction-based Neighborhood." in CIKM 2017.
Reference code:
https://github.com/Tbbaby/NNCF-Pytorch
"""
import torch
import torch.nn as nn
from torch.nn.init import normal_
from recbole.model.abstract_recommender import GeneralRecommender
from recbole.model.layers import MLPLayers
from recbole.utils import InputType
import numpy as np
from sklearn.metrics import jaccard_score
class NNCF(GeneralRecommender):
r"""NNCF is an neural network enhanced matrix factorization model which also captures neighborhood information.
We implement the NNCF model with three ways to process neighborhood information.
"""
input_type = InputType.POINTWISE
def __init__(self, config, dataset):
super(NNCF, self).__init__(config, dataset)
# load dataset info
self.LABEL = config['LABEL_FIELD']
self.interaction_matrix = dataset.inter_matrix(form='coo').astype(
np.float32)
# load parameters info
self.ui_embedding_size = config['ui_embedding_size']
self.neigh_embedding_size = config['neigh_embedding_size']
self.num_conv_kernel = config['num_conv_kernel']
self.conv_kernel_size = config['conv_kernel_size']
self.pool_kernel_size = config['pool_kernel_size']
self.mlp_hidden_size = config['mlp_hidden_size']
self.neigh_num = config['neigh_num']
self.neigh_info_method = config['neigh_info_method']
self.resolution = config['resolution']
# define layers and loss
self.user_embedding = nn.Embedding(self.n_users, self.ui_embedding_size)
self.item_embedding = nn.Embedding(self.n_items, self.ui_embedding_size)
self.user_neigh_embedding = nn.Embedding(self.n_items, self.neigh_embedding_size)
self.item_neigh_embedding = nn.Embedding(self.n_users, self.neigh_embedding_size)
self.user_conv = nn.Sequential(
nn.Conv1d(self.neigh_embedding_size, self.num_conv_kernel, self.conv_kernel_size),
nn.MaxPool1d(self.pool_kernel_size),
nn.ReLU())
self.item_conv = nn.Sequential(
nn.Conv1d(self.neigh_embedding_size, self.num_conv_kernel, self.conv_kernel_size),
nn.MaxPool1d(self.pool_kernel_size),
nn.ReLU())
conved_size = self.neigh_num - (self.conv_kernel_size - 1)
pooled_size = (conved_size - (self.pool_kernel_size - 1) - 1) // self.pool_kernel_size + 1
self.mlp_layers = MLPLayers([2 * pooled_size * self.num_conv_kernel + self.ui_embedding_size] + self.mlp_hidden_size, config['dropout'])
self.out_layer = nn.Sequential(nn.Linear(self.mlp_hidden_size[-1], 1),
nn.Sigmoid())
self.dropout_layer = torch.nn.Dropout(p=config['dropout'])
self.loss = nn.BCELoss()
# choose the method to use neighborhood information
if self.neigh_info_method == "random":
self.u_neigh, self.i_neigh = self.get_neigh_random()
elif self.neigh_info_method == "knn":
self.u_neigh, self.i_neigh = self.get_neigh_knn()
elif self.neigh_info_method == "louvain":
self.u_neigh, self.i_neigh = self.get_neigh_louvain()
else:
            raise RuntimeError('You need to choose a valid method for processing neighborhood information. \
The parameter neigh_info_method can be set to random, knn or louvain.')
# parameters initialization
self.apply(self._init_weights)
def _init_weights(self, module):
if isinstance(module, nn.Embedding):
normal_(module.weight.data, mean=0.0, std=0.01)
# Unify embedding length
def Max_ner(self, lst, max_ner):
r"""Unify embedding length of neighborhood information for efficiency consideration.
Truncate the list if the length is larger than max_ner.
Otherwise, pad it with 0.
Args:
lst (list): The input list contains node's neighbors.
max_ner (int): The number of neighbors we choose for each node.
Returns:
            list: The input list with each node's neighbor list truncated or padded to length max_ner.
"""
for i in range(len(lst)):
if len(lst[i]) >= max_ner:
lst[i] = lst[i][:max_ner]
else:
length = len(lst[i])
for _ in range(max_ner - length):
lst[i].append(0)
return lst
# Find other nodes in the same community
def get_community_member(self, partition, community_dict, node, kind):
r"""Find other nodes in the same community.
        e.g. If the node starts with letter "i",
        the other nodes in the same community dict group that start with letter "i" are its community neighbors.
Args:
            partition (dict): The input dict that contains the community each node belongs to.
community_dict (dict): The input dict that shows the nodes each community contains.
node (int): The id of the input node.
kind (char): The type of the input node.
Returns:
list: The list of a node's community neighbors.
"""
comm = community_dict[partition[node]]
return [x for x in comm if x.startswith(kind)]
    # Prepare neighborhood embeddings, i.e. I(u) and U(i)
def prepare_vector_element(self, partition, relation, community_dict):
r"""Find the community neighbors of each node, i.e. I(u) and U(i).
Then reset the id of nodes.
Args:
            partition (dict): The input dict that contains the community each node belongs to.
relation (list): The input list that contains the relationships of users and items.
community_dict (dict): The input dict that shows the nodes each community contains.
Returns:
list: The list of nodes' community neighbors.
"""
item2user_neighbor_lst = [[] for _ in range(self.n_items)]
user2item_neighbor_lst = [[] for _ in range(self.n_users)]
for r in range(len(relation)):
user, item = relation[r][0], relation[r][1]
item2user_neighbor = self.get_community_member(partition, community_dict, user, 'u')
np.random.shuffle(item2user_neighbor)
user2item_neighbor = self.get_community_member(partition, community_dict, item, 'i')
np.random.shuffle(user2item_neighbor)
_, user = user.split('_', 1)
user = int(user)
_, item = item.split('_', 1)
item = int(item)
for i in range(len(item2user_neighbor)):
name, index = item2user_neighbor[i].split('_', 1)
item2user_neighbor[i] = int(index)
for i in range(len(user2item_neighbor)):
name, index = user2item_neighbor[i].split('_', 1)
user2item_neighbor[i] = int(index)
item2user_neighbor_lst[item] = item2user_neighbor
user2item_neighbor_lst[user] = user2item_neighbor
return user2item_neighbor_lst, item2user_neighbor_lst
# Get neighborhood embeddings using louvain method
def get_neigh_louvain(self):
r"""Get neighborhood information using louvain algorithm.
First, change the id of node,
        for example, the id of user node "1" will be set to "user_1" in order to use the louvain algorithm.
        Second, use the louvain algorithm to separate nodes into different communities.
Finally, find the community neighbors of each node with the same type and reset the id of the nodes.
Returns:
torch.IntTensor: The neighborhood nodes of a batch of user or item, shape: [batch_size, neigh_num]
"""
inter_M = self.interaction_matrix
pairs = list(zip(inter_M.row, inter_M.col))
tmp_relation = []
for i in range(len(pairs)):
tmp_relation.append(['user_' + str(pairs[i][0]), 'item_' + str(pairs[i][1])])
import networkx as nx
G = nx.Graph()
G.add_edges_from(tmp_relation)
resolution = self.resolution
import community
partition = community.best_partition(G, resolution=resolution)
community_dict = {}
community_dict.setdefault(0, [])
for i in range(len(partition.values())):
community_dict[i] = []
for node, part in partition.items():
community_dict[part] = community_dict[part] + [node]
tmp_user2item, tmp_item2user = self.prepare_vector_element(partition, tmp_relation, community_dict)
u_neigh = self.Max_ner(tmp_user2item, self.neigh_num)
i_neigh = self.Max_ner(tmp_item2user, self.neigh_num)
u_neigh = torch.tensor(u_neigh, device=self.device)
i_neigh = torch.tensor(i_neigh, device=self.device)
return u_neigh, i_neigh
    # Count the similarity of a node and its direct neighbors using the jaccard method
def count_jaccard(self, inters, node, neigh_list, kind):
r""" Count the similarity of the node and its direct neighbors using jaccard similarity.
Args:
inters (list): The input list that contains the relationships of users and items.
node (int): The id of the input node.
neigh_list (list): The input list that contains the neighbors of the input node.
kind (char): The type of the input node.
Returns:
            float: The sum of jaccard similarity scores between the node and its neighbors.
"""
if kind == 'u':
if node in neigh_list:
return 0
vec_node = inters[:, node]
score = 0
for neigh in neigh_list:
vec_neigh = inters[:, neigh]
tmp = jaccard_score(vec_node, vec_neigh)
score += tmp
return score
else:
if node in neigh_list:
return 0
vec_node = inters[node]
score = 0
for neigh in neigh_list:
vec_neigh = inters[neigh]
tmp = jaccard_score(vec_node, vec_neigh)
score += tmp
return score
# Get neighborhood embeddings using knn method
def get_neigh_knn(self):
r"""Get neighborhood information using knn algorithm.
        Find the direct neighbors of each node. If the number of direct neighbors is less than neigh_num,
        add other similar neighbors ranked by jaccard similarity.
        Otherwise, randomly select neigh_num of the direct neighbors.
Returns:
torch.IntTensor: The neighborhood nodes of a batch of user or item, shape: [batch_size, neigh_num]
"""
inter_M = self.interaction_matrix
pairs = list(zip(inter_M.row, inter_M.col))
ui_inters = np.zeros((self.n_users, self.n_items), dtype=np.int8)
for i in range(len(pairs)):
ui_inters[pairs[i][0], pairs[i][1]] = 1
u_neigh, i_neigh = [], []
for u in range(self.n_users):
neigh_list = ui_inters[u].nonzero()[0]
direct_neigh_num = len(neigh_list)
if len(neigh_list) == 0:
u_neigh.append(self.neigh_num * [0])
elif direct_neigh_num < self.neigh_num:
knn_neigh_dict = {}
for i in range(self.n_items):
score = self.count_jaccard(ui_inters, i, neigh_list, 'u')
knn_neigh_dict[i] = score
knn_neigh_dict_sorted = dict(sorted(knn_neigh_dict.items(), key=lambda item:item[1], reverse=True))
knn_neigh_list = knn_neigh_dict_sorted.keys()
neigh_list = list(neigh_list) + list(knn_neigh_list)
u_neigh.append(neigh_list[:self.neigh_num])
else:
mask = np.random.randint(0, len(neigh_list), size=self.neigh_num)
u_neigh.append(neigh_list[mask])
for i in range(self.n_items):
neigh_list = ui_inters[:, i].nonzero()[0]
direct_neigh_num = len(neigh_list)
if len(neigh_list) == 0:
i_neigh.append(self.neigh_num * [0])
elif direct_neigh_num < self.neigh_num:
knn_neigh_dict = {}
                for j in range(self.n_users):
                    score = self.count_jaccard(ui_inters, j, neigh_list, 'i')
                    knn_neigh_dict[j] = score
knn_neigh_dict_sorted = dict(sorted(knn_neigh_dict.items(), key=lambda item:item[1], reverse=True))
knn_neigh_list = knn_neigh_dict_sorted.keys()
neigh_list = list(neigh_list) + list(knn_neigh_list)
i_neigh.append(neigh_list[:self.neigh_num])
else:
mask = np.random.randint(0, len(neigh_list), size=self.neigh_num)
i_neigh.append(neigh_list[mask])
u_neigh = torch.tensor(u_neigh, device=self.device)
i_neigh = torch.tensor(i_neigh, device=self.device)
return u_neigh, i_neigh
# Get neighborhood embeddings using random method
def get_neigh_random(self):
r"""Get neighborhood information using random algorithm.
        Randomly select neigh_num direct neighbors for each node.
Returns:
torch.IntTensor: The neighborhood nodes of a batch of user or item, shape: [batch_size, neigh_num]
"""
inter_M = self.interaction_matrix
pairs = list(zip(inter_M.row, inter_M.col))
ui_inters = np.zeros((self.n_users, self.n_items), dtype=np.int8)
for i in range(len(pairs)):
ui_inters[pairs[i][0], pairs[i][1]] = 1
u_neigh, i_neigh = [], []
for u in range(self.n_users):
neigh_list = ui_inters[u].nonzero()[0]
if len(neigh_list) == 0:
u_neigh.append(self.neigh_num * [0])
else:
mask = np.random.randint(0, len(neigh_list), size=self.neigh_num)
u_neigh.append(neigh_list[mask])
for i in range(self.n_items):
neigh_list = ui_inters[:, i].nonzero()[0]
if len(neigh_list) == 0:
i_neigh.append(self.neigh_num * [0])
else:
mask = np.random.randint(0, len(neigh_list), size=self.neigh_num)
i_neigh.append(neigh_list[mask])
u_neigh = torch.tensor(u_neigh, device=self.device)
i_neigh = torch.tensor(i_neigh, device=self.device)
return u_neigh, i_neigh
# Get neighborhood embeddings
def get_neigh_info(self, user, item):
r"""Get a batch of neighborhood embedding tensor according to input id.
Args:
user (torch.LongTensor): The input tensor that contains user's id, shape: [batch_size, ]
item (torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ]
Returns:
torch.FloatTensor: The neighborhood embedding tensor of a batch of user, shape: [batch_size, neigh_embedding_size]
torch.FloatTensor: The neighborhood embedding tensor of a batch of item, shape: [batch_size, neigh_embedding_size]
"""
batch_u_neigh = self.u_neigh[user]
batch_i_neigh = self.i_neigh[item]
return batch_u_neigh, batch_i_neigh
def forward(self, user, item):
user_embedding = self.user_embedding(user)
item_embedding = self.item_embedding(item)
user_neigh_input, item_neigh_input = self.get_neigh_info(user, item)
user_neigh_embedding = self.user_neigh_embedding(user_neigh_input)
item_neigh_embedding = self.item_neigh_embedding(item_neigh_input)
user_neigh_embedding = user_neigh_embedding.permute(0, 2, 1)
user_neigh_conv_embedding = self.user_conv(user_neigh_embedding)
# batch_size * out_channel * pool_size
batch_size = user_neigh_conv_embedding.size(0)
user_neigh_conv_embedding = user_neigh_conv_embedding.view(batch_size, -1)
item_neigh_embedding = item_neigh_embedding.permute(0, 2, 1)
item_neigh_conv_embedding = self.item_conv(item_neigh_embedding)
# batch_size * out_channel * pool_size
item_neigh_conv_embedding = item_neigh_conv_embedding.view(batch_size, -1)
mf_vec = torch.mul(user_embedding, item_embedding)
last = torch.cat((mf_vec, user_neigh_conv_embedding, item_neigh_conv_embedding), dim=-1)
output = self.mlp_layers(last)
out = self.out_layer(output)
out = out.squeeze(-1)
return out
def calculate_loss(self, interaction):
user = interaction[self.USER_ID]
item = interaction[self.ITEM_ID]
label = interaction[self.LABEL]
output = self.forward(user, item)
return self.loss(output, label)
def predict(self, interaction):
user = interaction[self.USER_ID]
item = interaction[self.ITEM_ID]
return self.forward(user, item)
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.mul",
"torch.cat",
"torch.nn.Conv1d",
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"torch.nn.init.normal_",
"torch.tensor",
"torch.nn.BCELoss",
"torch.nn.MaxPool1d",
"torch.nn.Embedding"
] | 1.7.0 | ShanleiMu/RecBole-1 | 9ec15faf90126dfb512901d0f2303ef3c2efb71d |
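# Hedged sketch (separate from the NNCF record above): how a binary user-item interaction matrix
# is turned into fixed-length neighbor lists, mirroring get_neigh_random and count_jaccard.
# The toy matrix and neigh_num below are invented for the example.
import numpy as np
from sklearn.metrics import jaccard_score

ui_inters = np.array([[1, 0, 1, 0],
                      [1, 1, 0, 0],
                      [0, 0, 1, 1]], dtype=np.int8)    # 3 users x 4 items
neigh_num = 2

# "random" strategy: sample neigh_num direct neighbors (items) for user 0, padding with 0 if none
direct_items = ui_inters[0].nonzero()[0]
mask = np.random.randint(0, len(direct_items), size=neigh_num)
user0_neigh = direct_items[mask] if len(direct_items) else neigh_num * [0]

# "knn" strategy ranks extra neighbors by jaccard similarity between binary interaction vectors,
# e.g. user 0 vs user 1 (intersection of positives over their union):
sim_01 = jaccard_score(ui_inters[0], ui_inters[1])

# the "louvain" strategy instead partitions the bipartite graph with python-louvain
# (community.best_partition) and takes same-type nodes from the same community as neighbors.
print(user0_neigh, sim_01)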
1.4 | # part of the code are from https://github.com/hill-a/stable-baselines/
import random
from collections import namedtuple
import numpy as np
import torch
from generic import to_np
from segment_tree import SumSegmentTree, MinSegmentTree
Transition = namedtuple('Transition', ('observation_list',
'action_candidate_list',
'tasks',
'chosen_indices',
'graph_triplets',
'reward', 'graph_reward', 'count_reward', 'is_final',
'level'))
class PrioritizedReplayMemory(object):
def __init__(self, capacity=100000, priority_fraction=0.0, discount_gamma_game_reward=1.0, discount_gamma_graph_reward=1.0, discount_gamma_count_reward=1.0, accumulate_reward_from_final=False):
# prioritized replay memory
self._storage = []
self.capacity = capacity
self._next_idx = 0
assert priority_fraction >= 0
self._alpha = priority_fraction
it_capacity = 1
while it_capacity < capacity:
it_capacity *= 2
self._it_sum = SumSegmentTree(it_capacity)
self._it_min = MinSegmentTree(it_capacity)
self._max_priority = 1.0
self.discount_gamma_game_reward = discount_gamma_game_reward
self.discount_gamma_graph_reward = discount_gamma_graph_reward
self.discount_gamma_count_reward = discount_gamma_count_reward
self.accumulate_reward_from_final = accumulate_reward_from_final
self.avg_rewards_dict = {3:0.0, 7:0.0, 5:0.0, 9:0.0}
def __len__(self):
return len(self._storage)
@property
def storage(self):
"""[(np.ndarray, float, float, np.ndarray, bool)]: content of the replay buffer"""
return self._storage
@property
def buffer_size(self):
"""float: Max capacity of the buffer"""
return self.capacity
def can_sample(self, n_samples):
"""
Check if n_samples samples can be sampled
from the buffer.
:param n_samples: (int)
:return: (bool)
"""
return len(self) >= n_samples
def is_full(self):
"""
Check whether the replay buffer is full or not.
:return: (bool)
"""
return len(self) == self.buffer_size
def add(self, *args):
"""
add a new transition to the buffer
"""
idx = self._next_idx
data = Transition(*args)
if self._next_idx >= len(self._storage):
self._storage.append(data)
else:
self._storage[self._next_idx] = data
self._next_idx = (self._next_idx + 1) % self.capacity
self._it_sum[idx] = self._max_priority ** self._alpha
self._it_min[idx] = self._max_priority ** self._alpha
def get_next_final_pos(self, which_memory, head):
i = head
while True:
if i >= len(self._storage):
return None
if self._storage[i].is_final:
return i
i += 1
return None
def _get_single_transition(self, idx, n):
assert n > 0
head = idx
# if n is 1, then head can't be is_final
if n == 1:
if self._storage[head].is_final:
return None
# if n > 1, then all except tail can't be is_final
else:
if np.any([item.is_final for item in self._storage[head: head + n]]):
return None
next_final = self.get_next_final_pos(self._storage, head)
if next_final is None:
return None
# all good
obs = self._storage[head].observation_list
candidate = self._storage[head].action_candidate_list
tasks = self._storage[head].tasks
chosen_indices = self._storage[head].chosen_indices
graph_triplets = self._storage[head].graph_triplets
next_obs = self._storage[head + n].observation_list
next_candidate = self._storage[head + n].action_candidate_list
# no tasks_next
next_graph_triplets = self._storage[head + n].graph_triplets
tmp = next_final - head + 1 if self.accumulate_reward_from_final else n + 1
rewards_up_to_next_final = [self.discount_gamma_game_reward ** i * self._storage[head + i].reward for i in range(tmp)]
reward = torch.sum(torch.stack(rewards_up_to_next_final))
graph_rewards_up_to_next_final = [self.discount_gamma_graph_reward ** i * self._storage[head + i].graph_reward for i in range(tmp)]
graph_reward = torch.sum(torch.stack(graph_rewards_up_to_next_final))
count_rewards_up_to_next_final = [self.discount_gamma_count_reward ** i * self._storage[head + i].count_reward for i in range(tmp)]
count_reward = torch.sum(torch.stack(count_rewards_up_to_next_final))
return (obs, candidate,
tasks,
chosen_indices, graph_triplets, reward + graph_reward + count_reward,
next_obs, next_candidate,
next_graph_triplets)
def _encode_sample(self, idxes, ns):
actual_indices, actual_ns = [], []
obs, candidate, chosen_indices, graph_triplets, reward, next_obs, next_candidate, next_graph_triplets = [], [], [], [], [], [], [], []
tasks = []
for i, n in zip(idxes, ns):
t = self._get_single_transition(i, n)
if t is None:
continue
actual_indices.append(i)
actual_ns.append(n)
obs.append(t[0])
candidate.append(t[1])
tasks.append(t[2])
chosen_indices.append(t[3])
graph_triplets.append(t[4])
reward.append(t[5])
next_obs.append(t[6])
next_candidate.append(t[7])
next_graph_triplets.append(t[8])
if len(actual_indices) == 0:
return None
chosen_indices = np.array(chosen_indices) # batch
reward = torch.stack(reward, 0) # batch
actual_ns = np.array(actual_ns)
return [obs, candidate,
tasks,
chosen_indices, graph_triplets, reward,
next_obs, next_candidate,
next_graph_triplets, actual_indices, actual_ns]
def sample(self, batch_size, beta=0, multi_step=1):
assert beta > 0
idxes = self._sample_proportional(batch_size)
weights = []
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * len(self._storage)) ** (-beta)
ns = np.random.randint(1, multi_step + 1, size=batch_size)
encoded_sample = self._encode_sample(idxes, ns)
if encoded_sample is None:
return None
actual_indices = encoded_sample[-2]
for idx in actual_indices:
p_sample = self._it_sum[idx] / self._it_sum.sum()
weight = (p_sample * len(self._storage)) ** (-beta)
weights.append(weight / max_weight)
weights = np.array(weights)
return encoded_sample + [weights]
def _get_single_sequence_transition(self, idx, sample_history_length):
assert sample_history_length > 0
head = idx
# if n is 1, then head can't be is_final
if sample_history_length == 1:
if self._storage[head].is_final:
return None
# if n > 1, then all except tail can't be is_final
else:
if np.any([item.is_final for item in self._storage[head: head + sample_history_length]]):
return None
next_final = self.get_next_final_pos(self._storage, head)
if next_final is None:
return None
# all good
res = []
for m in range(sample_history_length):
obs = self._storage[head + m].observation_list
candidate = self._storage[head + m].action_candidate_list
tasks = self._storage[head + m].tasks
chosen_indices = self._storage[head + m].chosen_indices
graph_triplets = self._storage[head + m].graph_triplets
next_obs = self._storage[head + m + 1].observation_list
next_candidate = self._storage[head + m + 1].action_candidate_list
next_graph_triplets = self._storage[head + m + 1].graph_triplets
tmp = next_final - (head + m) + 1 if self.accumulate_reward_from_final else 1
rewards_up_to_next_final = [self.discount_gamma_game_reward ** i * self._storage[head + m + i].reward for i in range(tmp)]
reward = torch.sum(torch.stack(rewards_up_to_next_final))
graph_rewards_up_to_next_final = [self.discount_gamma_graph_reward ** i * self._storage[head + m + i].graph_reward for i in range(tmp)]
graph_reward = torch.sum(torch.stack(graph_rewards_up_to_next_final))
count_rewards_up_to_next_final = [self.discount_gamma_count_reward ** i * self._storage[head + m + i].count_reward for i in range(tmp)]
count_reward = torch.sum(torch.stack(count_rewards_up_to_next_final))
res.append([obs, candidate,
tasks,
chosen_indices, graph_triplets, reward + graph_reward + count_reward,
next_obs, next_candidate,
next_graph_triplets])
return res
def _encode_sample_sequence(self, idxes, sample_history_length):
assert sample_history_length > 0
res = []
for _ in range(sample_history_length):
tmp = []
for i in range(10):
tmp.append([])
res.append(tmp)
actual_indices = []
for i in idxes:
t = self._get_single_sequence_transition(i, sample_history_length)
if t is None:
continue
actual_indices.append(i)
for step in range(sample_history_length):
t_s = t[step]
res[step][0].append(t_s[0]) # obs
res[step][1].append(t_s[1]) # candidate
res[step][2].append(t_s[2]) # tasks
res[step][3].append(t_s[3]) # chosen_indices
res[step][4].append(t_s[4]) # graph_triplets
res[step][5].append(t_s[5]) # reward
res[step][6].append(t_s[6]) # next_obs
res[step][7].append(t_s[7]) # next_candidate
res[step][8].append(t_s[8]) # next_graph_triplets
if len(actual_indices) == 0:
return None
# chosen_indices, reward
for i in range(sample_history_length):
res[i][3] = np.array(res[i][3]) # 'chosen_indices', batch
res[i][5] = torch.stack(res[i][5], 0) # 'reward', batch
return res + [actual_indices]
def sample_sequence(self, batch_size, beta=0, sample_history_length=1):
assert beta > 0
idxes = self._sample_proportional(batch_size)
res_weights = []
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * len(self._storage)) ** (-beta)
encoded_sample = self._encode_sample_sequence(idxes, sample_history_length)
if encoded_sample is None:
return None
actual_indices = encoded_sample[-1]
for _h in range(sample_history_length):
tmp_weights = []
for idx in actual_indices:
p_sample = self._it_sum[idx + _h] / self._it_sum.sum()
weight = (p_sample * len(self._storage)) ** (-beta)
tmp_weights.append(weight / max_weight)
tmp_weights = np.array(tmp_weights)
res_weights.append(tmp_weights)
return encoded_sample + [res_weights]
def _sample_proportional(self, batch_size):
res = []
for _ in range(batch_size):
mass = random.random() * self._it_sum.sum(0, len(self._storage) - 1)
idx = self._it_sum.find_prefixsum_idx(mass)
res.append(idx)
return res
def update_priorities(self, idxes, priorities):
"""
Update priorities of sampled transitions.
sets priority of transition at index idxes[i] in buffer
to priorities[i].
:param idxes: ([int]) List of idxes of sampled transitions
:param priorities: ([float]) List of updated priorities corresponding to transitions at the sampled idxes
denoted by variable `idxes`.
"""
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
assert priority > 0
assert 0 <= idx < len(self._storage)
self._it_sum[idx] = priority ** self._alpha
self._it_min[idx] = priority ** self._alpha
self._max_priority = max(self._max_priority, priority)
def update_avg_rewards(self):
if len(self._storage) > 0:
# collect rewards
rewards_dict = {3: [], 7: [], 5: [], 9: []}
for ii in range(len(self._storage)):
curr_transition = self._storage[ii]
assert(curr_transition.level in {3,5,7,9})
rewards_dict[curr_transition.level].append(curr_transition.reward)
# compute average
for jj in [3,7,5,9]:
if len(rewards_dict[jj]) > 0:
self.avg_rewards_dict[jj] = to_np(torch.mean(torch.stack(rewards_dict[jj])))
| [
"torch.stack"
] | 1.4.0 | YunqiuXu/H-KGA | 694a36baf9e51ffb97be269d8182a2b906eb0da5 |
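# Hedged sketch (separate from PrioritizedReplayMemory above): proportional sampling and
# importance-sampling weights as used in sample() / update_priorities(), on a plain numpy array
# instead of segment trees. Priorities, alpha and beta below are made-up example values.
import numpy as np

priorities = np.array([1.0, 0.5, 2.0, 0.1])
alpha, beta = 0.6, 0.4

p = priorities ** alpha
probs = p / p.sum()                                   # P(i) = p_i^alpha / sum_k p_k^alpha
idx = np.random.choice(len(p), size=2, p=probs)       # proportional sampling

N = len(p)
weights = (N * probs[idx]) ** (-beta)                 # w_i = (N * P(i))^-beta
weights /= (N * probs.min()) ** (-beta)               # normalize by the maximum possible weight

# n-step discounted reward, as in _get_single_transition: sum_i gamma^i * r_{t+i}
gamma, rewards = 0.9, [1.0, 0.0, 2.0]
n_step_return = sum(gamma ** i * r for i, r in enumerate(rewards))
print(idx, weights, n_step_return)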
1.1 | import argparse
import os
import torch
class BaseHyperparameters:
def __init__(self):
self.parser = argparse.ArgumentParser()
self.opt = None
def initalize(self):
# set directory for inputs and outputs
self.parser.add_argument('--data_dir', type=str, default='./', help='dataset directory')
self.parser.add_argument('--out_dir', type=str, default='./results', help='out directory')
self.parser.add_argument('--log_dir', type=str, default='./logs', help='log directory')
# model setting
self.parser.add_argument('--model_name', type=str, help='model name',
choices=['deeperlab', 'deeperlabv3+', 'fcn', 'unet'])
self.parser.add_argument('--segmentation_only', action='store_true',
                                 help='segmentation only: True, add CAM: False; default is False')
# model structure
# which is not supported to change, otherwise may cause some problems
self.parser.add_argument('--backbone_channels', type=int, default=256)
self.parser.add_argument('--aspp_channels', type=int, default=256)
# input size
self.parser.add_argument('--data_size', type=int, default=512, help='input image size')
self.parser.add_argument('--batch_size', type=int, default=1)
# device
self.parser.add_argument('--device', type=str, default="cuda:0")
return
def parse(self):
if self.opt is not None:
# check device
if not torch.cuda.is_available():
self.opt.device = 'cpu'
print('Warning: use cpu to run')
return
class TrainingHyperparameters(BaseHyperparameters):
"""
training parameters
"""
def __init__(self):
super(TrainingHyperparameters, self).__init__()
self.parse()
def init_train_params(self):
# setting intervals
self.parser.add_argument('--model_intervals', type=int, default=1,
help='interval numbers for saving models')
self.parser.add_argument('--image_intervals', type=int, default=10,
help='interval numbers for saving images')
# model prefix
self.parser.add_argument('--model_prefix', type=int, default=0,
help='prefix epoch of pretraining weights')
# visdom
self.parser.add_argument('--use_visdom', action='store_true',
help='use visdom for visualization')
# training settings
self.parser.add_argument('--epochs', type=int, default=500, help='total training epochs')
self.parser.add_argument('--lr', type=float, default=1e-4, help='learning rate')
return
def parse(self):
self.initalize()
self.init_train_params()
self.opt = self.parser.parse_args()
super().parse()
# make dirs
os.makedirs(os.path.join(self.opt.out_dir, 'images'), exist_ok=True)
os.makedirs(os.path.join(self.opt.out_dir, 'models'), exist_ok=True)
os.makedirs(self.opt.log_dir, exist_ok=True)
return
class EvalHyperparameters(BaseHyperparameters):
"""
evaluation parameters
"""
def __init__(self):
super(EvalHyperparameters, self).__init__()
self.parse()
def init_eval_params(self):
# model prefix
self.parser.add_argument('--model_prefix', type=int, default=0,
help='prefix epoch of pretraining weights')
def parse(self):
self.initalize()
self.init_eval_params()
self.opt = self.parser.parse_args()
super().parse()
# make dirs
os.makedirs(os.path.join(self.opt.out_dir, 'images'), exist_ok=True)
os.makedirs(self.opt.log_dir, exist_ok=True)
return
class InferenceParameters(BaseHyperparameters):
def __init__(self):
super(InferenceParameters, self).__init__()
self.parse()
def init_inference_params(self):
self.parser.add_argument('--model_dir', type=str, help='path to model, e.g ./results/50.pt')
self.parser.add_argument('--model_prefix', type=int,
help='model file prefix, it can be set automatically')
return
def parse(self):
self.initalize()
self.init_inference_params()
self.opt = self.parser.parse_args()
super().parse()
os.makedirs(self.opt.out_dir, exist_ok=True)
os.makedirs(self.opt.log_dir, exist_ok=True)
self.opt.model_prefix = int(os.path.basename(self.opt.model_dir).split('.')[0])
| [
"torch.cuda.is_available"
] | 1.1 | YcShentu/CoralSegmentation | 6ae30c61f7efa1caef9d191d29a7668296f75f8d |
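# Hedged sketch (separate from the classes above): the argparse + CUDA-fallback pattern these
# hyperparameter classes wrap, run on an explicit argument list so it does not depend on the real
# command line. The flag values below are invented for the example.
import argparse
import torch

parser = argparse.ArgumentParser()
parser.add_argument('--model_name', type=str, choices=['deeperlab', 'deeperlabv3+', 'fcn', 'unet'])
parser.add_argument('--batch_size', type=int, default=1)
parser.add_argument('--device', type=str, default='cuda:0')

opt = parser.parse_args(['--model_name', 'unet', '--batch_size', '4'])
if not torch.cuda.is_available():        # same fallback as BaseHyperparameters.parse
    opt.device = 'cpu'
print(opt.model_name, opt.batch_size, opt.device)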
1.5 | import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss
from transformers import DistilBertModel
from Utils.Eval.Metrics import ComputeMetrics as CoMe
class FFNNMulti(nn.Module):
def __init__(self, input_size, hidden_size_1, hidden_size_2, hidden_dropout_prob_1, hidden_dropout_prob_2):
super(FFNNMulti, self).__init__()
self.input_size = input_size
self.hidden_size_1 = hidden_size_1
self.hidden_size_2 = hidden_size_2
self.hidden_dropout_prob_1 = hidden_dropout_prob_1
self.hidden_dropout_prob_2 = hidden_dropout_prob_2
self.dropout_1 = nn.Dropout(hidden_dropout_prob_1)
self.dropout_2 = nn.Dropout(hidden_dropout_prob_2)
self.first_layer = nn.Linear(input_size, hidden_size_1)
self.second_layer = nn.Linear(hidden_size_1, hidden_size_2)
self.classifier = nn.Linear(hidden_size_2, 4)
def forward(self, x):
x = self.first_layer(x)
x = nn.ReLU()(x)
x = self.dropout_1(x)
x = self.second_layer(x)
x = nn.ReLU()(x)
x = self.dropout_2(x)
x = self.classifier(x)
return x
def __str__(self):
return f"Input size: {self.input_size} \nHidden size 1: {self.hidden_size_1} \nHidden size 2: {self.hidden_size_2} \nDropout 1: {self.hidden_dropout_prob_1} \nDropout 2: {self.hidden_dropout_prob_2} \nOutput Size: 4 \n"
def get_params_string(self):
return f"multi_output_{self.input_size}_{self.hidden_size_1}_{self.hidden_size_2}_{self.hidden_dropout_prob_1}_{self.hidden_dropout_prob_2}"
class FFNNDual(nn.Module):
def __init__(self, input_size, hidden_size_1, hidden_size_2, hidden_dropout_prob_1, hidden_dropout_prob_2):
super(FFNNDual, self).__init__()
self.input_size = input_size
self.hidden_size_1 = hidden_size_1
self.hidden_size_2 = hidden_size_2
self.hidden_dropout_prob_1 = hidden_dropout_prob_1
self.hidden_dropout_prob_2 = hidden_dropout_prob_2
self.dropout_1 = nn.Dropout(hidden_dropout_prob_1)
self.dropout_2 = nn.Dropout(hidden_dropout_prob_2)
self.first_layer = nn.Linear(input_size, hidden_size_1)
self.second_layer = nn.Linear(hidden_size_1, hidden_size_2)
self.classifier = nn.Linear(hidden_size_2, 2)
def forward(self, x):
x = self.first_layer(x)
x = nn.ReLU()(x)
x = self.dropout_1(x)
x = self.second_layer(x)
x = nn.ReLU()(x)
x = self.dropout_2(x)
x = self.classifier(x)
return x
def __str__(self):
return f"Input size: {self.input_size} \nHidden size 1: {self.hidden_size_1} \nHidden size 2: {self.hidden_size_2} \nDropout 1: {self.hidden_dropout_prob_1} \nDropout 2: {self.hidden_dropout_prob_2} \nOutput Size: 2 \n"
def get_params_string(self):
return f"dual_output_{self.input_size}_{self.hidden_size_1}_{self.hidden_size_2}_{self.hidden_dropout_prob_1}_{self.hidden_dropout_prob_2}"
class FFNN2(nn.Module):
def __init__(self, input_size, hidden_size_1, hidden_size_2, hidden_dropout_prob_1, hidden_dropout_prob_2):
super(FFNN2, self).__init__()
self.input_size = input_size
self.hidden_size_1 = hidden_size_1
self.hidden_size_2 = hidden_size_2
self.hidden_dropout_prob_1 = hidden_dropout_prob_1
self.hidden_dropout_prob_2 = hidden_dropout_prob_2
self.dropout_1 = nn.Dropout(hidden_dropout_prob_1)
self.dropout_2 = nn.Dropout(hidden_dropout_prob_2)
self.first_layer = nn.Linear(input_size, hidden_size_1)
self.second_layer = nn.Linear(hidden_size_1, hidden_size_2)
self.classifier = nn.Linear(hidden_size_2, 1)
def forward(self, x):
x = self.first_layer(x)
x = nn.ReLU()(x)
x = self.dropout_1(x)
x = self.second_layer(x)
x = nn.ReLU()(x)
x = self.dropout_2(x)
x = self.classifier(x)
return x
def __str__(self):
return f"Input size: {self.input_size} \nHidden size 1: {self.hidden_size_1} \nHidden size 2: {self.hidden_size_2} \nDropout 1: {self.hidden_dropout_prob_1} \nDropout 2: {self.hidden_dropout_prob_2} \nOutput Size: 1 \n"
def get_params_string(self):
return f"{self.input_size}_{self.hidden_size_1}_{self.hidden_size_2}_{self.hidden_dropout_prob_1}_{self.hidden_dropout_prob_2}"
class FFNN1(nn.Module):
def __init__(self, input_size, hidden_size, hidden_dropout_prob):
super(FFNN1, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.hidden_dropout_prob = hidden_dropout_prob
self.dropout = nn.Dropout(hidden_dropout_prob)
self.first_layer = nn.Linear(input_size, hidden_size)
self.classifier = nn.Linear(hidden_size, 1)
def forward(self, x):
x = self.first_layer(x)
x = nn.ReLU()(x)
x = self.dropout(x)
x = self.classifier(x)
return x
def __str__(self):
return f"Input size: {self.input_size} \nHidden size: {self.hidden_size} \nDropout: {self.hidden_dropout_prob} \nOutput Size: 1 \n"
def get_params_string(self):
return f"{self.input_size}_{self.hidden_size}_{self.hidden_dropout_prob_1}"
class DistilBertMultiClassifier(nn.Module):
def __init__(self, ffnn_params):
super().__init__()
self.bert = DistilBertModel.from_pretrained("distilbert-base-multilingual-cased")
self.ffnn = FFNNMulti(**ffnn_params)
def forward(
self,
input_ids=None,
input_features=None, # the second input
attention_mask=None,
head_mask=None,
inputs_embeds=None,
labels=None, ):
distilbert_output = self.bert(
input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
hidden_state = distilbert_output[0] # (bs, seq_len, dim)
pooled_output = hidden_state[:, 0] # (bs, dim)
pooled_output = torch.cat([pooled_output, input_features.float()], dim=1)
logits = self.ffnn(pooled_output) # (bs, dim)
preds = torch.sigmoid(logits)
if labels is not None:
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits.view(-1), labels.view(-1).float())
# Declaring the class containing the metrics
preds_arr = preds.detach().cpu().numpy()
labels_arr = labels.detach().cpu().numpy()
output_list = []
for i in range(4):
#print(preds_arr[:, i])
#print(labels_arr[:, i])
outputs = (loss, logits[:, i], preds_arr[:, i],)
output_list.append(outputs)
return output_list # (loss), logits, (hidden_states), (attentions), (preds, prauc, rce, conf, max_pred, min_pred, avg)
else:
return (logits,)
def __str__(self):
return str(self.ffnn)
def get_params_string(self):
return self.ffnn.get_params_string()
class DistilBertDualClassifier(nn.Module):
def __init__(self, ffnn_params):
super().__init__()
self.bert = DistilBertModel.from_pretrained("distilbert-base-multilingual-cased")
self.ffnn = FFNNDual(**ffnn_params)
def forward(
self,
input_ids=None,
input_features=None, # the second input
attention_mask=None,
head_mask=None,
inputs_embeds=None,
labels=None, ):
distilbert_output = self.bert(
input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
hidden_state = distilbert_output[0] # (bs, seq_len, dim)
pooled_output = hidden_state[:, 0] # (bs, dim)
pooled_output = torch.cat([pooled_output, input_features.float()], dim=1)
logits = self.ffnn(pooled_output) # (bs, dim)
preds = torch.sigmoid(logits)
if labels is not None:
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits.view(-1), labels.view(-1).float())
# Declaring the class containing the metrics
preds_arr = preds.detach().cpu().numpy()
labels_arr = labels.detach().cpu().numpy()
output_list = []
for i in range(2):
#print(preds_arr[:, i])
#print(labels_arr[:, i])
outputs = (loss, logits[:, i], preds_arr[:, i],)
output_list.append(outputs)
return output_list # (loss), logits, (hidden_states), (attentions), (preds, prauc, rce, conf, max_pred, min_pred, avg)
else:
return (logits,)
def __str__(self):
return str(self.ffnn)
def get_params_string(self):
return self.ffnn.get_params_string()
class DistilBertClassifierDoubleInput(nn.Module):
def __init__(self, ffnn_params):
super().__init__()
self.bert = DistilBertModel.from_pretrained("distilbert-base-multilingual-cased")
self.ffnn = FFNN2(**ffnn_params)
def forward(
self,
input_ids=None,
input_features=None, # the second input
attention_mask=None,
head_mask=None,
inputs_embeds=None,
labels=None, ):
distilbert_output = self.bert(
input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
hidden_state = distilbert_output[0] # (bs, seq_len, dim)
pooled_output = hidden_state[:, 0] # (bs, dim)
pooled_output = torch.cat([pooled_output, input_features.float()], dim=1)
logits = self.ffnn(pooled_output) # (bs, dim)
preds = torch.sigmoid(logits)
outputs = (logits,) + distilbert_output[1:]
if labels is not None:
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits.view(-1), labels.view(-1).float())
# Declaring the class containing the metrics
#cm = CoMe(preds.detach().cpu().numpy(), labels.detach().cpu().numpy())
# Evaluating
#prauc = cm.compute_prauc()
#rce = cm.compute_rce()
# Confusion matrix
#conf = cm.confMatrix()
# Prediction stats
#max_pred, min_pred, avg = cm.computeStatistics()
outputs = (loss,) + outputs + (preds,) #, prauc, rce, conf, max_pred, min_pred, avg)
return outputs # (loss), logits, (hidden_states), (attentions), (preds, prauc, rce, conf, max_pred, min_pred, avg)
def __str__(self):
return str(self.ffnn)
def get_params_string(self):
return self.ffnn.get_params_string()
| [
"torch.nn.Linear",
"torch.sigmoid",
"torch.nn.Dropout",
"torch.nn.ReLU",
"torch.nn.BCEWithLogitsLoss"
] | 1.5.0 | MaurizioFD/recsys-challenge-2020-twitter | 567f0db40be7db3d21c360f2ca6cdf2addc7c698 |
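# Hedged sketch (separate from the record above): the double-input pattern used by the classifiers,
# a pooled [CLS] vector concatenated with extra tabular features before the FFNN head, reproduced
# with random tensors so no pretrained weights need to be downloaded. 768 is assumed to be the
# DistilBERT hidden size; the feature count and batch size are made up.
import torch

batch, hidden, n_extra = 8, 768, 12
pooled_cls = torch.randn(batch, hidden)            # stands in for distilbert_output[0][:, 0]
extra_features = torch.randn(batch, n_extra)       # stands in for input_features.float()

ffnn_input = torch.cat([pooled_cls, extra_features], dim=1)
assert ffnn_input.shape == (batch, hidden + n_extra)
# so ffnn_params['input_size'] must equal hidden + n_extra for the heads defined above
print(ffnn_input.shape)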
1.6 | import json
import logging
import os
import shutil
from collections import OrderedDict
from typing import List, Dict, Tuple, Iterable, Type, Union, Callable
from zipfile import ZipFile
import requests
import numpy as np
from numpy import ndarray
import transformers
import torch
from torch import nn, Tensor, device
from torch.optim import Optimizer
from torch.utils.data import DataLoader
import torch.multiprocessing as mp
from tqdm.autonotebook import trange
import math
import queue
from . import __DOWNLOAD_SERVER__
from .evaluation import SentenceEvaluator
from .util import import_from_string, batch_to_device, http_get
from .models import Transformer, Pooling
from . import __version__
logger = logging.getLogger(__name__)
class SentenceTransformer(nn.Sequential):
"""
Loads or create a SentenceTransformer model, that can be used to map sentences / text to embeddings.
:param model_name_or_path: If it is a filepath on disc, it loads the model from that path. If it is not a path, it first tries to download a pre-trained SentenceTransformer model. If that fails, tries to construct a model from Huggingface models repository with that name.
:param modules: This parameter can be used to create custom SentenceTransformer models from scratch.
:param device: Device (like 'cuda' / 'cpu') that should be used for computation. If None, checks if a GPU can be used.
"""
def __init__(self, model_name_or_path: str = None, modules: Iterable[nn.Module] = None, device: str = None):
save_model_to = None
if model_name_or_path is not None and model_name_or_path != "":
logger.info("Load pretrained SentenceTransformer: {}".format(model_name_or_path))
model_path = model_name_or_path
if not os.path.isdir(model_path) and not model_path.startswith('http://') and not model_path.startswith('https://'):
logger.info("Did not find folder {}".format(model_path))
if '\\' in model_path or model_path.count('/') > 1:
raise AttributeError("Path {} not found".format(model_path))
model_path = __DOWNLOAD_SERVER__ + model_path + '.zip'
logger.info("Search model on server: {}".format(model_path))
if model_path.startswith('http://') or model_path.startswith('https://'):
model_url = model_path
folder_name = model_url.replace("https://", "").replace("http://", "").replace("/", "_")[:250][0:-4] #remove .zip file end
cache_folder = os.getenv('SENTENCE_TRANSFORMERS_HOME')
if cache_folder is None:
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
cache_folder = os.path.join(torch_cache_home, 'sentence_transformers')
model_path = os.path.join(cache_folder, folder_name)
if not os.path.exists(model_path) or not os.listdir(model_path):
if os.path.exists(model_path):
os.remove(model_path)
model_url = model_url.rstrip("/")
logger.info("Downloading sentence transformer model from {} and saving it at {}".format(model_url, model_path))
model_path_tmp = model_path.rstrip("/").rstrip("\\")+"_part"
try:
zip_save_path = os.path.join(model_path_tmp, 'model.zip')
http_get(model_url, zip_save_path)
with ZipFile(zip_save_path, 'r') as zip:
zip.extractall(model_path_tmp)
os.remove(zip_save_path)
os.rename(model_path_tmp, model_path)
except requests.exceptions.HTTPError as e:
shutil.rmtree(model_path_tmp)
if e.response.status_code == 429:
raise Exception("Too many requests were detected from this IP for the model {}. Please contact [email protected] for more information.".format(model_name_or_path))
if e.response.status_code == 404:
logger.warning('SentenceTransformer-Model {} not found. Try to create it from scratch'.format(model_url))
logger.warning('Try to create Transformer Model {} with mean pooling'.format(model_name_or_path))
save_model_to = model_path
model_path = None
transformer_model = Transformer(model_name_or_path)
pooling_model = Pooling(transformer_model.get_word_embedding_dimension())
modules = [transformer_model, pooling_model]
else:
raise e
except Exception as e:
shutil.rmtree(model_path)
raise e
#### Load from disk
if model_path is not None:
logger.info("Load SentenceTransformer from folder: {}".format(model_path))
if os.path.exists(os.path.join(model_path, 'config.json')):
with open(os.path.join(model_path, 'config.json')) as fIn:
config = json.load(fIn)
if config['__version__'] > __version__:
logger.warning("You try to use a model that was created with version {}, however, your version is {}. This might cause unexpected behavior or errors. In that case, try to update to the latest version.\n\n\n".format(config['__version__'], __version__))
with open(os.path.join(model_path, 'modules.json')) as fIn:
contained_modules = json.load(fIn)
modules = OrderedDict()
for module_config in contained_modules:
module_class = import_from_string(module_config['type'])
module = module_class.load(os.path.join(model_path, module_config['path']))
modules[module_config['name']] = module
if modules is not None and not isinstance(modules, OrderedDict):
modules = OrderedDict([(str(idx), module) for idx, module in enumerate(modules)])
super().__init__(modules)
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
logger.info("Use pytorch device: {}".format(device))
self._target_device = torch.device(device)
#We created a new model from scratch based on a Transformer model. Save the SBERT model in the cache folder
if save_model_to is not None:
self.save(save_model_to)
def encode(self, sentences: Union[str, List[str], List[int]],
batch_size: int = 32,
show_progress_bar: bool = None,
output_value: str = 'sentence_embedding',
convert_to_numpy: bool = True,
convert_to_tensor: bool = False,
device: str = None,
normalize_embeddings: bool = False) -> Union[List[Tensor], ndarray, Tensor]:
"""
Computes sentence embeddings
:param sentences: the sentences to embed
:param batch_size: the batch size used for the computation
        :param show_progress_bar: Output a progress bar when encoding sentences
:param output_value: Default sentence_embedding, to get sentence embeddings. Can be set to token_embeddings to get wordpiece token embeddings.
:param convert_to_numpy: If true, the output is a list of numpy vectors. Else, it is a list of pytorch tensors.
:param convert_to_tensor: If true, you get one large tensor as return. Overwrites any setting from convert_to_numpy
:param device: Which torch.device to use for the computation
:param normalize_embeddings: If set to true, returned vectors will have length 1. In that case, the faster dot-product (util.dot_score) instead of cosine similarity can be used.
:return:
By default, a list of tensors is returned. If convert_to_tensor, a stacked tensor is returned. If convert_to_numpy, a numpy matrix is returned.
"""
self.eval()
if show_progress_bar is None:
show_progress_bar = (logger.getEffectiveLevel()==logging.INFO or logger.getEffectiveLevel()==logging.DEBUG)
if convert_to_tensor:
convert_to_numpy = False
if output_value == 'token_embeddings':
convert_to_tensor = False
convert_to_numpy = False
input_was_string = False
if isinstance(sentences, str) or not hasattr(sentences, '__len__'): #Cast an individual sentence to a list with length 1
sentences = [sentences]
input_was_string = True
if device is None:
device = self._target_device
self.to(device)
all_embeddings = []
length_sorted_idx = np.argsort([-self._text_length(sen) for sen in sentences])
sentences_sorted = [sentences[idx] for idx in length_sorted_idx]
for start_index in trange(0, len(sentences), batch_size, desc="Batches", disable=not show_progress_bar):
sentences_batch = sentences_sorted[start_index:start_index+batch_size]
features = self.tokenize(sentences_batch)
features = batch_to_device(features, device)
with torch.no_grad():
out_features = self.forward(features)
if output_value == 'token_embeddings':
embeddings = []
for token_emb, attention in zip(out_features[output_value], out_features['attention_mask']):
last_mask_id = len(attention)-1
while last_mask_id > 0 and attention[last_mask_id].item() == 0:
last_mask_id -= 1
embeddings.append(token_emb[0:last_mask_id+1])
else: #Sentence embeddings
embeddings = out_features[output_value]
embeddings = embeddings.detach()
if normalize_embeddings:
embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
# fixes for #522 and #487 to avoid oom problems on gpu with large datasets
if convert_to_numpy:
embeddings = embeddings.cpu()
all_embeddings.extend(embeddings)
all_embeddings = [all_embeddings[idx] for idx in np.argsort(length_sorted_idx)]
if convert_to_tensor:
all_embeddings = torch.stack(all_embeddings)
elif convert_to_numpy:
all_embeddings = np.asarray([emb.numpy() for emb in all_embeddings])
if input_was_string:
all_embeddings = all_embeddings[0]
return all_embeddings
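    # Usage sketch in comments only (kept as comments so the class body above stays untouched);
    # the model name below is hypothetical and any pretrained SentenceTransformer would do:
    #   model = SentenceTransformer('some-pretrained-model')
    #   embeddings = model.encode(['first sentence', 'second sentence'], batch_size=32)
    #   # embeddings -> numpy array of shape (2, model.get_sentence_embedding_dimension())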
def start_multi_process_pool(self, target_devices: List[str] = None):
"""
Starts multi process to process the encoding with several, independent processes.
This method is recommended if you want to encode on multiple GPUs. It is advised
to start only one process per GPU. This method works together with encode_multi_process
:param target_devices: PyTorch target devices, e.g. cuda:0, cuda:1... If None, all available CUDA devices will be used
        :return: Returns a dict with the target processes, an input queue and an output queue.
"""
if target_devices is None:
if torch.cuda.is_available():
target_devices = ['cuda:{}'.format(i) for i in range(torch.cuda.device_count())]
else:
logger.info("CUDA is not available. Start 4 CPU worker")
target_devices = ['cpu']*4
logger.info("Start multi-process pool on devices: {}".format(', '.join(map(str, target_devices))))
ctx = mp.get_context('spawn')
input_queue = ctx.Queue()
output_queue = ctx.Queue()
processes = []
for cuda_id in target_devices:
p = ctx.Process(target=SentenceTransformer._encode_multi_process_worker, args=(cuda_id, self, input_queue, output_queue), daemon=True)
p.start()
processes.append(p)
return {'input': input_queue, 'output': output_queue, 'processes': processes}
@staticmethod
def stop_multi_process_pool(pool):
"""
Stops all processes started with start_multi_process_pool
"""
for p in pool['processes']:
p.terminate()
for p in pool['processes']:
p.join()
p.close()
pool['input'].close()
pool['output'].close()
def encode_multi_process(self, sentences: List[str], pool: Dict[str, object], batch_size: int = 32, chunk_size: int = None):
"""
This method allows to run encode() on multiple GPUs. The sentences are chunked into smaller packages
and sent to individual processes, which encode these on the different GPUs. This method is only suitable
for encoding large sets of sentences
:param sentences: List of sentences
:param pool: A pool of workers started with SentenceTransformer.start_multi_process_pool
:param batch_size: Encode sentences with batch size
        :param chunk_size: Sentences are chunked and sent to the individual processes. If None, a sensible size is determined.
:return: Numpy matrix with all embeddings
"""
if chunk_size is None:
chunk_size = min(math.ceil(len(sentences) / len(pool["processes"]) / 10), 5000)
logger.info("Chunk data into packages of size {}".format(chunk_size))
input_queue = pool['input']
last_chunk_id = 0
chunk = []
for sentence in sentences:
chunk.append(sentence)
if len(chunk) >= chunk_size:
input_queue.put([last_chunk_id, batch_size, chunk])
last_chunk_id += 1
chunk = []
if len(chunk) > 0:
input_queue.put([last_chunk_id, batch_size, chunk])
last_chunk_id += 1
output_queue = pool['output']
results_list = sorted([output_queue.get() for _ in range(last_chunk_id)], key=lambda x: x[0])
embeddings = np.concatenate([result[1] for result in results_list])
return embeddings
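    # Usage sketch in comments only: the intended pairing of the multi-process helpers defined in
    # this class ('model' is a loaded SentenceTransformer, 'sentences' a list of strings):
    #   pool = model.start_multi_process_pool()                 # one worker per visible GPU
    #   embeddings = model.encode_multi_process(sentences, pool, batch_size=32)
    #   model.stop_multi_process_pool(pool)                     # terminate the worker processes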
@staticmethod
def _encode_multi_process_worker(target_device: str, model, input_queue, results_queue):
"""
Internal working process to encode sentences in multi-process setup
"""
while True:
try:
id, batch_size, sentences = input_queue.get()
embeddings = model.encode(sentences, device=target_device, show_progress_bar=False, convert_to_numpy=True, batch_size=batch_size)
results_queue.put([id, embeddings])
except queue.Empty:
break
def get_max_seq_length(self):
"""
        Returns the maximal sequence length of inputs the model accepts. Longer inputs will be truncated.
"""
if hasattr(self._first_module(), 'max_seq_length'):
return self._first_module().max_seq_length
return None
def tokenize(self, text: str):
"""
Tokenizes the text
"""
return self._first_module().tokenize(text)
def get_sentence_features(self, *features):
return self._first_module().get_sentence_features(*features)
def get_sentence_embedding_dimension(self):
for mod in reversed(self._modules.values()):
sent_embedding_dim_method = getattr(mod, "get_sentence_embedding_dimension", None)
if callable(sent_embedding_dim_method):
return sent_embedding_dim_method()
return None
def _first_module(self):
"""Returns the first module of this sequential embedder"""
return self._modules[next(iter(self._modules))]
def _last_module(self):
"""Returns the last module of this sequential embedder"""
return self._modules[next(reversed(self._modules))]
def save(self, path):
"""
Saves all elements for this seq. sentence embedder into different sub-folders
"""
if path is None:
return
os.makedirs(path, exist_ok=True)
logger.info("Save model to {}".format(path))
contained_modules = []
for idx, name in enumerate(self._modules):
module = self._modules[name]
model_path = os.path.join(path, str(idx)+"_"+type(module).__name__)
os.makedirs(model_path, exist_ok=True)
module.save(model_path)
contained_modules.append({'idx': idx, 'name': name, 'path': os.path.basename(model_path), 'type': type(module).__module__})
with open(os.path.join(path, 'modules.json'), 'w') as fOut:
json.dump(contained_modules, fOut, indent=2)
with open(os.path.join(path, 'config.json'), 'w') as fOut:
json.dump({'__version__': __version__}, fOut, indent=2)
def smart_batching_collate(self, batch):
"""
Transforms a batch from a SmartBatchingDataset to a batch of tensors for the model
Here, batch is a list of tuples: [(tokens, label), ...]
:param batch:
a batch from a SmartBatchingDataset
:return:
a batch of tensors for the model
"""
num_texts = len(batch[0].texts)
texts = [[] for _ in range(num_texts)]
labels = []
for example in batch:
for idx, text in enumerate(example.texts):
texts[idx].append(text)
labels.append(example.label)
labels = torch.tensor(labels).to(self._target_device)
sentence_features = []
for idx in range(num_texts):
tokenized = self.tokenize(texts[idx])
batch_to_device(tokenized, self._target_device)
sentence_features.append(tokenized)
return sentence_features, labels
def _text_length(self, text: Union[List[int], List[List[int]]]):
"""
Help function to get the length for the input text. Text can be either
a list of ints (which means a single text as input), or a tuple of list of ints
(representing several text inputs to the model).
"""
if isinstance(text, dict): #{key: value} case
return len(next(iter(text.values())))
elif not hasattr(text, '__len__'): #Object has no len() method
return 1
elif len(text) == 0 or isinstance(text[0], int): #Empty string or list of ints
return len(text)
else:
return sum([len(t) for t in text]) #Sum of length of individual strings
def fit(self,
train_objectives: Iterable[Tuple[DataLoader, nn.Module]],
evaluator: SentenceEvaluator = None,
epochs: int = 1,
steps_per_epoch = None,
scheduler: str = 'WarmupLinear',
warmup_steps: int = 10000,
optimizer_class: Type[Optimizer] = transformers.AdamW,
optimizer_params : Dict[str, object]= {'lr': 2e-5},
weight_decay: float = 0.01,
evaluation_steps: int = 0,
output_path: str = None,
save_best_model: bool = True,
max_grad_norm: float = 1,
use_amp: bool = False,
callback: Callable[[float, int, int], None] = None,
show_progress_bar: bool = True
):
"""
Train the model with the given training objective
Each training objective is sampled in turn for one batch.
We sample only as many batches from each objective as there are in the smallest one
to make sure of equal training with each dataset.
:param train_objectives: Tuples of (DataLoader, LossFunction). Pass more than one for multi-task learning
:param evaluator: An evaluator (sentence_transformers.evaluation) evaluates the model performance during training on held-out dev data. It is used to determine the best model that is saved to disc.
:param epochs: Number of epochs for training
        :param steps_per_epoch: Number of training steps per epoch. If set to None (default), one epoch equals the DataLoader size from train_objectives.
:param scheduler: Learning rate scheduler. Available schedulers: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
        :param warmup_steps: Behavior depends on the scheduler. For WarmupLinear (default), the learning rate is increased from 0 up to the maximal learning rate. After this many training steps, the learning rate is decreased linearly back to zero.
:param optimizer_class: Optimizer
:param optimizer_params: Optimizer parameters
:param weight_decay: Weight decay for model parameters
:param evaluation_steps: If > 0, evaluate the model using evaluator after each number of training steps
:param output_path: Storage path for the model and evaluation files
:param save_best_model: If true, the best model (according to evaluator) is stored at output_path
        :param max_grad_norm: Used for gradient clipping (maximum gradient norm).
:param use_amp: Use Automatic Mixed Precision (AMP). Only for Pytorch >= 1.6.0
:param callback: Callback function that is invoked after each evaluation.
It must accept the following three parameters in this order:
`score`, `epoch`, `steps`
:param show_progress_bar: If True, output a tqdm progress bar
"""
if use_amp:
from torch.cuda.amp import autocast
scaler = torch.cuda.amp.GradScaler()
self.to(self._target_device)
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
dataloaders = [dataloader for dataloader, _ in train_objectives]
# Use smart batching
for dataloader in dataloaders:
dataloader.collate_fn = self.smart_batching_collate
loss_models = [loss for _, loss in train_objectives]
for loss_model in loss_models:
loss_model.to(self._target_device)
self.best_score = -9999999
if steps_per_epoch is None or steps_per_epoch == 0:
steps_per_epoch = min([len(dataloader) for dataloader in dataloaders])
num_train_steps = int(steps_per_epoch * epochs)
# Prepare optimizers
optimizers = []
schedulers = []
for loss_model in loss_models:
param_optimizer = list(loss_model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = optimizer_class(optimizer_grouped_parameters, **optimizer_params)
scheduler_obj = self._get_scheduler(optimizer, scheduler=scheduler, warmup_steps=warmup_steps, t_total=num_train_steps)
optimizers.append(optimizer)
schedulers.append(scheduler_obj)
global_step = 0
data_iterators = [iter(dataloader) for dataloader in dataloaders]
num_train_objectives = len(train_objectives)
skip_scheduler = False
for epoch in trange(epochs, desc="Epoch", disable=not show_progress_bar):
training_steps = 0
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
for _ in trange(steps_per_epoch, desc="Iteration", smoothing=0.05, disable=not show_progress_bar):
for train_idx in range(num_train_objectives):
loss_model = loss_models[train_idx]
optimizer = optimizers[train_idx]
scheduler = schedulers[train_idx]
data_iterator = data_iterators[train_idx]
try:
data = next(data_iterator)
except StopIteration:
data_iterator = iter(dataloaders[train_idx])
data_iterators[train_idx] = data_iterator
data = next(data_iterator)
features, labels = data
if use_amp:
with autocast():
loss_value = loss_model(features, labels)
scale_before_step = scaler.get_scale()
scaler.scale(loss_value).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
scaler.step(optimizer)
scaler.update()
skip_scheduler = scaler.get_scale() != scale_before_step
else:
loss_value = loss_model(features, labels)
loss_value.backward()
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
optimizer.step()
optimizer.zero_grad()
if not skip_scheduler:
scheduler.step()
training_steps += 1
global_step += 1
if evaluation_steps > 0 and training_steps % evaluation_steps == 0:
self._eval_during_training(evaluator, output_path, save_best_model, epoch,
training_steps, callback)
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
self._eval_during_training(evaluator, output_path, save_best_model, epoch, -1, callback)
if evaluator is None and output_path is not None: #No evaluator, but output path: save final model version
self.save(output_path)
def evaluate(self, evaluator: SentenceEvaluator, output_path: str = None):
"""
Evaluate the model
:param evaluator:
the evaluator
:param output_path:
the evaluator can write the results to this path
"""
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
return evaluator(self, output_path)
def _eval_during_training(self, evaluator, output_path, save_best_model, epoch, steps, callback):
"""Runs evaluation during the training"""
if evaluator is not None:
score = evaluator(self, output_path=output_path, epoch=epoch, steps=steps)
if callback is not None:
callback(score, epoch, steps)
if score > self.best_score:
self.best_score = score
if save_best_model:
self.save(output_path)
@staticmethod
def _get_scheduler(optimizer, scheduler: str, warmup_steps: int, t_total: int):
"""
Returns the correct learning rate scheduler. Available scheduler: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
"""
scheduler = scheduler.lower()
if scheduler == 'constantlr':
return transformers.get_constant_schedule(optimizer)
elif scheduler == 'warmupconstant':
return transformers.get_constant_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps)
elif scheduler == 'warmuplinear':
return transformers.get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosine':
return transformers.get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosinewithhardrestarts':
return transformers.get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
else:
raise ValueError("Unknown scheduler {}".format(scheduler))
@property
def device(self) -> device:
"""
Get torch.device from module, assuming that the whole module has one device.
"""
try:
return next(self.parameters()).device
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].device
@property
def tokenizer(self):
"""
Property to get the tokenizer that is used by this model
"""
return self._first_module().tokenizer
@tokenizer.setter
def tokenizer(self, value):
"""
        Property to set the tokenizer that should be used by this model
"""
self._first_module().tokenizer = value
@property
def max_seq_length(self):
"""
Property to get the maximal input sequence length for the model. Longer inputs will be truncated.
"""
return self._first_module().max_seq_length
@max_seq_length.setter
def max_seq_length(self, value):
"""
Property to set the maximal input sequence length for the model. Longer inputs will be truncated.
"""
self._first_module().max_seq_length = value
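# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). It shows one way the
# `fit` method above could be driven end to end. Hedged assumptions: the model is a
# SentenceTransformer-style module whose forward pass returns a dict containing a
# 'sentence_embedding' entry (the usual sentence-transformers convention, not shown
# in this excerpt); `_ToyExample` and `_ToyCosineLoss` are hypothetical helpers that
# exist only for this example.
class _ToyExample:
    """Minimal stand-in for an InputExample: `smart_batching_collate` only reads .texts and .label."""
    def __init__(self, texts, label):
        self.texts = texts
        self.label = label
class _ToyCosineLoss(nn.Module):
    """Toy loss: regress the cosine similarity of two encoded texts onto the gold label."""
    def __init__(self, model):
        super().__init__()
        self.model = model
    def forward(self, sentence_features, labels):
        embeddings = [self.model(features)['sentence_embedding'] for features in sentence_features]
        scores = torch.cosine_similarity(embeddings[0], embeddings[1])
        return nn.functional.mse_loss(scores, labels.float())
def _example_fit(model):
    """Hedged sketch: train for one epoch on two toy sentence pairs."""
    train_examples = [_ToyExample(["a happy dog", "a joyful dog"], 1.0),
                      _ToyExample(["a happy dog", "a spreadsheet"], 0.0)]
    train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=2)
    model.fit(train_objectives=[(train_dataloader, _ToyCosineLoss(model))],
              epochs=1, warmup_steps=1, show_progress_bar=False)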
| [
"torch.device",
"torch.nn.functional.normalize",
"torch.stack",
"torch.cuda.amp.autocast",
"torch.is_tensor",
"torch.no_grad",
"torch.hub._get_torch_home",
"torch.cuda.device_count",
"torch.multiprocessing.get_context",
"torch.cuda.is_available",
"torch.tensor",
"torch.cuda.amp.GradScaler"
] | 1.6.0 | adbelniak/sentence-transformers | a36e6f126cf0e333e1b00eb7cfcef2863f8919ad |
1.9 | import random
from typing import Any, Callable, Optional
import torch
from torchmetrics import Metric
class CentroidAggregator(Metric):
"""
The centroid aggregator aggregates kmeans centroids over batches and processes.
"""
def __init__(
self,
num_clusters: int,
num_features: int,
*,
dist_sync_fn: Optional[Callable[[Any], Any]] = None,
):
super().__init__(dist_sync_fn=dist_sync_fn) # type: ignore
self.num_clusters = num_clusters
self.num_features = num_features
self.centroids: torch.Tensor
self.add_state("centroids", torch.zeros(num_clusters, num_features), dist_reduce_fx="sum")
self.cluster_counts: torch.Tensor
self.add_state("cluster_counts", torch.zeros(num_clusters), dist_reduce_fx="sum")
def update(self, data: torch.Tensor, assignments: torch.Tensor) -> None:
indices = assignments.unsqueeze(1).expand(-1, self.num_features)
self.centroids.scatter_add_(0, indices, data)
counts = assignments.bincount(minlength=self.num_clusters).float()
self.cluster_counts.add_(counts)
def compute(self) -> torch.Tensor:
return self.centroids / self.cluster_counts.unsqueeze(-1)
class UniformSampler(Metric):
"""
The uniform sampler randomly samples a specified number of datapoints uniformly from all
    datapoints. The idea is the following: sample the requested number of choices from each batch and
    track the number of datapoints that have already been sampled from. When sampling from the union of
existing choices and a new batch, more weight is put on the existing choices (according to the
number of datapoints they were already sampled from).
"""
def __init__(
self,
num_choices: int,
num_features: int,
*,
dist_sync_fn: Optional[Callable[[Any], Any]] = None,
):
super().__init__(dist_sync_fn=dist_sync_fn) # type: ignore
self.num_choices = num_choices
self.choices: torch.Tensor
self.add_state("choices", torch.empty(num_choices, num_features), dist_reduce_fx="cat")
self.choice_weights: torch.Tensor
self.add_state("choice_weights", torch.zeros(num_choices), dist_reduce_fx="cat")
def update(self, data: torch.Tensor) -> None:
if self.num_choices == 1:
# If there is only one choice, the fastest thing is to use the `random` package. The
# cumulative weight of the data is its size, the cumulative weight of the current
# choice is some value.
cum_weight = data.size(0) + self.choice_weights.item()
if random.random() * cum_weight < data.size(0):
# Use some item from the data, else keep the current choice
self.choices.copy_(data[random.randrange(data.size(0))])
else:
# The choices are computed from scratch every time, weighting the current choices by
# the cumulative weight put on them
weights = torch.cat(
[
torch.ones(data.size(0), device=data.device, dtype=data.dtype),
self.choice_weights,
]
)
pool = torch.cat([data, self.choices])
samples = weights.multinomial(self.num_choices)
self.choices.copy_(pool[samples])
# The weights are the cumulative counts, divided by the number of choices
self.choice_weights.add_(data.size(0) / self.num_choices)
def compute(self) -> torch.Tensor:
# In the ddp setting, there are "too many" choices, so we sample
if self.choices.size(0) > self.num_choices:
samples = self.choice_weights.multinomial(self.num_choices)
return self.choices[samples]
return self.choices
class DistanceSampler(Metric):
"""
The distance sampler may be used for kmeans++ initialization, to iteratively select centroids
according to their squared distances to existing choices. Computing the distance to existing
choices is not part of this sampler. Within each "cycle", it computes a given number of
candidates. Candidates are sampled independently and may be duplicates.
"""
def __init__(
self,
num_choices: int,
num_features: int,
*,
dist_sync_fn: Optional[Callable[[Any], Any]] = None,
):
super().__init__(dist_sync_fn=dist_sync_fn) # type: ignore
self.num_choices = num_choices
self.num_features = num_features
self.choices: torch.Tensor
self.add_state("choices", torch.empty(num_choices, num_features), dist_reduce_fx="cat")
# Cumulative distance is the same for all choices
self.cumulative_squared_distance: torch.Tensor
self.add_state("cumulative_squared_distance", torch.zeros(1), dist_reduce_fx="cat")
def update(self, data: torch.Tensor, shortest_distances: torch.Tensor) -> None:
eps = torch.finfo(data.dtype).eps
squared_distances = shortest_distances.square()
# For all choices, check if we should use a sample from the data or the existing choice
data_dist = squared_distances.sum()
cum_dist = data_dist + eps + self.cumulative_squared_distance
use_choice_from_data = (
torch.rand(self.num_choices, device=data.device, dtype=data.dtype) * cum_dist
< data_dist + eps
)
# Then, we sample from the data `num_choices` times and replace if needed
choices = (squared_distances + eps).multinomial(self.num_choices, replacement=True)
self.choices.masked_scatter_(
use_choice_from_data.unsqueeze(1), data[choices[use_choice_from_data]]
)
# In any case, the cumulative distances are updated
self.cumulative_squared_distance.add_(data_dist)
def compute(self) -> torch.Tensor:
# Upon computation, we sample if there is more than one choice (ddp setting)
if self.choices.size(0) > self.num_choices:
# choices now have shape [num_choices, num_processes, num_features]
choices = self.choices.reshape(-1, self.num_choices, self.num_features).transpose(0, 1)
# For each choice, we sample across processes
choice_indices = torch.arange(self.num_choices, device=self.choices.device)
process_indices = self.cumulative_squared_distance.multinomial(
self.num_choices, replacement=True
)
return choices[choice_indices, process_indices]
# Otherwise, we can return the choices
return self.choices
class BatchSummer(Metric):
"""
Sums the values for a batch of items independently.
"""
def __init__(self, num_values: int, *, dist_sync_fn: Optional[Callable[[Any], Any]] = None):
super().__init__(dist_sync_fn=dist_sync_fn) # type: ignore
self.sums: torch.Tensor
self.add_state("sums", torch.zeros(num_values), dist_reduce_fx="sum")
def update(self, values: torch.Tensor) -> None:
self.sums.add_(values.sum(0))
def compute(self) -> torch.Tensor:
return self.sums
class BatchAverager(Metric):
"""
Averages the values for a batch of items independently.
"""
def __init__(
self,
num_values: int,
for_variance: bool,
*,
dist_sync_fn: Optional[Callable[[Any], Any]] = None,
):
super().__init__(dist_sync_fn=dist_sync_fn) # type: ignore
self.for_variance = for_variance
self.sums: torch.Tensor
self.add_state("sums", torch.zeros(num_values), dist_reduce_fx="sum")
self.counts: torch.Tensor
self.add_state("counts", torch.zeros(num_values), dist_reduce_fx="sum")
def update(self, values: torch.Tensor) -> None:
self.sums.add_(values.sum(0))
self.counts.add_(values.size(0))
def compute(self) -> torch.Tensor:
return self.sums / (self.counts - 1 if self.for_variance else self.counts)
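# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): one k-means-style centroid
# update using the metrics above. Only the aggregation across batches is delegated
# to CentroidAggregator; the nearest-centroid assignment is done with plain torch.
# This is a hedged usage example, not the library's own training loop.
def _example_centroid_update() -> torch.Tensor:
    num_clusters, num_features = 4, 8
    aggregator = CentroidAggregator(num_clusters=num_clusters, num_features=num_features)
    centroids = torch.randn(num_clusters, num_features)
    for _ in range(3):  # three "batches" of data
        data = torch.randn(32, num_features)
        # Hard-assign each datapoint to its nearest current centroid
        assignments = torch.cdist(data, centroids).argmin(dim=1)
        aggregator.update(data, assignments)
    # Mean of the assigned datapoints per cluster, aggregated over all batches
    # (a cluster that received no points at all would yield NaN here).
    return aggregator.compute()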
| [
"torch.zeros",
"torch.rand",
"torch.cat",
"torch.arange",
"torch.finfo",
"torch.empty"
] | 1.9.0 | borchero/leviathan | 7430fa4a515fe5ea7afbaad108226b4cd9111d8c |
1.3 | import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
# Code from https://github.com/locuslab/TCN
class Chomp1d(nn.Module):
def __init__(self, chomp_size):
super(Chomp1d, self).__init__()
self.chomp_size = chomp_size
def forward(self, x):
return x[:, :, :-self.chomp_size].contiguous()
class TemporalBlock(nn.Module):
def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):
super(TemporalBlock, self).__init__()
self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
stride=stride, padding=padding, dilation=dilation))
self.chomp1 = Chomp1d(padding)
self.relu1 = nn.ReLU()
self.dropout1 = nn.Dropout(dropout)
self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size,
stride=stride, padding=padding, dilation=dilation))
self.chomp2 = Chomp1d(padding)
self.relu2 = nn.ReLU()
self.dropout2 = nn.Dropout(dropout)
self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,
self.conv2, self.chomp2, self.relu2, self.dropout2)
self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
self.relu = nn.ReLU()
self.init_weights()
def init_weights(self):
self.conv1.weight.data.normal_(0, 0.01)
self.conv2.weight.data.normal_(0, 0.01)
if self.downsample is not None:
self.downsample.weight.data.normal_(0, 0.01)
def forward(self, x):
out = self.net(x)
res = x if self.downsample is None else self.downsample(x)
return self.relu(out + res)
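# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the upstream TCN repository
# stacks TemporalBlock layers with exponentially growing dilation; the class below
# is a hedged re-sketch of that idea rather than a verbatim copy. With kernel_size k
# and dilation d, padding=(k-1)*d together with Chomp1d keeps the convolution causal
# and the output length equal to the input length.
class TemporalConvNetSketch(nn.Module):
    def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
        super(TemporalConvNetSketch, self).__init__()
        layers = []
        for i, out_channels in enumerate(num_channels):
            dilation = 2 ** i
            in_channels = num_inputs if i == 0 else num_channels[i - 1]
            layers.append(TemporalBlock(in_channels, out_channels, kernel_size, stride=1,
                                        dilation=dilation, padding=(kernel_size - 1) * dilation,
                                        dropout=dropout))
        self.network = nn.Sequential(*layers)
    def forward(self, x):
        # x has shape (batch, channels, time); e.g. TemporalConvNetSketch(1, [32, 32, 32])
        # maps torch.randn(4, 1, 100) to a (4, 32, 100) output.
        return self.network(x)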
| [
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.Sequential",
"torch.nn.Conv1d"
] | 1.3.1 | abelrguezr/Deep-SAD-PyTorch | 268ca21570a4a8dadc331a9dbf26ccade36ae5d9 |
1.0 | """Contains classes describing linguistic tasks of interest on annotated data."""
from collections import Counter, OrderedDict
import numpy as np
import sys
import torch
import math
import itertools
PTB_TRAIN_EMPIRICAL_POS_DISTRIBUTION = [0.00003789361998, 0.00006105083219, 0.0001021022538, 0.0001494692788, 0.0001768368932, 0.0002463085299, 0.0003894622053, 0.0004747228503, 0.0009083942789, 0.001437852358, 0.001448378364, 0.001860997781, 0.00204941328, 0.002255722989, 0.002487295111, 0.002802022677, 0.002813601283, 0.003408320597, 0.004519866783, 0.005023009848, 0.00728294324, 0.007465043136, 0.007759771291, 0.008849212865, 0.009158677428, 0.01031864324, 0.01314803353, 0.01562690784, 0.01835314328, 0.02107727351, 0.02281195923, 0.02353299061, 0.02520662549, 0.02782865347, 0.03146117799, 0.03259903919, 0.03849149709, 0.04155456471, 0.05129006724, 0.06300445882, 0.06443704817, 0.08614693462, 0.09627716236, 0.1037379951, 0.1399274548]
#PTB_DEV_EMPIRICAL_DEPTH_DISTRIBUTION = {14: 0.00009970835307, 13: 0.000373906324, 12: 0.0007228855597, 11: 0.001395916943, 10: 0.003938479946, 9: 0.007702470274, 8: 0.01570406561, 7: 0.02921454745, 0: 0.04237605005, 6: 0.05309469801, 5: 0.08729466311, 4: 0.1302440362, 3: 0.183563078, 1: 0.2192088142, 2: 0.22506668}
PTB_DEV_EMPIRICAL_DEPTH_DISTRIBUTION = [0.00009970835307, 0.000373906324, 0.0007228855597, 0.001395916943, 0.003938479946, 0.007702470274, 0.01570406561, 0.02921454745, 0.04237605005, 0.05309469801, 0.08729466311, 0.1302440362, 0.183563078, 0.2192088142, 0.22506668]
PTB_TRAIN_EMPIRICAL_DEP_SEQ_LEN_dict = OrderedDict({-44: 7.690651244347372e-06, -3: 0.047819370772888475, -2: 0.1088534777124927, -1: 0.277384211752194, 4: 0.035580248649741374, 1: 0.17205854563192982, 3: 0.06036172428795556, 2: 0.09961151224571411, -4: 0.02238199244997781, 15: 0.003433326448369362, 6: 0.01574166443271559, 7: 0.011697480542652352, 8: 0.009206808203947281, 11: 0.00579765237377444, -13: 0.0016556873464616411, -11: 0.002414864490725075, 5: 0.022290803299509117, -8: 0.004191404928169318, 19: 0.0021665663219790025, -7: 0.005423007791728375, -5: 0.012027079881695811, 9: 0.00793565341970301, 22: 0.0015447222356503435, -10: 0.0029543087422928688, -19: 0.0007163292301877837, -6: 0.00748410232521347, 12: 0.004976950019556227, 35: 0.0003317966679704152, 13: 0.004389164531595393, 18: 0.002396187194845945, -9: 0.0034783716913719684, 28: 0.0008723395840016876, 43: 0.00011865576205564516, -17: 0.0009151874980773372, -12: 0.0020545025467042263, 26: 0.0009964886683747236, 25: 0.0011404137130903674, -23: 0.0003471779704591099, -26: 0.00023731152411129032, 20: 0.001866630923449455, 34: 0.00038343389775389035, 10: 0.006666695964385693, 36: 0.0002955407406756347, -22: 0.00042518314736606183, -15: 0.0012920294090503583, -21: 0.0005306549358599686, 16: 0.0030652738531041666, 17: 0.0026005387850528898, -16: 0.001105256450259065, 14: 0.003947501417277158, 23: 0.001423869144667742, -20: 0.0005767988433260529, 21: 0.0017677511217364173, 32: 0.00048780702178431896, 38: 0.0002647781356982452, 37: 0.0002450021753556377, 50: 4.834123639304062e-05, 46: 6.042654549130078e-05, 31: 0.0005910814813512694, -14: 0.0015601035381390383, 27: 0.0009470487675182048, 45: 0.00010107713063999403, 24: 0.0012953254024407929, 42: 0.00013623439347129629, 29: 0.000745993170701695, 40: 0.00020654891913390083, 41: 0.00013953038686173087, 47: 5.49332231739098e-05, 30: 0.0006273374086460499, -18: 0.0008174063608277777, 56: 1.7578631415651135e-05, -35: 4.1749249612171444e-05, -27: 0.0001658983339852076, 39: 0.00019885826788955345, 33: 0.0004647350680512769, -31: 8.789315707825567e-05, 57: 2.1973289269563917e-05, 61: 1.867729587912933e-05, -30: 0.00011975442651912336, 44: 8.239983476086469e-05, -24: 0.00028455409604085275, -29: 0.000106570452957385, -25: 0.0002614821423078106, 65: 8.789315707825568e-06, 49: 4.834123639304062e-05, 51: 3.186126944086768e-05, 62: 1.0986644634781959e-05, 90: 1.098664463478196e-06, -36: 3.405859836782407e-05, -28: 0.00013953038686173087, -38: 2.1973289269563917e-05, -33: 6.921586119912634e-05, 52: 2.3071953733042113e-05, 55: 1.867729587912933e-05, 72: 4.394657853912784e-06, 73: 3.295993390434588e-06, 77: 2.197328926956392e-06, 85: 1.098664463478196e-06, 48: 5.603188763738799e-05, 68: 5.493322317390979e-06, -32: 6.482120334521356e-05, -40: 1.4282638025216547e-05, 53: 2.417061819652031e-05, 54: 2.5269282659998507e-05, 100: 1.098664463478196e-06, -34: 6.372253888173536e-05, -39: 2.3071953733042113e-05, -48: 3.295993390434588e-06, -37: 2.3071953733042113e-05, -67: 1.098664463478196e-06, -64: 2.197328926956392e-06, -63: 1.098664463478196e-06, -59: 1.098664463478196e-06, -41: 9.887980171303763e-06, 58: 1.2085309098260154e-05, -47: 3.295993390434588e-06, 59: 9.887980171303763e-06, 60: 9.887980171303763e-06, 63: 1.0986644634781959e-05, 67: 3.295993390434588e-06, 79: 3.295993390434588e-06, 64: 6.591986780869176e-06, 69: 2.197328926956392e-06, -43: 5.493322317390979e-06, 80: 1.098664463478196e-06, 81: 1.098664463478196e-06, -58: 1.098664463478196e-06, -56: 1.098664463478196e-06, -42: 5.493322317390979e-06, 
-49: 1.098664463478196e-06, 74: 4.394657853912784e-06, 75: 3.295993390434588e-06, 117: 1.098664463478196e-06, -62: 1.098664463478196e-06, 76: 1.098664463478196e-06, 78: 2.197328926956392e-06, -53: 2.197328926956392e-06, -65: 1.098664463478196e-06, -61: 1.098664463478196e-06, 127: 1.098664463478196e-06, -45: 4.394657853912784e-06, -46: 1.098664463478196e-06, -50: 1.098664463478196e-06, -77: 1.098664463478196e-06, -74: 1.098664463478196e-06, 70: 2.197328926956392e-06, 66: 1.098664463478196e-06, -55: 1.098664463478196e-06, -54: 2.197328926956392e-06, -66: 1.098664463478196e-06, 71: 2.197328926956392e-06, 83: 1.098664463478196e-06, 87: 1.098664463478196e-06, 86: 1.098664463478196e-06})
PTB_TRAIN_EMPIRICAL_DEP_SEQ_LEN_dists = list(sorted(PTB_TRAIN_EMPIRICAL_DEP_SEQ_LEN_dict.keys()))
PTB_TRAIN_EMPIRICAL_DEP_SEQ_LEN_probs = [PTB_TRAIN_EMPIRICAL_DEP_SEQ_LEN_dict[x] for x in PTB_TRAIN_EMPIRICAL_DEP_SEQ_LEN_dists]
class Task:
"""Abstract class representing a linguistic task mapping texts to labels."""
def __init__(self, args):
args['ignore_punct'] = True
def _register_observation(self):
"""
For labeling tasks that require a label vocabulary, keep state
that determines how future observations' labels should be encoded
as integers, etc.
"""
pass
def prepare(self, train_obs, dev_obs, test_obs):
"""Prepares task with corpus-specific information.
If the distribution of a certain quantity in the dataset must be known
for the definition of the task, this computes the necessary
statistics and stores them in the state of the task for future
use in label assignment.
A noop if no statistics are needed.
Args:
observations: the (training) observations of a dataset
"""
pass
def labels(self, observation):
"""Maps an observation to a matrix of labels.
    Should be overridden in implementing classes.
"""
raise NotImplementedError
class ParseDistanceTask(Task):
"""Maps observations to dependency parse distances between words."""
@staticmethod
def labels(observation):
"""Computes the distances between all pairs of words; returns them as a torch tensor.
Args:
observation: a single Observation class for a sentence:
Returns:
A torch tensor of shape (sentence_length, sentence_length) of distances
in the parse tree as specified by the observation annotation.
"""
sentence_length = len(observation[0]) #All observation fields must be of same length
distances = torch.zeros((sentence_length, sentence_length))
for i in range(sentence_length):
for j in range(i,sentence_length):
i_j_distance = ParseDistanceTask.distance_between_pairs(observation, i, j)
distances[i][j] = i_j_distance
distances[j][i] = i_j_distance
return distances
@staticmethod
def distance_between_pairs(observation, i, j, head_indices=None):
'''Computes path distance between a pair of words
TODO: It would be (much) more efficient to compute all pairs' distances at once;
this pair-by-pair method is an artefact of an older design, but
was unit-tested for correctness...
Args:
observation: an Observation namedtuple, with a head_indices field.
          or None, if head_indices != None
i: one of the two words to compute the distance between.
j: one of the two words to compute the distance between.
head_indices: the head indices (according to a dependency parse) of all
words, or None, if observation != None.
Returns:
The integer distance d_path(i,j)
'''
if i == j:
return 0
if observation:
head_indices = []
number_of_underscores = 0
for elt in observation.head_indices:
if elt == '_':
head_indices.append(0)
number_of_underscores += 1
else:
head_indices.append(int(elt) + number_of_underscores)
i_path = [i+1]
j_path = [j+1]
i_head = i+1
j_head = j+1
while True:
if not (i_head == 0 and (i_path == [i+1] or i_path[-1] == 0)):
i_head = head_indices[i_head - 1]
i_path.append(i_head)
if not (j_head == 0 and (j_path == [j+1] or j_path[-1] == 0)):
j_head = head_indices[j_head - 1]
j_path.append(j_head)
if i_head in j_path:
j_path_length = j_path.index(i_head)
i_path_length = len(i_path) - 1
break
elif j_head in i_path:
i_path_length = i_path.index(j_head)
j_path_length = len(j_path) - 1
break
elif i_head == j_head:
i_path_length = len(i_path) - 1
j_path_length = len(j_path) - 1
break
total_length = j_path_length + i_path_length
return total_length
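# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): what ParseDistanceTask computes
# on a toy sentence. `_ToyObservation` is a hypothetical stand-in for the project's
# Observation namedtuple, filled only with the fields used above; head_indices follow
# the convention used by this code (1-indexed heads, '0' marks the root).
from collections import namedtuple as _namedtuple
_ToyObservation = _namedtuple('_ToyObservation', ['sentence', 'head_indices'])
def _example_parse_distances():
  # "dogs chase cats": "chase" is the root, both nouns attach to it, so the
  # tree distance between "dogs" and "cats" is 2 (dogs -> chase -> cats).
  obs = _ToyObservation(sentence=['dogs', 'chase', 'cats'], head_indices=['2', '0', '2'])
  return ParseDistanceTask.labels(obs)  # tensor([[0., 1., 2.], [1., 0., 1.], [2., 1., 0.]])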
#class CorruptedParseDistanceTask(Task):
# """Unfinished..."""
#
# def __init__(self, args):
# args['ignore_punct'] = False # Global setting to make sure the tree doesn't get messed up at test time
# self.target_corrupted_token_percent = args['probe']['misc']['corrupted_token_percent']
# #self.REPLACE_TOKEN = '<RPL>'
# self.dist = PTB_DEV_EMPIRICAL_DEPTH_DISTRIBUTION
# self.args = args
# self.rand_root_type_vocab = set()
# self.rand_type_vocab = {}
#
# def labels(self, observation):
# sentence_length = len(observation[0]) #All observation fields must be of same length
# #distances = torch.zeros((sentence_length, sentence_length))
# depths = torch.tensor([self.rand_type_vocab[x] for x in observation.sentence]).float()
# depth_differences = depths.repeat(sentence_length, 1) - depths.repeat(sentence_length, 1).t()
# positions = torch.tensor(list(range(sentence_length))).float()
# position_differences = (positions.repeat(sentence_length, 1) - positions.repeat(sentence_length, 1).t()) * .25
# #print(position_differences)
# #print(depth_differences)
# distances = torch.abs(depth_differences) + torch.abs(position_differences)
# #print(torch.abs(depth_differences))
# #print(torch.abs(position_differences))
# #print(distances)
# #print()
# #print()
# #print()
# return distances
#
# def _register_type(self, string):
# if string not in self.rand_type_vocab:
# ints = list(range(5))
# self.rand_type_vocab[string] = int(np.random.choice(ints))
# #self.rand_type_vocab[string] = np.random.random()
# return self.rand_type_vocab[string]
#
# def prepare(self, train_obs, dev_obs, test_obs):
# """Chooses the word types to be part-of-speech-corrupted in all datasets.
# """
# np.random.seed(self.args['seed'])
# root_type_counter = Counter()
# root_intmapping = {}
#
# type_counter = Counter()
# intmapping = {}
#
# for observation in itertools.chain(train_obs, dev_obs, test_obs):
# type_counter.update(observation.sentence)
# for string, head_index in zip(observation.sentence, observation.head_indices):
#
# if string not in intmapping:
# intmapping[string] = len(intmapping)
#
# if int(head_index) == 0: # Only consider the root of each sent
# root_type_counter.update([string])
# if string not in root_intmapping:
# root_intmapping[string] = len(root_intmapping)
#
# root_counts = [root_type_counter[string] for string in sorted(root_intmapping, key=lambda x: root_intmapping[x])]
# root_strings = [string for string in sorted(root_intmapping, key=lambda x: root_intmapping[x])]
# root_count_sum = sum(root_counts)
# root_probs = [x/root_count_sum for x in root_counts]
#
# corrupted_token_percent = 0
# while corrupted_token_percent < self.target_corrupted_token_percent - .00000001:
# remaining_strings = list(filter(lambda x: x, root_strings))
# string = np.random.choice(remaining_strings)
# prob = root_probs[root_intmapping[string]]
# root_strings[root_intmapping[string]] = None
# if string not in self.rand_root_type_vocab:
# self.rand_root_type_vocab.add(string)
# self._register_type(string)
# corrupted_token_percent += prob
#
# for string in intmapping:
# if string not in self.rand_type_vocab:
# self._register_type(string)
# print('CORRUPTED', self.rand_type_vocab)
# print('CORRUPTED', self.rand_root_type_vocab)
# print('CORRUPTED', corrupted_token_percent)
#
# def get_head_indices(observation):
# for index, string in enumerate(observation.sentence):
# pass
#class EdgeLabelTask(Task):
#
# @staticmethod
# def labels(observation):
# """Computes the distances between all pairs of words; returns them as a torch tensor.
#
# Args:
# observation: a single Observation class for a sentence:
# Returns:
# A torch tensor of shape (sentence_length, sentence_length) of distances
# in the parse tree as specified by the observation annotation.
# """
# sentence_length = len(observation[0]) #All observation fields must be of same length
# labels = torch.zeros((sentence_length, sentence_length))
# modified_head_indices = [int(x)-1 if x != '0' else 0 for x in observation.head_indices]
# for i, word_i in enumerate(observation.sentence):
# for j_prime, word_j in enumerate(observation.sentence[i:]):
# j = j_prime + i
# i_j_label = int(modified_head_indices[i] == j) #or modified_head_indices[j] == i)
# labels[i][j] = i_j_label
# #labels[j][i] = i_j_label
# return labels
class CorruptedEdgePositionTask(Task):
def __init__(self, args):
self.label_dict = {}
self.strings = set()
args['ignore_punct'] = True
self.args = args
self.target_corrupted_token_percent = args['probe']['misc']['corrupted_token_percent']
def _register_type(self, tup):
if tup not in self.label_dict:
#a = torch.tensor(np.random.choice(PTB_TRAIN_EMPIRICAL_DEP_SEQ_LEN_dists, p=PTB_TRAIN_EMPIRICAL_DEP_SEQ_LEN_probs))
#a = torch.tensor(int(np.random.choice([-1,0,1,2], p=[0.25,0.25,0.25,0.25])))
a = torch.tensor(int(np.random.choice([0,1,2], p=[1/3,1/3,1/3])))
self.label_dict[tup] = a
return self.label_dict[tup]
def prepare(self, train_obs, dev_obs, test_obs):
tuple_counter = Counter()
for observation in itertools.chain(train_obs, dev_obs, test_obs):
for word1 in observation.sentence:
tuple_counter.update([word1])
np.random.seed(self.args['seed'])
seen_tuples = set()
all_tuples = list(tuple_counter.keys())
np.random.shuffle(all_tuples)
tuple_count_sum = sum(tuple_counter.values())
corrupted_pair_percent = 0
index = 0
while corrupted_pair_percent < self.target_corrupted_token_percent - 0.00000001:
next_tuple = all_tuples[index]
if next_tuple not in seen_tuples:
seen_tuples.add(next_tuple)
tuple_probability_mass = tuple_counter[next_tuple] / tuple_count_sum
corrupted_pair_percent += tuple_probability_mass
self._register_type(next_tuple)
index += 1
#print('CORRUPTED', self.label_dict)
print('CORRUPTED', corrupted_pair_percent)
def labels(self, observation):
sentence_length = len(observation[0]) #All observation fields must be of same length
labels = torch.zeros(sentence_length)
modified_head_indices = torch.tensor([int(x)-1 if x != '0' else -1 for x in observation.head_indices])
root_index = observation.head_indices.index('0')
for i, word_i in enumerate(observation.sentence):
if word_i in self.label_dict:
#modified_head_indices[i] = max(min(i + self.label_dict[word_i], len(observation.sentence)-1),0)
#if self.label_dict[word_i] == -1:
# modified_head_indices[i] = root_index
if self.label_dict[word_i] == 0:
modified_head_indices[i] = i
elif self.label_dict[word_i] == 1:
modified_head_indices[i] = 0
elif self.label_dict[word_i] == 2:
modified_head_indices[i] = len(observation.sentence) - 1
else:
raise ValueError("Fix this")
return modified_head_indices
class CorruptedEdgeLabelTask(Task):
def __init__(self, args):
self.label_dict = {}
self.strings = set()
args['ignore_punct'] = True
self.args = args
self.target_corrupted_token_percent = args['probe']['misc']['corrupted_token_percent']
def _register_type(self, tup):
if tup not in self.label_dict:
ints = list(range(2))
#probs = [0.25, 0.75]
probs = [0.5,0.5]
#probs = [0,1]
label1 = int(np.random.choice(ints,p=probs))
label2 = int(np.random.choice(ints,p=probs))
self.label_dict[tup] = label1
self.label_dict[(tup[1], tup[0])] = label2
return self.label_dict[tup]
def prepare(self, train_obs, dev_obs, test_obs):
tuple_counter = Counter()
for observation in itertools.chain(train_obs, dev_obs, test_obs):
for word1 in observation.sentence:
for word2 in observation.sentence:
if (word1, word2) not in self.label_dict:
tuple_counter.update([(word1, word2), (word2, word1)])
np.random.seed(self.args['seed'])
seen_tuples = set()
all_tuples = list(tuple_counter.keys())
np.random.shuffle(all_tuples)
tuple_count_sum = sum(tuple_counter.values())
corrupted_pair_percent = 0
index = 0
while corrupted_pair_percent < self.target_corrupted_token_percent - 0.00000001:
next_tuple = all_tuples[index]
if next_tuple not in seen_tuples:
seen_tuples.add(next_tuple)
tuple_probability_mass = tuple_counter[next_tuple] / tuple_count_sum
corrupted_pair_percent += tuple_probability_mass
self._register_type(next_tuple)
index += 1
#print('CORRUPTED', self.label_dict)
print('CORRUPTED', corrupted_pair_percent)
def labels(self, observation):
sentence_length = len(observation[0]) #All observation fields must be of same length
labels = torch.zeros(sentence_length)
modified_head_indices = torch.tensor([int(x)-1 if x != '0' else -1 for x in observation.head_indices])
for i, word_i in enumerate(observation.sentence):
tups = [(x, word_i) for x in observation.sentence]
scores = [self.label_dict[tup] if tup in self.label_dict else -1 for tup in tups]
closest_score = sys.maxsize
closest = -1
for index, score in enumerate(scores):
if score == 1:
diff = abs(i - index)
if diff != 0 and diff < closest_score:
closest = index
closest_score = diff
if closest != -1:
modified_head_indices[i]= closest
return modified_head_indices
class BalancedBinaryTreeDistanceTask:
def __init__(self, args):
self.distance_dict = {}
args['ignore_punct'] = False # Global setting to make sure the tree doesn't get messed up at test time
def labels(self, observation):
sentence_length = len(observation[0]) #All observation fields must be of same length
if sentence_length in self.distance_dict:
return self.distance_dict[sentence_length]
distances = torch.zeros((sentence_length, sentence_length))
head_indices = BalancedBinaryTreeDistanceTask.get_head_indices(sentence_length)
for i in range(sentence_length):
for j in range(i,sentence_length):
i_j_distance = ParseDistanceTask.distance_between_pairs(None, i, j, head_indices)
distances[i][j] = i_j_distance
distances[j][i] = i_j_distance
self.distance_dict[sentence_length] = distances
return distances
@staticmethod
def get_head_indices(sentence_length):
head_indices = [-1 for x in range(sentence_length)]
root_index = int(sentence_length/2) # Even or odd, doesn't matter
BalancedBinaryTreeDistanceTask._assign(head_indices, 0, root_index - 1, root_index)
BalancedBinaryTreeDistanceTask._assign(head_indices, root_index + 1, sentence_length-1, root_index)
head_indices = [x+1 for x in head_indices]
return head_indices
@staticmethod
def _assign(array, start_index, end_index, parent_index):
if ((start_index < 0 or end_index > len(array) - 1) or
(end_index < 0 or start_index > len(array) - 1)):
return
# Base case -- single node
if start_index == end_index:
array[start_index] = parent_index
return
# Choose the child index
if (end_index - start_index) % 2 == 0: # Odd # of elts
child_index = int((end_index + start_index)/2)
else:
right_child_candidate = math.ceil((end_index + start_index)/2)
left_child_candidate = math.floor((end_index + start_index)/2)
if abs(right_child_candidate - parent_index) > abs(left_child_candidate - parent_index):
child_index = right_child_candidate
elif abs(left_child_candidate - parent_index) > abs(right_child_candidate - parent_index):
child_index = left_child_candidate
else:
raise ValueError("Something's going on with child indices you don't understand.")
# Assign child to parent
array[child_index] = parent_index
# Call new functions for newly made subdivisions
if child_index != start_index:
BalancedBinaryTreeDistanceTask._assign(array, start_index, child_index-1, child_index)
if child_index != end_index:
BalancedBinaryTreeDistanceTask._assign(array, child_index+1, end_index, child_index)
class ParseDepthTask(Task):
"""Maps observations to a depth in the parse tree for each word"""
@staticmethod
def labels(observation):
"""Computes the depth of each word; returns them as a torch tensor.
Args:
observation: a single Observation class for a sentence:
Returns:
A torch tensor of shape (sentence_length,) of depths
in the parse tree as specified by the observation annotation.
"""
sentence_length = len(observation[0]) #All observation fields must be of same length
depths = torch.zeros(sentence_length)
for i in range(sentence_length):
depths[i] = ParseDepthTask.get_ordering_index(observation, i)
return depths
@staticmethod
def get_ordering_index(observation, i, head_indices=None):
'''Computes tree depth for a single word in a sentence
Args:
observation: an Observation namedtuple, with a head_indices field.
          or None, if head_indices != None
i: the word in the sentence to compute the depth of
head_indices: the head indices (according to a dependency parse) of all
words, or None, if observation != None.
Returns:
The integer depth in the tree of word i
'''
if observation:
head_indices = []
number_of_underscores = 0
for elt in observation.head_indices:
if elt == '_':
head_indices.append(0)
number_of_underscores += 1
else:
head_indices.append(int(elt) + number_of_underscores)
length = 0
i_head = i+1
while True:
i_head = head_indices[i_head - 1]
if i_head != 0:
length += 1
else:
return length
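# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): ParseDepthTask on the same toy
# "dogs chase cats" observation defined after ParseDistanceTask above; the root
# ("chase") gets depth 0 and both nouns get depth 1.
def _example_parse_depths():
  obs = _ToyObservation(sentence=['dogs', 'chase', 'cats'], head_indices=['2', '0', '2'])
  return ParseDepthTask.labels(obs)  # tensor([1., 0., 1.])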
class RandomParseDepthTask(Task):
"""Maps observations to a random sample from depths in the parse tree"""
def __init__(self, args):
self.vocab = {}
self.args = args
self.dist = PTB_DEV_EMPIRICAL_DEPTH_DISTRIBUTION
args['ignore_punct'] = True
np.random.seed(args['seed'])
def get_label(self):
ints = list(range(15))
return int(np.random.choice(ints, p=self.dist))
def _register_observation(self, observation):
for string in observation.sentence:
if string not in self.vocab:
self.vocab[string] = self.get_label()
def labels(self, observation):
self._register_observation(observation)
sentence_length = len(observation[0])
labels = torch.zeros(sentence_length)
for i in range(sentence_length):
labels[i] = self.vocab[observation.sentence[i]]
return labels
class CorruptedParseDepthTask(ParseDepthTask):
def __init__(self, args):
self.distance_dict = {}
args['ignore_punct'] = True # Global setting to make sure the tree doesn't get messed up at test time
self.args = args
self.rand_type_vocab = {}
self.target_corrupted_token_percent = args['probe']['misc']['corrupted_token_percent']
def labels(self, observation):
sentence_length = len(observation[0]) #All observation fields must be of same length
distances = torch.zeros((sentence_length, sentence_length))
def _register_type(self, string):
if string not in self.rand_type_vocab:
#self.rand_type_vocab[string] = int(np.random.choice(ints, p=self.dist))
ints = list(range(5))
self.rand_type_vocab[string] = int(np.random.choice(ints))
return self.rand_type_vocab[string]
def prepare(self, train_obs, dev_obs, test_obs):
"""Chooses the word types to be part-of-speech-corrupted in all datasets.
"""
np.random.seed(self.args['seed'])
type_counter = Counter()
intmapping = {}
for observation in itertools.chain(train_obs, dev_obs, test_obs):
type_counter.update(observation.sentence)
for string in observation.sentence:
if string not in intmapping:
intmapping[string] = len(intmapping)
counts = [type_counter[string] for string in sorted(intmapping, key=lambda x: intmapping[x])]
strings = [string for string in sorted(intmapping, key=lambda x: intmapping[x])]
count_sum = sum(counts)
probs = [x/count_sum for x in counts]
corrupted_token_percent = 0
while corrupted_token_percent < self.target_corrupted_token_percent - .00000001:
remaining_strings = list(filter(lambda x: x, strings))
string = np.random.choice(remaining_strings)
prob = probs[intmapping[string]]
strings[intmapping[string]] = None
if string not in self.rand_type_vocab:
self._register_type(string)
corrupted_token_percent += prob
#print('CORRUPTED', self.rand_type_vocab)
print('CORRUPTED', corrupted_token_percent)
def labels(self, observation):
labels = super(CorruptedParseDepthTask, self).labels(observation)
for index, string in enumerate(observation.sentence):
if string in self.rand_type_vocab:
labels[index] = self.rand_type_vocab[string]
return labels
class RandomLinearParseDepthTask(Task):
"""Maps observations to a random sample from depths in the parse tree
plus their linear position in the sequence."""
def __init__(self, args):
self.vocab = {}
self.args = args
self.dist = PTB_DEV_EMPIRICAL_DEPTH_DISTRIBUTION
args['ignore_punct'] = True
def get_label(self):
ints = list(range(15))
return int(np.random.choice(ints, p=self.dist))
def _register_observation(self, observation):
for string in observation.sentence:
if string not in self.vocab:
self.vocab[string] = self.get_label()
def labels(self, observation):
self._register_observation(observation)
sentence_length = len(observation[0])
labels = torch.zeros(sentence_length)
for i in range(sentence_length):
labels[i] = self.vocab[observation.sentence[i]] + i
return labels
class PartOfSpeechLabelTask(Task):
"""
Computes the POS of the word in the sentence.
  Requires the xpos_sentence field.
"""
def __init__(self, args):
self.vocab = {}
self.args = args
def _register_observation(self, observation):
for string in observation.xpos_sentence:
if string not in self.vocab:
self.vocab[string] = len(self.vocab)
self.args['probe']['label_space_size'] = len(self.vocab)
self.args['probe']['label_space'] = self.vocab
def labels(self, observation):
self._register_observation(observation)
sentence_length = len(observation[0])
labels = torch.zeros(sentence_length)
for i in range(sentence_length):
labels[i] = self.vocab[observation.xpos_sentence[i]]
return labels
class CorruptedPartOfSpeechLabelTask(PartOfSpeechLabelTask):
def __init__(self, args):
super(CorruptedPartOfSpeechLabelTask, self).__init__(args)
self.rand_type_vocab = {}
self.dist = PTB_TRAIN_EMPIRICAL_POS_DISTRIBUTION
self.target_corrupted_token_percent = args['probe']['misc']['corrupted_token_percent']
np.random.seed(args['seed'])
def prepare(self, train_obs, dev_obs, test_obs):
"""Chooses the word types to be part-of-speech-corrupted in all datasets.
"""
type_counter = Counter()
intmapping = {}
for observation in itertools.chain(train_obs, dev_obs, test_obs):
type_counter.update(observation.sentence)
for string in observation.sentence:
if string not in intmapping:
intmapping[string] = len(intmapping)
counts = [type_counter[string] for string in sorted(intmapping, key=lambda x: intmapping[x])]
strings = [string for string in sorted(intmapping, key=lambda x: intmapping[x])]
count_sum = sum(counts)
probs = [x/count_sum for x in counts]
np.random.shuffle(strings)
index = 0
corrupted_token_percent = 0
while corrupted_token_percent < self.target_corrupted_token_percent - .00000001:
#remaining_strings = list(filter(lambda x: x, strings))
#string = np.random.choice(remaining_strings)
string = strings[index]
index += 1
prob = probs[intmapping[string]]
#strings[intmapping[string]] = None
if string not in self.rand_type_vocab:
self._register_type(string)
corrupted_token_percent += prob
#print('CORRUPTED', self.rand_type_vocab)
print('CORRUPTED', corrupted_token_percent)
def _register_type(self, string):
if string not in self.rand_type_vocab:
ints = list(range(45))
self.rand_type_vocab[string] = int(np.random.choice(ints, p=self.dist))
return self.rand_type_vocab[string]
def labels(self, observation):
labels = super(CorruptedPartOfSpeechLabelTask, self).labels(observation)
for index, string in enumerate(observation.sentence):
if string in self.rand_type_vocab:
labels[index] = self.rand_type_vocab[string]
#if random.random() < 0.2:
# labels[index] = self._register_type(string)
self.args['probe']['label_space_size'] = 45
return labels
class RandomPrefixLabelTask(Task):
"""
  Assigns a random label to each sentence prefix, conditioned on the last
  (rand_label_condition_length + 1) words; labels are drawn from the empirical PTB POS distribution.
"""
def __init__(self, args):
self.vocab = {}
self.args = args
self.condition_length = args['probe']['misc']['rand_label_condition_length']
self.dist = PTB_TRAIN_EMPIRICAL_POS_DISTRIBUTION
def get_label(self):
ints = list(range(45))
return int(np.random.choice(ints, p=self.dist))
def _register_observation(self, observation):
prefix = ()
for string in observation.sentence:
prefix = prefix + (string,)
if prefix[-1 - self.condition_length:] not in self.vocab:
self.vocab[prefix[-1- self.condition_length:]] = self.get_label()
self.args['probe']['label_space_size'] = 45
def labels(self, observation):
self._register_observation(observation)
sentence_length = len(observation[0])
labels = torch.zeros(sentence_length)
prefix = ()
for i in range(sentence_length):
prefix = prefix + (observation.sentence[i],)
labels[i] = self.vocab[prefix[-1- self.condition_length:]]
return labels
class RandomWordLabelTask(Task):
"""
  Assigns a random label to each word type, drawn from the empirical PTB POS
  distribution and independent of the word's actual part of speech.
"""
def __init__(self, args):
self.vocab = {}
self.args = args
self.dist = PTB_TRAIN_EMPIRICAL_POS_DISTRIBUTION
def get_label(self):
ints = list(range(45))
return int(np.random.choice(ints, p=self.dist))
def _register_observation(self, observation):
for string in observation.sentence:
if string not in self.vocab:
self.vocab[string] = self.get_label()
self.args['probe']['label_space_size'] = 45
#self.args['probe']['label_space'] = self.vocab
def labels(self, observation):
self._register_observation(observation)
sentence_length = len(observation[0])
labels = torch.zeros(sentence_length)
for i in range(sentence_length):
labels[i] = self.vocab[observation.sentence[i]]
return labels
| [
"torch.zeros"
] | 1.0.0 | lunayach/control-tasks | f333b03d7dc31f89ae56c82e22a9ea588547590f |
1.6 | import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from ..utils import common_functions as c_f
# modified from https://github.com/allenai/allennlp
def is_distributed():
return torch.distributed.is_available() and torch.distributed.is_initialized()
# modified from https://github.com/JohnGiorgi/DeCLUTR
def all_gather(embeddings, labels):
labels = labels.to(embeddings.device)
# If we are not using distributed training, this is a no-op.
if not is_distributed():
return embeddings, labels
world_size = torch.distributed.get_world_size()
rank = torch.distributed.get_rank()
# Gather the embeddings on all replicas
embeddings_list = [torch.ones_like(embeddings) for _ in range(world_size)]
labels_list = [torch.ones_like(labels) for _ in range(world_size)]
torch.distributed.all_gather(embeddings_list, embeddings.contiguous())
torch.distributed.all_gather(labels_list, labels.contiguous())
    # The gathered copies of the current replica's embeddings have no gradients, so we overwrite
# them with the embeddings generated on this replica, which DO have gradients.
embeddings_list[rank] = embeddings
labels_list[rank] = labels
# Finally, we concatenate the embeddings
embeddings = torch.cat(embeddings_list)
labels = torch.cat(labels_list)
return embeddings, labels
def all_gather_embeddings_labels(embeddings, labels):
if c_f.is_list_or_tuple(embeddings):
assert c_f.is_list_or_tuple(labels)
all_embeddings, all_labels = [], []
for i in range(len(embeddings)):
E, L = all_gather(embeddings[i], labels[i])
all_embeddings.append(E)
all_labels.append(L)
embeddings = torch.cat(all_embeddings, dim=0)
labels = torch.cat(all_labels, dim=0)
else:
embeddings, labels = all_gather(embeddings, labels)
return embeddings, labels
class DistributedLossWrapper(torch.nn.Module):
def __init__(self, loss, **kwargs):
super().__init__()
has_parameters = len([p for p in loss.parameters()]) > 0
self.loss = DDP(loss, **kwargs) if has_parameters else loss
def forward(self, embeddings, labels, *args, **kwargs):
embeddings, labels = all_gather_embeddings_labels(embeddings, labels)
return self.loss(embeddings, labels, *args, **kwargs)
class DistributedMinerWrapper(torch.nn.Module):
def __init__(self, miner):
super().__init__()
self.miner = miner
def forward(self, embeddings, labels, ref_emb=None, ref_labels=None):
embeddings, labels = all_gather_embeddings_labels(embeddings, labels)
if ref_emb is not None:
ref_emb, ref_labels = all_gather_embeddings_labels(ref_emb, ref_labels)
return self.miner(embeddings, labels, ref_emb, ref_labels)
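# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): one way the wrappers above
# could be used once torch.distributed.init_process_group has been called by the
# launcher. It assumes `loss` and `miner` follow the usual pytorch-metric-learning
# calling conventions (loss(embeddings, labels, indices_tuple) and
# miner(embeddings, labels, ref_emb, ref_labels)); the helper itself is hypothetical.
def _example_distributed_setup(trunk: torch.nn.Module, loss: torch.nn.Module, miner: torch.nn.Module):
    loss_fn = DistributedLossWrapper(loss=loss)
    miner_fn = DistributedMinerWrapper(miner=miner)
    def training_step(data: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
        embeddings = trunk(data)                       # local replica's embeddings (keep gradients)
        indices_tuple = miner_fn(embeddings, labels)   # mined over the all-gathered batch
        return loss_fn(embeddings, labels, indices_tuple)
    return training_step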
| [
"torch.distributed.get_world_size",
"torch.cat",
"torch.distributed.is_available",
"torch.nn.parallel.DistributedDataParallel",
"torch.distributed.is_initialized",
"torch.ones_like",
"torch.distributed.get_rank"
] | 1.6.0 | mlopezantequera/pytorch-metric-learning | 1fb343124d15fd2f63d535df26aa1463daf4ceee |
1.6 | import torch
from ..distances import CosineSimilarity
from ..reducers import DivisorReducer
from ..utils import common_functions as c_f
from ..utils import loss_and_miner_utils as lmu
from .base_metric_loss_function import BaseMetricLossFunction
from .mixins import WeightRegularizerMixin
# adapted from
# https://github.com/tjddus9597/Proxy-Anchor-CVPR2020/blob/master/code/losses.py
# https://github.com/geonm/proxy-anchor-loss/blob/master/pytorch-proxy-anchor.py
# suggested in this issue: https://github.com/KevinMusgrave/pytorch-metric-learning/issues/32
class ProxyAnchorLoss(WeightRegularizerMixin, BaseMetricLossFunction):
def __init__(self, num_classes, embedding_size, margin=0.1, alpha=32, **kwargs):
super().__init__(**kwargs)
self.proxies = torch.nn.Parameter(torch.Tensor(num_classes, embedding_size))
self.weight_init_func(self.proxies)
self.num_classes = num_classes
self.margin = margin
self.alpha = alpha
self.add_to_recordable_attributes(
list_of_names=["num_classes", "alpha", "margin"], is_stat=False
)
def cast_types(self, dtype, device):
self.proxies.data = self.proxies.data.to(device).type(dtype)
def compute_loss(self, embeddings, labels, indices_tuple):
dtype, device = embeddings.dtype, embeddings.device
self.cast_types(dtype, device)
miner_weights = lmu.convert_to_weights(
indices_tuple, labels, dtype=dtype
).unsqueeze(1)
miner_weights = miner_weights - 1
cos = self.distance(embeddings, self.proxies)
pos_mask = torch.nn.functional.one_hot(labels, self.num_classes)
neg_mask = 1 - pos_mask
with_pos_proxies = torch.where(torch.sum(pos_mask, dim=0) != 0)[0]
pos_exp = self.distance.margin(cos, self.margin)
neg_exp = self.distance.margin(-self.margin, cos)
pos_term = lmu.logsumexp(
(self.alpha * pos_exp) + miner_weights,
keep_mask=pos_mask.bool(),
add_one=True,
dim=0,
)
neg_term = lmu.logsumexp(
(self.alpha * neg_exp) + miner_weights,
keep_mask=neg_mask.bool(),
add_one=True,
dim=0,
)
loss_indices = c_f.torch_arange_from_size(self.proxies)
loss_dict = {
"pos_loss": {
"losses": pos_term.squeeze(0),
"indices": loss_indices,
"reduction_type": "element",
"divisor_summands": {"num_pos_proxies": len(with_pos_proxies)},
},
"neg_loss": {
"losses": neg_term.squeeze(0),
"indices": loss_indices,
"reduction_type": "element",
"divisor_summands": {"num_classes": self.num_classes},
},
}
self.add_weight_regularization_to_loss_dict(loss_dict, self.proxies)
return loss_dict
def get_default_reducer(self):
return DivisorReducer()
def get_default_distance(self):
return CosineSimilarity()
def get_default_weight_init_func(self):
return c_f.TorchInitWrapper(torch.nn.init.kaiming_normal_, mode="fan_out")
def _sub_loss_names(self):
return ["pos_loss", "neg_loss"]
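# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): instantiating the loss and
# running it on random embeddings. It assumes the (embeddings, labels) calling
# convention inherited from BaseMetricLossFunction; in real training the loss's
# `proxies` parameter would also be handed to the optimizer.
def _example_proxy_anchor() -> torch.Tensor:
    loss_fn = ProxyAnchorLoss(num_classes=10, embedding_size=64, margin=0.1, alpha=32)
    embeddings = torch.randn(32, 64)       # a batch of 32 embeddings
    labels = torch.randint(0, 10, (32,))   # their integer class labels
    return loss_fn(embeddings, labels)     # scalar loss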
| [
"torch.nn.functional.one_hot",
"torch.Tensor",
"torch.sum"
] | 1.6.0 | mlopezantequera/pytorch-metric-learning | 1fb343124d15fd2f63d535df26aa1463daf4ceee |
1.7 | import numpy as np
import torch
import torch.optim as optim
from torch import nn as nn
from collections import OrderedDict
import lifelong_rl.torch.pytorch_util as ptu
from lifelong_rl.torch.distributions import TanhNormal
from lifelong_rl.util.eval_util import create_stats_ordered_dict
from lifelong_rl.core.rl_algorithms.torch_rl_algorithm import TorchTrainer
from lifelong_rl.torch.pytorch_util import np_to_pytorch_batch
# from torch_batch_svd import svd
ACTION_MIN = -1.0
ACTION_MAX = 1.0
class SACTrainer(TorchTrainer):
"""
Soft Actor Critic (Haarnoja et al. 2018). (Offline training ver.)
    Continuous-action maximum-entropy actor-critic algorithm with a parameterized actor.
"""
def __init__(
self,
env, # Associated environment for learning
policy, # Associated policy (should be TanhGaussian)
qfs, # Q functions
target_qfs, # Slow updater to Q functions
discount=0.99, # Discount factor
reward_scale=1.0, # Scaling of rewards to modulate entropy bonus
use_automatic_entropy_tuning=True, # Whether to use the entropy-constrained variant
target_entropy=None, # Target entropy for entropy-constraint variant
policy_lr=3e-4, # Learning rate of policy and entropy weight
qf_lr=3e-4, # Learning rate of Q functions
optimizer_class=optim.Adam, # Class of optimizer for all networks
soft_target_tau=5e-3, # Rate of update of target networks
target_update_period=1, # How often to update target networks
max_q_backup=False,
deterministic_backup=False,
policy_eval_start=0,
eta=-1.0,
num_qs=10,
replay_buffer=None,
):
super().__init__()
self.env = env
self.policy = policy
self.qfs = qfs
self.target_qfs = target_qfs
self.num_qs = num_qs
self.discount = discount
self.reward_scale = reward_scale
self.soft_target_tau = soft_target_tau
self.target_update_period = target_update_period
self.max_q_backup = max_q_backup
self.deterministic_backup = deterministic_backup
self.eta = eta
self.replay_buffer = replay_buffer
self.use_automatic_entropy_tuning = use_automatic_entropy_tuning
if self.use_automatic_entropy_tuning:
if target_entropy:
self.target_entropy = target_entropy
else:
# Heuristic value: dimension of action space
self.target_entropy = -np.prod(
self.env.action_space.shape).item()
self.log_alpha = ptu.zeros(1, requires_grad=True)
self.alpha_optimizer = optimizer_class(
[self.log_alpha],
lr=policy_lr,
)
self.qf_criterion = nn.MSELoss(reduction='none')
self.policy_optimizer = optimizer_class(
self.policy.parameters(),
lr=policy_lr,
)
self.qfs_optimizer = optimizer_class(
self.qfs.parameters(),
lr=qf_lr,
)
self.eval_statistics = OrderedDict()
self._need_to_update_eval_statistics = True
self.policy_eval_start = policy_eval_start
def _get_tensor_values(self, obs, actions, network=None):
action_shape = actions.shape[0]
obs_shape = obs.shape[0]
num_repeat = int(action_shape / obs_shape)
obs_temp = obs.unsqueeze(1).repeat(1, num_repeat,
1).view(obs.shape[0] * num_repeat,
obs.shape[1])
preds = network(obs_temp, actions)
preds = preds.view(-1, obs.shape[0], num_repeat, 1)
return preds
def _get_policy_actions(self, obs, num_actions, network=None):
obs_temp = obs.unsqueeze(1).repeat(1, num_actions,
1).view(obs.shape[0] * num_actions,
obs.shape[1])
new_obs_actions, _, _, new_obs_log_pi, *_ = network(
obs_temp,
reparameterize=True,
return_log_prob=True,
)
return new_obs_actions.detach(), new_obs_log_pi.view(
obs.shape[0], num_actions, 1).detach()
def train_from_torch(self, batch, indices):
        obs = batch['observations']
next_obs = batch['next_observations']
actions = batch['actions']
rewards = batch['rewards']
terminals = batch['terminals']
if self.eta > 0:
actions.requires_grad_(True)
"""
Policy and Alpha Loss
"""
new_obs_actions, policy_mean, policy_log_std, log_pi, *_ = self.policy(
obs,
reparameterize=True,
return_log_prob=True,
)
if self.use_automatic_entropy_tuning:
alpha_loss = -(self.log_alpha *
(log_pi + self.target_entropy).detach()).mean()
alpha = self.log_alpha.exp()
else:
alpha_loss = 0
alpha = 1
q_new_actions = self.qfs.sample(obs, new_obs_actions)
policy_loss = (alpha * log_pi - q_new_actions).mean()
if self._num_train_steps < self.policy_eval_start:
"""
            For the initial few epochs, try doing behavioral cloning, if needed.
            Conventionally, there's not much difference in performance between
            having 20k gradient steps here and not having them.
"""
policy_log_prob = self.policy.get_log_probs(obs.detach(), actions)
policy_loss = (alpha * log_pi - policy_log_prob).mean()
"""
QF Loss
"""
# (num_qs, batch_size, output_size)
qs_pred = self.qfs(obs, actions)
new_next_actions, _, _, new_log_pi, *_ = self.policy(
next_obs,
reparameterize=False,
return_log_prob=True,
)
if not self.max_q_backup:
target_q_values = self.target_qfs.sample(next_obs, new_next_actions)
if not self.deterministic_backup:
target_q_values -= alpha * new_log_pi
else:
# if self.max_q_backup
next_actions_temp, _ = self._get_policy_actions(
next_obs, num_actions=10, network=self.policy)
target_q_values = self._get_tensor_values(
next_obs, next_actions_temp,
network=self.qfs).max(2)[0].min(0)[0]
future_values = (1. - terminals) * self.discount * target_q_values
q_target = self.reward_scale * rewards + future_values
qfs_loss = self.qf_criterion(qs_pred, q_target.detach().unsqueeze(0))
qfs_loss = qfs_loss.mean(dim=(1, 2)).sum()
qfs_loss_total = qfs_loss
if self.eta > 0:
obs_tile = obs.unsqueeze(0).repeat(self.num_qs, 1, 1)
actions_tile = actions.unsqueeze(0).repeat(self.num_qs, 1, 1).requires_grad_(True)
qs_preds_tile = self.qfs(obs_tile, actions_tile)
qs_pred_grads, = torch.autograd.grad(qs_preds_tile.sum(), actions_tile, retain_graph=True, create_graph=True)
qs_pred_grads = qs_pred_grads / (torch.norm(qs_pred_grads, p=2, dim=2).unsqueeze(-1) + 1e-10)
qs_pred_grads = qs_pred_grads.transpose(0, 1)
qs_pred_grads = torch.einsum('bik,bjk->bij', qs_pred_grads, qs_pred_grads)
masks = torch.eye(self.num_qs, device=ptu.device).unsqueeze(dim=0).repeat(qs_pred_grads.size(0), 1, 1)
qs_pred_grads = (1 - masks) * qs_pred_grads
grad_loss = torch.mean(torch.sum(qs_pred_grads, dim=(1, 2))) / (self.num_qs - 1)
qfs_loss_total += self.eta * grad_loss
if self.use_automatic_entropy_tuning and not self.deterministic_backup:
self.alpha_optimizer.zero_grad()
alpha_loss.backward()
self.alpha_optimizer.step()
self.policy_optimizer.zero_grad()
policy_loss.backward()
self.policy_optimizer.step()
self.qfs_optimizer.zero_grad()
qfs_loss_total.backward()
self.qfs_optimizer.step()
self.try_update_target_networks()
"""
Save some statistics for eval
"""
if self._need_to_update_eval_statistics:
self._need_to_update_eval_statistics = False
policy_loss = ptu.get_numpy(log_pi - q_new_actions).mean()
policy_avg_std = ptu.get_numpy(torch.exp(policy_log_std)).mean()
self.eval_statistics['QFs Loss'] = np.mean(
ptu.get_numpy(qfs_loss)) / self.num_qs
if self.eta > 0:
self.eval_statistics['Q Grad Loss'] = np.mean(
ptu.get_numpy(grad_loss))
self.eval_statistics['Policy Loss'] = np.mean(policy_loss)
self.eval_statistics.update(
create_stats_ordered_dict(
'Qs Predictions',
ptu.get_numpy(qs_pred),
))
self.eval_statistics.update(
create_stats_ordered_dict(
'Qs Targets',
ptu.get_numpy(q_target),
))
self.eval_statistics.update(
create_stats_ordered_dict(
'Log Pis',
ptu.get_numpy(log_pi),
))
self.eval_statistics.update(
create_stats_ordered_dict(
'Policy mu',
ptu.get_numpy(policy_mean),
))
self.eval_statistics.update(
create_stats_ordered_dict(
'Policy log std',
ptu.get_numpy(policy_log_std),
))
self.eval_statistics['Policy std'] = np.mean(policy_avg_std)
if self.use_automatic_entropy_tuning:
self.eval_statistics['Alpha'] = alpha.item()
self.eval_statistics['Alpha Loss'] = alpha_loss.item()
def try_update_target_networks(self):
if self._num_train_steps % self.target_update_period == 0:
self.update_target_networks()
def update_target_networks(self):
ptu.soft_update_from_to(self.qfs, self.target_qfs,
self.soft_target_tau)
def get_diagnostics(self):
return self.eval_statistics
def end_epoch(self, epoch):
self._need_to_update_eval_statistics = True
@property
def networks(self):
base_list = [
self.policy,
self.qfs,
self.target_qfs,
]
return base_list
def get_snapshot(self):
return dict(
policy=self.policy,
qfs=self.qfs,
            target_qfs=self.target_qfs,
log_alpha=self.log_alpha,
policy_optim=self.policy_optimizer,
qfs_optim=self.qfs_optimizer,
alpha_optim=self.alpha_optimizer,
)
| [
"torch.nn.MSELoss",
"torch.einsum",
"torch.norm",
"torch.eye",
"torch.exp",
"torch.sum"
] | 1.7.1 | snu-mllab/EDAC | c21d8aa354d13dbe884fd2fb809fe9a85c65e6c9 |
1.0 | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Hubert model. """
import math
import unittest
import pytest
from transformers import SEWDConfig, is_torch_available
from transformers.testing_utils import require_soundfile, require_torch, slow, torch_device
from ..test_configuration_common import ConfigTester
from ..test_modeling_common import (
ModelTesterMixin,
_config_zero_init,
floats_tensor,
ids_tensor,
random_attention_mask,
)
if is_torch_available():
import torch
from transformers import (
SEWDForCTC,
SEWDForSequenceClassification,
SEWDModel,
Wav2Vec2FeatureExtractor,
Wav2Vec2Processor,
)
from transformers.models.hubert.modeling_hubert import _compute_mask_indices
class SEWDModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=1024, # speech is longer
is_training=False,
hidden_size=32,
feat_extract_norm="group",
feat_extract_dropout=0.0,
feat_extract_activation="gelu",
conv_dim=(64, 32, 32),
conv_stride=(5, 2, 1),
conv_kernel=(10, 3, 1),
conv_bias=False,
num_conv_pos_embeddings=31,
num_conv_pos_embedding_groups=2,
squeeze_factor=2,
max_position_embeddings=512,
position_buckets=256,
share_att_key=True,
relative_attention=True,
position_biased_input=False,
pos_att_type=("p2c", "c2p"),
norm_rel_ebd="layer_norm",
num_hidden_layers=4,
num_attention_heads=2,
hidden_dropout=0.1,
intermediate_size=20,
layer_norm_eps=1e-5,
hidden_act="gelu",
initializer_range=0.02,
vocab_size=32,
do_stable_layer_norm=False,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.hidden_size = hidden_size
self.feat_extract_norm = feat_extract_norm
self.feat_extract_dropout = feat_extract_dropout
self.feat_extract_activation = feat_extract_activation
self.conv_dim = conv_dim
self.conv_stride = conv_stride
self.conv_kernel = conv_kernel
self.conv_bias = conv_bias
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.squeeze_factor = squeeze_factor
self.max_position_embeddings = max_position_embeddings
self.position_buckets = position_buckets
self.share_att_key = share_att_key
self.relative_attention = relative_attention
self.position_biased_input = position_biased_input
self.pos_att_type = pos_att_type
self.norm_rel_ebd = norm_rel_ebd
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_dropout = hidden_dropout
self.intermediate_size = intermediate_size
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.vocab_size = vocab_size
self.do_stable_layer_norm = do_stable_layer_norm
self.scope = scope
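        # Feature-extractor output length: each conv layer maps len -> (len - (kernel - 1)) / stride,
        # rounded up once at the end; the encoder then downsamples time by `squeeze_factor`.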
output_seq_length = self.seq_length
for kernel, stride in zip(self.conv_kernel, self.conv_stride):
output_seq_length = (output_seq_length - (kernel - 1)) / stride
self.output_seq_length = int(math.ceil(output_seq_length))
self.encoder_seq_length = self.output_seq_length // self.squeeze_factor
def prepare_config_and_inputs(self):
input_values = floats_tensor([self.batch_size, self.seq_length], self.vocab_size)
attention_mask = random_attention_mask([self.batch_size, self.seq_length])
config = self.get_config()
return config, input_values, attention_mask
def get_config(self):
return SEWDConfig(
hidden_size=self.hidden_size,
feat_extract_norm=self.feat_extract_norm,
feat_extract_dropout=self.feat_extract_dropout,
feat_extract_activation=self.feat_extract_activation,
conv_dim=self.conv_dim,
conv_stride=self.conv_stride,
conv_kernel=self.conv_kernel,
conv_bias=self.conv_bias,
num_conv_pos_embeddings=self.num_conv_pos_embeddings,
num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups,
squeeze_factor=self.squeeze_factor,
max_position_embeddings=self.max_position_embeddings,
position_buckets=self.position_buckets,
share_att_key=self.share_att_key,
relative_attention=self.relative_attention,
position_biased_input=self.position_biased_input,
pos_att_type=self.pos_att_type,
norm_rel_ebd=self.norm_rel_ebd,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
hidden_dropout=self.hidden_dropout,
intermediate_size=self.intermediate_size,
layer_norm_eps=self.layer_norm_eps,
hidden_act=self.hidden_act,
initializer_range=self.initializer_range,
vocab_size=self.vocab_size,
)
def create_and_check_model(self, config, input_values, attention_mask):
model = SEWDModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_values, attention_mask=attention_mask)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size)
)
def create_and_check_batch_inference(self, config, input_values, *args):
# test does not pass for models making use of `group_norm`
# check: https://github.com/pytorch/fairseq/issues/3227
model = SEWDModel(config=config)
model.to(torch_device)
model.eval()
input_values = input_values[:3]
attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0.0
batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state
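        # Running each (unpadded) sample on its own should match its slice of the
        # batched forward pass up to a small numerical tolerance.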
for i in range(input_values.shape[0]):
input_slice = input_values[i : i + 1, : input_lengths[i]]
output = model(input_slice).last_hidden_state
batch_output = batch_outputs[i : i + 1, : output.shape[1]]
self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3))
def check_ctc_loss(self, config, input_values, *args):
model = SEWDForCTC(config=config)
model.to(torch_device)
# make sure that dropout is disabled
model.eval()
input_values = input_values[:3]
attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size)
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0
model.config.ctc_loss_reduction = "sum"
sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
model.config.ctc_loss_reduction = "mean"
mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
self.parent.assertTrue(isinstance(sum_loss, float))
self.parent.assertTrue(isinstance(mean_loss, float))
def check_ctc_training(self, config, input_values, *args):
config.ctc_zero_infinity = True
model = SEWDForCTC(config=config)
model.to(torch_device)
model.train()
# freeze feature encoder
model.freeze_feature_encoder()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size)
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
if max_length_labels[i] < labels.shape[-1]:
                # it's important that we make sure that target lengths are at least
                # one shorter than logit lengths to prevent -inf
labels[i, max_length_labels[i] - 1 :] = -100
loss = model(input_values, labels=labels).loss
self.parent.assertFalse(torch.isinf(loss).item())
loss.backward()
def check_seq_classifier_loss(self, config, input_values, *args):
model = SEWDForSequenceClassification(config=config)
model.to(torch_device)
# make sure that dropout is disabled
model.eval()
input_values = input_values[:3]
attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0
masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
unmasked_loss = model(input_values, labels=labels).loss.item()
self.parent.assertTrue(isinstance(masked_loss, float))
self.parent.assertTrue(isinstance(unmasked_loss, float))
self.parent.assertTrue(masked_loss != unmasked_loss)
def check_seq_classifier_training(self, config, input_values, *args):
config.ctc_zero_infinity = True
model = SEWDForSequenceClassification(config=config)
model.to(torch_device)
model.train()
# freeze everything but the classification head
model.freeze_base_model()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
loss = model(input_values, labels=labels).loss
self.parent.assertFalse(torch.isinf(loss).item())
loss.backward()
def check_labels_out_of_vocab(self, config, input_values, *args):
model = SEWDForCTC(config)
model.to(torch_device)
model.train()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100)
with pytest.raises(ValueError):
model(input_values, labels=labels)
def prepare_config_and_inputs_for_common(self):
config, input_values, attention_mask = self.prepare_config_and_inputs()
inputs_dict = {"input_values": input_values, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class SEWDModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (SEWDForCTC, SEWDModel, SEWDForSequenceClassification) if is_torch_available() else ()
test_pruning = False
test_headmasking = False
test_torchscript = False
def setUp(self):
self.model_tester = SEWDModelTester(self)
self.config_tester = ConfigTester(self, config_class=SEWDConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_ctc_loss_inference(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_ctc_loss(*config_and_inputs)
def test_ctc_train(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_ctc_training(*config_and_inputs)
def test_labels_out_of_vocab(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_labels_out_of_vocab(*config_and_inputs)
    # SEW-D has no inputs_embeds
def test_inputs_embeds(self):
pass
# `input_ids` is renamed to `input_values`
def test_forward_signature(self):
pass
# SEW cannot resize token embeddings
# since it has no tokens embeddings
def test_resize_tokens_embeddings(self):
pass
# SEW has no inputs_embeds
# and thus the `get_input_embeddings` fn
# is not implemented
def test_model_common_attributes(self):
pass
def test_retain_grad_hidden_states_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
# set layer drop to 0
model.config.layerdrop = 0.0
input_values = inputs_dict["input_values"]
input_lengths = torch.tensor(
[input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device
)
output_lengths = model._get_feat_extract_output_lengths(input_lengths)
labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size)
inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"])
inputs_dict["labels"] = labels
outputs = model(**inputs_dict)
output = outputs[0]
# Encoder-/Decoder-only models
hidden_states = outputs.hidden_states[0]
attentions = outputs.attentions[0]
hidden_states.retain_grad()
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(hidden_states.grad)
self.assertIsNotNone(attentions.grad)
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
uniform_init_parms = [
"conv.weight",
"masked_spec_embed",
"quantizer.weight_proj.weight",
]
if param.requires_grad:
if any([x in name for x in uniform_init_parms]):
self.assertTrue(
-1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0,
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
else:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
# overwrite from test_modeling_common
def _mock_init_weights(self, module):
if hasattr(module, "weight") and module.weight is not None:
module.weight.data.fill_(3)
if hasattr(module, "weight_g") and module.weight_g is not None:
module.weight_g.data.fill_(3)
if hasattr(module, "weight_v") and module.weight_v is not None:
module.weight_v.data.fill_(3)
if hasattr(module, "bias") and module.bias is not None:
module.bias.data.fill_(3)
if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None:
module.masked_spec_embed.data.fill_(3)
@unittest.skip(reason="Feed forward chunking is not implemented")
def test_feed_forward_chunking(self):
pass
@slow
def test_model_from_pretrained(self):
model = SEWDModel.from_pretrained("asapp/sew-d-tiny-100k")
self.assertIsNotNone(model)
@require_torch
class SEWDUtilsTest(unittest.TestCase):
def test_compute_mask_indices(self):
batch_size = 4
sequence_length = 60
mask_prob = 0.5
mask_length = 1
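        # With mask_length == 1 there is no span overlap, so every row masks exactly
        # mask_prob * sequence_length positions.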
mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
mask = torch.from_numpy(mask).to(torch_device)
self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)])
def test_compute_mask_indices_overlap(self):
batch_size = 4
sequence_length = 80
mask_prob = 0.5
mask_length = 4
mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
mask = torch.from_numpy(mask).to(torch_device)
        # because of overlap, the per-row mask sums don't have to add up exactly to `mask_prob * sequence_length`, but they have to be smaller or equal
for batch_sum in mask.sum(axis=-1):
self.assertTrue(int(batch_sum) <= mask_prob * sequence_length)
@require_torch
@require_soundfile
@slow
class SEWDModelIntegrationTest(unittest.TestCase):
def _load_datasamples(self, num_samples):
from datasets import load_dataset
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
# automatic decoding with librispeech
speech_samples = ds.sort("id").filter(
lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)]
)[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def test_inference_pretrained_batched(self):
model = SEWDModel.from_pretrained("asapp/sew-d-tiny-100k").to(torch_device)
processor = Wav2Vec2FeatureExtractor.from_pretrained("asapp/sew-d-tiny-100k")
input_speech = self._load_datasamples(2)
inputs = processor(input_speech, return_tensors="pt", padding=True)
input_values = inputs.input_values.to(torch_device)
with torch.no_grad():
outputs = model(input_values).last_hidden_state
# expected outputs taken from the original SEW-D implementation
expected_outputs_first = torch.tensor(
[
[
[-0.1619, 0.6995, 0.4062, -0.1014],
[-0.1364, 0.5960, 0.0952, -0.0873],
[-0.1572, 0.5718, 0.4228, -0.0864],
[-0.1325, 0.6823, 0.1387, -0.0871],
],
[
[-0.1296, 0.4008, 0.4952, -0.1450],
[-0.1152, 0.3693, 0.3037, -0.1290],
[-0.1194, 0.6074, 0.3531, -0.1466],
[-0.1113, 0.3135, 0.2224, -0.1338],
],
],
device=torch_device,
)
expected_outputs_last = torch.tensor(
[
[
[-0.1577, 0.5108, 0.8553, 0.2550],
[-0.1530, 0.3580, 0.6143, 0.2672],
[-0.1535, 0.4954, 0.8503, 0.1387],
[-0.1572, 0.3363, 0.6217, 0.1490],
],
[
[-0.1338, 0.5459, 0.9607, -0.1133],
[-0.1502, 0.3738, 0.7313, -0.0986],
[-0.0953, 0.4708, 1.0821, -0.0944],
[-0.1474, 0.3598, 0.7248, -0.0748],
],
],
device=torch_device,
)
expected_output_sum = 54201.0469
self.assertTrue(torch.allclose(outputs[:, :4, :4], expected_outputs_first, atol=1e-3))
self.assertTrue(torch.allclose(outputs[:, -4:, -4:], expected_outputs_last, atol=1e-3))
self.assertTrue(abs(outputs.sum() - expected_output_sum) < 1)
def test_inference_ctc_batched(self):
model = SEWDForCTC.from_pretrained("asapp/sew-d-tiny-100k-ft-ls100h").to(torch_device)
processor = Wav2Vec2Processor.from_pretrained("asapp/sew-d-tiny-100k-ft-ls100h", do_lower_case=True)
input_speech = self._load_datasamples(2)
inputs = processor(input_speech, return_tensors="pt", padding=True)
input_values = inputs.input_values.to(torch_device)
with torch.no_grad():
logits = model(input_values).logits
predicted_ids = torch.argmax(logits, dim=-1)
predicted_trans = processor.batch_decode(predicted_ids)
EXPECTED_TRANSCRIPTIONS = [
"a man said to the universe sir i exist",
"swet covered breon's body trickling into the titlowing closs that was the only garmened he war",
]
self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)
| [
"torch.no_grad",
"torch.ones",
"torch.from_numpy",
"torch.tensor",
"torch.isinf",
"torch.ones_like",
"torch.allclose",
"torch.argmax"
] | 1.0 | dctelus/transformers | b18dfd95e1f60ae65a959a7b255fc06522170d1b |
1.0 | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List, Union
import numpy as np
import PIL.Image
import PIL.ImageOps
import requests
from .utils import is_torch_available
from .utils.generic import _is_torch
IMAGENET_DEFAULT_MEAN = [0.485, 0.456, 0.406]
IMAGENET_DEFAULT_STD = [0.229, 0.224, 0.225]
IMAGENET_STANDARD_MEAN = [0.5, 0.5, 0.5]
IMAGENET_STANDARD_STD = [0.5, 0.5, 0.5]
ImageInput = Union[
PIL.Image.Image, np.ndarray, "torch.Tensor", List[PIL.Image.Image], List[np.ndarray], List["torch.Tensor"] # noqa
]
def is_torch_tensor(obj):
return _is_torch(obj) if is_torch_available() else False
def load_image(image: Union[str, "PIL.Image.Image"]) -> "PIL.Image.Image":
"""
Loads `image` to a PIL Image.
Args:
image (`str` or `PIL.Image.Image`):
The image to convert to the PIL Image format.
Returns:
`PIL.Image.Image`: A PIL Image.
"""
if isinstance(image, str):
if image.startswith("http://") or image.startswith("https://"):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
image = PIL.Image.open(requests.get(image, stream=True).raw)
elif os.path.isfile(image):
image = PIL.Image.open(image)
else:
raise ValueError(
f"Incorrect path or url, URLs must start with `http://` or `https://`, and {image} is not a valid path"
)
elif isinstance(image, PIL.Image.Image):
image = image
else:
raise ValueError(
"Incorrect format used for image. Should be an url linking to an image, a local path, or a PIL image."
)
image = PIL.ImageOps.exif_transpose(image)
image = image.convert("RGB")
return image
# In the future we can add a TF implementation here when we have TF models.
class ImageFeatureExtractionMixin:
"""
Mixin that contain utilities for preparing image features.
"""
def _ensure_format_supported(self, image):
if not isinstance(image, (PIL.Image.Image, np.ndarray)) and not is_torch_tensor(image):
raise ValueError(
f"Got type {type(image)} which is not supported, only `PIL.Image.Image`, `np.array` and "
"`torch.Tensor` are."
)
def to_pil_image(self, image, rescale=None):
"""
Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if
needed.
Args:
image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor`):
The image to convert to the PIL Image format.
rescale (`bool`, *optional*):
Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will
default to `True` if the image type is a floating type, `False` otherwise.
"""
self._ensure_format_supported(image)
if is_torch_tensor(image):
image = image.numpy()
if isinstance(image, np.ndarray):
if rescale is None:
                # rescale defaults to True when the array is of a floating type.
rescale = isinstance(image.flat[0], np.floating)
            # If the channel has been moved to first dim, we put it back at the end.
if image.ndim == 3 and image.shape[0] in [1, 3]:
image = image.transpose(1, 2, 0)
if rescale:
image = image * 255
image = image.astype(np.uint8)
return PIL.Image.fromarray(image)
return image
def to_numpy_array(self, image, rescale=None, channel_first=True):
"""
Converts `image` to a numpy array. Optionally rescales it and puts the channel dimension as the first
dimension.
Args:
image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
The image to convert to a NumPy array.
rescale (`bool`, *optional*):
Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.). Will
default to `True` if the image is a PIL Image or an array/tensor of integers, `False` otherwise.
channel_first (`bool`, *optional*, defaults to `True`):
Whether or not to permute the dimensions of the image to put the channel dimension first.
"""
self._ensure_format_supported(image)
if isinstance(image, PIL.Image.Image):
image = np.array(image)
if is_torch_tensor(image):
image = image.numpy()
if rescale is None:
rescale = isinstance(image.flat[0], np.integer)
if rescale:
image = image.astype(np.float32) / 255.0
if channel_first and image.ndim == 3:
image = image.transpose(2, 0, 1)
return image
def normalize(self, image, mean, std):
"""
Normalizes `image` with `mean` and `std`. Note that this will trigger a conversion of `image` to a NumPy array
if it's a PIL Image.
Args:
image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
The image to normalize.
mean (`List[float]` or `np.ndarray` or `torch.Tensor`):
The mean (per channel) to use for normalization.
std (`List[float]` or `np.ndarray` or `torch.Tensor`):
The standard deviation (per channel) to use for normalization.
"""
self._ensure_format_supported(image)
if isinstance(image, PIL.Image.Image):
image = self.to_numpy_array(image)
if isinstance(image, np.ndarray):
if not isinstance(mean, np.ndarray):
mean = np.array(mean).astype(image.dtype)
if not isinstance(std, np.ndarray):
std = np.array(std).astype(image.dtype)
elif is_torch_tensor(image):
import torch
if not isinstance(mean, torch.Tensor):
mean = torch.tensor(mean)
if not isinstance(std, torch.Tensor):
std = torch.tensor(std)
if image.ndim == 3 and image.shape[0] in [1, 3]:
return (image - mean[:, None, None]) / std[:, None, None]
else:
return (image - mean) / std
def resize(self, image, size, resample=PIL.Image.BILINEAR, default_to_square=True, max_size=None):
"""
Resizes `image`. Note that this will trigger a conversion of `image` to a PIL Image.
Args:
image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
The image to resize.
size (`int` or `Tuple[int, int]`):
The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be
matched to this.
If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If
`size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to
                this number, i.e., if height > width, then image will be rescaled to (size * height / width, size).
resample (`int`, *optional*, defaults to `PIL.Image.BILINEAR`):
                The filter to use for resampling.
default_to_square (`bool`, *optional*, defaults to `True`):
How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a
square (`size`,`size`). If set to `False`, will replicate
[`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)
with support for resizing only the smallest edge and providing an optional `max_size`.
max_size (`int`, *optional*, defaults to `None`):
The maximum allowed for the longer edge of the resized image: if the longer edge of the image is
greater than `max_size` after being resized according to `size`, then the image is resized again so
                that the longer edge is equal to `max_size`. As a result, `size` might be overruled, i.e. the smaller
edge may be shorter than `size`. Only used if `default_to_square` is `False`.
"""
self._ensure_format_supported(image)
if not isinstance(image, PIL.Image.Image):
image = self.to_pil_image(image)
if isinstance(size, list):
size = tuple(size)
if isinstance(size, int) or len(size) == 1:
if default_to_square:
size = (size, size) if isinstance(size, int) else (size[0], size[0])
else:
width, height = image.size
# specified size only for the smallest edge
short, long = (width, height) if width <= height else (height, width)
requested_new_short = size if isinstance(size, int) else size[0]
if short == requested_new_short:
return image
new_short, new_long = requested_new_short, int(requested_new_short * long / short)
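                # If preserving the aspect ratio would push the longer side past `max_size`,
                # rescale so the longer side equals `max_size` (the shorter side may then
                # end up below the requested `size`).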
if max_size is not None:
if max_size <= requested_new_short:
raise ValueError(
f"max_size = {max_size} must be strictly greater than the requested "
f"size for the smaller edge size = {size}"
)
if new_long > max_size:
new_short, new_long = int(max_size * new_short / new_long), max_size
size = (new_short, new_long) if width <= height else (new_long, new_short)
return image.resize(size, resample=resample)
def center_crop(self, image, size):
"""
Crops `image` to the given size using a center crop. Note that if the image is too small to be cropped to the
size given, it will be padded (so the returned result has the size asked).
Args:
image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
The image to resize.
size (`int` or `Tuple[int, int]`):
The size to which crop the image.
"""
self._ensure_format_supported(image)
if not isinstance(size, tuple):
size = (size, size)
# PIL Image.size is (width, height) but NumPy array and torch Tensors have (height, width)
image_shape = (image.size[1], image.size[0]) if isinstance(image, PIL.Image.Image) else image.shape[-2:]
top = (image_shape[0] - size[0]) // 2
bottom = top + size[0] # In case size is odd, (image_shape[0] + size[0]) // 2 won't give the proper result.
left = (image_shape[1] - size[1]) // 2
right = left + size[1] # In case size is odd, (image_shape[1] + size[1]) // 2 won't give the proper result.
# For PIL Images we have a method to crop directly.
if isinstance(image, PIL.Image.Image):
return image.crop((left, top, right, bottom))
# Check if all the dimensions are inside the image.
if top >= 0 and bottom <= image_shape[0] and left >= 0 and right <= image_shape[1]:
return image[..., top:bottom, left:right]
# Otherwise, we may need to pad if the image is too small. Oh joy...
new_shape = image.shape[:-2] + (max(size[0], image_shape[0]), max(size[1], image_shape[1]))
if isinstance(image, np.ndarray):
new_image = np.zeros_like(image, shape=new_shape)
elif is_torch_tensor(image):
new_image = image.new_zeros(new_shape)
top_pad = (new_shape[-2] - image_shape[0]) // 2
bottom_pad = top_pad + image_shape[0]
left_pad = (new_shape[-1] - image_shape[1]) // 2
right_pad = left_pad + image_shape[1]
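        # Paste the original image centered on the padded canvas, then shift the crop
        # coordinates by the padding offsets before slicing the final window.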
new_image[..., top_pad:bottom_pad, left_pad:right_pad] = image
top += top_pad
bottom += top_pad
left += left_pad
right += left_pad
return new_image[
..., max(0, top) : min(new_image.shape[-2], bottom), max(0, left) : min(new_image.shape[-1], right)
]
| [
"torch.tensor"
] | 1.0 | dctelus/transformers | b18dfd95e1f60ae65a959a7b255fc06522170d1b |
1.0 | # coding=utf-8
# Copyright 2020 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ..test_modeling_common import floats_tensor, ids_tensor
if is_torch_available():
import torch
from transformers import (
AutoModelForSeq2SeqLM,
AutoTokenizer,
BartForConditionalGeneration,
BartTokenizer,
GPT2LMHeadModel,
GPT2Tokenizer,
ImageGPTForCausalImageModeling,
Speech2TextForConditionalGeneration,
SpeechEncoderDecoderModel,
VisionEncoderDecoderModel,
top_k_top_p_filtering,
)
from transformers.generation_beam_constraints import DisjunctiveConstraint, PhrasalConstraint
from transformers.generation_beam_search import BeamSearchScorer, ConstrainedBeamSearchScorer
from transformers.generation_logits_process import (
ForcedBOSTokenLogitsProcessor,
ForcedEOSTokenLogitsProcessor,
HammingDiversityLogitsProcessor,
InfNanRemoveLogitsProcessor,
LogitsProcessorList,
MinLengthLogitsProcessor,
NoBadWordsLogitsProcessor,
NoRepeatNGramLogitsProcessor,
RepetitionPenaltyLogitsProcessor,
TemperatureLogitsWarper,
TopKLogitsWarper,
TopPLogitsWarper,
)
from transformers.generation_stopping_criteria import MaxLengthCriteria, StoppingCriteria, StoppingCriteriaList
from transformers.generation_utils import (
BeamSampleDecoderOnlyOutput,
BeamSampleEncoderDecoderOutput,
BeamSearchDecoderOnlyOutput,
BeamSearchEncoderDecoderOutput,
GreedySearchDecoderOnlyOutput,
GreedySearchEncoderDecoderOutput,
SampleDecoderOnlyOutput,
SampleEncoderDecoderOutput,
)
class GenerationTesterMixin:
model_tester = None
all_generative_model_classes = ()
input_name = "input_ids"
def _get_input_ids_and_config(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = inputs_dict[self.input_name]
attention_mask = torch.ones_like(input_ids, dtype=torch.long)
        # cut to half length & take max batch_size 2
max_batch_size = 2
sequence_length = input_ids.shape[-1] // 2
input_ids = input_ids[:max_batch_size, :sequence_length]
attention_mask = attention_mask[:max_batch_size, :sequence_length]
# generate max 3 tokens
max_length = input_ids.shape[-1] + 3
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
config.pad_token_id = config.eos_token_id
return config, input_ids, attention_mask, max_length
@staticmethod
def _get_logits_processor_and_kwargs(
input_length,
eos_token_id,
forced_bos_token_id=None,
forced_eos_token_id=None,
max_length=None,
diversity_penalty=None,
):
process_kwargs = {
"min_length": input_length + 1,
"bad_words_ids": [[1, 0]],
"no_repeat_ngram_size": 2,
"repetition_penalty": 1.2,
}
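        # Mirror the LogitsProcessorList that `generate()` would build from these kwargs,
        # so the explicit greedy/sample/beam calls can be compared 1:1 against `generate()`.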
logits_processor = LogitsProcessorList(
(
[
HammingDiversityLogitsProcessor(diversity_penalty, num_beams=2, num_beam_groups=2),
]
if diversity_penalty is not None
else []
)
+ (
[
MinLengthLogitsProcessor(process_kwargs["min_length"], eos_token_id),
]
if eos_token_id is not None
else []
)
+ (
[
ForcedBOSTokenLogitsProcessor(forced_bos_token_id),
]
if forced_bos_token_id is not None
else []
)
+ (
[ForcedEOSTokenLogitsProcessor(max_length, forced_eos_token_id)]
if forced_eos_token_id is not None
else []
)
+ [
NoBadWordsLogitsProcessor(process_kwargs["bad_words_ids"], eos_token_id),
NoRepeatNGramLogitsProcessor(process_kwargs["no_repeat_ngram_size"]),
RepetitionPenaltyLogitsProcessor(process_kwargs["repetition_penalty"]),
]
)
return process_kwargs, logits_processor
@staticmethod
def _get_warper_and_kwargs(num_beams):
warp_kwargs = {"top_k": 10, "top_p": 0.7, "temperature": 0.7}
logits_warper = LogitsProcessorList(
[
TemperatureLogitsWarper(warp_kwargs["temperature"]),
TopKLogitsWarper(top_k=warp_kwargs["top_k"], min_tokens_to_keep=(2 if num_beams > 1 else 1)),
TopPLogitsWarper(top_p=warp_kwargs["top_p"], min_tokens_to_keep=(2 if num_beams > 1 else 1)),
]
)
return warp_kwargs, logits_warper
@staticmethod
def _get_beam_scorer_and_kwargs(batch_size, max_length, num_return_sequences=1):
beam_kwargs = {
"early_stopping": False,
"length_penalty": 2.0,
"num_beams": 2,
"num_return_sequences": num_return_sequences,
}
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=beam_kwargs["num_beams"],
device=torch_device,
length_penalty=beam_kwargs["length_penalty"],
do_early_stopping=beam_kwargs["early_stopping"],
num_beam_hyps_to_keep=num_return_sequences,
)
return beam_kwargs, beam_scorer
@staticmethod
def _get_diverse_beam_scorer_and_kwargs(batch_size, max_length, num_return_sequences=1):
beam_kwargs = {
"early_stopping": False,
"length_penalty": 2.0,
"num_beams": 2,
"num_return_sequences": num_return_sequences,
"num_beam_groups": 2, # one beam per group
"diversity_penalty": 2.0,
}
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=beam_kwargs["num_beams"],
device=torch_device,
length_penalty=beam_kwargs["length_penalty"],
do_early_stopping=beam_kwargs["early_stopping"],
num_beam_hyps_to_keep=num_return_sequences,
num_beam_groups=beam_kwargs["num_beam_groups"],
)
return beam_kwargs, beam_scorer
@staticmethod
def _get_constrained_beam_scorer_and_kwargs(batch_size, max_length, constraints, num_return_sequences=1):
beam_kwargs = {
"early_stopping": False,
"length_penalty": 2.0,
"num_beams": num_return_sequences * 4,
"num_return_sequences": num_return_sequences,
}
beam_scorer = ConstrainedBeamSearchScorer(
batch_size=batch_size,
constraints=constraints,
num_beams=beam_kwargs["num_beams"],
device=torch_device,
length_penalty=beam_kwargs["length_penalty"],
do_early_stopping=beam_kwargs["early_stopping"],
num_beam_hyps_to_keep=num_return_sequences,
)
return beam_kwargs, beam_scorer
@staticmethod
def _get_encoder_outputs(
model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1
):
encoder = model.get_encoder()
encoder_outputs = encoder(
input_ids,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave(
num_interleave, dim=0
)
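        # One copy of the encoder states per beam/return sequence; the low-level search
        # methods then only need decoder-side input_ids, so start from the decoder start token.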
input_ids = torch.zeros_like(input_ids[:, :1]) + model._get_decoder_start_token_id()
attention_mask = None
return encoder_outputs, input_ids, attention_mask
def _greedy_generate(
self,
model,
input_ids,
attention_mask,
max_length,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
eos_token_id=model.config.eos_token_id,
forced_bos_token_id=model.config.forced_bos_token_id,
forced_eos_token_id=model.config.forced_eos_token_id,
max_length=max_length,
)
kwargs = {}
output_generate = model.generate(
input_ids,
attention_mask=attention_mask,
do_sample=False,
num_beams=1,
max_length=max_length,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
output_scores=output_scores,
return_dict_in_generate=return_dict_in_generate,
remove_invalid_values=True,
**logits_process_kwargs,
)
if model.config.is_encoder_decoder:
encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
with torch.no_grad():
output_greedy = model.greedy_search(
input_ids,
max_length=max_length,
attention_mask=attention_mask,
logits_processor=logits_processor,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
output_scores=output_scores,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_greedy, output_generate
def _sample_generate(
self,
model,
input_ids,
attention_mask,
max_length,
num_return_sequences,
logits_processor,
logits_warper,
logits_warper_kwargs,
process_kwargs,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
torch.manual_seed(0)
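        # Seed identically before both decoding paths so `generate()` and `sample()`
        # draw the same random tokens and their outputs can be compared exactly.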
output_generate = model.generate(
input_ids,
do_sample=True,
num_beams=1,
max_length=max_length,
num_return_sequences=num_return_sequences,
attention_mask=attention_mask,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
remove_invalid_values=True,
**logits_warper_kwargs,
**process_kwargs,
)
torch.manual_seed(0)
kwargs = {}
if model.config.is_encoder_decoder:
encoder_outputs, input_ids_clone, attention_mask_clone = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
num_interleave=num_return_sequences,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
input_ids_clone = input_ids_clone.repeat_interleave(num_return_sequences, dim=0)
else:
attention_mask_clone = attention_mask.repeat_interleave(num_return_sequences, dim=0)
input_ids_clone = input_ids.repeat_interleave(num_return_sequences, dim=0)
# prevent flaky generation test failures
logits_processor.append(InfNanRemoveLogitsProcessor())
with torch.no_grad():
output_sample = model.sample(
input_ids_clone,
attention_mask=attention_mask_clone,
max_length=max_length,
logits_processor=logits_processor,
logits_warper=logits_warper,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_sample, output_generate
def _beam_search_generate(
self,
model,
input_ids,
attention_mask,
max_length,
beam_scorer,
beam_kwargs,
logits_processor,
logits_process_kwargs,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
output_generate = model.generate(
input_ids,
attention_mask=attention_mask,
do_sample=False,
max_length=max_length,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
remove_invalid_values=True,
**beam_kwargs,
**logits_process_kwargs,
)
# beam_search does not automatically interleave `batch_size` dim for `num_beams`
kwargs = {}
if model.config.is_encoder_decoder:
encoder_outputs, input_ids_clone, attention_mask_clone = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
num_interleave=beam_scorer.num_beams,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
input_ids_clone = input_ids_clone.repeat_interleave(beam_scorer.num_beams, dim=0)
else:
attention_mask_clone = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0)
input_ids_clone = input_ids.repeat_interleave(beam_scorer.num_beams, dim=0)
with torch.no_grad():
output_beam_search = model.beam_search(
input_ids_clone,
beam_scorer,
max_length=max_length,
attention_mask=attention_mask_clone,
logits_processor=logits_processor,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_generate, output_beam_search
def _beam_sample_generate(
self,
model,
input_ids,
attention_mask,
max_length,
num_return_sequences,
beam_scorer,
beam_kwargs,
logits_warper,
logits_warper_kwargs,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
torch.manual_seed(0)
output_generate = model.generate(
input_ids,
attention_mask=attention_mask,
do_sample=True,
max_length=max_length,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
remove_invalid_values=True,
**beam_kwargs,
**logits_warper_kwargs,
)
# beam_search does not automatically interleave `batch_size` dim for `num_beams * num_return_sequences`
kwargs = {}
if model.config.is_encoder_decoder:
encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
num_interleave=beam_scorer.num_beams * num_return_sequences,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
else:
attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams * num_return_sequences, dim=0)
# prevent flaky generation test failures
logits_processor = LogitsProcessorList()
logits_processor.append(InfNanRemoveLogitsProcessor())
torch.manual_seed(0)
with torch.no_grad():
output_beam_sample = model.beam_sample(
input_ids.repeat_interleave(beam_scorer.num_beams * num_return_sequences, dim=0),
beam_scorer,
max_length=max_length,
attention_mask=attention_mask,
logits_warper=logits_warper,
logits_processor=logits_processor,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_generate, output_beam_sample
def _group_beam_search_generate(
self,
model,
input_ids,
attention_mask,
max_length,
beam_scorer,
beam_kwargs,
logits_processor,
logits_process_kwargs,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
output_generate = model.generate(
input_ids,
attention_mask=attention_mask,
do_sample=False,
max_length=max_length,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
remove_invalid_values=True,
**beam_kwargs,
**logits_process_kwargs,
)
# group_beam_search does not automatically interleave `batch_size` dim for `num_beams`
kwargs = {}
if model.config.is_encoder_decoder:
encoder_outputs, input_ids_clone, attention_mask_clone = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
num_interleave=beam_scorer.num_beams,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
input_ids_clone = input_ids_clone.repeat_interleave(beam_scorer.num_beams, dim=0)
else:
attention_mask_clone = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0)
input_ids_clone = input_ids.repeat_interleave(beam_scorer.num_beams, dim=0)
with torch.no_grad():
output_group_beam_search = model.group_beam_search(
input_ids_clone,
beam_scorer,
max_length=max_length,
attention_mask=attention_mask_clone,
logits_processor=logits_processor,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_generate, output_group_beam_search
def _constrained_beam_search_generate(
self,
model,
input_ids,
attention_mask,
max_length,
constrained_beam_scorer,
constraints,
beam_kwargs,
logits_processor,
logits_process_kwargs,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
output_generate = model.generate(
input_ids,
attention_mask=attention_mask,
do_sample=False,
max_length=max_length,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
remove_invalid_values=True,
constraints=constraints,
**beam_kwargs,
**logits_process_kwargs,
)
# group_beam_search does not automatically interleave `batch_size` dim for `num_beams`
kwargs = {}
if model.config.is_encoder_decoder:
encoder_outputs, input_ids_clone, attention_mask_clone = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
num_interleave=constrained_beam_scorer.num_beams,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
input_ids_clone = input_ids_clone.repeat_interleave(constrained_beam_scorer.num_beams, dim=0)
else:
attention_mask_clone = attention_mask.repeat_interleave(constrained_beam_scorer.num_beams, dim=0)
input_ids_clone = input_ids.repeat_interleave(constrained_beam_scorer.num_beams, dim=0)
with torch.no_grad():
output_group_beam_search = model.constrained_beam_search(
input_ids_clone,
constrained_beam_scorer,
max_length=max_length,
attention_mask=attention_mask_clone,
logits_processor=logits_processor,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_generate, output_group_beam_search
def test_greedy_generate(self):
# check `generate()` and `greedy_search()` are equal
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# test old generation output for backwards compatibility
model = model_class(config).to(torch_device).eval()
output_greedy, output_generate = self._greedy_generate(
model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length
)
self.assertListEqual(output_greedy.tolist(), output_generate.tolist())
def test_greedy_generate_dict_outputs(self):
for model_class in self.all_generative_model_classes:
# disable cache
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
config.use_cache = False
model = model_class(config).to(torch_device).eval()
output_greedy, output_generate = self._greedy_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_greedy, GreedySearchEncoderDecoderOutput)
self.assertIsInstance(output_generate, GreedySearchEncoderDecoderOutput)
else:
self.assertIsInstance(output_greedy, GreedySearchDecoderOnlyOutput)
self.assertIsInstance(output_generate, GreedySearchDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_greedy.sequences.tolist())
for output in (output_greedy, output_generate):
self._check_outputs(output, input_ids, model.config)
def test_greedy_generate_dict_outputs_use_cache(self):
for model_class in self.all_generative_model_classes:
# enable cache
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
if not hasattr(config, "use_cache"):
# only relevant if model has "use_cache"
return
config.use_cache = True
config.is_decoder = True
model = model_class(config).to(torch_device).eval()
output_greedy, output_generate = self._greedy_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
self.assertListEqual(output_generate.sequences.tolist(), output_greedy.sequences.tolist())
for output in (output_greedy, output_generate):
self._check_outputs(output, input_ids, model.config, use_cache=True)
def test_sample_generate(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
model.config.eos_token_id,
forced_bos_token_id=model.config.forced_bos_token_id,
forced_eos_token_id=model.config.forced_eos_token_id,
max_length=max_length,
)
logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1)
# check `generate()` and `sample()` are equal
output_sample, output_generate = self._sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=1,
logits_processor=logits_processor,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
process_kwargs=process_kwargs,
)
self.assertListEqual(output_sample.tolist(), output_generate.tolist())
# check `generate()` and `sample()` yield equal results for `num_return_sequences`
output_sample, output_generate = self._sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=3,
logits_processor=logits_processor,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
process_kwargs=process_kwargs,
)
self.assertListEqual(output_sample.tolist(), output_generate.tolist())
def test_sample_generate_dict_output(self):
for model_class in self.all_generative_model_classes:
# disable cache
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
config.use_cache = False
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
model.config.eos_token_id,
forced_bos_token_id=model.config.forced_bos_token_id,
forced_eos_token_id=model.config.forced_eos_token_id,
max_length=max_length,
)
logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1)
output_sample, output_generate = self._sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=2,
logits_processor=logits_processor,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
process_kwargs=process_kwargs,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_sample, SampleEncoderDecoderOutput)
self.assertIsInstance(output_generate, SampleEncoderDecoderOutput)
else:
self.assertIsInstance(output_sample, SampleDecoderOnlyOutput)
self.assertIsInstance(output_generate, SampleDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_sample.sequences.tolist())
for output in (output_sample, output_generate):
self._check_outputs(output, input_ids, model.config, num_return_sequences=2)
def test_beam_search_generate(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
            # It is important to set the eos_token_id to None to ensure that no sequences
            # shorter than `max_length` can be generated, which could lead to flaky circle ci
            # failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
)
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length)
# check `generate()` and `beam_search()` are equal
output_generate, output_beam_search = self._beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_process_kwargs=logits_process_kwargs,
logits_processor=logits_processor,
)
self.assertListEqual(output_generate.tolist(), output_beam_search.tolist())
# check `generate()` and `beam_search()` are equal for `num_return_sequences`
num_return_sequences = 2
if model.config.is_encoder_decoder:
max_length = 4
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(
input_ids.shape[0], max_length, num_return_sequences=num_return_sequences
)
output_generate, output_beam_search = self._beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_process_kwargs=logits_process_kwargs,
logits_processor=logits_processor,
)
self.assertListEqual(output_generate.tolist(), output_beam_search.tolist())
def test_beam_search_generate_dict_output(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# disable cache
config.use_cache = False
            # It is important to set the eos_token_id to None to ensure that no sequences
            # shorter than `max_length` can be generated, which could lead to flaky circle ci
            # failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
)
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length)
output_generate, output_beam_search = self._beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_process_kwargs=logits_process_kwargs,
logits_processor=logits_processor,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_beam_search, BeamSearchEncoderDecoderOutput)
self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput)
else:
self.assertIsInstance(output_beam_search, BeamSearchDecoderOnlyOutput)
self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_beam_search.sequences.tolist())
self.assertTrue(
torch.allclose(output_generate["sequences_scores"], output_beam_search["sequences_scores"], atol=1e-3)
)
self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],))
self.assertTrue((output_generate["sequences_scores"] < 0).all().item())
for output in (output_beam_search, output_generate):
self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams)
def test_beam_search_generate_dict_outputs_use_cache(self):
for model_class in self.all_generative_model_classes:
# enable cache
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated, which could lead to flaky CircleCI
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
if not hasattr(config, "use_cache"):
# only relevant if model has "use_cache"
return
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
)
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length)
config.use_cache = True
config.is_decoder = True
model = model_class(config).to(torch_device).eval()
output_beam, output_generate = self._beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_process_kwargs=logits_process_kwargs,
logits_processor=logits_processor,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
self.assertListEqual(output_generate.sequences.tolist(), output_beam.sequences.tolist())
for output in (output_beam, output_generate):
self._check_outputs(
output, input_ids, model.config, use_cache=True, num_return_sequences=beam_scorer.num_beams
)
def test_beam_sample_generate(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated, which could lead to flaky CircleCI
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1)
model = model_class(config).to(torch_device).eval()
# check `generate()` and `beam_sample()` are equal
# change `num_return_sequences = 2` but not for `beam_scorer`
num_return_sequences = 2
if model.config.is_encoder_decoder:
max_length = 4
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(
input_ids.shape[0] * num_return_sequences, max_length
)
beam_kwargs["num_return_sequences"] = num_return_sequences
output_generate, output_beam_sample = self._beam_sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=num_return_sequences,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
)
self.assertListEqual(output_generate.tolist(), output_beam_sample.tolist())
def test_beam_sample_generate_dict_output(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# disable cache
config.use_cache = False
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated, which could lead to flaky CircleCI
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1)
num_return_sequences = 2
if model.config.is_encoder_decoder:
max_length = 4
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(
input_ids.shape[0] * num_return_sequences, max_length
)
beam_kwargs["num_return_sequences"] = num_return_sequences
output_beam_sample, output_generate = self._beam_sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=num_return_sequences,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_beam_sample, BeamSampleEncoderDecoderOutput)
self.assertIsInstance(output_generate, BeamSampleEncoderDecoderOutput)
else:
self.assertIsInstance(output_beam_sample, BeamSampleDecoderOnlyOutput)
self.assertIsInstance(output_generate, BeamSampleDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_beam_sample.sequences.tolist())
self.assertTrue(
torch.allclose(output_generate["sequences_scores"], output_beam_sample["sequences_scores"], atol=1e-3)
)
self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],))
self.assertTrue((output_generate["sequences_scores"] < 0).all().item())
for output in (output_beam_sample, output_generate):
self._check_outputs(
output, input_ids, model.config, num_return_sequences=num_return_sequences * beam_scorer.num_beams
)
def test_generate_without_input_ids(self):
config, _, _, max_length = self._get_input_ids_and_config()
# if no bos token id => cannot generate from None
if config.bos_token_id is None:
return
for model_class in self.all_generative_model_classes:
model = model_class(config).to(torch_device)
model.eval()
output_ids_generate = model.generate(
do_sample=False,
max_length=max_length,
remove_invalid_values=True,
)
self.assertIsNotNone(output_ids_generate)
def test_group_beam_search_generate(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated, which could lead to flaky CircleCI
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
diversity_penalty=2.0,
)
# check `generate()` and `group_beam_search()` are equal
beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs(input_ids.shape[0], max_length)
output_generate, output_group_beam_search = self._group_beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_processor=logits_processor,
logits_process_kwargs=logits_process_kwargs,
)
self.assertListEqual(output_generate.tolist(), output_group_beam_search.tolist())
# check `generate()` and `group_beam_search()` are equal for `num_return_sequences`
num_return_sequences = 2
if model.config.is_encoder_decoder:
max_length = 4
beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs(
input_ids.shape[0], max_length, num_return_sequences=num_return_sequences
)
output_generate, output_group_beam_search = self._group_beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_processor=logits_processor,
logits_process_kwargs=logits_process_kwargs,
)
self.assertListEqual(output_generate.tolist(), output_group_beam_search.tolist())
def test_group_beam_search_generate_dict_output(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
config.use_cache = False
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated, which could lead to flaky CircleCI
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
diversity_penalty=2.0,
)
num_return_sequences = 1
beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs(
input_ids.shape[0], max_length, num_return_sequences=num_return_sequences
)
output_generate, output_group_beam_search = self._group_beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_processor=logits_processor,
logits_process_kwargs=logits_process_kwargs,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_group_beam_search, BeamSearchEncoderDecoderOutput)
self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput)
else:
self.assertIsInstance(output_group_beam_search, BeamSearchDecoderOnlyOutput)
self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_group_beam_search.sequences.tolist())
self.assertTrue(
torch.allclose(
output_generate["sequences_scores"], output_group_beam_search["sequences_scores"], atol=1e-3
)
)
self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],))
self.assertTrue((output_generate["sequences_scores"] < 0).all().item())
for output in (output_group_beam_search, output_generate):
self._check_outputs(
output, input_ids, model.config, num_return_sequences=num_return_sequences * beam_scorer.num_beams
)
def test_constrained_beam_search_generate(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated, which could lead to flaky CircleCI
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
max_length = 20
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
)
# check `generate()` and `constrained_beam_search()` are equal
# Sample constraints
if not input_ids.dtype == torch.float32:
min_id = torch.min(input_ids) + 3
max_id = torch.max(input_ids)
else:
# otherwise this throws an error for Speech2TextModel since its inputs are floating points
min_id = 3
max_id = 100
force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0]
constraints = [
PhrasalConstraint(force_tokens),
]
beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs(
input_ids.shape[0], max_length, constraints, num_return_sequences=1
)
output_generate, output_beam_search = self._constrained_beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
constrained_beam_scorer=beam_scorer,
constraints=constraints,
beam_kwargs=beam_kwargs,
logits_processor=logits_processor,
logits_process_kwargs=logits_process_kwargs,
)
self.assertListEqual(output_generate.tolist(), output_beam_search.tolist())
for generation_output in output_generate:
self._check_sequence_inside_sequence(force_tokens, generation_output)
# check `generate()` and `constrained_beam_search()` are equal for `num_return_sequences`
# Sample constraints
force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0]
constraints = [
PhrasalConstraint(force_tokens),
]
num_return_sequences = 2
max_length = 20
beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs(
input_ids.shape[0], max_length, constraints, num_return_sequences=num_return_sequences
)
output_generate, output_beam_search = self._constrained_beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
constrained_beam_scorer=beam_scorer,
constraints=constraints,
beam_kwargs=beam_kwargs,
logits_processor=logits_processor,
logits_process_kwargs=logits_process_kwargs,
)
self.assertListEqual(output_generate.tolist(), output_beam_search.tolist())
for generation_output in output_generate:
self._check_sequence_inside_sequence(force_tokens, generation_output)
def test_constrained_beam_search_generate_dict_output(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# disable cache
config.use_cache = False
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated, which could lead to flaky CircleCI
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 20
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
)
# Sample constraints
if not input_ids.dtype == torch.float32:
min_id = torch.min(input_ids) + 3
max_id = torch.max(input_ids)
else:
# otherwise this throws an error for Speech2TextModel since its inputs are floating points
min_id = 3
max_id = 100
force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0]
constraints = [
PhrasalConstraint(force_tokens),
]
beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs(
input_ids.shape[0], max_length, constraints, num_return_sequences=1
)
output_generate, output_beam_search = self._constrained_beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
constrained_beam_scorer=beam_scorer,
constraints=constraints,
beam_kwargs=beam_kwargs,
logits_processor=logits_processor,
logits_process_kwargs=logits_process_kwargs,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_beam_search, BeamSearchEncoderDecoderOutput)
self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput)
else:
self.assertIsInstance(output_beam_search, BeamSearchDecoderOnlyOutput)
self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_beam_search.sequences.tolist())
self.assertTrue(
torch.allclose(output_generate["sequences_scores"], output_beam_search["sequences_scores"], atol=1e-3)
)
self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],))
self.assertTrue((output_generate["sequences_scores"] < 0).all().item())
for output in (output_beam_search, output_generate):
self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams)
def test_generate_with_head_masking(self):
"""Test designed for encoder-decoder models to ensure the attention head masking is used."""
attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
model = model_class(config).to(torch_device)
# We want to test only encoder-decoder models
if not config.is_encoder_decoder:
continue
head_masking = {
"head_mask": torch.zeros(config.encoder_layers, config.encoder_attention_heads, device=torch_device),
"decoder_head_mask": torch.zeros(
config.decoder_layers, config.decoder_attention_heads, device=torch_device
),
"cross_attn_head_mask": torch.zeros(
config.decoder_layers, config.decoder_attention_heads, device=torch_device
),
}
signature = inspect.signature(model.forward)
# We want to test only models where encoder/decoder head masking is implemented
if not set(head_masking.keys()) < set([*signature.parameters.keys()]):
continue
for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
out = model.generate(
input_ids,
attention_mask=attention_mask,
num_beams=1,
output_attentions=True,
return_dict_in_generate=True,
remove_invalid_values=True,
**{name: mask},
)
# We check the state of decoder_attentions and cross_attentions just from the last step
attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
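# Shared helpers: the `_check_*` methods below validate the structure of the
# dict-style outputs returned by `generate()` (scores, attentions, hidden states).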
def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1):
batch_size, seq_length = input_ids.shape
num_sequences_in_output = batch_size * num_return_sequences
gen_len = (
output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length
)
# scores
self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config)
# Attentions
if config.is_encoder_decoder:
# encoder
self._check_encoder_attention_for_generate(output.encoder_attentions, batch_size, config, seq_length)
# decoder
self._check_attentions_for_generate(
num_sequences_in_output,
output.decoder_attentions,
min_length=1,
max_length=output.sequences.shape[-1],
config=config,
use_cache=use_cache,
)
else:
# with use_cache, the first forward pass is identical to the no-cache case, so skip it here
attentions = output.attentions if not use_cache else output.attentions[1:]
min_length = seq_length if not use_cache else seq_length + 1
self._check_attentions_for_generate(
num_sequences_in_output,
attentions=attentions,
min_length=min_length,
max_length=output.sequences.shape[-1],
config=config,
use_cache=use_cache,
)
# Hidden States
if config.is_encoder_decoder:
# encoder
self._check_encoder_hidden_states_for_generate(
output.encoder_hidden_states, batch_size, config, seq_length
)
# decoder
self._check_hidden_states_for_generate(
num_sequences_in_output,
output.decoder_hidden_states,
min_length=1,
max_length=output.sequences.shape[-1],
config=config,
use_cache=use_cache,
)
else:
# with use_cache, the first forward pass is identical to the no-cache case, so skip it here
hidden_states = output.hidden_states if not use_cache else output.hidden_states[1:]
min_length = seq_length if not use_cache else seq_length + 1
self._check_hidden_states_for_generate(
num_sequences_in_output,
hidden_states,
min_length=min_length,
max_length=output.sequences.shape[-1],
config=config,
use_cache=use_cache,
)
def _check_scores(self, batch_size, scores, length, config):
expected_shape = (batch_size, config.vocab_size)
self.assertIsInstance(scores, tuple)
self.assertEqual(len(scores), length)
self.assertListEqual([iter_scores.shape for iter_scores in scores], [expected_shape] * len(scores))
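# Attentions come as one tuple per generation step, each holding one tensor per layer;
# with use_cache the per-step target length collapses to 1 (only the new token attends).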
def _check_attentions_for_generate(
self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
):
self.assertIsInstance(attentions, tuple)
self.assertListEqual(
[isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
)
self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(attentions):
tgt_len = min_length + idx if not use_cache else 1
src_len = min_length + idx
expected_shape = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
)
def _check_encoder_attention_for_generate(self, attentions, batch_size, config, seq_length):
encoder_expected_shape = (batch_size, config.num_attention_heads, seq_length, seq_length)
self.assertIsInstance(attentions, tuple)
self.assertListEqual(
[layer_attentions.shape for layer_attentions in attentions],
[encoder_expected_shape] * len(attentions),
)
def _check_hidden_states_for_generate(
self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
):
self.assertIsInstance(hidden_states, tuple)
self.assertListEqual(
[isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
[True] * len(hidden_states),
)
self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(hidden_states):
seq_len = min_length + idx if not use_cache else 1
expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
[expected_shape] * len(iter_hidden_states),
)
def _check_encoder_hidden_states_for_generate(self, hidden_states, batch_size, config, seq_length):
encoder_expected_shape = (batch_size, seq_length, config.hidden_size)
self.assertIsInstance(hidden_states, tuple)
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in hidden_states],
[encoder_expected_shape] * len(hidden_states),
)
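# Used by the constrained beam search tests to assert that the forced token phrase
# appears contiguously inside the generated sequence.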
def _check_sequence_inside_sequence(self, tensor_1, tensor_2):
# check whether tensor_1 is contained in tensor_2 or tensor_2 is contained in tensor_1.
# convert both to plain CPU lists; the device does not matter here.
if not isinstance(tensor_1, list):
tensor_1 = tensor_1.cpu().tolist()
if not isinstance(tensor_2, list):
tensor_2 = tensor_2.cpu().tolist()
in_order = len(tensor_1) <= len(tensor_2)
longer = tensor_2 if in_order else tensor_1
shorter = tensor_1 if in_order else tensor_2
flag = False
chunk_size = len(shorter)
for chunk_idx in range(len(longer) - chunk_size + 1):
subseq = longer[chunk_idx : chunk_idx + chunk_size]
if subseq == shorter:
flag = True
break
self.assertTrue(flag)
@require_torch
class UtilsFunctionsTest(unittest.TestCase):
# tests whether the top_k_top_p_filtering function behaves as expected
def test_top_k_top_p_filtering(self):
logits = torch.tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276,
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
], # cumulative prob of 4 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958,
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
], # cumulative prob of 4 highest values <= 0.6
],
dtype=torch.float,
device=torch_device,
)
non_inf_expected_idx = torch.tensor(
[[0, 0], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 20], [1, 27]],
dtype=torch.long,
device=torch_device,
) # expected non-filtered indices as noted above
non_inf_expected_output = torch.tensor(
[
8.2221,
8.4321,
7.4402,
9.3845,
6.2712,
8.8275,
7.3858,
9.6770,
], # expected non-filtered values as noted above
dtype=torch.float,
device=torch_device,
)
output = top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
non_inf_output = output[output != -float("inf")].to(device=torch_device)
non_inf_idx = (output != -float("inf")).nonzero().to(device=torch_device)
self.assertTrue(torch.allclose(non_inf_expected_output, non_inf_output, atol=1e-12))
self.assertTrue(torch.all(torch.eq(non_inf_expected_idx, non_inf_idx)))
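# Integration tests below exercise `generate()` end-to-end against tiny or full
# pretrained checkpoints rather than the randomly initialized models used above.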
@require_torch
class GenerationIntegrationTests(unittest.TestCase):
@slow
def test_diverse_beam_search(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood.
The celebrity couple announced the arrival of their son, Silas Randall Timberlake, in statements to People.
"Silas was the middle name of Timberlake's maternal grandfather Bill Bomar, who died in 2012, while Randall is the musician's own middle name, as well as his father's first," People reports.
The couple announced the pregnancy in January, with an Instagram post. It is the first baby for both."""
bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
outputs = bart_model.generate(
input_ids,
num_beams=4,
num_return_sequences=2,
num_beam_groups=4,
diversity_penalty=2.0,
remove_invalid_values=True,
)
generated_text = bart_tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(
generated_text,
[
"The couple announced the birth of their son, Silas Randall Timberlake, in a statement. Silas was the middle name of Timberlake's maternal grandfather Bill Bomar. Randall is the musician's own middle name, as well as his father's first. It is the first baby for both of them.",
"Justin Timberlake and Jessica Biel have a son. The baby is named Silas Randall Timberlake. It is the first child for both. The couple announced the pregnancy in January. The name Silas is the middle name of Timberlake's maternal grandfather. It's also his own middle name.",
],
)
def test_max_length_backward_compat_greedy(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
torch_device
)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
max_length = 20
input_ids = input_ids.expand(2, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids.shape[0],
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
with self.assertWarns(UserWarning):
bart_model.greedy_search(
input_ids,
max_length=max_length,
pad_token_id=bart_model.config.pad_token_id,
eos_token_id=bart_model.config.eos_token_id,
**model_kwargs,
)
def test_max_length_backward_compat_sample(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
torch_device
)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
max_length = 20
input_ids = input_ids.expand(2, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids.shape[0],
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
with torch.no_grad():
with self.assertWarns(UserWarning):
bart_model.sample(
input_ids,
max_length=max_length,
pad_token_id=bart_model.config.pad_token_id,
eos_token_id=bart_model.config.eos_token_id,
**model_kwargs,
)
def test_max_length_backward_compat_beam_search(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
torch_device
)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
batch_size = 1
max_length = 20
num_beams = 2
input_ids = input_ids.expand(2, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids.shape[0],
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=torch_device,
)
with self.assertWarns(UserWarning):
_ = bart_model.beam_search(
input_ids, num_beams=num_beams, max_length=max_length, beam_scorer=beam_scorer, **model_kwargs
)
def test_max_length_backward_compat_group_beam_search(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
torch_device
)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
batch_size = 1
max_length = 20
num_beams = 6
num_beam_groups = 3
num_return_sequences = num_beams * batch_size
input_ids = input_ids.expand(6, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids.shape[0],
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
diverse_beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=torch_device,
num_beam_hyps_to_keep=num_return_sequences,
num_beam_groups=num_beam_groups,
)
with self.assertWarns(UserWarning):
bart_model.group_beam_search(
input_ids, diverse_beam_scorer, num_beams=num_beams, max_length=max_length, **model_kwargs
)
def test_max_length_warning_if_different(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
torch_device
)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
batch_size = 1
max_length = 20
num_beams = 6
num_beam_groups = 3
num_return_sequences = num_beams * batch_size
stopping_criteria_max_length = 18
stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=stopping_criteria_max_length)])
# Greedy
input_ids = input_ids.expand(6, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids.shape[0],
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
with self.assertWarns(UserWarning):
bart_model.greedy_search(
input_ids,
max_length=max_length,
pad_token_id=bart_model.config.pad_token_id,
stopping_criteria=stopping_criteria,
eos_token_id=bart_model.config.eos_token_id,
**model_kwargs,
)
# Sample
with self.assertWarns(UserWarning):
with torch.no_grad():
bart_model.sample(
input_ids,
max_length=max_length,
stopping_criteria=stopping_criteria,
pad_token_id=bart_model.config.pad_token_id,
eos_token_id=bart_model.config.eos_token_id,
**model_kwargs,
)
# Beam
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=torch_device,
)
with self.assertWarns(UserWarning):
with torch.no_grad():
bart_model.beam_search(
input_ids,
num_beams=num_beams,
stopping_criteria=stopping_criteria,
max_length=max_length,
beam_scorer=beam_scorer,
**model_kwargs,
)
# Grouped beam search
diverse_beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=torch_device,
num_beam_hyps_to_keep=num_return_sequences,
num_beam_groups=num_beam_groups,
)
with self.assertWarns(UserWarning):
bart_model.group_beam_search(
input_ids,
diverse_beam_scorer,
stopping_criteria=stopping_criteria,
num_beams=num_beams,
max_length=max_length,
**model_kwargs,
)
def test_beam_search_warning_if_max_length_is_passed(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
torch_device
)
batch_size = 1
num_beams = 3
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
input_ids = input_ids.expand(num_beams, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
# pretend decoder_input_ids correspond to first encoder input id
decoder_input_ids = input_ids[:, :1]
stopping_criteria_max_length = 18
stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=stopping_criteria_max_length)])
with self.assertWarns(UserWarning):
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=torch_device,
max_length=10,
)
generated_ids = bart_model.beam_search(
decoder_input_ids,
num_beams=num_beams,
stopping_criteria=stopping_criteria,
beam_scorer=beam_scorer,
**model_kwargs,
)
beam_scorer_no_max_len = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=torch_device,
)
generated_ids_no_max_len = bart_model.beam_search(
decoder_input_ids,
num_beams=num_beams,
stopping_criteria=stopping_criteria,
beam_scorer=beam_scorer_no_max_len,
**model_kwargs,
)
# BeamSearchScorer max_length should not influence "real" max_length
self.assertEqual(generated_ids.tolist(), generated_ids_no_max_len.tolist())
def test_custom_stopping_criteria_overload_error(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
stopping_criteria = StoppingCriteriaList()
stopping_criteria.append(MaxLengthCriteria(max_length=42))
with self.assertRaises(ValueError):
bart_model.generate(input_ids, stopping_criteria=stopping_criteria)
with self.assertRaises(ValueError):
bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=32)
def test_custom_stopping_criteria(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
class DummyCriteria(StoppingCriteria):
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
return input_ids.shape[-1] >= 20
stopping_criteria = StoppingCriteriaList()
stopping_criteria.append(DummyCriteria())
self.assertEqual(
list(bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=22).shape),
[1, 20],
)
self.assertEqual(
list(bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=18).shape),
[1, 18],
)
def test_custom_logits_processor(self):
bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random", min_length=1).to(
torch_device
)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
logits_processor = LogitsProcessorList()
logits_processor.append(MinLengthLogitsProcessor(min_length=10, eos_token_id=0))
# defining `min_length` both via the config and via a `logits_processor` list should not be allowed
with self.assertRaises(ValueError):
bart_model.generate(input_ids, logits_processor=logits_processor)
bart_model.config.min_length = None
bart_model.generate(input_ids, logits_processor=logits_processor)
def test_max_new_tokens_encoder_decoder(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
torch_device
)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
self.assertEqual(list(input_ids.shape), [1, 29])
max_new_tokens = 3
bart_model.config.max_length = 20
bart_model.config.eos_token_id = None
# Encoder decoder call
outputs = bart_model.generate(input_ids, max_new_tokens=max_new_tokens)
# 1 BOS + 3 new tokens
self.assertEqual(list(outputs.shape), [1, 4])
# Decoder only call
outputs = bart_model.generate(decoder_input_ids=input_ids, max_new_tokens=max_new_tokens)
# 29 + 3 new tokens
self.assertEqual(list(outputs.shape), [1, 32])
# Encoder decoder call > 20
outputs = bart_model.generate(max_new_tokens=max_new_tokens + 20)
# 1 BOS + 20 + 3 new tokens
self.assertEqual(list(outputs.shape), [1, 24])
# max_new_tokens and max_length serve the same purpose and should not be used together.
with self.assertWarns(UserWarning):
bart_model.generate(decoder_input_ids=input_ids, max_new_tokens=10, max_length=20)
def test_max_new_tokens_decoder_only(self):
article = """Justin Timberlake."""
gpt2_tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
gpt2_model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
input_ids = gpt2_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
self.assertEqual(list(input_ids.shape), [1, 9])
max_new_tokens = 3
gpt2_model.config.max_length = 20
# call < 20
outputs = gpt2_model.generate(input_ids, max_new_tokens=max_new_tokens)
# 9 input_ids + 3 new tokens
self.assertEqual(list(outputs.shape), [1, 12])
# call > 20
outputs = gpt2_model.generate(max_new_tokens=max_new_tokens + 20)
# 1 BOS token + 23 new tokens
self.assertEqual(list(outputs.shape), [1, 24])
# max_new_tokens and max_length serve the same purpose and should not be used together.
with self.assertWarns(UserWarning):
gpt2_model.generate(decoder_input_ids=input_ids, max_new_tokens=10, max_length=20)
def test_encoder_decoder_generate_with_inputs_embeds(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart", max_length=5).to(
torch_device
)
model.config.eos_token_id = None
input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
inputs_embeds = model.get_input_embeddings()(input_ids)
output_sequences = model.generate(inputs_embeds=inputs_embeds)
# make sure the model generated correctly up to `max_length`
self.assertEqual(output_sequences.shape, (1, 5))
def test_encoder_decoder_generate_attention_mask(self):
articles = ["Timberlake", "Jessica Biel, welcome to parenthood among other things"]
tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
# need extreme generation values here to force this test
# to fail when `attention_mask` is not correctly treated in generate
model = BartForConditionalGeneration.from_pretrained(
"hf-internal-testing/tiny-random-bart", max_length=50, num_beams=5, num_return_sequences=5
).to(torch_device)
model.config.eos_token_id = None
input_ids = tokenizer(articles[0], return_tensors="pt").input_ids.to(torch_device)
input_ids_batched = tokenizer(articles, padding=True, return_tensors="pt").input_ids.to(torch_device)
output_sequences_batched = model.generate(
input_ids=input_ids_batched, return_dict_in_generate=True, output_scores=True
)
output_sequences = model.generate(input_ids=input_ids, return_dict_in_generate=True, output_scores=True)
batched_out = output_sequences_batched.sequences_scores
out = output_sequences.sequences_scores
diff = (batched_out[:5].sum() - out.sum()).abs()
self.assertTrue(diff < 1e-4)
def test_decoder_generate_with_inputs_embeds(self):
article = """I need input_ids to generate"""
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=5).to(torch_device)
input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
inputs_embeds = model.get_input_embeddings()(input_ids)
# cannot generate from `inputs_embeds` for decoder only
with self.assertRaises(ValueError):
model.generate(inputs_embeds=inputs_embeds)
def test_generate_input_ids_as_kwarg(self):
article = """I need input_ids to generate"""
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=15).to(torch_device)
input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
output_sequences_kwargs = model.generate(input_ids=input_ids).cpu()
output_sequences = model.generate(input_ids).cpu()
self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist())
self.assertEqual(output_sequences.shape, (1, 15))
def test_generate_non_nlp_input_ids_as_kwarg(self):
model = ImageGPTForCausalImageModeling.from_pretrained(
"hf-internal-testing/tiny-random-imagegpt", max_length=10
).to(torch_device)
input_ids = ids_tensor((3, 5), vocab_size=10)
output_sequences_kwargs = model.generate(input_ids=input_ids).cpu()
output_sequences = model.generate(input_ids).cpu()
self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist())
self.assertEqual(output_sequences.shape, (3, 10))
def test_generate_input_ids_as_encoder_kwarg(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart", max_length=5).to(
torch_device
)
model.config.eos_token_id = None
input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
output_sequences_kwargs = model.generate(input_ids=input_ids).cpu()
output_sequences = model.generate(input_ids).cpu()
self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist())
self.assertEqual(output_sequences.shape, (1, 5))
def test_generate_inputs_and_encoder_kwargs(self):
article = """I need input_ids to generate"""
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=10).to(torch_device)
input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
with self.assertRaises(ValueError):
model.generate(input_ids, input_ids=input_ids)
def test_generate_too_many_encoder_kwargs(self):
article = """I need input_ids to generate"""
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=10).to(torch_device)
input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
with self.assertRaises(ValueError):
model.generate(input_ids=input_ids, inputs_embeds=input_ids)
def test_generate_input_values_as_encoder_kwarg(self):
input_values = floats_tensor((2, 250))
model = SpeechEncoderDecoderModel.from_pretrained("hf-internal-testing/tiny-random-speech-encoder-decoder")
model = model.to(torch_device)
output_sequences_kwargs = model.generate(input_values=input_values, max_length=5).cpu()
output_sequences = model.generate(input_values, max_length=5).cpu()
self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist())
self.assertEqual(output_sequences.shape, (2, 5))
def test_generate_input_features_as_encoder_kwarg(self):
input_features = floats_tensor((3, 20, 24))
model = Speech2TextForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-speech_to_text")
model = model.to(torch_device)
output_sequences_kwargs = model.generate(input_features=input_features, max_length=5).cpu()
output_sequences = model.generate(input_features, max_length=5).cpu()
self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist())
self.assertEqual(output_sequences.shape, (3, 5))
def test_generate_pixel_values_as_encoder_kwarg(self):
pixel_values = floats_tensor((2, 3, 30, 30))
model = VisionEncoderDecoderModel.from_pretrained("hf-internal-testing/tiny-random-vision-encoder-decoder")
model = model.to(torch_device)
output_sequences_kwargs = model.generate(pixel_values=pixel_values, max_length=5).cpu()
output_sequences = model.generate(pixel_values, max_length=5).cpu()
self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist())
self.assertEqual(output_sequences.shape, (2, 5))
def test_generate_encoder_outputs_attention_mask(self):
input_values = floats_tensor((2, 250)).to(torch_device)
attention_mask = torch.ones_like(input_values)
model = SpeechEncoderDecoderModel.from_pretrained("hf-internal-testing/tiny-random-speech-encoder-decoder")
model = model.to(torch_device)
encoder = model.get_encoder()
encoder_outputs = encoder(input_values)
output_sequences_no_mask = model.generate(encoder_outputs=encoder_outputs).cpu()
output_sequences_with_mask = model.generate(encoder_outputs=encoder_outputs, attention_mask=attention_mask)
output_sequences_with_mask = output_sequences_with_mask.cpu()
self.assertListEqual(output_sequences_no_mask.tolist(), output_sequences_with_mask.tolist())
def test_transition_scores_beam_search_encoder_decoder(self):
articles = [
"Justin Timberlake and Jessica Biel, welcome to parenthood.",
"Michael Phelps is arguably the most decorated Olympian of all time.",
]
tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
model = BartForConditionalGeneration.from_pretrained(
"hf-internal-testing/tiny-random-bart",
max_length=10,
num_beams=4,
num_return_sequences=2,
eos_token_id=None,
return_dict_in_generate=True,
output_scores=True,
length_penalty=0.0,
)
model = model.to(torch_device)
input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device)
outputs = model.generate(input_ids=input_ids)
transition_scores = model.compute_transition_beam_scores(
outputs.sequences, outputs.scores, outputs.beam_indices
)
transition_scores_sum = transition_scores.sum(-1)
self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3))
def test_transition_scores_beam_search_encoder_decoder_with_eos(self):
articles = [
"Justin Timberlake and Jessica Biel, welcome to parenthood.",
"Michael Phelps is arguably the most decorated Olympian of all time.",
]
tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
model = BartForConditionalGeneration.from_pretrained(
"hf-internal-testing/tiny-random-bart",
max_length=10,
num_beams=4,
num_return_sequences=2,
return_dict_in_generate=True,
output_scores=True,
length_penalty=0.0,
)
model = model.to(torch_device)
input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device)
outputs = model.generate(input_ids=input_ids)
transition_scores = model.compute_transition_beam_scores(
outputs.sequences, outputs.scores, outputs.beam_indices
)
transition_scores_sum = transition_scores.sum(-1)
self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3))
def test_transition_scores_beam_search_decoder_only(self):
articles = [
"Justin Timberlake",
"Michael Phelps",
]
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
tokenizer.pad_token = tokenizer.eos_token
model = GPT2LMHeadModel.from_pretrained(
"hf-internal-testing/tiny-random-gpt2",
max_length=10,
num_beams=4,
num_return_sequences=2,
pad_token_id=tokenizer.eos_token_id,
eos_token_id=None,
return_dict_in_generate=True,
output_scores=True,
length_penalty=0.0,
)
model = model.to(torch_device)
input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device)
outputs = model.generate(input_ids=input_ids)
transition_scores = model.compute_transition_beam_scores(
outputs.sequences, outputs.scores, outputs.beam_indices
)
transition_scores_sum = transition_scores.sum(-1)
self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3))
def test_transition_scores_beam_sample_encoder_decoder(self):
articles = [
"Justin Timberlake and Jessica Biel, welcome to parenthood.",
"Michael Phelps is arguably the most decorated Olympian of all time.",
]
tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
model = BartForConditionalGeneration.from_pretrained(
"hf-internal-testing/tiny-random-bart",
do_sample=True,
max_length=10,
num_beams=4,
num_return_sequences=2,
eos_token_id=None,
return_dict_in_generate=True,
output_scores=True,
length_penalty=0.0,
)
model = model.to(torch_device)
input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device)
outputs = model.generate(input_ids=input_ids)
transition_scores = model.compute_transition_beam_scores(
outputs.sequences, outputs.scores, outputs.beam_indices
)
transition_scores_sum = transition_scores.sum(-1)
self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3))
def test_transition_scores_group_beam_search_encoder_decoder(self):
articles = [
"Justin Timberlake and Jessica Biel, welcome to parenthood.",
"Michael Phelps is arguably the most decorated Olympian of all time.",
]
tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
model = BartForConditionalGeneration.from_pretrained(
"hf-internal-testing/tiny-random-bart",
max_length=10,
num_beams=2,
num_beam_groups=2,
num_return_sequences=2,
eos_token_id=None,
return_dict_in_generate=True,
output_scores=True,
length_penalty=0.0,
)
model = model.to(torch_device)
input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device)
outputs = model.generate(input_ids=input_ids)
transition_scores = model.compute_transition_beam_scores(
outputs.sequences, outputs.scores, outputs.beam_indices
)
transition_scores_sum = transition_scores.sum(-1)
self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3))
@slow
def test_beam_search_example_integration(self):
# exactly the example provided in the beam search docstring, which previously
# failed when copied directly from there. Refer to PR #15555
tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
encoder_input_str = "translate English to German: How old are you?"
encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
# let's run beam search using 3 beams
num_beams = 3
# define decoder start token ids
input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long)
input_ids = input_ids * model.config.decoder_start_token_id
# add encoder_outputs to model keyword arguments
model_kwargs = {
"encoder_outputs": model.get_encoder()(
encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True
)
}
# instantiate beam scorer
beam_scorer = BeamSearchScorer(
batch_size=1,
num_beams=num_beams,
device=model.device,
)
# instantiate logits processors
logits_processor = LogitsProcessorList(
[
MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id),
]
)
outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs)
outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(outputs, ["Wie alt bist du?"])
@slow
def test_constrained_beam_search(self):
model = GPT2LMHeadModel.from_pretrained("../gpt2").to(torch_device)
tokenizer = GPT2Tokenizer.from_pretrained("../gpt2")
force_tokens = tokenizer("scared", add_prefix_space=True, add_special_tokens=False).input_ids
force_tokens_2 = tokenizer("big weapons", add_prefix_space=True, add_special_tokens=False).input_ids
constraints = [
PhrasalConstraint(force_tokens),
PhrasalConstraint(force_tokens_2),
]
starting_text = ["The soldiers were not prepared and"]
input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device)
outputs = model.generate(
input_ids,
constraints=constraints,
num_beams=10,
num_return_sequences=1,
no_repeat_ngram_size=1,
max_length=30,
remove_invalid_values=True,
)
generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(
generated_text,
[
"The soldiers were not prepared and didn't know how big the big weapons would be, so they scared them off. They had no idea what to do",
],
)
@slow
def test_constrained_beam_search_mixed(self):
model = GPT2LMHeadModel.from_pretrained("../gpt2").to(torch_device)
tokenizer = GPT2Tokenizer.from_pretrained("../gpt2")
force_phrase = tokenizer("scared", add_prefix_space=True, add_special_tokens=False).input_ids
flexible_phrases = tokenizer(
["scream", "screams", "screaming", "screamed"], add_prefix_space=True, add_special_tokens=False
).input_ids
constraints = [
PhrasalConstraint(force_phrase),
DisjunctiveConstraint(flexible_phrases),
]
starting_text = ["The soldiers", "The child"]
input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device)
outputs = model.generate(
input_ids,
constraints=constraints,
num_beams=10,
num_return_sequences=1,
no_repeat_ngram_size=1,
# max_length=20,
remove_invalid_values=True,
)
generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(
generated_text,
[
"The soldiers, who were all scared and screaming at each other as they tried to get out of the",
"The child was taken to a local hospital where she screamed and scared for her life, police said.",
],
)
@slow
def test_constrained_beam_search_mixed_mixin(self):
model = GPT2LMHeadModel.from_pretrained("../gpt2").to(torch_device)
tokenizer = GPT2Tokenizer.from_pretrained("../gpt2")
force_word = "scared"
force_flexible = ["scream", "screams", "screaming", "screamed"]
force_words_ids = [
tokenizer([force_word], add_prefix_space=True, add_special_tokens=False).input_ids,
tokenizer(force_flexible, add_prefix_space=True, add_special_tokens=False).input_ids,
]
starting_text = ["The soldiers", "The child"]
input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device)
outputs = model.generate(
input_ids,
force_words_ids=force_words_ids,
num_beams=10,
num_return_sequences=1,
no_repeat_ngram_size=1,
remove_invalid_values=True,
)
generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(
generated_text,
[
"The soldiers, who were all scared and screaming at each other as they tried to get out of the",
"The child was taken to a local hospital where she screamed and scared for her life, police said.",
],
)
@slow
def test_constrained_beam_search_example_translation_mixin(self):
tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
encoder_input_str = "translate English to German: How old are you?"
force_words = ["sind"]
input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
force_words_ids = tokenizer(force_words, add_special_tokens=False).input_ids
outputs = model.generate(
input_ids,
force_words_ids=force_words_ids,
num_beams=10,
num_return_sequences=1,
no_repeat_ngram_size=1,
remove_invalid_values=True,
)
outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(outputs, ["Wie alter sind Sie?"])
@slow
def test_constrained_beam_search_example_integration(self):
tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
encoder_input_str = "translate English to German: How old are you?"
encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
        # let's run beam search using 5 beams
num_beams = 5
# define decoder start token ids
input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long)
input_ids = input_ids * model.config.decoder_start_token_id
# add encoder_outputs to model keyword arguments
model_kwargs = {
"encoder_outputs": model.get_encoder()(
encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True
)
}
constraint_str = "sind"
constraint_token_ids = tokenizer.encode(constraint_str)[:-1] # remove eos token
constraints = [PhrasalConstraint(token_ids=constraint_token_ids)]
# instantiate beam scorer
beam_scorer = ConstrainedBeamSearchScorer(
batch_size=1, num_beams=num_beams, device=model.device, constraints=constraints
)
# instantiate logits processors
logits_processor = LogitsProcessorList(
[
MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id),
]
)
outputs = model.constrained_beam_search(
input_ids, beam_scorer, constraints=constraints, logits_processor=logits_processor, **model_kwargs
)
outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(outputs, ["Wie alter sind Sie?"])
def test_constrained_beam_search_mixin_type_checks(self):
tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
encoder_input_str = "translate English to German: How old are you?"
input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
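        # Descriptive note (added): tokenizer(..., return_tensors="pt") returns tensors rather than
        # the nested lists of token ids that `force_words_ids` expects, so each call below should
        # fail `generate`'s input validation with a ValueError.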
with self.assertRaises(ValueError):
force_words = ["sind"]
force_words_ids = tokenizer(force_words, return_tensors="pt").input_ids
model.generate(
input_ids,
force_words_ids=force_words_ids,
num_beams=10,
num_return_sequences=1,
no_repeat_ngram_size=1,
remove_invalid_values=True,
)
with self.assertRaises(ValueError):
force_words = ["sind"]
force_words_ids = [tokenizer(force_words, return_tensors="pt").input_ids]
model.generate(
input_ids,
force_words_ids=force_words_ids,
num_beams=10,
num_return_sequences=1,
no_repeat_ngram_size=1,
remove_invalid_values=True,
)
with self.assertRaises(ValueError):
model.generate(input_ids, force_words_ids=[])
with self.assertRaises(ValueError):
model.generate(input_ids, force_words_ids=[[-1]])
with self.assertRaises(ValueError):
model.generate(input_ids, force_words_ids=[[[-1]]])
| [
"torch.zeros",
"torch.min",
"torch.eq",
"torch.max",
"torch.no_grad",
"torch.ones",
"torch.manual_seed",
"torch.randint",
"torch.tensor",
"torch.ones_like",
"torch.zeros_like",
"torch.allclose"
] | 1.0 | dctelus/transformers | b18dfd95e1f60ae65a959a7b255fc06522170d1b |
1.0 | #!/usr/bin/env python
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
""" Pre-training a 🤗 Transformers model for simple masked image modeling (SimMIM).
Any model supported by the AutoModelForMaskedImageModeling API can be used.
"""
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.18.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
"""
    Arguments pertaining to what data we are going to input into our model for training and eval.
Using `HfArgumentParser` we can turn this class into argparse arguments to be able to
specify them on the command line.
"""
dataset_name: Optional[str] = field(
default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
image_column_name: Optional[str] = field(
default=None,
metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
)
train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
train_val_split: Optional[float] = field(
default=0.15, metadata={"help": "Percent to split off of train for validation."}
)
mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
mask_ratio: float = field(
default=0.6,
metadata={"help": "Percentage of patches to mask."},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
def __post_init__(self):
data_files = dict()
if self.train_dir is not None:
data_files["train"] = self.train_dir
if self.validation_dir is not None:
data_files["val"] = self.validation_dir
self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/feature extractor we are going to pre-train.
"""
model_name_or_path: str = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
"checkpoint identifier on the hub. "
"Don't set if you want to train a model from scratch."
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_name_or_path: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
config_overrides: Optional[str] = field(
default=None,
metadata={
"help": "Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
},
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
feature_extractor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
image_size: Optional[int] = field(
default=None,
metadata={
"help": "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
},
)
patch_size: Optional[int] = field(
default=None,
metadata={
"help": "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
},
)
encoder_stride: Optional[int] = field(
default=None,
metadata={"help": "Stride to use for the encoder."},
)
class MaskGenerator:
"""
A class to generate boolean masks for the pretraining task.
    A mask is a 1D tensor of shape ((input_size // model_patch_size) ** 2,) where each value is
    either 0 or 1, and 1 indicates "masked".
"""
def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
self.input_size = input_size
self.mask_patch_size = mask_patch_size
self.model_patch_size = model_patch_size
self.mask_ratio = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError("Input size must be divisible by mask patch size")
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError("Mask patch size must be divisible by model patch size")
self.rand_size = self.input_size // self.mask_patch_size
self.scale = self.mask_patch_size // self.model_patch_size
self.token_count = self.rand_size**2
self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))
def __call__(self):
mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
mask = np.zeros(self.token_count, dtype=int)
mask[mask_idx] = 1
mask = mask.reshape((self.rand_size, self.rand_size))
mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)
return torch.tensor(mask.flatten())
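# A minimal, hedged sanity-check sketch (added for illustration, never called by this script).
# With the defaults above, the 192/32 = 6x6 grid of mask patches is upsampled by a factor of
# 32/4 = 8 to the 48x48 grid of model patches, i.e. a flat mask of length (192 // 4) ** 2.
def _mask_generator_example():
    gen = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
    mask = gen()
    assert mask.shape == ((192 // 4) ** 2,)  # 2304 entries, roughly 60% of them set to 1
    return mask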
def collate_fn(examples):
pixel_values = torch.stack([example["pixel_values"] for example in examples])
mask = torch.stack([example["mask"] for example in examples])
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Initialize our dataset.
ds = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
data_files=data_args.data_files,
cache_dir=model_args.cache_dir,
)
# If we don't have a validation split, split off a percentage of train as validation.
data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
split = ds["train"].train_test_split(data_args.train_val_split)
ds["train"] = split["train"]
ds["validation"] = split["test"]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}")
config.update_from_string(model_args.config_overrides)
logger.info(f"New config: {config}")
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(config, "decoder_type"):
config.decoder_type = "simmim"
# adapt config
model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
model_args.encoder_stride = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
"image_size": model_args.image_size,
"patch_size": model_args.patch_size,
"encoder_stride": model_args.encoder_stride,
}
)
# create feature extractor
if model_args.feature_extractor_name:
feature_extractor = AutoFeatureExtractor.from_pretrained(model_args.feature_extractor_name, **config_kwargs)
elif model_args.model_name_or_path:
feature_extractor = AutoFeatureExtractor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
FEATURE_EXTRACTOR_TYPES = {
conf.model_type: feature_extractor_class
for conf, feature_extractor_class in FEATURE_EXTRACTOR_MAPPING.items()
}
feature_extractor = FEATURE_EXTRACTOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
model = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForMaskedImageModeling.from_config(config)
if training_args.do_train:
column_names = ds["train"].column_names
else:
column_names = ds["validation"].column_names
if data_args.image_column_name is not None:
image_column_name = data_args.image_column_name
elif "image" in column_names:
image_column_name = "image"
elif "img" in column_names:
image_column_name = "img"
else:
image_column_name = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
transforms = Compose(
[
Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std),
]
)
# create mask generator
mask_generator = MaskGenerator(
input_size=model_args.image_size,
mask_patch_size=data_args.mask_patch_size,
model_patch_size=model_args.patch_size,
mask_ratio=data_args.mask_ratio,
)
def preprocess_images(examples):
"""Preprocess a batch of images by applying transforms + creating a corresponding mask, indicating
which patches to mask."""
examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset")
if data_args.max_train_samples is not None:
ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
# Set the training transforms
ds["train"].set_transform(preprocess_images)
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset")
if data_args.max_eval_samples is not None:
ds["validation"] = (
ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
)
# Set the validation transforms
ds["validation"].set_transform(preprocess_images)
# Initialize our trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=ds["train"] if training_args.do_train else None,
eval_dataset=ds["validation"] if training_args.do_eval else None,
tokenizer=feature_extractor,
data_collator=collate_fn,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics("train", train_result.metrics)
trainer.save_metrics("train", train_result.metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
metrics = trainer.evaluate()
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
kwargs = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "masked-image-modeling",
"dataset": data_args.dataset_name,
"tags": ["masked-image-modeling"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| [
"torch.stack"
] | 1.0 | dctelus/transformers | 6786cbc4b14ebff0ac59c768cadd109391db9a08 |
1.6 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
import torch
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.network_architecture.generic_UNet_Multi_Task_DP_in_one import Generic_UNet_Multi_Task_DP_in_one
from nnunet.network_architecture.DDense_UNet_Multi_Task_DP import D_DenseUNet_Multi_Task_DP
from nnunet.training.data_augmentation.data_augmentation_moreDA import get_moreDA_augmentation
from nnunet.training.network_training.nnUNetTrainerV2_3_branch import nnUNetTrainerV2_3_branch
# from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.utilities.to_torch import maybe_to_torch, to_cuda, to_cuda_1, to_cuda_2, to_cuda_3
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.network_architecture.neural_network import SegmentationNetwork
from nnunet.training.dataloading.dataset_loading import unpack_dataset
# from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
from torch.cuda.amp import autocast
from torch.nn.parallel.data_parallel import DataParallel
from torch.nn.utils import clip_grad_norm_
from nnunet.training.network_training.nnUNetTrainer_3_branch import nnUNetTrainer_3_branch
from nnunet.network_architecture.DDense_UNet_Multi_Task import get_default_network_config
# 3-branch with 3D-UNet
# nnUNetTrainerV2_3_branch
class HasTrainer_DP_DDense_3_branch(nnUNetTrainerV2_3_branch):
def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
unpack_data=True, deterministic=True, num_gpus=1, distribute_batch_size=False, fp16=False):
super(HasTrainer_DP_DDense_3_branch, self).__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage,
unpack_data, deterministic, fp16)
self.init_args = (plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
deterministic, num_gpus, distribute_batch_size, fp16)
self.num_gpus = num_gpus
self.distribute_batch_size = distribute_batch_size
self.dice_smooth = 1e-5
self.dice_do_BG = False
self.loss = None
self.loss_weights = None
def setup_DA_params(self):
"""
        net_num_pool_op_kernel_sizes is different in resunet, so the deep supervision scales
        are recomputed here from the cumulative pooling kernel sizes
"""
super().setup_DA_params()
self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod(
np.vstack(self.net_num_pool_op_kernel_sizes[1:]), axis=0))[:-1]
def run_online_evaluation(self, tp_hard, fp_hard, fn_hard):
tp_hard = tp_hard.detach().cpu().numpy().mean(0)
fp_hard = fp_hard.detach().cpu().numpy().mean(0)
fn_hard = fn_hard.detach().cpu().numpy().mean(0)
self.online_eval_foreground_dc.append(list((2 * tp_hard) / (2 * tp_hard + fp_hard + fn_hard + 1e-8)))
self.online_eval_tp.append(list(tp_hard))
self.online_eval_fp.append(list(fp_hard))
self.online_eval_fn.append(list(fn_hard))
def run_online_evaluation_2(self, tp_hard, fp_hard, fn_hard):
tp_hard = tp_hard.detach().cpu().numpy().mean(0)
fp_hard = fp_hard.detach().cpu().numpy().mean(0)
fn_hard = fn_hard.detach().cpu().numpy().mean(0)
self.online_eval_foreground_dc_2.append(list((2 * tp_hard) / (2 * tp_hard + fp_hard + fn_hard + 1e-8)))
self.online_eval_tp_2.append(list(tp_hard))
self.online_eval_fp_2.append(list(fp_hard))
self.online_eval_fn_2.append(list(fn_hard))
def run_online_evaluation_3(self, tp_hard, fp_hard, fn_hard):
tp_hard = tp_hard.detach().cpu().numpy().mean(0)
fp_hard = fp_hard.detach().cpu().numpy().mean(0)
fn_hard = fn_hard.detach().cpu().numpy().mean(0)
self.online_eval_foreground_dc_3.append(list((2 * tp_hard) / (2 * tp_hard + fp_hard + fn_hard + 1e-8)))
self.online_eval_tp_3.append(list(tp_hard))
self.online_eval_fp_3.append(list(fp_hard))
self.online_eval_fn_3.append(list(fn_hard))
def process_plans(self, plans):
super(HasTrainer_DP_DDense_3_branch, self).process_plans(plans)
if not self.distribute_batch_size:
self.batch_size = self.num_gpus * self.plans['plans_per_stage'][self.stage]['batch_size']
# self.batch_size = self.plans['plans_per_stage'][self.stage]['batch_size']
print("batch_size : ",self.batch_size)
print("num_gpus: ",self.num_gpus)
print("self.plans['plans_per_stage'][self.stage]['batch_size']: ", self.plans['plans_per_stage'][self.stage]['batch_size'])
print("")
else:
if self.batch_size < self.num_gpus:
print("WARNING: self.batch_size < self.num_gpus. Will not be able to use the GPUs well")
elif self.batch_size % self.num_gpus != 0:
print("WARNING: self.batch_size % self.num_gpus != 0. Will not be able to use the GPUs well")
# def initialize(self, training=True, force_load_plans=False):
# """
# - replaced get_default_augmentation with get_moreDA_augmentation
# - only run this code once
# - loss function wrapper for deep supervision
#
# :param training:
# :param force_load_plans:
# :return:
# """
# if not self.was_initialized:
# maybe_mkdir_p(self.output_folder)
#
# if force_load_plans or (self.plans is None):
# self.load_plans_file()
#
# self.process_plans(self.plans)
#
# self.setup_DA_params()
#
# ################# Here configure the loss for deep supervision ############
# net_numpool = len(self.net_num_pool_op_kernel_sizes)
# weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
# mask = np.array([True if i < net_numpool - 1 else False for i in range(net_numpool)])
# weights[~mask] = 0
# weights = weights / weights.sum()
# self.loss_weights = weights
# ################# END ###################
#
# self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
# "_stage%d" % self.stage)
# print("")
# print("folder_with_preprocessed_data : ", self.folder_with_preprocessed_data)
# print("plans : ", self.plans)
# print("labels : ", self.plans['all_classes'])
# print("")
#
# if training:
# self.dl_tr, self.dl_val = self.get_basic_generators()
# if self.unpack_data:
# print("unpacking dataset")
# unpack_dataset(self.folder_with_preprocessed_data)
# print("done")
# else:
# print(
# "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
# "will wait all winter for your model to finish!")
#
# self.tr_gen, self.val_gen = get_moreDA_augmentation(self.dl_tr, self.dl_val,
# self.data_aug_params[
# 'patch_size_for_spatialtransform'],
# self.data_aug_params,
# deep_supervision_scales=self.deep_supervision_scales,
# pin_memory=self.pin_memory)
#
# print("tr_gen : ", self.tr_gen)
# print("val_gen : ", self.val_gen)
#
# # import pdb
# # pdb.set_trace()
#
# self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
# also_print_to_console=False)
# self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
# also_print_to_console=False)
# else:
# pass
#
# self.initialize_network()
# self.initialize_optimizer_and_scheduler()
#
# assert isinstance(self.network, (SegmentationNetwork, DataParallel))
# else:
# self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
# self.was_initialized = True
def initialize_network(self):
if self.threeD:
cfg = get_default_network_config(3, None, norm_type="in")
else:
cfg = get_default_network_config(1, None, norm_type="in")
stage_plans = self.plans['plans_per_stage'][self.stage]
conv_kernel_sizes = stage_plans['conv_kernel_sizes']
blocks_per_stage_encoder = stage_plans['num_blocks_encoder']
blocks_per_stage_decoder = stage_plans['num_blocks_decoder']
pool_op_kernel_sizes = stage_plans['pool_op_kernel_sizes']
self.network = D_DenseUNet_Multi_Task_DP(self.num_input_channels, self.base_num_features, blocks_per_stage_encoder, 2,
pool_op_kernel_sizes, conv_kernel_sizes, cfg, self.num_classes,
blocks_per_stage_decoder, True, False, 320, InitWeights_He(1e-2))
if torch.cuda.is_available():
self.network.cuda()
self.network.inference_apply_nonlin = softmax_helper
# def initialize_optimizer_and_scheduler(self):
# assert self.network is not None, "self.initialize_network must be called first"
# self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
# momentum=0.99, nesterov=True)
# self.lr_scheduler = None
def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,
save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
force_separate_z: bool = None, interpolation_order: int = 3, interpolation_order_z=0,
segmentation_export_kwargs: dict = None, run_postprocessing_on_folds: bool = True):
ds = self.network.decoder.deep_supervision
self.network.decoder.deep_supervision = False
ret = nnUNetTrainer_3_branch.validate(self, do_mirroring=do_mirroring, use_sliding_window=use_sliding_window,
step_size=step_size, save_softmax=save_softmax, use_gaussian=use_gaussian,
overwrite=overwrite, validation_folder_name=validation_folder_name,
debug=debug, all_in_gpu=all_in_gpu,
segmentation_export_kwargs=segmentation_export_kwargs,
run_postprocessing_on_folds=run_postprocessing_on_folds)
self.network.decoder.deep_supervision = ds
return ret
# def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
# mirror_axes: Tuple[int] = None,
# use_sliding_window: bool = True, step_size: float = 0.5,
# use_gaussian: bool = True,
# pad_border_mode: str = 'constant',
# pad_kwargs: dict = None, all_in_gpu: bool = False,
# verbose: bool = True, mixed_precision=True) -> Tuple[
# np.ndarray, np.ndarray]:
# ds = self.network.decoder.deep_supervision
# self.network.decoder.deep_supervision = False
# ret = nnUNetTrainer_3_branch.predict_preprocessed_data_return_seg_and_softmax(self, data, do_mirroring=do_mirroring,
# mirror_axes=mirror_axes,
# use_sliding_window=use_sliding_window,
# step_size=step_size,
# use_gaussian=use_gaussian,
# pad_border_mode=pad_border_mode,
# pad_kwargs=pad_kwargs,
# all_in_gpu=all_in_gpu,
# verbose=verbose,
# mixed_precision=mixed_precision)
# self.network.decoder.deep_supervision = ds
# return ret
#
#
#
#
# def predict_preprocessed_data_return_attention(self, data: np.ndarray, do_mirroring: bool = True,
# mirror_axes: Tuple[int] = None,
# use_sliding_window: bool = True, step_size: float = 0.5,
# use_gaussian: bool = True,
# pad_border_mode: str = 'constant',
# pad_kwargs: dict = None, all_in_gpu: bool = False,
# verbose: bool = True, mixed_precision=True) -> Tuple[
# np.ndarray, np.ndarray]:
# ds = self.network.decoder.deep_supervision
# self.network.decoder.deep_supervision = False
# ret = nnUNetTrainer_3_branch.predict_preprocessed_data_return_attention(self, data, do_mirroring=do_mirroring,
# mirror_axes=mirror_axes,
# use_sliding_window=use_sliding_window,
# step_size=step_size,
# use_gaussian=use_gaussian,
# pad_border_mode=pad_border_mode,
# pad_kwargs=pad_kwargs,
# all_in_gpu=all_in_gpu,
# verbose=verbose,
# mixed_precision=mixed_precision)
# self.network.decoder.deep_supervision = ds
# return ret
# def run_training(self):
# self.maybe_update_lr(self.epoch)
# # self.maybe_update_lr()
#
# #self.on_epoch_end()
#
#
# # amp must be initialized before DP
#
# ds = self.network.do_ds
# self.network.do_ds = True
# self.network = DataParallel(self.network, tuple(range(self.num_gpus)), )
# # ret1, ret2, ret3 = nnUNetTrainer.run_training(self)
# ret = nnUNetTrainer_3_branch.run_training(self)
# self.network = self.network.module
# self.network.do_ds = ds
#
# # ret = ret1 + ret2 + ret3
# return ret
def run_training(self):
        self.maybe_update_lr(self.epoch)  # if we don't overwrite epoch then self.epoch+1 is used, which is not what we
        # want at the start of the training
ds = self.network.decoder1.deep_supervision
self.network.decoder1.deep_supervision = True
        # added this (for multi-GPU)
# a = self.network
self.network = DataParallel(self.network, tuple(range(self.num_gpus)), )
ret = nnUNetTrainer_3_branch.run_training(self)
        ## if training has ended
# print(self.epoch)
# print(self.max_num_epochs)
# if self.epoch == self.max_num_epochs - 1:
# self.network = a
self.network.decoder.deep_supervision = ds
return ret
    # the training/validation iterations run here
def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
data_dict = next(data_generator)
data = data_dict['data']
target = data_dict['target']
data = maybe_to_torch(data)
target = maybe_to_torch(target)
y1 = copy.deepcopy(target)
y2 = copy.deepcopy(target)
y3 = copy.deepcopy(target)
# print("y1[0].max() : ", y1[0].max())
# print("y2[0].max() : ", y2[0].max())
# print("y3[0].max() : ", y3[0].max())
        # keep only label 1 in y1, label 2 in y2 and label 3 in y3; set everything else to 0
for i in range(3):
y1[i] = torch.where(y1[i] == 1, y1[i], torch.tensor(0).float())
y2[i] = torch.where(y2[i] == 2, y2[i], torch.tensor(0).float())
y3[i] = torch.where(y3[i] == 3, y3[i], torch.tensor(0).float())
if torch.cuda.is_available():
data = to_cuda(data)
target = to_cuda(target)
y1 = to_cuda(y1)
y2 = to_cuda(y2)
y3 = to_cuda(y3)
        # zero the gradients before running the backward pass
        self.optimizer.zero_grad()
        # print("optimizer : ", self.optimizer)
# print("fp16 : ", self.fp16)
# self.fp16 = True
if self.fp16:
with autocast():
                # return_hard_tp_fp_fn = run_online_evaluation = False is passed in here
ret1, ret2, ret3 = self.network(data, y1,y2,y3, return_hard_tp_fp_fn=run_online_evaluation)
# print("run_online_evaluation : ",run_online_evaluation)
                # # this branch is taken during validation
# if run_online_evaluation:
# ces1, tps1, fps1, fns1, tp_hard1, fp_hard1, fn_hard1 = ret1
# self.run_online_evaluation(tp_hard1, fp_hard1, fn_hard1)
#
# ces2, tps2, fps2, fns2, tp_hard2, fp_hard2, fn_hard2 = ret2
# self.run_online_evaluation(tp_hard2, fp_hard2, fn_hard2)
#
# ces3, tps3, fps3, fns3, tp_hard3, fp_hard3, fn_hard3 = ret3
# self.run_online_evaluation(tp_hard3, fp_hard3, fn_hard3)
                # this branch is taken during validation
if run_online_evaluation:
ces1, tps1, fps1, fns1, tp_hard1, fp_hard1, fn_hard1 = ret1
self.run_online_evaluation(tp_hard1, fp_hard1, fn_hard1)
# print("ces1 : ", ces1)
# print("tps1 : ", tps1)
# print("fps1 : ", fps1)
# print("tp_hard1 : ", tp_hard1)
# print("fp_hard1 : ", fp_hard1)
# print("fn_hard1 : ", fn_hard1)
ces2, tps2, fps2, fns2, tp_hard2, fp_hard2, fn_hard2 = ret2
self.run_online_evaluation_2(tp_hard2, fp_hard2, fn_hard2)
ces3, tps3, fps3, fns3, tp_hard3, fp_hard3, fn_hard3 = ret3
self.run_online_evaluation_3(tp_hard3, fp_hard3, fn_hard3)
# print("run_online_evaluation")
                # during training it goes into the branch below
else:
                    # CE_loss, TP, FP, FN are received here (a tuple with 4 entries)
ces1, tps1, fps1, fns1 = ret1
ces2, tps2, fps2, fns2 = ret2
ces3, tps3, fps3, fns3 = ret3
del data, target
                # the compute_loss below is the part we need to think about most carefully
l1 = self.compute_loss(ces1, tps1, fps1, fns1)
l2 = self.compute_loss(ces2, tps2, fps2, fns2)
l3 = self.compute_loss(ces3, tps3, fps3, fns3)
u = 0.7
k = 0.15
b = 0.15
l = u*l1 + k*l2 + b*l3
if do_backprop:
self.amp_grad_scaler.scale(l).backward()
self.amp_grad_scaler.unscale_(self.optimizer)
torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
self.amp_grad_scaler.step(self.optimizer)
self.amp_grad_scaler.update()
        # this part can be considered unused; only the fp32 case would go in here
# else:
# ret = self.network(data, target, return_hard_tp_fp_fn=run_online_evaluation)
# if run_online_evaluation:
# ces, tps, fps, fns, tp_hard, fp_hard, fn_hard = ret
# self.run_online_evaluation(tp_hard, fp_hard, fn_hard)
# else:
# ces, tps, fps, fns = ret
# del data, target
# l = self.compute_loss(ces, tps, fps, fns)
#
# if do_backprop:
# l.backward()
# torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
# self.optimizer.step()
return l.detach().cpu().numpy()
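    @staticmethod
    def _split_labels_sketch():
        # A minimal, hedged sketch (added for illustration, never called) of the label splitting
        # performed in run_iteration: each branch keeps one foreground class of {1, 2, 3} and
        # zeroes out everything else.
        toy = torch.tensor([0., 1., 2., 3.])
        y1 = torch.where(toy == 1, toy, torch.tensor(0).float())  # tensor([0., 1., 0., 0.])
        y2 = torch.where(toy == 2, toy, torch.tensor(0).float())  # tensor([0., 0., 2., 0.])
        y3 = torch.where(toy == 3, toy, torch.tensor(0).float())  # tensor([0., 0., 0., 3.])
        return y1, y2, y3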
def compute_loss(self, ces, tps, fps, fns):
# we now need to effectively reimplement the loss
loss = None
for i in range(len(ces)):
if not self.dice_do_BG:
tp = tps[i][:, 1:]
fp = fps[i][:, 1:]
fn = fns[i][:, 1:]
else:
tp = tps[i]
fp = fps[i]
fn = fns[i]
if self.batch_dice:
tp = tp.sum(0)
fp = fp.sum(0)
fn = fn.sum(0)
else:
pass
nominator = 2 * tp + self.dice_smooth
denominator = 2 * tp + fp + fn + self.dice_smooth
dice_loss = (- nominator / denominator).mean()
if loss is None:
loss = self.loss_weights[i] * (ces[i].mean() + dice_loss)
else:
loss += self.loss_weights[i] * (ces[i].mean() + dice_loss)
###########
return loss | [
"torch.cuda.is_available",
"torch.cuda.amp.autocast",
"torch.tensor"
] | 1.6.0 | hasukmin12/nnUNet_MDD_UNet_with_Semi_Supervised | 58c5665a5d89d1ad77038e5d6420be76fadab136 |
1.7 | # @Time : 2020/6/28
# @Author : Zihan Lin
# @Email : [email protected]
# UPDATE
# @Time : 2020/10/04, 2021/3/2, 2021/2/17
# @Author : Shanlei Mu, Yupeng Hou, Jiawei Guan
# @Email : [email protected], [email protected], [email protected]
"""
recbole.config.configurator
################################
"""
import re
import os
import sys
import yaml
import torch
from logging import getLogger
from recbole.evaluator import group_metrics, individual_metrics
from recbole.utils import get_model, Enum, EvaluatorType, ModelType, InputType, \
general_arguments, training_arguments, evaluation_arguments, dataset_arguments
from recbole.utils.utils import set_color
class Config(object):
""" Configurator module that load the defined parameters.
Configurator module will first load the default parameters from the fixed properties in RecBole and then
load parameters from the external input.
    External input supports three kinds of forms: config file, command line and parameter dictionaries.
    - config file: It's a file that records the parameters to be modified or added. It should be in ``yaml`` format,
e.g. a config file is 'example.yaml', the content is:
learning_rate: 0.001
train_batch_size: 2048
- command line: It should be in the format as '---learning_rate=0.001'
- parameter dictionaries: It should be a dict, where the key is parameter name and the value is parameter value,
e.g. config_dict = {'learning_rate': 0.001}
    Configuration module allows the above three kinds of external input formats to be used together,
the priority order is as following:
command line > parameter dictionaries > config file
e.g. If we set learning_rate=0.01 in config file, learning_rate=0.02 in command line,
learning_rate=0.03 in parameter dictionaries.
Finally the learning_rate is equal to 0.02.
"""
def __init__(self, model=None, dataset=None, config_file_list=None, config_dict=None):
"""
Args:
model (str/AbstractRecommender): the model name or the model class, default is None, if it is None, config
will search the parameter 'model' from the external input as the model name or model class.
dataset (str): the dataset name, default is None, if it is None, config will search the parameter 'dataset'
from the external input as the dataset name.
config_file_list (list of str): the external config file, it allows multiple config files, default is None.
config_dict (dict): the external parameter dictionaries, default is None.
"""
self._init_parameters_category()
self.yaml_loader = self._build_yaml_loader()
self.file_config_dict = self._load_config_files(config_file_list)
self.variable_config_dict = self._load_variable_config_dict(config_dict)
self.cmd_config_dict = self._load_cmd_line()
self._merge_external_config_dict()
self.model, self.model_class, self.dataset = self._get_model_and_dataset(model, dataset)
self._load_internal_config_dict(self.model, self.model_class, self.dataset)
self.final_config_dict = self._get_final_config_dict()
self._set_default_parameters()
self._init_device()
self._set_train_neg_sample_args()
def _init_parameters_category(self):
self.parameters = dict()
self.parameters['General'] = general_arguments
self.parameters['Training'] = training_arguments
self.parameters['Evaluation'] = evaluation_arguments
self.parameters['Dataset'] = dataset_arguments
def _build_yaml_loader(self):
loader = yaml.FullLoader
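        # Descriptive note (added): the implicit resolver registered below is a common workaround
        # so that scientific-notation numbers such as `1e-5` are loaded as floats rather than
        # strings by pyyaml's FullLoader.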
loader.add_implicit_resolver(
u'tag:yaml.org,2002:float',
re.compile(
u'''^(?:
[-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
|[-+]?\\.(?:inf|Inf|INF)
|\\.(?:nan|NaN|NAN))$''', re.X
), list(u'-+0123456789.')
)
return loader
def _convert_config_dict(self, config_dict):
r"""This function convert the str parameters to their original type.
"""
for key in config_dict:
param = config_dict[key]
if not isinstance(param, str):
continue
try:
value = eval(param)
if not isinstance(value, (str, int, float, list, tuple, dict, bool, Enum)):
value = param
except (NameError, SyntaxError, TypeError):
if isinstance(param, str):
if param.lower() == "true":
value = True
elif param.lower() == "false":
value = False
else:
value = param
else:
value = param
config_dict[key] = value
return config_dict
def _load_config_files(self, file_list):
file_config_dict = dict()
if file_list:
for file in file_list:
with open(file, 'r', encoding='utf-8') as f:
file_config_dict.update(yaml.load(f.read(), Loader=self.yaml_loader))
return file_config_dict
def _load_variable_config_dict(self, config_dict):
# HyperTuning may set the parameters such as mlp_hidden_size in NeuMF in the format of ['[]', '[]']
# then config_dict will receive a str '[]', but indeed it's a list []
# temporarily use _convert_config_dict to solve this problem
return self._convert_config_dict(config_dict) if config_dict else dict()
def _load_cmd_line(self):
r""" Read parameters from command line and convert it to str.
"""
cmd_config_dict = dict()
unrecognized_args = []
if "ipykernel_launcher" not in sys.argv[0]:
for arg in sys.argv[1:]:
if not arg.startswith("--") or len(arg[2:].split("=")) != 2:
unrecognized_args.append(arg)
continue
cmd_arg_name, cmd_arg_value = arg[2:].split("=")
if cmd_arg_name in cmd_config_dict and cmd_arg_value != cmd_config_dict[cmd_arg_name]:
raise SyntaxError("There are duplicate commend arg '%s' with different value." % arg)
else:
cmd_config_dict[cmd_arg_name] = cmd_arg_value
if len(unrecognized_args) > 0:
logger = getLogger()
logger.warning('command line args [{}] will not be used in RecBole'.format(' '.join(unrecognized_args)))
cmd_config_dict = self._convert_config_dict(cmd_config_dict)
return cmd_config_dict
def _merge_external_config_dict(self):
external_config_dict = dict()
external_config_dict.update(self.file_config_dict)
external_config_dict.update(self.variable_config_dict)
external_config_dict.update(self.cmd_config_dict)
self.external_config_dict = external_config_dict
def _get_model_and_dataset(self, model, dataset):
if model is None:
try:
model = self.external_config_dict['model']
except KeyError:
raise KeyError(
                    'model needs to be specified in at least one of these ways: '
'[model variable, config file, config dict, command line] '
)
if not isinstance(model, str):
final_model_class = model
final_model = model.__name__
else:
final_model = model
final_model_class = get_model(final_model)
if dataset is None:
try:
final_dataset = self.external_config_dict['dataset']
except KeyError:
raise KeyError(
                    'dataset needs to be specified in at least one of these ways: '
'[dataset variable, config file, config dict, command line] '
)
else:
final_dataset = dataset
return final_model, final_model_class, final_dataset
def _update_internal_config_dict(self, file):
with open(file, 'r', encoding='utf-8') as f:
config_dict = yaml.load(f.read(), Loader=self.yaml_loader)
if config_dict is not None:
self.internal_config_dict.update(config_dict)
return config_dict
def _load_internal_config_dict(self, model, model_class, dataset):
current_path = os.path.dirname(os.path.realpath(__file__))
overall_init_file = os.path.join(current_path, '../properties/overall.yaml')
model_init_file = os.path.join(current_path, '../properties/model/' + model + '.yaml')
sample_init_file = os.path.join(current_path, '../properties/dataset/sample.yaml')
dataset_init_file = os.path.join(current_path, '../properties/dataset/' + dataset + '.yaml')
quick_start_config_path = os.path.join(current_path, '../properties/quick_start_config/')
context_aware_init = os.path.join(quick_start_config_path, 'context-aware.yaml')
context_aware_on_ml_100k_init = os.path.join(quick_start_config_path, 'context-aware_ml-100k.yaml')
DIN_init = os.path.join(quick_start_config_path, 'sequential_DIN.yaml')
DIN_on_ml_100k_init = os.path.join(quick_start_config_path, 'sequential_DIN_on_ml-100k.yaml')
sequential_init = os.path.join(quick_start_config_path, 'sequential.yaml')
special_sequential_on_ml_100k_init = os.path.join(quick_start_config_path, 'special_sequential_on_ml-100k.yaml')
sequential_embedding_model_init = os.path.join(quick_start_config_path, 'sequential_embedding_model.yaml')
knowledge_base_init = os.path.join(quick_start_config_path, 'knowledge_base.yaml')
self.internal_config_dict = dict()
for file in [overall_init_file, model_init_file, sample_init_file, dataset_init_file]:
if os.path.isfile(file):
config_dict = self._update_internal_config_dict(file)
if file == dataset_init_file:
self.parameters['Dataset'] += [
key for key in config_dict.keys() if key not in self.parameters['Dataset']
]
self.internal_config_dict['MODEL_TYPE'] = model_class.type
if self.internal_config_dict['MODEL_TYPE'] == ModelType.GENERAL:
pass
elif self.internal_config_dict['MODEL_TYPE'] in {ModelType.CONTEXT, ModelType.DECISIONTREE}:
self._update_internal_config_dict(context_aware_init)
if dataset == 'ml-100k':
self._update_internal_config_dict(context_aware_on_ml_100k_init)
elif self.internal_config_dict['MODEL_TYPE'] == ModelType.SEQUENTIAL:
if model in ['DIN', 'DIEN']:
self._update_internal_config_dict(DIN_init)
if dataset == 'ml-100k':
self._update_internal_config_dict(DIN_on_ml_100k_init)
elif model in ['GRU4RecKG', 'KSR']:
self._update_internal_config_dict(sequential_embedding_model_init)
else:
self._update_internal_config_dict(sequential_init)
if dataset == 'ml-100k' and model in ['GRU4RecF', 'SASRecF', 'FDSA', 'S3Rec']:
self._update_internal_config_dict(special_sequential_on_ml_100k_init)
elif self.internal_config_dict['MODEL_TYPE'] == ModelType.KNOWLEDGE:
self._update_internal_config_dict(knowledge_base_init)
def _get_final_config_dict(self):
final_config_dict = dict()
final_config_dict.update(self.internal_config_dict)
final_config_dict.update(self.external_config_dict)
return final_config_dict
def _set_default_parameters(self):
self.final_config_dict['dataset'] = self.dataset
self.final_config_dict['model'] = self.model
if self.dataset == 'ml-100k' and self.final_config_dict['data_path'] is None:
current_path = os.path.dirname(os.path.realpath(__file__))
self.final_config_dict['data_path'] = os.path.join(current_path, '../dataset_example/' + self.dataset)
else:
self.final_config_dict['data_path'] = os.path.join(self.final_config_dict['data_path'], self.dataset)
if hasattr(self.model_class, 'input_type'):
self.final_config_dict['MODEL_INPUT_TYPE'] = self.model_class.input_type
elif 'loss_type' in self.final_config_dict:
if self.final_config_dict['loss_type'] in ['CE']:
if self.final_config_dict['MODEL_TYPE'] == ModelType.SEQUENTIAL and self.final_config_dict['training_neg_sample_num'] > 0:
raise ValueError("training_neg_sample_num should be 0 when the loss_type is CE")
self.final_config_dict['MODEL_INPUT_TYPE'] = InputType.POINTWISE
elif self.final_config_dict['loss_type'] in ['BPR']:
self.final_config_dict['MODEL_INPUT_TYPE'] = InputType.PAIRWISE
else:
raise ValueError('Either Model has attr \'input_type\',' 'or arg \'loss_type\' should exist in config.')
eval_type = None
for metric in self.final_config_dict['metrics']:
if metric.lower() in individual_metrics:
if eval_type is not None and eval_type == EvaluatorType.RANKING:
raise RuntimeError('Ranking metrics and other metrics can not be used at the same time.')
else:
eval_type = EvaluatorType.INDIVIDUAL
if metric.lower() in group_metrics:
if eval_type is not None and eval_type == EvaluatorType.INDIVIDUAL:
raise RuntimeError('Ranking metrics and other metrics can not be used at the same time.')
else:
eval_type = EvaluatorType.RANKING
self.final_config_dict['eval_type'] = eval_type
smaller_metric = ['rmse', 'mae', 'logloss']
valid_metric = self.final_config_dict['valid_metric'].split('@')[0]
self.final_config_dict['valid_metric_bigger'] = False if valid_metric.lower() in smaller_metric else True
if 'additional_feat_suffix' in self.final_config_dict:
ad_suf = self.final_config_dict['additional_feat_suffix']
if isinstance(ad_suf, str):
self.final_config_dict['additional_feat_suffix'] = [ad_suf]
def _init_device(self):
use_gpu = self.final_config_dict['use_gpu']
if use_gpu:
os.environ["CUDA_VISIBLE_DEVICES"] = str(self.final_config_dict['gpu_id'])
self.final_config_dict['device'] = torch.device("cuda" if torch.cuda.is_available() and use_gpu else "cpu")
def _set_train_neg_sample_args(self):
if self.final_config_dict['training_neg_sample_num']:
self.final_config_dict['train_neg_sample_args'] = {
'strategy': 'by',
'by': self.final_config_dict['training_neg_sample_num'],
'distribution': self.final_config_dict['training_neg_sample_distribution'] or 'uniform'
}
else:
self.final_config_dict['train_neg_sample_args'] = {'strategy': 'none'}
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError("index must be a str.")
self.final_config_dict[key] = value
def __getitem__(self, item):
if item in self.final_config_dict:
return self.final_config_dict[item]
else:
return None
def __contains__(self, key):
if not isinstance(key, str):
raise TypeError("index must be a str.")
return key in self.final_config_dict
def __str__(self):
args_info = '\n'
for category in self.parameters:
args_info += set_color(category + ' Hyper Parameters:\n', 'pink')
args_info += '\n'.join([(set_color("{}", 'cyan') + " =" + set_color(" {}", 'yellow')).format(arg, value)
for arg, value in self.final_config_dict.items()
if arg in self.parameters[category]])
args_info += '\n\n'
args_info += set_color('Other Hyper Parameters: \n', 'pink')
args_info += '\n'.join([
(set_color("{}", 'cyan') + " = " + set_color("{}", 'yellow')).format(arg, value)
for arg, value in self.final_config_dict.items()
if arg not in {
_ for args in self.parameters.values() for _ in args
}.union({'model', 'dataset', 'config_files'})
])
args_info += '\n\n'
return args_info
def __repr__(self):
return self.__str__()
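# A minimal, hedged usage sketch (added for illustration, not executed on import). It assumes the
# bundled 'BPR' model and 'ml-100k' dataset properties are available, as in the class docstring
# above; values in config_dict override the yaml defaults, and command-line args override both.
def _config_usage_example():
    config = Config(model='BPR', dataset='ml-100k', config_dict={'learning_rate': 0.02})
    return config['learning_rate']  # 0.02 -- the parameter dictionary wins over the config files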
| [
"torch.cuda.is_available"
] | 1.7.0 | ghazalehnt/RecBole | f1219847005e2c8d72b8c3cd5c49a138fe83276d |
1.7 | import time
import numpy
import torch
from torch_sparse import SparseTensor
from recbole.config import Config
from recbole.data import create_dataset, data_preparation
from recbole.model.abstract_recommender import GeneralRecommender
from recbole.utils import InputType, ModelType, init_seed, get_model
import jnius_config
from glob import glob
import os
import sys
import numpy as np
from recbole.utils.inferred_lm import ItemLM
from recbole.utils.utils import get_fat_jar
REC_MODELS = {
"JSRMFSPARSE_eval1000Active-uni1000_pt3-244-000": {
"checkpoint_file": "saved/JOINTSRMFSPARSE-Jul-21-2021_18-14-46.pth",
"model_name": "JOINTSRMFSPARSE",
"dataset_name": "KITT_goodreads_rated_all",
"config_dict": {'data_path': '/GW/PSR/work/datasets/recbole_datasets/',
'eval_setting': 'RO_RS,uni1000',
'valid_metric': 'NDCG@10',
'group_by_user': True,
'training_neg_sample_num': 8,
'train_batch_size': 4096,
'monitor': False,
'benchmark_filename': ['train-1000activeP50N0R30-restPos-pt3-244-000', 'valid-1000activeP50N0R30-pt3-244-000', 'test-1000activeP50N0R30-pt3-244-000'],
'debug_variant': 1,
'number_of_reviews_to_use_item': None,
'item_description_fields': ['item_description', 'item_genres'],
'alpha_item': 1,
}
},
"JSRMFSPARSE_eval1000Active-uni1000_pt3-145-000": {
"checkpoint_file": "saved/JOINTSRMFSPARSE-Jul-23-2021_12-46-41.pth",
"model_name": "JOINTSRMFSPARSE",
"dataset_name": "KITT_goodreads_rated_all",
"config_dict": {'data_path': '/GW/PSR/work/datasets/recbole_datasets/',
'eval_setting': 'RO_RS,uni1000',
'valid_metric': 'NDCG@10',
'group_by_user': True,
'training_neg_sample_num': 8,
'train_batch_size': 4096,
'monitor': False,
'benchmark_filename': ['train-1000activeP50N0R30-restPos-pt3-145-000', 'valid-1000activeP50N0R30-pt3-145-000', 'test-1000activeP50N0R30-pt3-145-000'],
'debug_variant': 1,
'number_of_reviews_to_use_item': None,
'item_description_fields': ['item_description', 'item_genres'],
'alpha_item': 1,
}
},
"JSRMFSPARSE_eval2000Active-uni1000_pt3-244-000": {
"checkpoint_file": "saved/JOINTSRMFSPARSE-Jul-26-2021_10-19-47.pth",
"model_name": "JOINTSRMFSPARSE",
"dataset_name": "KITT_goodreads_rated_all",
"config_dict": {'data_path': '/GW/PSR/work/datasets/recbole_datasets/',
'eval_setting': 'RO_RS,uni1000',
'valid_metric': 'NDCG@10',
'group_by_user': True,
'training_neg_sample_num': 8,
'train_batch_size': 4096,
'monitor': False,
'benchmark_filename': ['train-2000activeP50N0R30-restPos-pt3-244-000', 'valid-2000activeP50N0R30-pt3-244-000', 'test-2000activeP50N0R30-pt3-244-000'],
'debug_variant': 1,
'number_of_reviews_to_use_item': None,
'item_description_fields': ['item_description', 'item_genres'],
'alpha_item': 1,
}
},
"JSRMFSPARSE_eval2000Active-uni1000_pt3-145-000": {
"checkpoint_file": "saved/JOINTSRMFSPARSE-Jul-26-2021_10-16-03.pth",
"model_name": "JOINTSRMFSPARSE",
"dataset_name": "KITT_goodreads_rated_all",
"config_dict": {'data_path': '/GW/PSR/work/datasets/recbole_datasets/',
'eval_setting': 'RO_RS,uni1000',
'valid_metric': 'NDCG@10',
'group_by_user': True,
'training_neg_sample_num': 8,
'train_batch_size': 4096,
'monitor': False,
'benchmark_filename': ['train-2000activeP50N0R30-restPos-pt3-145-000', 'valid-2000activeP50N0R30-pt3-145-000', 'test-2000activeP50N0R30-pt3-145-000'],
'debug_variant': 1,
'number_of_reviews_to_use_item': None,
'item_description_fields': ['item_description', 'item_genres'],
'alpha_item': 1,
}
},
"JSRMFSPARSE_eval1000Active-uni1000_pt3-244-000_onlyOtherUsers-91-00": {
"checkpoint_file": "saved/JOINTSRMFSPARSE-Aug-03-2021_14-10-44.pth",
"model_name": "JOINTSRMFSPARSE",
"dataset_name": "KITT_goodreads_rated_all",
"config_dict": {'data_path': '/GW/PSR/work/datasets/recbole_datasets/',
'eval_setting': 'RO_RS,uni1000',
'valid_metric': 'NDCG@10',
'group_by_user': True,
'training_neg_sample_num': 8,
'train_batch_size': 4096,
'monitor': False,
'benchmark_filename': ['train-1000activeP50N0R30-onlyOtherUsersPos-pt3-244-000-train-91-00',
'train-1000activeP50N0R30-onlyOtherUsersPos-pt3-244-000-valid-91-00',
'test-1000activeP50N0R30-pt3-244-000'],
'debug_variant': 1,
'number_of_reviews_to_use_item': None,
'item_description_fields': ['item_description', 'item_genres'],
'alpha_item': 1,
}
},
"JSRMFSPARSE_eval1000Active-uni1000_pt3-145-000_onlyOtherUsers-91-00": {
"checkpoint_file": "saved/JOINTSRMFSPARSE-Aug-03-2021_13-27-59.pth",
"model_name": "JOINTSRMFSPARSE",
"dataset_name": "KITT_goodreads_rated_all",
"config_dict": {'data_path': '/GW/PSR/work/datasets/recbole_datasets/',
'eval_setting': 'RO_RS,uni1000',
'valid_metric': 'NDCG@10',
'group_by_user': True,
'training_neg_sample_num': 8,
'train_batch_size': 4096,
'monitor': False,
'benchmark_filename': ['train-1000activeP50N0R30-onlyOtherUsersPos-pt3-145-000-train-91-00',
'train-1000activeP50N0R30-onlyOtherUsersPos-pt3-145-000-valid-91-00',
'test-1000activeP50N0R30-pt3-145-000'],
'debug_variant': 1,
'number_of_reviews_to_use_item': None,
'item_description_fields': ['item_description', 'item_genres'],
'alpha_item': 1,
}
},
"JSRMFSPARSE_eval2000Active-uni1000_pt3-244-000_onlyOtherUsers-91-00": {
"checkpoint_file": "saved/JOINTSRMFSPARSE-Aug-03-2021_14-07-10.pth",
"model_name": "JOINTSRMFSPARSE",
"dataset_name": "KITT_goodreads_rated_all",
"config_dict": {'data_path': '/GW/PSR/work/datasets/recbole_datasets/',
'eval_setting': 'RO_RS,uni1000',
'valid_metric': 'NDCG@10',
'group_by_user': True,
'training_neg_sample_num': 8,
'train_batch_size': 4096,
'monitor': False,
'benchmark_filename': ['train-2000activeP50N0R30-onlyOtherUsersPos-pt3-244-000-train-91-00',
'train-2000activeP50N0R30-onlyOtherUsersPos-pt3-244-000-valid-91-00',
'test-2000activeP50N0R30-pt3-244-000'],
'debug_variant': 1,
'number_of_reviews_to_use_item': None,
'item_description_fields': ['item_description', 'item_genres'],
'alpha_item': 1,
}
},
"JSRMFSPARSE_eval2000Active-uni1000_pt3-145-000_onlyOtherUsers-91-00": {
"checkpoint_file": "saved/JOINTSRMFSPARSE-Aug-03-2021_14-01-29.pth",
"model_name": "JOINTSRMFSPARSE",
"dataset_name": "KITT_goodreads_rated_all",
"config_dict": {'data_path': '/GW/PSR/work/datasets/recbole_datasets/',
'eval_setting': 'RO_RS,uni1000',
'valid_metric': 'NDCG@10',
'group_by_user': True,
'training_neg_sample_num': 8,
'train_batch_size': 4096,
'monitor': False,
'benchmark_filename': ['train-2000activeP50N0R30-onlyOtherUsersPos-pt3-145-000-train-91-00',
'train-2000activeP50N0R30-onlyOtherUsersPos-pt3-145-000-valid-91-00',
'test-2000activeP50N0R30-pt3-145-000'],
'debug_variant': 1,
'number_of_reviews_to_use_item': None,
'item_description_fields': ['item_description', 'item_genres'],
'alpha_item': 1,
}
},
}
class BM25vec(GeneralRecommender):
input_type = InputType.POINTWISE
type = ModelType.TRADITIONAL
def __init__(self, config, dataset):
super(BM25vec, self).__init__(config, dataset)
rec_model = config["saved_rec_model"]
topk = config["inferred_lm_topk_w"]
item_description_fields = config['item_description_fields']
max_number_of_reviews_item_lm = config['number_of_reviews_to_use_item']
user_profile_fields = config['user_profile_fields']
max_number_of_reviews_user_lm = config['number_of_reviews_to_use_user']
self.k1 = config["k1"]
self.b = config["b"]
self.c = config["c"]
self.use_sparse = config["use_sparse"]
step = config["rec_model_load_step"]
if step is None:
step = 200000
if rec_model is None and item_description_fields is None:
print("Should specify rec_model or item_description_fields")
exit(-1)
elif rec_model is not None and item_description_fields is not None:
print("Give either rec_model or item_description_fields, not both")
exit(-1)
if user_profile_fields is None:
print("user_profile_fields should be given!")
exit(-1)
self.n_items = dataset.item_num
self.n_users = dataset.user_num
# load background idf
print("Loading background corpus")
s = time.time()
background_idf_temp = {}
jnius_config.set_classpath(get_fat_jar())
indexcorpus = open('background_corpus_path', 'r').read().strip()
from jnius import autoclass
JFile = autoclass("java.io.File")
JFSDirectory = autoclass("org.apache.lucene.store.FSDirectory")
fsdir = JFSDirectory.open(JFile(indexcorpus).toPath())
reader = autoclass("org.apache.lucene.index.DirectoryReader").open(fsdir)
numdocs = reader.numDocs()
JTerm = autoclass("org.apache.lucene.index.Term")
# numterms = self.reader.getSumTotalTermFreq("contents")
print(f"done {time.time()-s}")
# create query/user LM:
print("Creating user lm")
s = time.time()
uid_term_frequencies = {}
self.uid_len = {}
# self.uid_termprobs = {}
num_of_used_revs = {}
if "review" in user_profile_fields:
# first we want to only load reviews that are in the training set so we specify those:
training_set = {}
for i in range(len(dataset.dataset.inter_feat["user_id"])):
uid = int(dataset.dataset.inter_feat["user_id"][i])
iid = int(dataset.dataset.inter_feat["item_id"][i])
if uid not in training_set:
training_set[uid] = set()
training_set[uid].add(iid)
user_fields = [3]
inter_file = os.path.join(dataset.dataset.dataset_path, f"{dataset.dataset.dataset_name}.inter")
with open(inter_file, 'r') as infile:
next(infile)
for line in infile:
split = line.split("\t")
user_id = dataset.token2id_exists("user_id", split[0])
item_id = dataset.token2id_exists("item_id", split[1])
if item_id == -1 or user_id == -1:
continue
if item_id == 0 or user_id == 0:
print("Isnt that padding?")
if user_id not in training_set:
continue
if item_id not in training_set[user_id]:
continue
if user_id not in num_of_used_revs:
num_of_used_revs[user_id] = 0
if max_number_of_reviews_user_lm is not None and num_of_used_revs[user_id] >= max_number_of_reviews_user_lm:
continue
if user_id not in uid_term_frequencies:
uid_term_frequencies[user_id] = {}
self.uid_len[user_id] = 0
for fi in user_fields:
desc = split[fi]
if len(desc) > 1:
num_of_used_revs[user_id] += 1
for term in desc.split():
if term not in uid_term_frequencies[user_id]:
uid_term_frequencies[user_id][term] = 1
else:
uid_term_frequencies[user_id][term] += 1
self.uid_len[user_id] += 1
# bg idf:
if term not in background_idf_temp:
jterm = JTerm("contents", term)
df = reader.docFreq(jterm)
background_idf_temp[term] = np.log10((numdocs - df + 0.5) / (df + 0.5))
# for user_id in self.uid_term_frequencies.keys():
# self.uid_termprobs[user_id] = {k: (v/self.uid_len[user_id]) for k, v in self.uid_term_frequencies[user_id]}
# TODO extend this for KITT users... from ..user files
self.term_idx = {}
self.background_idf = torch.zeros(len(background_idf_temp.keys()))
idx = 0
for t, idf in background_idf_temp.items():
self.term_idx[t] = idx
self.background_idf[idx] = idf
idx += 1
self.background_idf = self.background_idf.to(device=self.device)
if self.use_sparse:
indices = [[0], [0]]
values = [0]
for user_id in uid_term_frequencies.keys():
for t, v in uid_term_frequencies[user_id].items():
indices[0].append(user_id)
indices[1].append(self.term_idx[t])
values.append(v)
self.uid_term_frequencies = SparseTensor(row=torch.tensor(indices[0], dtype=torch.long), col=torch.tensor(indices[1], dtype=torch.long), value=torch.tensor(values), sparse_sizes=(self.n_users, len(self.background_idf)))
self.uid_term_frequencies = self.uid_term_frequencies.to(self.device)
else:
self.uid_term_frequencies = torch.zeros((self.n_users, len(self.background_idf)), device=self.device)
for user in uid_term_frequencies:
for t, v in uid_term_frequencies[user].items():
self.uid_term_frequencies[user][self.term_idx[t]] = v
print(f"done {time.time()-s}")
# item lm:
doc_tf = {}
self.doc_len = torch.zeros(self.n_items, device=self.device)
# create item LM (inferred):
if rec_model is not None:
print("Creating inferred item lm")
s = time.time()
checkpoint_file = REC_MODELS[rec_model]["checkpoint_file"]
model_name = REC_MODELS[rec_model]["model_name"]
dataset_name = REC_MODELS[rec_model]["dataset_name"]
config_dict = REC_MODELS[rec_model]["config_dict"]
rec_model = ItemLM(checkpoint_file, model_name, dataset_name, k=topk, step=step, config_dict=config_dict)
inferred_lm = rec_model.get_lm()
for i in range(1, len(dataset.dataset.item_feat)):
item_id = dataset.dataset.item_feat["item_id"][i]
item_url_rid = dataset.dataset.item_feat["item_url"][i]
item_url = dataset.id2token("item_url", item_url_rid)
if item_url in inferred_lm:
doc_tf[item_id] = inferred_lm[item_url]
# doc_tf[item_id] = {inferred_lm[item_url][0][j]: inferred_lm[item_url][1][j] for j in range(len(inferred_lm[item_url][0]))}
self.doc_len[item_id] = sum(inferred_lm[item_url][1])
else:
doc_tf[item_id] = ([], [])
self.doc_len[item_id] = 0
print(f"{time.time() - s}")
if self.use_sparse:
indices = [[0], [0]]
values = [0]
for item_id in doc_tf.keys():
for i in range(len(doc_tf[item_id][0])):
t = doc_tf[item_id][0][i]
v = doc_tf[item_id][1][i]
if t in self.term_idx:
indices[0].append(item_id)
indices[1].append(self.term_idx[t])
values.append(v)
self.doc_tf = SparseTensor(row=torch.tensor(indices[0], dtype=torch.long), col=torch.tensor(indices[1], dtype=torch.long), value=torch.tensor(values), sparse_sizes=(self.n_items, len(self.background_idf)))
self.doc_tf = self.doc_tf.to(self.device)
else:
self.doc_tf = torch.zeros((self.n_items, len(self.background_idf)), device=self.device)
for item in doc_tf:
for i in range(len(doc_tf[item][0])):
t = doc_tf[item][0][i]
v = doc_tf[item][1][i]
if t in self.term_idx:
self.doc_tf[item][self.term_idx[t]] = v
# OR create item LM statistical:
elif item_description_fields is not None:
print("Creating item lm")
item_desc_fields = []
if "item_description" in item_description_fields:
item_desc_fields.append(3)
if "item_genres" in item_description_fields:
item_desc_fields.append(4)
if "tags" in item_description_fields:
item_desc_fields.append(4)
if len(item_desc_fields) > 0:
item_LM_file = os.path.join(dataset.dataset.dataset_path, f"{dataset.dataset.dataset_name}.item")
with open(item_LM_file, 'r') as infile:
next(infile)
for line in infile:
split = line.split("\t")
item_id = dataset.token2id_exists("item_id", split[0])
if item_id == -1:
print(item_id)
continue
if item_id == 0:
print("Isnt that padding?")
if item_id not in doc_tf:
doc_tf[item_id] = {}
self.doc_len[item_id] = 0
for fi in item_desc_fields:
if fi >= len(split):
print(split)
continue
desc = split[fi]
for term in desc.split():
if term not in doc_tf[item_id]:
doc_tf[item_id][term] = 1
else:
doc_tf[item_id][term] += 1
self.doc_len[item_id] += 1
num_of_used_revs = {}
if "review" in item_description_fields:
# first we want to only load reviews that are in the training set so we specify those:
training_set = {}
for i in range(len(dataset.dataset.inter_feat["user_id"])):
uid = int(dataset.dataset.inter_feat["user_id"][i])
iid = int(dataset.dataset.inter_feat["item_id"][i])
if uid not in training_set:
training_set[uid] = set()
training_set[uid].add(iid)
item_desc_fields = [3]
item_LM_file = os.path.join(dataset.dataset.dataset_path, f"{dataset.dataset.dataset_name}.inter")
with open(item_LM_file, 'r') as infile:
next(infile)
for line in infile:
split = line.split("\t")
user_id = dataset.token2id_exists("user_id", split[0])
item_id = dataset.token2id_exists("item_id", split[1])
if item_id == -1 or user_id == -1:
continue
if item_id == 0 or user_id == 0:
print("Isnt that padding?")
if user_id not in training_set:
continue
if item_id not in training_set[user_id]:
continue
if item_id not in num_of_used_revs:
num_of_used_revs[item_id] = 0
if max_number_of_reviews_item_lm is not None and num_of_used_revs[item_id] >= max_number_of_reviews_item_lm:
continue
if item_id not in doc_tf:
doc_tf[item_id] = {}
self.doc_len[item_id] = 0
for fi in item_desc_fields:
desc = split[fi]
if len(desc) > 1:
num_of_used_revs[item_id] += 1
for term in desc.split():
if term not in doc_tf[item_id]:
doc_tf[item_id][term] = 1
else:
doc_tf[item_id][term] += 1
self.doc_len[item_id] += 1
if self.use_sparse:
indices = [[0], [0]]
values = [0]
for item_id in doc_tf.keys():
for t, v in doc_tf[item_id].items():
if t in self.term_idx:
indices[0].append(item_id)
indices[1].append(self.term_idx[t])
values.append(v)
self.doc_tf = SparseTensor(row=torch.tensor(indices[0], dtype=torch.long), col=torch.tensor(indices[1], dtype=torch.long), value=torch.tensor(values), sparse_sizes=(self.n_items, len(self.background_idf)))
self.doc_tf = self.doc_tf.to(self.device)
else:
self.doc_tf = torch.zeros((self.n_items, len(self.background_idf)), device=self.device)
for item in doc_tf:
for t, v in doc_tf[item].items():
if t in self.term_idx:
self.doc_tf[item][self.term_idx[t]] = v
self.average_doc_len = self.doc_len.sum()/self.doc_len.shape[0]
print(self.average_doc_len)
print(f"done {time.time()-s}")
self.fake_loss = torch.nn.Parameter(torch.zeros(1))  # define a dummy parameter (placeholder)
def calculate_loss(self, interaction):
return torch.nn.Parameter(torch.zeros(1))
def predict(self, interaction):
users = interaction[self.USER_ID]
items = interaction[self.ITEM_ID]
# return a list of scores wrt the user item pairs
if self.use_sparse:
try:
doctf = self.doc_tf[items].to_dense()
except:
print(items)
print(self.n_items)
print(self.doc_tf[items])
exit(-1)
qtf = self.uid_term_frequencies[users].to_dense()
else:
doctf = self.doc_tf[items]
qtf = self.uid_term_frequencies[users]
numerator = doctf * (self.k1 + 1)
t = self.k1 * (1 - self.b + self.b * (self.doc_len[items] / self.average_doc_len))
t = t.unsqueeze(1)
denominator = doctf + t
doctf = numerator / denominator
if self.c is not None:
qtf = (qtf * (self.c + 1)) / (qtf + self.c)
ret = self.background_idf * doctf * qtf
return ret.sum(1)
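# Illustrative sketch (not part of the original model code): the scoring in
# predict() above is BM25-style term saturation. With hypothetical values
# k1=1.2, b=0.75, a term with tf=3 in a document of average length contributes
# idf * 3*(1.2+1) / (3 + 1.2*(1 - 0.75 + 0.75*1.0)) ~= idf * 1.57, i.e. the
# contribution grows sublinearly in tf. A minimal tensorized check of the same
# formula (all numbers below are made up for illustration):
def _bm25_term_score_sketch():
    k1, b, c = 1.2, 0.75, 8.0                      # hypothetical hyperparameters
    idf = torch.tensor([2.0])                      # background idf of one term
    doctf = torch.tensor([3.0])                    # term frequency in the item LM
    qtf = torch.tensor([2.0])                      # term frequency in the user LM
    dl_over_avgdl = torch.tensor([1.0])            # doc_len / average_doc_len
    doc_part = doctf * (k1 + 1) / (doctf + k1 * (1 - b + b * dl_over_avgdl))
    query_part = qtf * (c + 1) / (qtf + c)
    return (idf * doc_part * query_part).sum()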
| [
"torch.zeros",
"torch.tensor"
] | 1.7.0 | ghazalehnt/RecBole | f1219847005e2c8d72b8c3cd5c49a138fe83276d |
0.4 | #
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Loggers frontends and backends.
- DataLogger is the generic logger interface.
- PythonLogger logs using the Python logger.
- TensorBoardLogger logs to files that can be read by Google's TensorBoard.
- CsvLogger logs to CSV files.
Note that not all loggers implement all logging methods.
"""
import torch
import numpy as np
import tabulate
import distiller
from distiller.utils import density, sparsity, sparsity_2D, size_to_str, to_np
# TensorBoard logger
from .tbbackend import TBBackend
# Visdom logger
from torchnet.logger import VisdomPlotLogger, VisdomLogger
import csv
import logging
msglogger = logging.getLogger()
__all__ = ['PythonLogger', 'TensorBoardLogger', 'CsvLogger']
class DataLogger(object):
"""This is an abstract interface for data loggers
Data loggers log the progress of the training process to some backend.
This backend can be a file, a web service, or some other means to collect and/or
display the training progress.
"""
def __init__(self):
pass
def log_training_progress(self, model, epoch, i, set_size, batch_time, data_time, classerr, losses, print_freq, collectors):
raise NotImplementedError
def log_activation_sparsity(self, activation_sparsity, logcontext):
raise NotImplementedError
def log_weights_sparsity(self, model, epoch):
raise NotImplementedError
def log_weights_distribution(self, named_params, steps_completed):
pass
class PythonLogger(DataLogger):
def __init__(self, logger):
super(PythonLogger, self).__init__()
self.pylogger = logger
def log_training_progress(self, stats_dict, epoch, completed, total, freq):
stats_dict = stats_dict[1]
if epoch > -1:
log = 'Epoch: [{}][{:5d}/{:5d}] '.format(epoch, completed, int(total))
else:
log = 'Test: [{:5d}/{:5d}] '.format(completed, int(total))
for name, val in stats_dict.items():
if isinstance(val, int):
log = log + '{name} {val} '.format(name=name, val=distiller.pretty_int(val))
else:
log = log + '{name} {val:.6f} '.format(name=name, val=val)
self.pylogger.info(log)
def log_activation_sparsity(self, activation_sparsity, logcontext):
data = []
for layer, sparsity_level in activation_sparsity.items():
data.append([layer, sparsity_level * 100])
t = tabulate.tabulate(data, headers=['Layer', 'sparsity (%)'], tablefmt='psql', floatfmt=".1f")
msglogger.info('\n' + t)
def log_weights_sparsity(self, model, epoch):
t, total = distiller.weights_sparsity_tbl_summary(model, return_total_sparsity=True)
msglogger.info("\nParameters:\n" + str(t))
msglogger.info('Total sparsity: {:0.2f}\n'.format(total))
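# Illustrative usage sketch (a minimal example, not part of the original file):
# log_training_progress takes a (prefix, dict) pair; PythonLogger only reads the
# dict at index 1, while the prefix is used by TensorBoardLogger below.
def _python_logger_sketch():
    logger = PythonLogger(msglogger)
    stats = ('Train/', {'Loss': 0.4231, 'LR': 0.01})
    logger.log_training_progress(stats, epoch=0, completed=10, total=100, freq=10)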
class TensorBoardLogger(DataLogger):
def __init__(self, logdir):
super(TensorBoardLogger, self).__init__()
# Set the tensorboard logger
self.tblogger = TBBackend(logdir)
print('\n--------------------------------------------------------')
print('Logging to TensorBoard - remember to execute the server:')
print('> tensorboard --logdir=\'./logs\'\n')
# Hard-code these preferences for now
self.log_gradients = False # True
self.logged_params = ['weight'] # ['weight', 'bias']
def log_training_progress(self, stats_dict, epoch, completed, total, freq):
def total_steps(total, epoch, completed):
return total*epoch + completed
prefix = stats_dict[0]
stats_dict = stats_dict[1]
for tag, value in stats_dict.items():
self.tblogger.scalar_summary(prefix+tag, value, total_steps(total, epoch, completed))
self.tblogger.sync_to_file()
def log_activation_sparsity(self, activation_sparsity, epoch):
group = 'sparsity/activations/'
for tag, value in activation_sparsity.items():
self.tblogger.scalar_summary(group+tag, value, epoch)
self.tblogger.sync_to_file()
def log_weights_sparsity(self, model, epoch):
params_size = 0
sparse_params_size = 0
for name, param in model.state_dict().items():
if param.dim() in [2, 4]:
_density = density(param)
params_size += torch.numel(param)
sparse_params_size += param.numel() * _density
self.tblogger.scalar_summary('sparsity/weights/' + name,
sparsity(param)*100, epoch)
self.tblogger.scalar_summary('sparsity-2D/weights/' + name,
sparsity_2D(param)*100, epoch)
self.tblogger.scalar_summary("sprasity/weights/total", 100*(1 - sparse_params_size/params_size), epoch)
self.tblogger.sync_to_file()
def log_weights_distribution(self, named_params, steps_completed):
if named_params is None:
return
for tag, value in named_params:
tag = tag.replace('.', '/')
if any(substring in tag for substring in self.logged_params):
self.tblogger.histogram_summary(tag, to_np(value), steps_completed)
if self.log_gradients:
self.tblogger.histogram_summary(tag+'/grad', to_np(value.grad), steps_completed)
self.tblogger.sync_to_file()
class CsvLogger(DataLogger):
def __init__(self, fname):
super(CsvLogger, self).__init__()
self.fname = fname
def log_weights_sparsity(self, model, epoch):
with open(self.fname, 'w') as csv_file:
params_size = 0
sparse_params_size = 0
writer = csv.writer(csv_file)
# write the header
writer.writerow(['parameter', 'shape', 'volume', 'sparse volume', 'sparsity level'])
for name, param in model.state_dict().items():
if param.dim() in [2, 4]:
_density = density(param)
params_size += torch.numel(param)
sparse_params_size += param.numel() * _density
writer.writerow([name, size_to_str(param.size()),
torch.numel(param),
int(_density * param.numel()),
(1-_density)*100])
| [
"torch.numel"
] | 0.4.0 | amishacorns/dnn-quant-ocs | a43b9f101dbf95e034c404f89162ce0082e12ecf |
1.2 | #
# File: odesolver.py
#
import abc
import torch
class Euler:
@staticmethod
def step_func(func, t, dt, y, u, transforms=None):
return tuple(dt * f_ for f_ in func(t, y, u=u))
@property
def order(self):
return 1
class Midpoint:
@staticmethod
def step_func(func, t, dt, y, u, transforms=None):
y_mid = tuple(y_ + f_ * dt / 2 for y_, f_ in zip(y, func(t, y, u=u)))
y_mid = tuple(trans(y_) for y_, trans in zip(y_mid, transforms))
return tuple(dt * f_ for f_ in func(t + dt / 2, y_mid, u=u))
@property
def order(self):
return 2
class RK4:
@staticmethod
def step_func(func, t, dt, y, u, transforms=None):
return rk4_alt_step_func(func, t, dt, y, u=u)
@property
def order(self):
return 4
def rk4_alt_step_func(func, t, dt, y, k1=None, u=None):
"""Smaller error with slightly more compute."""
if k1 is None:
k1 = func(t, y, u=u)
k2 = func(t + dt / 3, tuple(y_ + dt * k1_ / 3 for y_, k1_ in zip(y, k1)), u=u)
k3 = func(t + dt * 2 / 3,
tuple(y_ + dt * (k1_ / -3 + k2_) for y_, k1_, k2_ in zip(y, k1, k2)), u=u)
k4 = func(t + dt,
tuple(y_ + dt * (k1_ - k2_ + k3_) for y_, k1_, k2_, k3_ in zip(y, k1, k2, k3)), u=u)
return tuple((k1_ + 3 * k2_ + 3 * k3_ + k4_) * (dt / 8)
for k1_, k2_, k3_, k4_ in zip(k1, k2, k3, k4))
def odestep(func, t, dt, y0, u=None, method='midpoint', transforms=None):
tensor_input, func, y0, t = _check_inputs(func, y0, t)
if transforms is None:
transforms = [lambda x: x for _ in range(len(y0))]
dy = SOLVERS[method].step_func(func, t, dt, y0, u=u, transforms=transforms)
y = tuple(trans(y0_ + dy_) for y0_, dy_, trans in zip(y0, dy, transforms))
if tensor_input:
y = y[0]
return y
SOLVERS = {
'euler': Euler,
'midpoint': Midpoint,
'rk4': RK4,
}
def _check_inputs(func, y0, t):
tensor_input = False
if torch.is_tensor(y0):
tensor_input = True
y0 = (y0,)
_base_nontuple_func_ = func
func = lambda t, y, u: (_base_nontuple_func_(t, y[0], u),)
assert isinstance(y0, tuple), 'y0 must be either a torch.Tensor or a tuple'
for y0_ in y0:
assert torch.is_tensor(y0_), 'each element must be a torch.Tensor but received {}'.format(
type(y0_))
for y0_ in y0:
if not torch.is_floating_point(y0_):
raise TypeError('`y0` must be a floating point Tensor but is a {}'.format(y0_.type()))
if not torch.is_floating_point(t):
raise TypeError('`t` must be a floating point Tensor but is a {}'.format(t.type()))
return tensor_input, func, y0, t
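# Illustrative usage sketch (a minimal example, not part of the original file):
# integrate dx/dt = -x for one time unit with the midpoint rule defined above.
# Note that t must be a floating point tensor (see _check_inputs).
def _odestep_sketch():
    def dynamics(t, x, u):
        return -x
    t, dt = torch.tensor(0.0), 0.1
    x = torch.ones(3)
    for _ in range(10):
        x = odestep(dynamics, t, dt, x, u=None, method='midpoint')
        t = t + dt
    return x  # approximately exp(-1) * torch.ones(3)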
| [
"torch.is_tensor",
"torch.is_floating_point"
] | 1.2 | sisl/CEEM | 6154587fe3cdb92e8b7f70eedb1262caa1553cc8 |
1.2 | #
# File: smoother.py
#
import numpy as np
import torch
from numpy.random import choice
from scipy.optimize import least_squares, minimize
from torch.distributions.multivariate_normal import MultivariateNormal
from ceem import utils
from ceem.opt_criteria import GroupSOSCriterion, STRStateCriterion
from tqdm import tqdm
def NLSsmoother(x0, criterion, system, solver_kwargs={'verbose': 2}):
"""
Smoothing with Gauss-Newton based approach
Args:
x0 (torch.tensor): (1,T,n) system states
criterion (SOSCriterion): criterion to optimize
system (DiscreteDynamicalSystem)
solver_kwargs : options for scipy.optimize.least_squares
"""
if 'tr_rho' in solver_kwargs:
tr_rho = solver_kwargs.pop('tr_rho')
criterion = GroupSOSCriterion([criterion, STRStateCriterion(tr_rho, x0)])
B, T, xdim = x0.shape
assert B == 1, f"Smoothing one trajectory at a time. x0.shape[0] is {B} but should be 1."
def loss(x):
with torch.no_grad():
x = torch.tensor(x).view(1, T, xdim).to(x0.dtype)
loss = criterion.residuals(system, x)
return loss.numpy()
def jac(x):
x = torch.tensor(x).view(1, T, xdim)
return criterion.jac_resid_x(system, x, sparse=True)
with utils.Timer() as time:
kwargs = dict(method='trf', loss='linear')
kwargs.update(solver_kwargs)
opt_result = least_squares(loss, x0.view(-1).detach().numpy(), jac, **kwargs)
x = torch.tensor(opt_result.x).view(1, T, xdim)
metrics = {'fun': float(opt_result.cost), 'success': opt_result.success, 'time': time.dt}
return x, metrics
def EKF(x0, y_T, u_T, sigma0, Q, R, system):
"""
Extended Kalman filter
Args:
x0 (torch.tensor): (B, xdim) initial system states
y_T (torch.tensor): (B, T, ydim) observations
u_T (torch.tensor): (B, T, udim) controls
sigma0 (torch.tensor): (xdim, xdim) initial state covariance
Q (torch.tensor): (xdim, xdim) dynamics error covariance
R (torch.tensor): (ydim, ydim) observation error covariance
system (DiscreteDynamicalSystem)
Returns:
x_filt (torch.tensor): (B, T, xdim) system states
y_pred (torch.tensor): (B, T, ydim) predicted observations before state correction
"""
xdim = Q.shape[0]
B, T, ydim = y_T.shape
I = torch.eye(xdim)
x = torch.zeros(B, T, xdim)
x[:, 0:1] = x0
y = y_T.clone()
with torch.no_grad():
y[:, 0:1] = system.observe(0, x[:, :1], u_T[:, :1])
St = torch.zeros(B, T, xdim, xdim)
St[:, 0] = sigma0.unsqueeze(0)
for t in tqdm(range(1, T)):
# Propagate dynamics
with torch.no_grad():
x[:, t:t + 1] = system.step(t - 1, x[:, t - 1:t], u_T[:, t - 1:t])
Gt = system.jac_step_x(t, x[:, t:t + 1], u_T[:, t:t + 1]).detach()
St_hat = Gt @ St[:, t - 1:t] @ Gt.transpose(-1, -2) + Q
# Estimate observation
with torch.no_grad():
y[:, t:t + 1] = system.observe(t, x[:, t:t + 1], u_T[:, t:t + 1])
Ht = system.jac_obs_x(t, x[:, t:t + 1], u_T[:, t:t + 1]).detach()
Zt = Ht @ St_hat @ Ht.transpose(-1, -2) + R
# Estimate Kalman Gain and correct xt
Kt = St_hat @ Ht.transpose(-1, -2) @ torch.inverse(Zt)
x[:, t:t + 1] = x[:, t:t + 1] + (Kt @ (
y_T[:, t:t + 1] - y[:, t:t + 1]).unsqueeze(-1)).squeeze(-1)
St[:, t:t + 1] = (I - Kt @ Ht) @ St_hat
return x, y
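# Illustrative usage sketch (a minimal example, not part of the original file).
# EKF expects `system` to expose step/observe/jac_step_x/jac_obs_x with the
# batched shapes used above (states (B, 1, xdim), Jacobians (B, 1, *, xdim));
# ToyLinearSystem below is a hypothetical stand-in, not a class of this package.
def _ekf_sketch():
    xdim, ydim, B, T = 2, 1, 1, 50
    A = torch.tensor([[1.0, 0.1], [0.0, 1.0]])
    C = torch.tensor([[1.0, 0.0]])
    class ToyLinearSystem:
        def step(self, t, x, u):
            return x @ A.t()
        def observe(self, t, x, u):
            return x @ C.t()
        def jac_step_x(self, t, x, u):
            return A.expand(x.shape[0], 1, xdim, xdim)
        def jac_obs_x(self, t, x, u):
            return C.expand(x.shape[0], 1, ydim, xdim)
    x0 = torch.zeros(B, 1, xdim)
    u_T = torch.zeros(B, T, 1)
    y_T = torch.randn(B, T, ydim)
    sigma0, Q, R = torch.eye(xdim), 0.01 * torch.eye(xdim), 0.1 * torch.eye(ydim)
    return EKF(x0, y_T, u_T, sigma0, Q, R, ToyLinearSystem())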
## Particle Smoother
class ParticleSmootherSystemWrapper:
def __init__(self, sys, R):
self._sys = sys
self._Rmvn = MultivariateNormal(torch.zeros(R.shape[0],), R)
def __call__(self, x, t):
"""
t (int): time
x (torch.tensor): (N,n) particles
"""
x = x.unsqueeze(1)
t = torch.tensor([float(t)])
nx = self._sys.step(t, x)
return nx.squeeze(1)
def obsll(self, x, y):
"""
x (torch.tensor): (N,n) particles
y (torch.tensor): (1,m) observation
"""
y_ = self._sys.observe(None, x.unsqueeze(1)).squeeze(1)
dy = y - y_
logprob_y = self._Rmvn.log_prob(dy).unsqueeze(1)
return logprob_y
@property
def _xdim(self):
return self._sys.xdim
class ParticleSmoother:
def __init__(self, N, system, obsll, Q, Px0, x0mean=None):
"""
Args:
N (int):
system (DiscreteDynamics):
obsfun (callable): function mapping ((*,xdim),(1,ydim) -> (*,[0,1])
Q (torch.tensor): (xdim,xdim) torch tensor
R (torch.tensor): (ydim,ydim) torch tensor
Px0 (torch.tensor): (xdim,xdim) torch tensor
x0mean (torch.tensor): (1,xdim) torch tensor
"""
self._N = N
self._system = system
self._obsll = obsll
self._xdim = system._xdim
self._Qchol = Q.cholesky().unsqueeze(0)
self._Qpdf = MultivariateNormal(torch.zeros(self._xdim), Q)
self._Px0chol = Px0.cholesky().unsqueeze(0)
if x0mean is not None:
self._x0mean = x0mean
else:
self._x0mean = torch.zeros(1, self._xdim)
self._xfilt = None
self._wfilt = None
self._wsmooth = None
def filter(self, y):
# inputs:
# y (torch.tensor): (T, ydim) torch tensor
T = y.shape[0]
x = torch.zeros(T, self._N, self._xdim)
w = torch.zeros(T, self._N, 1)
# sample initial distribution
x[0] = self._x0mean + (self._Px0chol @ torch.randn(self._N, self._xdim, 1)).squeeze(2)
for t in range(T - 1):
## Observe
log_wt = self._obsll(x[t], y[None,t])
## Update weights
# numerically stable computation of w
log_wt -= log_wt.max()
wt = log_wt.exp()
wt /= wt.sum()
# since we divide by wt.sum(), subtracting off log_wt.max()
# gives the same result
w[t] = wt
## Resample
rinds = choice(self._N, self._N, p=w[t, :, 0].detach().numpy())
xtr = x[t,rinds]
## Propagate
with torch.no_grad():
x[t + 1] = self._system(
xtr, t) + (self._Qchol @ torch.randn(self._N, self._xdim, 1)).squeeze(2)
log_wt = self._obsll(x[-1], y[None, -1])
log_wt -= log_wt.max()
wt = log_wt.exp()
wt /= wt.sum()
w[-1] = wt
return x, w
def smoother(self, x, w):
T, N, n = x.shape
## Compute p(xt+1|xt)
Tlogprobs = torch.zeros(T-1, N, N)
for t in range(T - 1):
with torch.no_grad():
xtp1_pred = self._system(x[t], t)
xtp1_diff = xtp1_pred.unsqueeze(1) - x[None, t + 1]
Tlogprobs[t] = self._Qpdf.log_prob(xtp1_diff.reshape(-1, n)).reshape(N, N)
# for numerical stability subtract the max
Tlogprobs -= Tlogprobs.max(1)[0].unsqueeze(1)
Tprobs = Tlogprobs.exp()
# compute v
v = (w[:-1] * Tprobs).sum(1)
# since Tprobs sum is in the denominator, subtracting Tlogprobs.max
# above gives the same result as not
# compute w_N by backward recursion
w_N = w.clone()
# sets w_N[-1] = w[-1]
for t in range(T - 1):
t = T - t - 2
w_N_t = w[t] * (w_N[t + 1] * Tprobs[t] / v[t].unsqueeze(1)).sum(1).unsqueeze(1)
if w_N_t.sum() > 0.:
# if no particles have weight just use the filtered weight
w_N[t] = w_N_t
# normalize weights
w_N[t] /= w_N[t].sum()
return w_N
def run(self, y):
x, w = self.filter(y)
w_N = self.smoother(x, w)
self._xfilt = x
self._wfilt = w
self._wsmooth = w_N
def get_smooth_mean(self):
x_mean = (self._xfilt * self._wsmooth).sum(1) / self._wsmooth.sum(1)
return x_mean
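# Illustrative usage sketch (a minimal example, not part of the original file):
# wrap a toy 1-D random-walk system and smooth a short observation sequence.
# ToySystem is a hypothetical stand-in exposing the attributes the wrapper
# above reads (step, observe, xdim).
def _particle_smoother_sketch():
    class ToySystem:
        xdim = 1
        def step(self, t, x):
            return x
        def observe(self, t, x):
            return x
    R = 0.1 * torch.eye(1)
    wrapped = ParticleSmootherSystemWrapper(ToySystem(), R)
    ps = ParticleSmoother(N=100, system=wrapped, obsll=wrapped.obsll,
                          Q=0.1 * torch.eye(1), Px0=torch.eye(1))
    y = torch.randn(20, 1)          # (T, ydim) observations
    ps.run(y)
    return ps.get_smooth_mean()     # (T, xdim) smoothed state means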
| [
"torch.zeros",
"torch.no_grad",
"torch.inverse",
"torch.eye",
"torch.tensor",
"torch.randn"
] | 1.2 | sisl/CEEM | 6154587fe3cdb92e8b7f70eedb1262caa1553cc8 |
0.4 | """
A stacked bidirectional LSTM with skip connections between layers.
"""
from typing import Optional, Tuple, List
import warnings
import torch
from torch.nn.utils.rnn import PackedSequence, pad_packed_sequence
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import h5py
import numpy
from allennlp.modules.lstm_cell_with_projection import LstmCellWithProjection
from allennlp.common.checks import ConfigurationError
from allennlp.modules.encoder_base import _EncoderBase
from allennlp.common.file_utils import cached_path
class ElmoLstm(_EncoderBase):
"""
A stacked, bidirectional LSTM which uses
:class:`~allennlp.modules.lstm_cell_with_projection.LstmCellWithProjection`'s
with highway layers between the inputs to layers.
The inputs to the forward and backward directions are independent - forward and backward
states are not concatenated between layers.
Additionally, this LSTM maintains its `own` state, which is updated every time
``forward`` is called. It is dynamically resized for different batch sizes and is
designed for use with non-continuous inputs (i.e. inputs which aren't formatted as a stream,
such as text used for a language modelling task, which is how stateful RNNs are typically used).
This is non-standard, but can be thought of as having an "end of sentence" state, which is
carried across different sentences.
Parameters
----------
input_size : ``int``, required
The dimension of the inputs to the LSTM.
hidden_size : ``int``, required
The dimension of the outputs of the LSTM.
cell_size : ``int``, required.
The dimension of the memory cell of the
:class:`~allennlp.modules.lstm_cell_with_projection.LstmCellWithProjection`.
num_layers : ``int``, required
The number of bidirectional LSTMs to use.
requires_grad: ``bool``, optional
If True, compute gradient of ELMo parameters for fine tuning.
recurrent_dropout_probability: ``float``, optional (default = 0.0)
The dropout probability to be used in a dropout scheme as stated in
`A Theoretically Grounded Application of Dropout in Recurrent Neural Networks
<https://arxiv.org/abs/1512.05287>`_ .
state_projection_clip_value: ``float``, optional, (default = None)
The magnitude with which to clip the hidden_state after projecting it.
memory_cell_clip_value: ``float``, optional, (default = None)
The magnitude with which to clip the memory cell.
"""
def __init__(self,
input_size: int,
hidden_size: int,
cell_size: int,
num_layers: int,
requires_grad: bool = False,
recurrent_dropout_probability: float = 0.0,
memory_cell_clip_value: Optional[float] = None,
state_projection_clip_value: Optional[float] = None) -> None:
super(ElmoLstm, self).__init__(stateful=True)
# Required to be wrapped with a :class:`PytorchSeq2SeqWrapper`.
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.cell_size = cell_size
self.requires_grad = requires_grad
forward_layers = []
backward_layers = []
lstm_input_size = input_size
go_forward = True
for layer_index in range(num_layers):
forward_layer = LstmCellWithProjection(lstm_input_size,
hidden_size,
cell_size,
go_forward,
# 0.5 if layer_index == 0 else recurrent_dropout_probability,
recurrent_dropout_probability,
memory_cell_clip_value,
state_projection_clip_value)
backward_layer = LstmCellWithProjection(lstm_input_size,
hidden_size,
cell_size,
not go_forward,
# 0.5 if layer_index == 0 else recurrent_dropout_probability,
recurrent_dropout_probability,
memory_cell_clip_value,
state_projection_clip_value)
lstm_input_size = hidden_size
self.add_module('forward_layer_{}'.format(layer_index), forward_layer)
self.add_module('backward_layer_{}'.format(layer_index), backward_layer)
forward_layers.append(forward_layer)
backward_layers.append(backward_layer)
self.forward_layers = forward_layers
self.backward_layers = backward_layers
def forward(self, # pylint: disable=arguments-differ
inputs: torch.Tensor,
mask: torch.LongTensor) -> torch.Tensor:
"""
Parameters
----------
inputs : ``torch.Tensor``, required.
A Tensor of shape ``(batch_size, sequence_length, hidden_size)``.
mask : ``torch.LongTensor``, required.
A binary mask of shape ``(batch_size, sequence_length)`` representing the
non-padded elements in each sequence in the batch.
Returns
-------
A ``torch.Tensor`` of shape (num_layers, batch_size, sequence_length, hidden_size),
where the num_layers dimension represents the LSTM output from that layer.
"""
batch_size, total_sequence_length = mask.size()
stacked_sequence_output, final_states, restoration_indices = \
self.sort_and_run_forward(self._lstm_forward, inputs, mask)
num_layers, num_valid, returned_timesteps, encoder_dim = stacked_sequence_output.size()
# Add back invalid rows which were removed in the call to sort_and_run_forward.
if num_valid < batch_size:
zeros = stacked_sequence_output.new_zeros(num_layers,
batch_size - num_valid,
returned_timesteps,
encoder_dim)
stacked_sequence_output = torch.cat([stacked_sequence_output, zeros], 1)
# The states also need to have invalid rows added back.
new_states = []
for state in final_states:
state_dim = state.size(-1)
zeros = state.new_zeros(num_layers, batch_size - num_valid, state_dim)
new_states.append(torch.cat([state, zeros], 1))
final_states = new_states
# It's possible to need to pass sequences which are padded to longer than the
# max length of the sequence to a Seq2StackEncoder. However, packing and unpacking
# the sequences mean that the returned tensor won't include these dimensions, because
# the RNN did not need to process them. We add them back on in the form of zeros here.
sequence_length_difference = total_sequence_length - returned_timesteps
if sequence_length_difference > 0:
zeros = stacked_sequence_output.new_zeros(num_layers,
batch_size,
sequence_length_difference,
stacked_sequence_output[0].size(-1))
stacked_sequence_output = torch.cat([stacked_sequence_output, zeros], 2)
self._update_states(final_states, restoration_indices)
# Restore the original indices and return the sequence.
# Has shape (num_layers, batch_size, sequence_length, hidden_size)
return stacked_sequence_output.index_select(1, restoration_indices)
def _lstm_forward(self,
inputs: PackedSequence,
initial_state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None) -> \
Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
"""
Parameters
----------
inputs : ``PackedSequence``, required.
A batch first ``PackedSequence`` to run the stacked LSTM over.
initial_state : ``Tuple[torch.Tensor, torch.Tensor]``, optional, (default = None)
A tuple (state, memory) representing the initial hidden state and memory
of the LSTM, with shape (num_layers, batch_size, 2 * hidden_size) and
(num_layers, batch_size, 2 * cell_size) respectively.
Returns
-------
output_sequence : ``torch.FloatTensor``
The encoded sequence of shape (num_layers, batch_size, sequence_length, hidden_size)
final_states: ``Tuple[torch.FloatTensor, torch.FloatTensor]``
The per-layer final (state, memory) states of the LSTM, with shape
(num_layers, batch_size, 2 * hidden_size) and (num_layers, batch_size, 2 * cell_size)
respectively. The last dimension is duplicated because it contains the state/memory
for both the forward and backward layers.
"""
if initial_state is None:
hidden_states: List[Optional[Tuple[torch.Tensor,
torch.Tensor]]] = [None] * len(self.forward_layers)
elif initial_state[0].size()[0] != len(self.forward_layers):
raise ConfigurationError("Initial states were passed to forward() but the number of "
"initial states does not match the number of layers.")
else:
hidden_states = list(zip(initial_state[0].split(1, 0), initial_state[1].split(1, 0)))
inputs, batch_lengths = pad_packed_sequence(inputs, batch_first=True)
forward_output_sequence = inputs
backward_output_sequence = inputs
final_states = []
sequence_outputs = []
for layer_index, state in enumerate(hidden_states):
forward_layer = getattr(self, 'forward_layer_{}'.format(layer_index))
backward_layer = getattr(self, 'backward_layer_{}'.format(layer_index))
forward_cache = forward_output_sequence
backward_cache = backward_output_sequence
if state is not None:
forward_hidden_state, backward_hidden_state = state[0].split(self.hidden_size, 2)
forward_memory_state, backward_memory_state = state[1].split(self.cell_size, 2)
forward_state = (forward_hidden_state, forward_memory_state)
backward_state = (backward_hidden_state, backward_memory_state)
else:
forward_state = None
backward_state = None
forward_output_sequence, forward_state = forward_layer(forward_output_sequence,
batch_lengths,
forward_state)
backward_output_sequence, backward_state = backward_layer(backward_output_sequence,
batch_lengths,
backward_state)
# Skip connections, just adding the input to the output.
if layer_index != 0:
forward_output_sequence += forward_cache
backward_output_sequence += backward_cache
sequence_outputs.append(torch.cat([forward_output_sequence,
backward_output_sequence], -1))
# Append the state tuples in a list, so that we can return
# the final states for all the layers.
final_states.append((torch.cat([forward_state[0], backward_state[0]], -1),
torch.cat([forward_state[1], backward_state[1]], -1)))
stacked_sequence_outputs: torch.FloatTensor = torch.stack(sequence_outputs)
# Stack the hidden state and memory for each layer into 2 tensors of shape
# (num_layers, batch_size, hidden_size) and (num_layers, batch_size, cell_size)
# respectively.
final_hidden_states, final_memory_states = zip(*final_states)
final_state_tuple: Tuple[torch.FloatTensor,
torch.FloatTensor] = (torch.cat(final_hidden_states, 0),
torch.cat(final_memory_states, 0))
return stacked_sequence_outputs, final_state_tuple
def load_weights(self, weight_file: str) -> None:
"""
Load the pre-trained weights from the file.
"""
requires_grad = self.requires_grad
with h5py.File(cached_path(weight_file), 'r') as fin:
for i_layer, lstms in enumerate(
zip(self.forward_layers, self.backward_layers)
):
for j_direction, lstm in enumerate(lstms):
# lstm is an instance of LSTMCellWithProjection
cell_size = lstm.cell_size
dataset = fin['RNN_%s' % j_direction]['RNN']['MultiRNNCell']['Cell%s' % i_layer
]['LSTMCell']
# tensorflow packs together both W and U matrices into one matrix,
# but pytorch maintains individual matrices. In addition, tensorflow
# packs the gates as input, memory, forget, output but pytorch
# uses input, forget, memory, output. So we need to modify the weights.
tf_weights = numpy.transpose(dataset['W_0'][...])
torch_weights = tf_weights.copy()
# split the W from U matrices
input_size = lstm.input_size
input_weights = torch_weights[:, :input_size]
recurrent_weights = torch_weights[:, input_size:]
tf_input_weights = tf_weights[:, :input_size]
tf_recurrent_weights = tf_weights[:, input_size:]
# handle the different gate order convention
for torch_w, tf_w in [[input_weights, tf_input_weights],
[recurrent_weights, tf_recurrent_weights]]:
torch_w[(1 * cell_size):(2 * cell_size), :] = tf_w[(2 * cell_size):(3 * cell_size), :]
torch_w[(2 * cell_size):(3 * cell_size), :] = tf_w[(1 * cell_size):(2 * cell_size), :]
lstm.input_linearity.weight.data.copy_(torch.FloatTensor(input_weights))
lstm.state_linearity.weight.data.copy_(torch.FloatTensor(recurrent_weights))
lstm.input_linearity.weight.requires_grad = requires_grad
lstm.state_linearity.weight.requires_grad = requires_grad
# the bias weights
tf_bias = dataset['B'][...]
# tensorflow adds 1.0 to forget gate bias instead of modifying the
# parameters...
tf_bias[(2 * cell_size):(3 * cell_size)] += 1
torch_bias = tf_bias.copy()
torch_bias[(1 * cell_size):(2 * cell_size)
] = tf_bias[(2 * cell_size):(3 * cell_size)]
torch_bias[(2 * cell_size):(3 * cell_size)
] = tf_bias[(1 * cell_size):(2 * cell_size)]
lstm.state_linearity.bias.data.copy_(torch.FloatTensor(torch_bias))
lstm.state_linearity.bias.requires_grad = requires_grad
# the projection weights
proj_weights = numpy.transpose(dataset['W_P_0'][...])
lstm.state_projection.weight.data.copy_(torch.FloatTensor(proj_weights))
lstm.state_projection.weight.requires_grad = requires_grad
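# Illustrative usage sketch (a minimal example, not part of the original file):
# run the stacked encoder on a small random batch; each layer's output
# concatenates the forward and backward directions on the last dimension.
def _elmo_lstm_sketch():
    encoder = ElmoLstm(input_size=32, hidden_size=16, cell_size=64, num_layers=2)
    inputs = torch.randn(4, 10, 32)
    mask = torch.ones(4, 10, dtype=torch.long)
    return encoder(inputs, mask)  # (num_layers, 4, 10, 2 * hidden_size)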
| [
"torch.cat",
"torch.stack",
"torch.FloatTensor",
"torch.nn.utils.rnn.pad_packed_sequence"
] | 0.4.0 | LiyuanLucasLiu/allennlp | da81516cbe78b58c2f2a3a9e56ef2526bd72fb9f |
1.7 |
import torch.nn as nn
class LeNet(nn.Module):
def __init__(self, out_dim=10, in_channel=1, img_sz=32, hidden_dim=500):
super(LeNet, self).__init__()
feat_map_sz = img_sz//4
self.n_feat = 50 * feat_map_sz * feat_map_sz
self.hidden_dim = hidden_dim
self.conv = nn.Sequential(
nn.Conv2d(in_channel, 20, 5, padding=2),
nn.BatchNorm2d(20),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Conv2d(20, 50, 5, padding=2),
nn.BatchNorm2d(50),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Flatten(),
)
self.linear = nn.Sequential(
nn.Linear(self.n_feat, hidden_dim),
nn.BatchNorm1d(hidden_dim),
nn.ReLU(inplace=True),
)
self.last = nn.Linear(hidden_dim, out_dim) # Subject to be replaced dependent on task
def features(self, x):
x = self.conv(x)
x = self.linear(x.view(-1, self.n_feat))
# x=self.linear(x)
return x
def logits(self, x):
x = self.last(x)
return x
def forward(self, x):
x = self.features(x)
x = self.logits(x)
return x
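# Illustrative usage sketch (a minimal example, not part of the original file):
# the default configuration expects 32x32 single-channel inputs.
def _lenet_sketch():
    import torch
    model = LeNet(out_dim=10)
    logits = model(torch.randn(4, 1, 32, 32))
    return logits.shape  # torch.Size([4, 10])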
def LeNetC(out_dim=10, hidden_dim=500): # LeNet with color input
return LeNet(out_dim=out_dim, in_channel=3, img_sz=32, hidden_dim=hidden_dim) | [
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d",
"torch.nn.Flatten"
] | 1.7.1 | lihr04/PCA-OGD | 196d03701f22110479af9c1feb619fef6fe1562b |
2 | from mmdet2trt.models.builder import register_wraper, build_wraper
import torch
from torch import nn
import torch.nn.functional as F
from .bbox_head import BBoxHeadWraper
@register_wraper("mmdet.models.roi_heads.bbox_heads.sabl_head.SABLHead")
class SABLHeadWraper(BBoxHeadWraper):
def __init__(self, module, test_cfg):
super(SABLHeadWraper, self).__init__(module, test_cfg)
def regress_by_class(self, rois, label, bbox_pred, img_shape):
module = self.module
if rois.size(1) == 4:
new_rois, _ = self.bbox_coder.decode(
rois.unsqueeze(0), [bb.unsqueeze(0) for bb in bbox_pred],
max_shape=img_shape)
else:
bboxes, _ = self.bbox_coder.decode(
rois[:,
1:].unsqueeze(0), [bb.unsqueeze(0) for bb in bbox_pred],
max_shape=img_shape)
new_rois = torch.cat((rois[:, 0:1], bboxes), dim=2)
new_rois = new_rois.squeeze(0)
return new_rois
def get_bboxes(self, rois, cls_score, bbox_pred, img_shape, batch_size,
num_proposals, cfg):
module = self.module
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
scores = F.softmax(cls_score, dim=-1)
if rois.size(1) == 4:
bboxes, confids = self.bbox_coder.decode(
rois.unsqueeze(0), [bb.unsqueeze(0) for bb in bbox_pred],
max_shape=img_shape)
else:
bboxes, confids = self.bbox_coder.decode(
rois[:,
1:].unsqueeze(0), [bb.unsqueeze(0) for bb in bbox_pred],
max_shape=img_shape)
bboxes = bboxes.squeeze(0)
confids = confids.squeeze(0)
scores = scores * confids.unsqueeze(1)
scores = scores.view(batch_size, num_proposals, -1)
bboxes = bboxes.view(batch_size, num_proposals, -1, 4)
num_bboxes = bboxes.shape[1]
if bboxes.size(2) == module.num_classes:
bboxes_ext = bboxes[:, :, 0:1, :] * 0
bboxes = torch.cat([bboxes, bboxes_ext], 2)
else:
bboxes = bboxes.repeat(1, 1, module.num_classes + 1, 1)
num_detections, det_boxes, det_scores, det_classes = self.rcnn_nms(
scores, bboxes, num_bboxes, cfg.max_per_img)
return num_detections, det_boxes, det_scores, det_classes
| [
"torch.cat",
"torch.nn.functional.softmax"
] | 2 | jackweiwang/mmdetection-to-tensorrt | c31c32ee4720ff56010bcda77bacf3a110d0526c |
3 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import torch
import torch.nn as nn
from pytorch3d import _C
from torch.autograd import Function
from torch.autograd.function import once_differentiable
class GraphConv(nn.Module):
"""A single graph convolution layer."""
def __init__(
self,
input_dim: int,
output_dim: int,
init: str = "normal",
directed: bool = False,
):
"""
Args:
input_dim: Number of input features per vertex.
output_dim: Number of output features per vertex.
init: Weight initialization method. Can be one of ['zero', 'normal'].
directed: Bool indicating if edges in the graph are directed.
"""
super().__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.directed = directed
self.w0 = nn.Linear(input_dim, output_dim)
self.w1 = nn.Linear(input_dim, output_dim)
if init == "normal":
nn.init.normal_(self.w0.weight, mean=0, std=0.01)
nn.init.normal_(self.w1.weight, mean=0, std=0.01)
# pyre-fixme[16]: Optional type has no attribute `data`.
self.w0.bias.data.zero_()
self.w1.bias.data.zero_()
elif init == "zero":
self.w0.weight.data.zero_()
self.w1.weight.data.zero_()
else:
raise ValueError('Invalid GraphConv initialization "%s"' % init)
def forward(self, verts, edges):
"""
Args:
verts: FloatTensor of shape (V, input_dim) where V is the number of
vertices and input_dim is the number of input features
per vertex. input_dim has to match the input_dim specified
in __init__.
edges: LongTensor of shape (E, 2) where E is the number of edges
where each edge has the indices of the two vertices which
form the edge.
Returns:
out: FloatTensor of shape (V, output_dim) where output_dim is the
number of output features per vertex.
"""
if verts.is_cuda != edges.is_cuda:
raise ValueError("verts and edges tensors must be on the same device.")
if verts.shape[0] == 0:
# empty graph.
return verts.new_zeros((0, self.output_dim)) * verts.sum()
verts_w0 = self.w0(verts) # (V, output_dim)
verts_w1 = self.w1(verts) # (V, output_dim)
if torch.cuda.is_available() and verts.is_cuda and edges.is_cuda:
neighbor_sums = gather_scatter(verts_w1, edges, self.directed)
else:
neighbor_sums = gather_scatter_python(
verts_w1, edges, self.directed
) # (V, output_dim)
# Add neighbor features to each vertex's features.
out = verts_w0 + neighbor_sums
return out
def __repr__(self):
Din, Dout, directed = self.input_dim, self.output_dim, self.directed
return "GraphConv(%d -> %d, directed=%r)" % (Din, Dout, directed)
def gather_scatter_python(input, edges, directed: bool = False):
"""
Python implementation of gather_scatter for aggregating features of
neighbor nodes in a graph.
Given a directed graph: v0 -> v1 -> v2 the updated feature for v1 depends
on v2 in order to be consistent with Morris et al. AAAI 2019
(https://arxiv.org/abs/1810.02244). This only affects
directed graphs; for undirected graphs v1 will depend on both v0 and v2,
no matter which way the edges are physically stored.
Args:
input: Tensor of shape (num_vertices, input_dim).
edges: Tensor of edge indices of shape (num_edges, 2).
directed: bool indicating if edges are directed.
Returns:
output: Tensor of same shape as input.
"""
if not (input.dim() == 2):
raise ValueError("input can only have 2 dimensions.")
if not (edges.dim() == 2):
raise ValueError("edges can only have 2 dimensions.")
if not (edges.shape[1] == 2):
raise ValueError("edges must be of shape (num_edges, 2).")
num_vertices, input_feature_dim = input.shape
num_edges = edges.shape[0]
output = torch.zeros_like(input)
idx0 = edges[:, 0].view(num_edges, 1).expand(num_edges, input_feature_dim)
idx1 = edges[:, 1].view(num_edges, 1).expand(num_edges, input_feature_dim)
# pyre-fixme[16]: `Tensor` has no attribute `scatter_add`.
output = output.scatter_add(0, idx0, input.gather(0, idx1))
if not directed:
output = output.scatter_add(0, idx1, input.gather(0, idx0))
return output
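# Illustrative worked example (not part of the original file) for the
# v0 -> v1 -> v2 graph in the docstring above: with directed=True the update
# for v1 gathers only from v2; with directed=False it gathers from v0 and v2.
def _gather_scatter_example():
    feats = torch.tensor([[1.0], [10.0], [100.0]])   # per-vertex features
    edges = torch.tensor([[0, 1], [1, 2]])            # v0->v1, v1->v2
    directed = gather_scatter_python(feats, edges, directed=True)
    # -> [[10.], [100.], [0.]]: v0 <- v1, v1 <- v2, v2 <- nothing
    undirected = gather_scatter_python(feats, edges, directed=False)
    # -> [[10.], [101.], [10.]]: v0 <- v1, v1 <- v0 + v2, v2 <- v1
    return directed, undirected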
class GatherScatter(Function):
"""
Torch autograd Function wrapper for gather_scatter C++/CUDA implementations.
"""
@staticmethod
def forward(ctx, input, edges, directed=False):
"""
Args:
ctx: Context object used to calculate gradients.
input: Tensor of shape (num_vertices, input_dim)
edges: Tensor of edge indices of shape (num_edges, 2)
directed: Bool indicating if edges are directed.
Returns:
output: Tensor of same shape as input.
"""
if not (input.dim() == 2):
raise ValueError("input can only have 2 dimensions.")
if not (edges.dim() == 2):
raise ValueError("edges can only have 2 dimensions.")
if not (edges.shape[1] == 2):
raise ValueError("edges must be of shape (num_edges, 2).")
if not (input.dtype == torch.float32):
raise ValueError("input has to be of type torch.float32.")
ctx.directed = directed
input, edges = input.contiguous(), edges.contiguous()
ctx.save_for_backward(edges)
backward = False
output = _C.gather_scatter(input, edges, directed, backward)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
grad_output = grad_output.contiguous()
edges = ctx.saved_tensors[0]
directed = ctx.directed
backward = True
grad_input = _C.gather_scatter(grad_output, edges, directed, backward)
grad_edges = None
grad_directed = None
return grad_input, grad_edges, grad_directed
# pyre-fixme[16]: `GatherScatter` has no attribute `apply`.
gather_scatter = GatherScatter.apply
| [
"torch.zeros_like",
"torch.nn.init.normal_",
"torch.cuda.is_available",
"torch.nn.Linear"
] | 3 | martinruenz/pytorch3d | 7f1e63aed1252ba8145d4a66ce2272331d60cdae |
1.6 | from typing import Optional, Any, cast
import gym
import gym_minigrid.minigrid
import numpy as np
import torch
from babyai.utils.format import InstructionsPreprocessor
from gym_minigrid.minigrid import MiniGridEnv
from allenact.base_abstractions.sensor import Sensor, prepare_locals_for_super
from allenact.base_abstractions.task import Task, SubTaskType
# fmt: off
ALL_VOCAB_TOKENS = [
"a", "after", "and", "ball", "behind", "blue", "box",
"door", "front", "go", "green", "grey", "in", "key",
"left", "next", "of", "on", "open", "pick", "purple",
"put", "red", "right", "the", "then", "to", "up", "yellow",
"you", "your",
]
# fmt: on
class EgocentricMiniGridSensor(Sensor[MiniGridEnv, Task[MiniGridEnv]]):
def __init__(
self,
agent_view_size: int,
view_channels: int = 1,
uuid: str = "minigrid_ego_image",
**kwargs: Any
):
self.agent_view_size = agent_view_size
self.view_channels = view_channels
self.num_objects = (
cast(
int, max(map(abs, gym_minigrid.minigrid.OBJECT_TO_IDX.values())) # type: ignore
)
+ 1
)
self.num_colors = (
cast(int, max(map(abs, gym_minigrid.minigrid.COLOR_TO_IDX.values()))) # type: ignore
+ 1
)
self.num_states = (
cast(int, max(map(abs, gym_minigrid.minigrid.STATE_TO_IDX.values()))) # type: ignore
+ 1
)
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self) -> gym.Space:
return gym.spaces.Box(
low=0,
high=max(self.num_objects, self.num_colors, self.num_states) - 1,
shape=(self.agent_view_size, self.agent_view_size, self.view_channels),
dtype=int,
)
def get_observation(
self,
env: MiniGridEnv,
task: Optional[SubTaskType],
*args,
minigrid_output_obs: Optional[np.ndarray] = None,
**kwargs: Any
) -> Any:
if minigrid_output_obs is not None and minigrid_output_obs["image"].shape == (
self.agent_view_size,
self.agent_view_size,
):
img = minigrid_output_obs["image"][:, :, : self.view_channels]
else:
env.agent_view_size = self.agent_view_size
img = env.gen_obs()["image"][:, :, : self.view_channels]
assert img.dtype == np.uint8
return img
class MiniGridMissionSensor(Sensor[MiniGridEnv, Task[MiniGridEnv]]):
def __init__(self, instr_len: int, uuid: str = "minigrid_mission", **kwargs: Any):
self.instr_preprocessor = InstructionsPreprocessor(
model_name="TMP_SENSOR", load_vocab_from=None
)
# We initialize the vocabulary with a fixed collection of tokens
# and then ensure that the size cannot exceed this number. This
# guarantees that sensors on all processes will produce the same
# values.
for token in ALL_VOCAB_TOKENS:
_ = self.instr_preprocessor.vocab[token]
self.instr_preprocessor.vocab.max_size = len(ALL_VOCAB_TOKENS)
self.instr_len = instr_len
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self) -> gym.Space:
return gym.spaces.Box(
low=0,
high=self.instr_preprocessor.vocab.max_size,
shape=(self.instr_len,),
dtype=int,
)
def get_observation(
self,
env: MiniGridEnv,
task: Optional[SubTaskType],
*args,
minigrid_output_obs: Optional[np.ndarray] = None,
**kwargs: Any
) -> Any:
if minigrid_output_obs is None:
minigrid_output_obs = env.gen_obs()
out = self.instr_preprocessor([minigrid_output_obs]).view(-1)
n: int = out.shape[0]
if n > self.instr_len:
out = out[: self.instr_len]
elif n < self.instr_len:
out = torch.nn.functional.pad(
input=out, pad=[0, self.instr_len - n], value=0,
)
return out.long().numpy()
| [
"torch.nn.functional.pad"
] | 1.6.0 | klemenkotar/dcrl | 457be7af1389db37ec12e165dfad646e17359162 |
1.4 | import collections
import itertools
import json
import os
import attr
import torch
import torch.nn.functional as F
import numpy as np
from tensor2struct.models import abstract_preproc, encoder, batched_encoder
from tensor2struct.modules import embedders, lstm, attention, permutation
from tensor2struct.utils import serialization, vocab, registry
import logging
logger = logging.getLogger("tensor2struct")
@attr.s
class EncoderState:
src_memory = attr.ib()
lengths = attr.ib()
# only useful for conventional attention-based decoding
src_summary = attr.ib(default=None)
# for lexical model
src_embedding = attr.ib(default=None)
# for debugging
permutation = attr.ib(default=None)
# heuristic loss e.g., constraining the latent permutations
enc_loss = attr.ib(default=None)
@registry.register("encoder", "latper_enc")
class LatPerEncoder(batched_encoder.Encoder):
batched = True
Preproc = encoder.EncPreproc
"""
Latent permutation produced by syntax encoder +
semantic encoder ==> postorder encoder to obtain
the final representation
"""
def __init__(
self,
device,
preproc,
dropout=0.1,
word_emb_size=128,
recurrent_size=256,
num_heads=4,
use_native_lstm=True,
bert_version="bert-base-uncased",
syntax_encoder=("emb", "bilstm"),
semantic_encoder=("emb",),
postorder_encoder=None,
forward_relaxed=True,
gumbel_temperature=None,
use_map_decode=False,
):
super().__init__(
device=device,
preproc=preproc,
dropout=dropout,
word_emb_size=word_emb_size,
recurrent_size=recurrent_size,
encoder=syntax_encoder,
num_heads=num_heads,
use_native_lstm=use_native_lstm,
bert_version=bert_version,
)
# another encoder for obtain semantic info
self.semantic_encoder_modules = semantic_encoder
if self.semantic_encoder_modules is not None:
self.semantic_encoder = self._build_modules(self.semantic_encoder_modules)
self.postorder_encoder_modules = postorder_encoder
if postorder_encoder is not None:
self.postorder_encoder = self._build_modules(self.postorder_encoder_modules)
if self.postorder_encoder_modules:
self.last_enc_module = self.postorder_encoder_modules[-1]
elif self.semantic_encoder_modules:
self.last_enc_module = self.semantic_encoder_modules[-1]
else:
self.last_enc_module = self.encoder_modules[-1]
self.permutator = permutation.BinarizableTree(
device=device,
input_size=recurrent_size,
forward_relaxed=forward_relaxed,
gumbel_temperature=gumbel_temperature,
use_map_decode=use_map_decode,
dropout=dropout
)
def _pad(self, tokens_list):
"""
Add BOS and EOS to use LSTM-minus features
"""
res = []
for tokens in tokens_list:
res.append([vocab.BOS] + tokens + [vocab.EOS])
return res
def compute_encoding(self, tokens_list):
res = self.compute_encoding_batched(tokens_list)
# res = self.compute_encoding_unbatched(tokens_list)
return res
def extract_lstm_enc(self, src_enc, enc_module):
assert enc_module in ["bilstm", "unilstm"]
src_memory, lengths = src_enc.pad(batch_first=True)
bidirectional = enc_module == "bilstm"
src_summary = lstm.extract_last_hidden_state_batched(
src_memory, lengths, bidirectional=bidirectional
)
return src_memory, lengths, src_summary
def extract_trans_enc(self, src_enc, enc_module):
assert enc_module in ["transformer"]
raw_src_enc_memory, lengths = src_enc
# unpack CLS representation as the summary, recover original lengths
src_summary = raw_src_enc_memory[:, 0, :]
src_memory = raw_src_enc_memory[:, 1:, :]
for i in range(len(lengths)):
lengths[i] = lengths[i] - 1
return src_memory, lengths, src_summary
def extract_enc(self, src_enc, enc_module):
if enc_module in ["bilstm", "unilstm"]:
return self.extract_lstm_enc(src_enc, enc_module)
elif enc_module in ["transformer"]:
return self.extract_trans_enc(src_enc, enc_module)
elif enc_module in ["emb"]:
src_memory, lengths = src_enc.pad(batch_first=True)
return src_memory, lengths, None
def compute_encoding_batched(self, tokens_list):
"""
For syntax encoding, each sentence is padded with bos and eos to obtain
the LSTM-minus span-level features.
"""
# 1. obtain permutation from syntax representations
padded_tokens_list = self._pad(tokens_list)
syntax_src_enc = self.encoder(padded_tokens_list)
syntax_src_enc_batched, padded_lengths = syntax_src_enc.pad(batch_first=True)
if self.semantic_encoder_modules is None:
# 2.a baseline without any reordering
permutation_matrix = None
permuted_memory = syntax_src_enc_batched
lengths = padded_lengths
else:
syntax_span_rep = lstm.SpanRepresentation(
syntax_src_enc_batched, padded_lengths
)
permutation_matrix, _ = self.permutator(
syntax_span_rep
) # use span_rep to handle bos and eos
# 2.b use permutation matrix to obtain reordered semantic representations
# optional: postorder encoder is applied after permutation
if self.postorder_encoder_modules:
preorder_src_enc = self.semantic_encoder(tokens_list)
postorder_input = preorder_src_enc.apply_raw(
lambda x: torch.bmm(permutation_matrix, x)
)
postorder_src_enc = self.postorder_encoder(postorder_input)
permuted_memory, lengths, src_summary = self.extract_enc(
postorder_src_enc, self.last_enc_module
)
else:
semantic_src_enc = self.semantic_encoder(tokens_list)
semantic_src_enc_batched, lengths, src_summary = self.extract_enc(
semantic_src_enc, self.last_enc_module
)
permuted_memory = torch.bmm(
permutation_matrix, semantic_src_enc_batched
)
# optional: check lengths
# span_rep.get_length() remove bos and eos
lengths = [int(l) for l in lengths] # tensor to int
for l1, l2 in zip(syntax_span_rep.get_lengths(), lengths):
assert l1 == l2
res = EncoderState(
src_memory=permuted_memory,
lengths=lengths,
src_summary=src_summary,
permutation=permutation_matrix,
)
return res
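# Illustrative sketch (a minimal example, not part of the original file): the
# reordering above is a batched matrix product; a hard permutation matrix
# applied with torch.bmm reorders the token dimension of the encodings.
def _permutation_bmm_sketch():
    P = torch.tensor([[[0.0, 0.0, 1.0],
                       [0.0, 1.0, 0.0],
                       [1.0, 0.0, 0.0]]])                  # swap tokens 0 and 2
    X = torch.tensor([[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]])
    return torch.bmm(P, X)  # [[[3., 3.], [2., 2.], [1., 1.]]]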
@registry.register("encoder", "ssnt_latper_enc")
class LatPerSSNTEncoder(LatPerEncoder):
batched = True
Preproc = encoder.EncPreproc
"""
Compared with latper_enc, this encoder
1. adds an additional EOS token at the end of every input utterance.
2. supports semantic dropout (not very effective).
3. supports posterior control of straight/invert operations.
"""
def __init__(
self,
device,
preproc,
dropout=0.1,
word_emb_size=128,
recurrent_size=256,
num_heads=4,
use_native_lstm=True,
bert_version="bert-base-uncased",
syntax_encoder=("emb", "bilstm"),
semantic_encoder=("emb",),
postorder_encoder=None,
forward_relaxed=True,
gumbel_temperature=None,
use_map_decode=False,
semantic_dropout=None,
):
super().__init__(
device,
preproc,
dropout=dropout,
word_emb_size=word_emb_size,
recurrent_size=recurrent_size,
num_heads=num_heads,
use_native_lstm=use_native_lstm,
bert_version=bert_version,
syntax_encoder=syntax_encoder,
semantic_encoder=semantic_encoder,
postorder_encoder=postorder_encoder,
forward_relaxed=forward_relaxed,
gumbel_temperature=gumbel_temperature,
use_map_decode=use_map_decode,
)
self.semantic_dropout = semantic_dropout
self.eos_emb = torch.nn.Parameter(torch.randn(word_emb_size).to(device))
def compute_encoding_batched(self, tokens_list):
"""
Add a special token at the end of each sentence
"""
padded_tokens_list = self._pad(tokens_list)
syntax_src_enc = self.encoder(padded_tokens_list)
syntax_src_enc_batched, padded_lengths = syntax_src_enc.pad(batch_first=True)
# 1. syntax rep
syntax_span_rep = lstm.SpanRepresentation(
syntax_src_enc_batched, padded_lengths
)
permutation_matrix, reorder_loss = self.permutator(
syntax_span_rep
) # use span_rep to handle bos and eos
# 2. use permutation matrix to obtain reordered semantic representations
assert self.postorder_encoder_modules
preorder_src_enc = self.semantic_encoder(tokens_list)
postorder_input = preorder_src_enc.apply_raw(
lambda x: torch.bmm(permutation_matrix, x)
)
# 3. add EOS to the permuted embedding
def add_eos(x):
padded_x, lengths = x.pad()
bs, _, rs = padded_x.size()
zero_pad = torch.zeros([bs, 1, rs]).to(padded_x.device)
x_with_zero_padded = torch.cat([padded_x, zero_pad], dim=1)
aux_t = torch.zeros_like(x_with_zero_padded)
for batch_idx, eos_idx in enumerate(lengths):
aux_t[batch_idx, eos_idx] = self.eos_emb
new_x = x_with_zero_padded + aux_t
# increase the sorted length of packed seq by 1
sorted_lengths = [length + 1 for length in x.lengths]
per_idx_t = torch.LongTensor(x.orig_to_sort).to(self._device)
per_data = new_x[per_idx_t]
new_ps = torch.nn.utils.rnn.pack_padded_sequence(
per_data, sorted_lengths, batch_first=True
)
return attr.evolve(x, ps=new_ps, lengths=sorted_lengths)
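# Shape walkthrough for add_eos (descriptive, matching the code above): padded_x of
# shape [bs, max_len, rs] is zero-padded to [bs, max_len + 1, rs]; for each batch
# element the learned eos_emb is added at position lengths[b]; finally the packed
# sequence is rebuilt with every sequence length increased by one.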
postorder_input_with_eos = add_eos(postorder_input)
# 4. apply postorder update
postorder_src_enc = self.postorder_encoder(postorder_input_with_eos)
permuted_memory, lengths, src_summary = self.extract_enc(
postorder_src_enc, self.last_enc_module
)
# 5. optional: apply semantic dropout
postorder_emb, _ = postorder_input_with_eos.pad(batch_first=True)
if self.training and self.semantic_dropout:
p_mask = self.semantic_dropout * torch.ones(permuted_memory.size()[:2]).to(
self._device
)
mask = torch.bernoulli(p_mask)
batch_mask = mask.unsqueeze(-1).expand(-1, -1, permuted_memory.size()[-1])
permuted_memory = permuted_memory * (1 - batch_mask) + postorder_emb * batch_mask
elif self.semantic_dropout == 1.0:
# if semantic_dropout is 1.0, we skip the postordering model
permuted_memory = postorder_emb
# optional: check lengths
# syntax_span_rep.get_lengths() removes bos and eos
lengths = [int(l) for l in lengths] # tensor to int
for l1, l2 in zip(syntax_span_rep.get_lengths(), lengths):
assert l1 + 1 == l2
res = EncoderState(
src_memory=permuted_memory,
lengths=lengths,
permutation=permutation_matrix,
src_summary=src_summary,
src_embedding=postorder_emb,
enc_loss=reorder_loss,
)
return res
@registry.register("encoder", "latper_semi_batched_enc")
class LatPerSemiBatchedEncoder(encoder.Encoder):
"""
Used for SemiBatchedEncDec
"""
batched = True
Preproc = encoder.EncPreproc
def compute_encoding(self, tokens_list):
res = self.compute_encoding_unbatched(tokens_list)
return res
def compute_encoding_unbatched(self, tokens_list):
tokens_list = self._pad(tokens_list)
src_enc = self.encoder(tokens_list)
ret_list = []
for i in range(len(tokens_list)):
# transformer is not supported here for now
assert "transformer" not in self.encoder_modules
assert self.encoder_modules[-1] == "bilstm"
src_memory = src_enc.select(i)
src_summary = lstm.extract_last_hidden_state(src_memory)
# extract and apply latent permutation
span_rep = lstm.SpanRepresentation(src_memory)
permutation_matrix = self.permutator(span_rep)
real_src_memory = src_memory[1:-1, :] # remove bos and eos
permuted_memory = torch.matmul(permutation_matrix, real_src_memory)
# attach a batch dimension
permuted_memory = permuted_memory.unsqueeze(0)
src_summary = src_summary.unsqueeze(0)
ret_list.append(
EncoderState(src_memory=permuted_memory, src_summary=src_summary)
)
return ret_list
@registry.register("encoder", "sinkhorn_batched_enc")
class SinkhornEncoder(batched_encoder.Encoder):
batched = True
Preproc = encoder.EncPreproc
def __init__(
self,
device,
preproc,
dropout=0.1,
word_emb_size=128,
recurrent_size=256,
encoder=("emb", "bilstm"),
semantic_encoder=("emb",),
):
super().__init__(
device, preproc, dropout, word_emb_size, recurrent_size, encoder
)
query_size = recurrent_size
key_size = recurrent_size
self.query_proj = torch.nn.Linear(recurrent_size, query_size)
self.key_proj = torch.nn.Linear(recurrent_size, key_size)
# self.temp = np.power(key_size, 0.5)
self.temp = 1
self.num_sh_it = 32
self.semantic_encoder_modules = semantic_encoder
self.semantic_encoder = self._build_modules(self.semantic_encoder_modules)
def sinkhorn_attention(self, input_v):
""" input_v: sent_len * recurent_size """
query_v = self.query_proj(input_v)
key_v = self.key_proj(input_v)
score_mat = torch.einsum("ij,kj->ik", [key_v, query_v]) / self.temp
it_scores = score_mat
for _ in range(self.num_sh_it):
it_scores = it_scores - torch.logsumexp(it_scores, dim=1, keepdim=True)
it_scores = it_scores - torch.logsumexp(it_scores, dim=0, keepdim=True)
prob_m = torch.exp(it_scores)
return prob_m
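# Sinkhorn note: each iteration normalizes the score matrix in log space, first over
# rows (dim=1) and then over columns (dim=0); exponentiating after num_sh_it such
# sweeps yields an approximately doubly stochastic matrix, i.e. a soft relaxation of a
# permutation, with row and column sums close to 1.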
def compute_encoding(self, tokens_list):
syntax_enc = self.encoder(tokens_list)
semantic_enc = self.semantic_encoder(tokens_list)
max_len = max(len(tokens) for tokens in tokens_list)
memory_list = []
length_list = []
for i in range(len(tokens_list)):
src_memory = syntax_enc.select(i)
semantic_memory = semantic_enc.select(i)
permutation_mat = self.sinkhorn_attention(src_memory)
permutated_memory = torch.einsum("ji,jk->ik", [permutation_mat, semantic_memory])
cur_length = len(tokens_list[i])
reshaped_permutated_memory = F.pad(permutated_memory, (0, 0, 0, max_len - cur_length), "constant", 0)
memory_list.append(reshaped_permutated_memory)
length_list.append(cur_length)
src_enc_memory = torch.stack(memory_list, dim=0)
return EncoderState(src_enc_memory, length_list, None)
| [
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.stack",
"torch.einsum",
"torch.bmm",
"torch.logsumexp",
"torch.matmul",
"torch.bernoulli",
"torch.LongTensor",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.zeros_like",
"torch.nn.functional.pad",
"torch.exp",
"torch.randn"
] | 1.4.0 | chenyangh/tensor2struct-public | d3257cba6d76d3c658a58a78f687d986bdc755cf |
1.4 | import collections
import itertools
import json
import os
import attr
import nltk.corpus
import torch
import torchtext
import numpy as np
from tensor2struct.models import abstract_preproc
from tensor2struct.utils import serialization, vocab, registry
from tensor2struct.modules import rat, lstm, embedders, bert_tokenizer
from transformers import BertModel, ElectraModel
import logging
logger = logging.getLogger("tensor2struct")
@attr.s
class SpiderEncoderState:
state = attr.ib()
memory = attr.ib()
question_memory = attr.ib()
schema_memory = attr.ib()
words_for_copying = attr.ib()
pointer_memories = attr.ib()
pointer_maps = attr.ib()
m2c_align_mat = attr.ib()
m2t_align_mat = attr.ib()
# for copying
tokenizer = attr.ib()
def find_word_occurrences(self, token):
occurrences = [i for i, w in enumerate(self.words_for_copying) if w == token]
if len(occurrences) > 0:
return occurrences[0]
else:
return None
class SpiderEncoderBertPreproc(abstract_preproc.AbstractPreproc):
def __init__(
self,
save_path,
context,
bert_version="bert-base-uncased",
compute_sc_link=True,
compute_cv_link=True,
):
self.data_dir = os.path.join(save_path, "enc")
self.texts = collections.defaultdict(list)
self.compute_sc_link = compute_sc_link
self.compute_cv_link = compute_cv_link
self.context_config = context
self.relations = set()
# TODO: should get types from the data
# column_types = ["text", "number", "time", "boolean", "others"]
# self.tokenizer.add_tokens([f"<type: {t}>" for t in column_types])
self.tokenizer_config = bert_version # lazy init
self.context_cache = {}
@property
def tokenizer(self):
if not hasattr(self, "_tokenizer"):
self._tokenizer = bert_tokenizer.BERTokenizer(self.tokenizer_config)
return self._tokenizer
def validate_item(self, item, section):
num_words = (
len(item.text)
+ sum(len(c.name) for c in item.schema.columns)
+ sum(len(t.name) for t in item.schema.tables)
)
if num_words > 512:
logger.info(f"Found long seq in {item.schema.db_id}")
return False, None
else:
return True, None
def add_item(self, item, section, validation_info):
preprocessed = self.preprocess_item(item, validation_info)
self.texts[section].append(preprocessed)
if section == "train":
for relation_name in itertools.chain(
preprocessed["schema_relations"].keys(),
preprocessed["sc_relations"].keys(),
preprocessed["cv_relations"].keys(),
):
self.relations.add(relation_name)
def clear_items(self):
self.texts = collections.defaultdict(list)
def preprocess_item(self, item, validation_info):
q_text = " ".join(item.text)
# use the original words for copying, even though they are not necessarily used for encoding
# question_for_copying = self.tokenizer.tokenize_and_lemmatize(q_text)
question_for_copying = self.tokenizer.tokenize_with_orig(q_text)
if item.schema.db_id in self.context_cache:
context = self.context_cache[item.schema.db_id]
else:
context = registry.construct(
"context",
self.context_config,
schema=item.schema,
tokenizer=self.tokenizer,
)
self.context_cache[item.schema.db_id] = context
preproc_schema = context.preproc_schema
schema_relations = context.compute_schema_relations()
sc_relations = (
context.compute_schema_linking(q_text) if self.compute_sc_link else {}
)
cv_relations = (
context.compute_cell_value_linking(q_text) if self.compute_cv_link else {}
)
return {
"question_text": q_text,
"question_for_copying": question_for_copying,
"db_id": item.schema.db_id,
"schema_relations": schema_relations,
"sc_relations": sc_relations,
"cv_relations": cv_relations,
"columns": preproc_schema.column_names,
"tables": preproc_schema.table_names,
"table_bounds": preproc_schema.table_bounds,
"column_to_table": preproc_schema.column_to_table,
"table_to_columns": preproc_schema.table_to_columns,
"foreign_keys": preproc_schema.foreign_keys,
"foreign_keys_tables": preproc_schema.foreign_keys_tables,
"primary_keys": preproc_schema.primary_keys,
}
def save(self):
os.makedirs(self.data_dir, exist_ok=True)
# self.tokenizer.save_pretrained(self.data_dir)
default_relations = registry.lookup(
"context", self.context_config["name"]
).get_default_relations()
self.relations = sorted(self.relations.union(default_relations))
print(f"{len(self.relations)} relations extracted")
with open(os.path.join(self.data_dir, "relations.json"), "w") as f:
json.dump(self.relations, f)
for section, texts in self.texts.items():
with open(os.path.join(self.data_dir, section + ".jsonl"), "w") as f:
for text in texts:
f.write(json.dumps(text) + "\n")
def load(self):
# self.tokenizer = BertTokenizer.from_pretrained(self.data_dir)
with open(os.path.join(self.data_dir, "relations.json"), "r") as f:
relations = json.load(f)
self.relations = sorted(relations)
self.relations2id = {r: ind for ind, r in enumerate(self.relations)}
def dataset(self, section):
# for codalab eval
if len(self.texts[section]) > 0:
return self.texts[section]
else:
return [
json.loads(line)
for line in open(os.path.join(self.data_dir, section + ".jsonl"))
]
@registry.register("encoder", "spider-bert")
class SpiderEncoderBert(torch.nn.Module):
Preproc = SpiderEncoderBertPreproc
batched = True
def __init__(
self,
device,
preproc,
bert_token_type=False,
bert_version="bert-base-uncased",
summarize_header="avg",
include_in_memory=("question", "column", "table"),
rat_config={},
linking_config={},
):
super().__init__()
self._device = device
self.preproc = preproc
self.bert_token_type = bert_token_type
self.base_enc_hidden_size = (
1024 if "large" in bert_version else 768
)
self.include_in_memory = include_in_memory
# ways to summarize header
assert summarize_header in ["first", "avg"]
self.summarize_header = summarize_header
self.enc_hidden_size = self.base_enc_hidden_size
# matching
self.schema_linking = registry.construct(
"schema_linking", linking_config, preproc=preproc, device=device,
)
# rat
rat_modules = {"rat": rat.RAT, "none": rat.NoOpUpdate}
self.rat_update = registry.instantiate(
rat_modules[rat_config["name"]],
rat_config,
unused_keys={"name"},
device=self._device,
relations2id=preproc.relations2id,
hidden_size=self.enc_hidden_size,
)
# aligner
self.aligner = rat.AlignmentWithRAT(
device=device,
hidden_size=self.enc_hidden_size,
relations2id=preproc.relations2id,
enable_latent_relations=False,
)
if "electra" in bert_version:
modelclass = ElectraModel
elif "bert" in bert_version:
modelclass = BertModel
else:
raise NotImplementedError
self.bert_model = modelclass.from_pretrained(bert_version)
self.tokenizer = self.preproc.tokenizer
# self.bert_model.resize_token_embeddings(
# len(self.tokenizer)
# ) # several tokens added
def forward(self, descs):
# TODO: abstract the operations of batching for bert
batch_token_lists = []
batch_id_to_retrieve_question = []
batch_id_to_retrieve_column = []
batch_id_to_retrieve_table = []
if self.summarize_header == "avg":
batch_id_to_retrieve_column_2 = []
batch_id_to_retrieve_table_2 = []
long_seq_set = set()
batch_id_map = {} # some long examples are not included
# 1) retrieve bert pre-trained embeddings
for batch_idx, desc in enumerate(descs):
qs = self.tokenizer.text_to_ids(desc["question_text"], cls=True)
cols = [self.tokenizer.text_to_ids(c, cls=False) for c in desc["columns"]]
tabs = [self.tokenizer.text_to_ids(t, cls=False) for t in desc["tables"]]
token_list = (
qs + [c for col in cols for c in col] + [t for tab in tabs for t in tab]
)
assert self.tokenizer.check_bert_input_seq(token_list)
if len(token_list) > 512:
long_seq_set.add(batch_idx)
continue
q_b = len(qs)
col_b = q_b + sum(len(c) for c in cols)
# leave out [CLS] and [SEP]
question_indexes = list(range(q_b))[1:-1]
# use the first/avg representation for column/table
column_indexes = np.cumsum(
[q_b] + [len(token_list) for token_list in cols[:-1]]
).tolist()
table_indexes = np.cumsum(
[col_b] + [len(token_list) for token_list in tabs[:-1]]
).tolist()
if self.summarize_header == "avg":
column_indexes_2 = np.cumsum(
[q_b - 2] + [len(token_list) for token_list in cols]
).tolist()[1:]
table_indexes_2 = np.cumsum(
[col_b - 2] + [len(token_list) for token_list in tabs]
).tolist()[1:]
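# Worked example of the cumsum indexing above (hypothetical lengths): with q_b = 5 and
# column wordpiece lengths [2, 3], column_indexes = cumsum([5, 2]) = [5, 7], i.e. the
# first wordpiece of each column in the concatenated sequence; the *_2 variants are the
# shifted counterparts (here cumsum([3, 2, 3])[1:] = [5, 8]) used for the "avg" header
# summary below.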
# token_list is already indexed
indexed_token_list = token_list
batch_token_lists.append(indexed_token_list)
# add index for retrieving representations
question_rep_ids = torch.LongTensor(question_indexes).to(self._device)
batch_id_to_retrieve_question.append(question_rep_ids)
column_rep_ids = torch.LongTensor(column_indexes).to(self._device)
batch_id_to_retrieve_column.append(column_rep_ids)
table_rep_ids = torch.LongTensor(table_indexes).to(self._device)
batch_id_to_retrieve_table.append(table_rep_ids)
if self.summarize_header == "avg":
assert all(i2 >= i1 for i1, i2 in zip(column_indexes, column_indexes_2))
column_rep_ids_2 = torch.LongTensor(column_indexes_2).to(self._device)
batch_id_to_retrieve_column_2.append(column_rep_ids_2)
assert all(i2 >= i1 for i1, i2 in zip(table_indexes, table_indexes_2))
table_rep_ids_2 = torch.LongTensor(table_indexes_2).to(self._device)
batch_id_to_retrieve_table_2.append(table_rep_ids_2)
batch_id_map[batch_idx] = len(batch_id_map)
(
padded_token_lists,
att_mask_lists,
tok_type_lists,
) = self.tokenizer.pad_sequence_for_bert_batch(batch_token_lists)
tokens_tensor = torch.LongTensor(padded_token_lists).to(self._device)
att_masks_tensor = torch.LongTensor(att_mask_lists).to(self._device)
if self.bert_token_type:
tok_type_tensor = torch.LongTensor(tok_type_lists).to(self._device)
bert_output = self.bert_model(
tokens_tensor,
attention_mask=att_masks_tensor,
token_type_ids=tok_type_tensor,
)[0]
else:
bert_output = self.bert_model(
tokens_tensor, attention_mask=att_masks_tensor
)[0]
enc_output = bert_output
column_pointer_maps = [
{i: [i] for i in range(len(desc["columns"]))} for desc in descs
]
table_pointer_maps = [
{i: [i] for i in range(len(desc["tables"]))} for desc in descs
]
assert len(long_seq_set) == 0 # remove them for now
# 2) rat update
result = []
for batch_idx, desc in enumerate(descs):
# retrieve representations
bert_batch_idx = batch_id_map[batch_idx]
q_enc = enc_output[bert_batch_idx][
batch_id_to_retrieve_question[bert_batch_idx]
]
col_enc = enc_output[bert_batch_idx][
batch_id_to_retrieve_column[bert_batch_idx]
]
tab_enc = enc_output[bert_batch_idx][
batch_id_to_retrieve_table[bert_batch_idx]
]
if self.summarize_header == "avg":
col_enc_2 = enc_output[bert_batch_idx][
batch_id_to_retrieve_column_2[bert_batch_idx]
]
tab_enc_2 = enc_output[bert_batch_idx][
batch_id_to_retrieve_table_2[bert_batch_idx]
]
col_enc = (col_enc + col_enc_2) / 2.0 # avg of first and last token
tab_enc = (tab_enc + tab_enc_2) / 2.0 # avg of first and last token
words_for_copying = desc["question_for_copying"]
assert q_enc.size()[0] == len(words_for_copying)
assert col_enc.size()[0] == len(desc["columns"])
assert tab_enc.size()[0] == len(desc["tables"])
# rat update
# TODO: change this, question is in the protocol of building relations
desc["question"] = words_for_copying
relation = self.schema_linking(desc)
(
q_enc_new_item,
c_enc_new_item,
t_enc_new_item,
) = self.rat_update.forward_unbatched(
desc,
q_enc.unsqueeze(1),
col_enc.unsqueeze(1),
tab_enc.unsqueeze(1),
relation,
)
# attention memory
memory = []
if "question" in self.include_in_memory:
memory.append(q_enc_new_item)
if "column" in self.include_in_memory:
memory.append(c_enc_new_item)
if "table" in self.include_in_memory:
memory.append(t_enc_new_item)
memory = torch.cat(memory, dim=1)
# alignment matrix
align_mat_item = self.aligner(
desc, q_enc_new_item, c_enc_new_item, t_enc_new_item, relation
)
result.append(
SpiderEncoderState(
state=None,
words_for_copying=words_for_copying,
tokenizer=self.tokenizer,
memory=memory,
question_memory=q_enc_new_item,
schema_memory=torch.cat((c_enc_new_item, t_enc_new_item), dim=1),
pointer_memories={
"column": c_enc_new_item,
"table": t_enc_new_item,
},
pointer_maps={
"column": column_pointer_maps[batch_idx],
"table": table_pointer_maps[batch_idx],
},
m2c_align_mat=align_mat_item[0],
m2t_align_mat=align_mat_item[1],
)
)
return result
| [
"torch.cat",
"torch.LongTensor"
] | 1.4.0 | chenyangh/tensor2struct-public | d3257cba6d76d3c658a58a78f687d986bdc755cf |
1.4 | import torch
from fairseq.models.bart import BARTModel
import argparse
from pprint import pprint
from tqdm import tqdm
import os
from os.path import join
import shutil
import logging
import numpy as np
import json
import random
import string
import files2rouge
import time
def test_rouge(cand, ref, outpath=None, tmp_dir='/tmp/'):
def random_string(stringLength=8):
"""Generate a random string of fixed length """
letters = string.ascii_lowercase
return ''.join(random.sample(letters,stringLength))
tmp_path = join(tmp_dir, 'tmp'+random_string())
os.makedirs(tmp_path)
hyp_path = join(tmp_path, 'hyp.txt')
ref_path = join(tmp_path, 'ref.txt')
candidates = [line.strip().lower() for line in open(cand, encoding='utf-8')]
references = [json.loads(line.strip())['target'] for line in open(ref, encoding='utf-8')]
paper_ids = [json.loads(line.strip())['paper_id'] for line in open(ref, encoding='utf-8')]
assert len(candidates) == len(references), f'{tmp_dir}: len cand {len(candidates)} len ref {len(references)}'
all_scores = []
save_scores = []
# For each prediction
for cand_idx, cand in enumerate(candidates):
curr_targets = references[cand_idx]
curr_scores = []
hyp = open(join(tmp_path, 'hyp.txt'), 'w')
hyp.write(cand)
hyp.close()
# For each target
for tgt in curr_targets:
tgt = tgt.lower().strip('\n')
ref = open(join(tmp_path, 'ref.txt'), 'w')
ref.write(tgt)
ref.close()
try:
_r = files2rouge.run(ref_path, hyp_path, to_json=True)
except Exception as e:
print(e)
exit(0)
curr_scores.append(_r)
# Take the max of curr scores
r1 = [r['rouge-1']['f'] for r in curr_scores]
max_idx = r1.index(max(r1))
save_scores.append({
'paper_id': paper_ids[cand_idx],
'all_scores': curr_scores,
'max_idx': max_idx,
'prediction': cand,
'target': curr_targets
})
all_scores.append(curr_scores[max_idx])
# Average across all scores
avg_scores = {"rouge-1": {
"f": [],
"p": [],
"r":[]
},
"rouge-2": {
"f": [],
"p": [],
"r": []
},
"rouge-l": {
"f": [],
"p": [],
"r": []
}
}
# Append all scores to an array, then average over the array
for score in all_scores:
for r_type in score.keys():
for m_type in score[r_type].keys():
x = score[r_type][m_type]
avg_scores[r_type][m_type].append(x)
for r_type in avg_scores.keys():
for m_type in avg_scores[r_type].keys():
x = avg_scores[r_type][m_type]
avg_scores[r_type][m_type] = np.mean(x)
if outpath:
with open(outpath, 'w') as fout:
for s in save_scores:
fout.write(json.dumps(s) + '\n')
shutil.rmtree(tmp_path)
return avg_scores
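# Protocol summary: for every prediction, ROUGE is computed against each reference
# target, the target with the highest ROUGE-1 F1 is kept, and the kept scores are then
# averaged over the whole test set.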
def evaluate(bart, bsz, count, datadir, outdir, decoder_params,
test_fname='test.hypo', multitarget=False, quick=False):
if torch.cuda.is_available():
bart.cuda()
bart.half()
bart.eval()
source_fname = os.path.join(datadir, 'test.source')
pred_fname = os.path.join(outdir, test_fname)
with open(source_fname, encoding="utf-8") as source, open(pred_fname, 'w', encoding="utf-8") as fout:
sline = source.readline().strip()
# sline = f'{sline} {decoder_params["ctrl"]} .'
slines = [sline]
for sline in tqdm(source):
if count % bsz == 0:
with torch.no_grad():
hypotheses_batch = bart.sample(slines, beam=decoder_params['beam'],
lenpen=decoder_params['lenpen'],
max_len_b=decoder_params['max_len_b'],
min_len=decoder_params['min_len'],
no_repeat_ngram_size=decoder_params['no_repeat_ngram_size'])
for hypothesis in hypotheses_batch:
fout.write(hypothesis + '\n')
fout.flush()
slines = []
slines.append(sline.strip())
count += 1
if slines != []:
hypotheses_batch = bart.sample(slines, beam=decoder_params['beam'],
lenpen=decoder_params['lenpen'],
max_len_b=decoder_params['max_len_b'],
min_len=decoder_params['min_len'],
no_repeat_ngram_size=decoder_params['no_repeat_ngram_size'])
for hypothesis in hypotheses_batch:
fout.write(hypothesis.replace('\n', ' ') + '\n')
fout.flush()
ref_fname = 'test.jsonl'
ref_fname = os.path.join(datadir, ref_fname)
r = test_rouge(pred_fname,
ref_fname,
outpath=os.path.join(outdir, test_fname + '.rouge'))
return r
def maybe_percentages(r, percentages):
if percentages:
for r_type in ['rouge-1', 'rouge-2', 'rouge-l']:
for m_type in ['f', 'p', 'r']:
x = r[r_type][m_type]
r[r_type][m_type] = x * 100
return r
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('datadir')
parser.add_argument('checkpoint_dir')
parser.add_argument('--checkpoint_file', default='checkpoint_best.pt')
parser.add_argument('--outdir', default='')
parser.add_argument('--percentages', action='store_true', default=False,
help='flag if you want to print as percentages')
# Decoder params
# parser.add_argument('--ctrl', default='<|TLDR|>')
parser.add_argument('--count', default=1, type=int)
parser.add_argument('--batch_size', '--bsz', default=32, type=int, dest='bsz')
parser.add_argument('--test_fname', default='test.hypo')
parser.add_argument('--beam', default=6, type=int)
parser.add_argument('--lenpen', default=1.0, type=float)
parser.add_argument('--max_len_b', default=30, type=int)
parser.add_argument('--min_len', default=5, type=int)
parser.add_argument('--no_repeat_ngram_size', default=3, type=int)
args = parser.parse_args()
start = time.time()
#### Path checks
if not os.path.exists(args.datadir):
print(f'{args.datadir} does not exist')
exit(0)
if not os.path.exists(join(args.datadir, 'test.source')):
print(f'{join(args.datadir, "test.source")} does not exist')
exit(0)
if (not os.path.exists(join(args.checkpoint_dir, args.checkpoint_file))):
print(f'{join(args.checkpoint_dir, args.checkpoint_file)} does not exist')
exit(0)
if not args.outdir:
args.outdir = args.checkpoint_dir
os.makedirs(args.outdir, exist_ok=True)
if args.datadir.endswith('/'):
args.datadir = args.datadir[:-1]
bart = BARTModel.from_pretrained(
args.checkpoint_dir,
checkpoint_file=args.checkpoint_file,
data_name_or_path=args.datadir + '-bin',
task='translation'
)
decoder_params ={
# 'ctrl': args.ctrl,
'beam': args.beam,
'lenpen': args.lenpen,
'max_len_b': args.max_len_b,
'min_len': args.min_len,
'no_repeat_ngram_size': args.no_repeat_ngram_size
}
r = evaluate(bart, args.bsz, args.count,
args.datadir, args.outdir,
decoder_params,
test_fname=args.test_fname,
)
r['beam'] = args.beam
r['lenpen'] = args.lenpen
pprint(maybe_percentages(r, args.percentages))
with open(join(args.outdir, args.test_fname + '.score'), 'w') as fout:
fout.write(json.dumps(r, indent=4))
end = time.time()
print(f'Time to run script: {(end-start)} sec')
| [
"torch.no_grad",
"torch.cuda.is_available"
] | 1.4.0 | yakushechkin/scitldr | c8090d0c8d62bafc878a0050dcfb7c33e3c54dc5 |
1.6 | import torch
from torch import nn
from .metric import Metric
class Accuracy(Metric):
def __init__(self, name="accuracy", dtype=None,
reduction="sum", **kwargs):
super().__init__(name, dtype, **kwargs)
assert reduction in {"sum", "mean", "max", "min"}
# TODO: more reduction
self.reduction = reduction
def forward(self, y_true, y_pred,
sample_weight=None):
return self.update_state(y_true, y_pred,
sample_weight=sample_weight)
def update_state(self, y_true, y_pred,
sample_weight=None):
if sample_weight is not None:
raise NotImplementedError("sample_weight")
if y_pred.ndim == 2:
y_pred = y_pred.argmax(1)
self.correct += torch.sum(y_pred == y_true)
self.total += y_true.numel()
def reset_states(self):
# K.batch_set_value([(v, 0) for v in self.variables])
self.total = torch.tensor(0)
self.correct = torch.tensor(0)
def result(self):
return (self.correct.float() / self.total).detach()
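# Usage sketch (illustrative): after reset_states(), repeated update_state(y_true, y_pred)
# calls accumulate `correct` and `total`, and result() returns correct / total as a
# detached float tensor; e.g. y_true = [0, 1, 1] vs. y_pred = [0, 1, 0] gives 2/3.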
| [
"torch.tensor",
"torch.sum"
] | 1.6.0 | kisekizzz/GraphGallery | fd4a1f474c244f774397460ae95935638ef48f5b |
1.10 | from Models.MNIST_Model import MNIST_Model as Model
from Dataloaders.Mnist import Mnist as Dataloader
from Clients.SGDClient import SGDClient as Client
from Servers.FedKpServer import FedKpServer
from Servers.FedAvgServer import FedAvgServer
from Algorithms.FedAvg import FedAvg as Alg
import matplotlib.pyplot as plt
from sklearn.neighbors import KernelDensity
import matplotlib.gridspec as grid_spec
import seaborn as sns
import pandas as pd
import torch
import numpy as np
import os
from Models.Callbacks.Callbacks import Callbacks
import json
out_path = os.path.join('data','Results','MNIST_exploration')
result_path = os.path.join(out_path,'results')
plot_path = os.path.join(out_path,'plots')
if not os.path.exists(out_path): os.mkdir(out_path)
if not os.path.exists(result_path): os.mkdir(result_path)
if not os.path.exists(plot_path): os.mkdir(plot_path)
# Parameters
alphas = [100,50,10,5,1,0.5,0.1,0.05,0.01,0.005,0.001]
n_clients = 10
seed = 0
batch_size = 16
device = torch.cuda.current_device() if torch.cuda.is_available() else 'cpu'
dl = Dataloader(n_clients)
test_data = dl.get_test_dataloader(batch_size)
##################### Plot distributions based on alpha ######################
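# Background note (not from the original script): the Dirichlet concentration alpha
# controls how non-IID the client splits are -- large alpha (e.g. 100) gives each client
# a near-uniform label distribution, while small alpha (e.g. 0.01) concentrates most of
# a client's data on one or two labels, which is what the panels below visualize.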
plt.style.use('seaborn-whitegrid')
torch.manual_seed(seed)
np.random.seed(seed)
fig,axs = plt.subplots(2,2,figsize=(6,6))
for i,alpha in enumerate([10,1,0.1,0.01]):
ax = axs[i//2,i%2]
dl = Dataloader(n_clients,alpha=alpha)
for j,c_data in enumerate(dl.get_training_raw_data()):
labels = [l for x,l in c_data]
counts = np.zeros(10)
for l in labels:
counts[l]+=1
counts = counts/5420
ax.scatter(np.repeat(j,10)+1,np.arange(10)+1,s = 100*counts,color='blue')
ax.set_title('alpha = %.2f'%alpha)
ax.set_xticks(np.arange(10)+1)
ax.set_yticks(np.arange(10)+1)
ax.grid(color='grey',alpha=0.2)
if(i%2==0):
ax.set_ylabel('Labels')
if(i//2==1):
ax.set_xlabel('Client')
plt.savefig(os.path.join(plot_path,'sampledist.jpg'))
plt.show()
################# Res 1 ###############################################
torch.manual_seed(seed)
np.random.seed(seed)
init_model = Model()
cbs = Callbacks(test_data,verbose=False,device=device)
callbacks = [cbs.server_loss,cbs.server_accuracy,cbs.server_training_loss,cbs.server_training_accuracy,
cbs.skew,cbs.kurtosis,cbs.ks_test]
res = pd.DataFrame(columns=['alpha','train_acc','train_loss','val_loss','val_acc',
'ks_test','kurtosis','skew','rounds','epochs','n_clients'])
for alpha in alphas[1:]:
dl = Dataloader(n_clients,alpha=alpha)
alg = Alg(dl,Model,batch_size=batch_size)
alg.server.model.set_weights(init_model.get_weights())
alg.run(1,epochs=1,callbacks=callbacks,device=device)
r = alg.get_callback_data()
train_acc = np.mean(r['server_training_accuracy'][-1])
train_loss = np.mean(r['server_training_loss'][-1])
val_loss = r['server_loss'][-1]
val_acc = r['server_accuracy'][-1]
for skew,kurtosis,ks in zip(r['skew'][-1],r['kurtosis'][-1],r['ks_test'][-1]):
res = res.append({'alpha':alpha,'train_acc':train_acc,'train_loss':train_loss,
'val_acc':val_acc,'val_loss':val_loss,
'ks_test':ks,'kurtosis':kurtosis,
'skew':skew,'rounds':1,'epochs':1,
'n_clients':n_clients},ignore_index = True)
import matplotlib.pyplot as plt
from matplotlib import gridspec as grid_spec
from sklearn.neighbors import KernelDensity
import seaborn as sns
import os
res.to_csv(os.path.join(result_path,'res_alphas.csv'))
################# Res 5 ###############################################
seed=1
torch.manual_seed(seed)
np.random.seed(seed)
repeats = 5
init_model = Model()
cbs = Callbacks(test_data,verbose=False,device=device)
callbacks = [cbs.server_loss,cbs.server_accuracy,cbs.server_training_loss,cbs.server_training_accuracy]
res = pd.DataFrame(columns=['alpha','train_acc','train_loss','val_loss','val_acc'])
for _ in range(repeats):
for alpha in alphas:
dl = Dataloader(n_clients,alpha=alpha)
alg = Alg(dl,Model,batch_size=batch_size)
alg.server.model.set_weights(init_model.get_weights())
alg.run(1,epochs=1,callbacks=callbacks,device=device)
r = alg.get_callback_data()
train_acc = np.mean(r['server_training_accuracy'][-1])
train_loss = np.mean(r['server_training_loss'][-1])
val_loss = r['server_loss'][-1]
val_acc = r['server_accuracy'][-1]
res = res.append({'alpha':alpha,'train_acc':train_acc,'train_loss':train_loss,
'val_acc':val_acc,'val_loss':val_loss},ignore_index = True)
res['val_acc'] = res['val_acc'].apply(lambda x: x.item())
res.to_csv(os.path.join(result_path,'res_lossacc.csv'))
res
########################################################################
############################ Plot Results ##############################
########################################################################
######## Res 1 ###############################
res = pd.read_csv(os.path.join(result_path,'res_alphas.csv'))
alphas = [100,50,10,5,1,0.5,0.1,0.05,0.01,0.005,0.001]
out_path = os.path.join('data','Results','Plots','Distributions')
metrics = ['ks_test','skew','kurtosis']
colors = sns.color_palette("ch:s=.25,rot=-.25", n_colors=len(alphas))
for metric in metrics:
fig = plt.figure(figsize=(6,6))
gs = grid_spec.GridSpec(len(alphas),1,1)
ax_objs=[]
for i,alpha in enumerate(alphas[::-1]):
ax_objs.append(fig.add_subplot(gs[i:i+1, 0:]))
ax = ax_objs[-1]
res_tmp = res[res['alpha'] == alpha]
x = np.array(res_tmp[metric])
acc = np.array(res_tmp['val_acc'])[0]
loss = np.array(res_tmp['val_loss'])[0]
x_d = np.linspace(np.min(x),np.max(x), 1000)
kde = KernelDensity(bandwidth=0.03, kernel='gaussian')
kde.fit(x[:, None])
logprob = kde.score_samples(x_d[:, None])
# plotting the distribution
ax.plot(x_d, np.exp(logprob),color="white",lw=1)
ax.fill_between(x_d, np.exp(logprob), alpha=1,color=colors[i])
# setting uniform x and y lims
ax.set_xlim(np.min(x),np.max(x))
# make background transparent
rect = ax.patch
rect.set_alpha(0)
# remove borders, axis ticks, and labels
ax.set_yticklabels([])
ax.set_yticks([])
if i == len(alphas)-1:
if metric == 'ks_test':
ax.set_xlabel('p-value', fontsize=14)
else:
ax.set_xlabel(metric, fontsize=14)
else:
ax.set_xticklabels([])
ax.set_xticks([])
spines = ["top","right","left","bottom"]
for s in spines:
ax.spines[s].set_visible(False)
ax.text(np.min(x),0,r'$\alpha=$'+'%.3f'%(alpha),fontsize=10,ha="right")
ax.grid(False)
gs.update(hspace=-0.5)
plt.tight_layout()
name = 'alphas_%s'%(metric)
plt.savefig(os.path.join(plot_path,name))
############## Res 5 #############################
res = pd.read_csv(os.path.join(result_path,'res_lossacc.csv')).drop('Unnamed: 0',axis=1)
stats = res.groupby('alpha').aggregate(['mean','std'])
cols = np.unique([c[0] for c in stats.columns])[[0,2,1,3]]
colors = sns.color_palette("Paired", n_colors=4)
fig,ax = plt.subplots(figsize=(6,6))
ax2 = ax.twinx()
x = np.array(stats.index)
for i,col in enumerate(cols):
mu = np.array(stats[col]['mean'])
std = np.array(stats[col]['std'])
l = mu-std
u = mu+std
ax_tmp = ax2 if('loss' in col) else ax
ax_tmp.plot(x,mu,label=col,color=colors[i])
ax_tmp.fill_between(x,l,u,color=colors[i],alpha=0.3)
ax.legend(bbox_to_anchor=(1, -0.1),ncol=2,prop={'size': 10})
ax2.grid(False)
ax.grid(False)
ax2.legend(bbox_to_anchor=(0.5, -0.1),ncol=2,prop={'size': 10})
ax.set_xlabel(r'$\alpha$',fontsize=14)
ax.set_ylabel('Accuracy',fontsize=14)
ax2.set_ylabel('Loss',fontsize=14)
ax.set_xscale('log')
plt.savefig(os.path.join(plot_path,'lossandacc'))
plt.show()
| [
"torch.cuda.current_device",
"torch.cuda.is_available",
"torch.manual_seed"
] | 1.10.2 | MartinHex/master-thesis | b5077d9acce60fd42467f73df6e39c61fd3e19b2 |
1.3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
from torch import Tensor
from torchmetrics.utilities.checks import _check_same_shape
from torchmetrics.utilities.data import METRIC_EPS
def _kld_update(p: Tensor, q: Tensor, log_prob: bool) -> Tuple[Tensor, int]:
_check_same_shape(p, q)
if p.ndim != 2 or q.ndim != 2:
raise ValueError(f"Expected both p and q distribution to be 2D but got {p.ndim} and {q.ndim} respectively")
total = p.shape[0]
if log_prob:
measures = torch.sum(p.exp() * (p - q), axis=-1)
else:
p = p / p.sum(axis=-1, keepdim=True)
q = q / q.sum(axis=-1, keepdim=True)
q = torch.clamp(q, METRIC_EPS)
measures = torch.sum(p * torch.log(p / q), axis=-1)
return measures, total
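# Worked example (matches the doctest below): with p = [0.36, 0.48, 0.16] and uniform
# q = [1/3, 1/3, 1/3], the per-sample measure is
# 0.36*ln(1.08) + 0.48*ln(1.44) + 0.16*ln(0.48) ~= 0.0853.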
def _kld_compute(measures: Tensor, total: Tensor, reduction: Optional[str] = 'mean') -> Tensor:
if reduction == 'sum':
return measures.sum()
elif reduction == 'mean':
return measures.sum() / total
elif reduction is None or reduction == 'none':
return measures
return measures / total
def kldivergence(p: Tensor, q: Tensor, log_prob: bool = False, reduction: Optional[str] = 'mean') -> Tensor:
r"""Computes the `KL divergence <https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence>`_:
.. math::
D_{KL}(P||Q) = \sum_{x\in\mathcal{X}} P(x) \log\frac{P(x)}{Q{x}}
Where :math:`P` and :math:`Q` are probability distributions where :math:`P` usually represents a distribution
over data and :math:`Q` is often a prior or approximation of :math:`P`. It should be noted that the KL divergence
is a non-symmetric metric, i.e. :math:`D_{KL}(P||Q) \neq D_{KL}(Q||P)`.
Args:
p: data distribution with shape ``[N, d]``
q: prior or approximate distribution with shape ``[N, d]``
log_prob: bool indicating if input is log-probabilities or probabilities. If given as probabilities,
will normalize to make sure the distributions sum to 1
reduction:
Determines how to reduce over the ``N``/batch dimension:
- ``'mean'`` [default]: Averages score across samples
- ``'sum'``: Sum score across samples
- ``'none'`` or ``None``: Returns score per sample
Example:
>>> import torch
>>> from torchmetrics.functional import kldivergence
>>> p = torch.tensor([[0.36, 0.48, 0.16]])
>>> q = torch.tensor([[1/3, 1/3, 1/3]])
>>> kldivergence(p, q)
tensor(0.0853)
"""
measures, total = _kld_update(p, q, log_prob)
return _kld_compute(measures, total, reduction)
| [
"torch.log",
"torch.clamp"
] | 1.3.1 | GiannisVagionakis/metrics | 12d0746e0e9ef9eeeca11cef1e118a156c1518ec |
1.3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from pathlib import Path
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from pytorch_lightning import Trainer
from pytorch_lightning.core.step_result import Result, TrainResult, EvalResult
import tests.base.develop_utils as tutils
from tests.base import EvalModelTemplate
from tests.base.datamodules import TrialMNISTDataModule
def _setup_ddp(rank, worldsize):
import os
os.environ["MASTER_ADDR"] = "localhost"
# initialize the process group
dist.init_process_group("gloo", rank=rank, world_size=worldsize)
def _ddp_test_fn(rank, worldsize, result_cls: Result):
_setup_ddp(rank, worldsize)
tensor = torch.tensor([1.0])
res = result_cls()
res.log("test_tensor", tensor, sync_dist=True, sync_dist_op=torch.distributed.ReduceOp.SUM)
assert res["test_tensor"].item() == dist.get_world_size(), "Result-Log does not work properly with DDP and Tensors"
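# Note on the assertion above: each of the `worldsize` processes logs 1.0, and sync_dist
# with ReduceOp.SUM all-reduces across ranks, so the synced value should equal
# dist.get_world_size() on every rank.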
@pytest.mark.parametrize("result_cls", [Result, TrainResult, EvalResult])
@pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows")
def test_result_reduce_ddp(result_cls):
"""Make sure result logging works with DDP"""
tutils.reset_seed()
tutils.set_random_master_port()
worldsize = 2
mp.spawn(_ddp_test_fn, args=(worldsize, result_cls), nprocs=worldsize)
@pytest.mark.parametrize(
"test_option,do_train,gpus",
[
pytest.param(
0, True, 0, id='full_loop'
),
pytest.param(
0, False, 0, id='test_only'
),
pytest.param(
1, False, 0, id='test_only_mismatching_tensor', marks=pytest.mark.xfail(raises=ValueError, match="Mism.*")
),
pytest.param(
2, False, 0, id='mix_of_tensor_dims'
),
pytest.param(
3, False, 0, id='string_list_predictions'
),
pytest.param(
4, False, 0, id='int_list_predictions'
),
pytest.param(
5, False, 0, id='nested_list_predictions'
),
pytest.param(
6, False, 0, id='dict_list_predictions'
),
pytest.param(
7, True, 0, id='write_dict_predictions'
),
pytest.param(
0, True, 1, id='full_loop_single_gpu', marks=pytest.mark.skipif(torch.cuda.device_count() < 1, reason="test requires single-GPU machine")
)
]
)
def test_result_obj_predictions(tmpdir, test_option, do_train, gpus):
tutils.reset_seed()
dm = TrialMNISTDataModule(tmpdir)
prediction_file = Path(tmpdir) / 'predictions.pt'
model = EvalModelTemplate()
model.test_option = test_option
model.prediction_file = prediction_file.as_posix()
model.test_step = model.test_step_result_preds
model.test_step_end = None
model.test_epoch_end = None
model.test_end = None
if prediction_file.exists():
prediction_file.unlink()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=3,
weights_summary=None,
deterministic=True,
gpus=gpus
)
# Prediction file shouldn't exist yet because we haven't done anything
assert not prediction_file.exists()
if do_train:
result = trainer.fit(model, dm)
assert result == 1
result = trainer.test(datamodule=dm)
result = result[0]
assert result['test_loss'] < 0.6
assert result['test_acc'] > 0.8
else:
result = trainer.test(model, datamodule=dm)
# check prediction file now exists and is of expected length
assert prediction_file.exists()
predictions = torch.load(prediction_file)
assert len(predictions) == len(dm.mnist_test)
def test_result_gather_stack():
""" Test that tensors get concatenated when they all have the same shape. """
outputs = [
{"foo": torch.zeros(4, 5)},
{"foo": torch.zeros(4, 5)},
{"foo": torch.zeros(4, 5)},
]
result = Result.gather(outputs)
assert isinstance(result["foo"], torch.Tensor)
assert list(result["foo"].shape) == [12, 5]
def test_result_gather_concatenate():
""" Test that tensors get concatenated when they have varying size in first dimension. """
outputs = [
{"foo": torch.zeros(4, 5)},
{"foo": torch.zeros(8, 5)},
{"foo": torch.zeros(3, 5)},
]
result = Result.gather(outputs)
assert isinstance(result["foo"], torch.Tensor)
assert list(result["foo"].shape) == [15, 5]
def test_result_gather_scalar():
""" Test that 0-dim tensors get gathered and stacked correctly. """
outputs = [
{"foo": torch.tensor(1)},
{"foo": torch.tensor(2)},
{"foo": torch.tensor(3)},
]
result = Result.gather(outputs)
assert isinstance(result["foo"], torch.Tensor)
assert list(result["foo"].shape) == [3]
def test_result_gather_different_shapes():
""" Test that tensors of varying shape get gathered into a list. """
outputs = [
{"foo": torch.tensor(1)},
{"foo": torch.zeros(2, 3)},
{"foo": torch.zeros(1, 2, 3)},
]
result = Result.gather(outputs)
expected = [torch.tensor(1), torch.zeros(2, 3), torch.zeros(1, 2, 3)]
assert isinstance(result["foo"], list)
assert all(torch.eq(r, e).all() for r, e in zip(result["foo"], expected))
def test_result_gather_mixed_types():
""" Test that a collection of mixed types gets gathered into a list. """
outputs = [
{"foo": 1.2},
{"foo": ["bar", None]},
{"foo": torch.tensor(1)},
]
result = Result.gather(outputs)
expected = [1.2, ["bar", None], torch.tensor(1)]
assert isinstance(result["foo"], list)
assert result["foo"] == expected
def test_result_retrieve_last_logged_item():
result = Result()
result.log('a', 5., on_step=True, on_epoch=True)
assert result['a_epoch'] == 5.
assert result['a_step'] == 5.
assert result['a'] == 5.
| [
"torch.zeros",
"torch.distributed.get_world_size",
"torch.eq",
"torch.distributed.init_process_group",
"torch.multiprocessing.spawn",
"torch.cuda.device_count",
"torch.tensor",
"torch.load"
] | 1.3 | nightlessbaron/pytorch-lightning | 239bea5c29cef0d1a0cfb319de5dbc9227aa2a53 |
1.3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from pytorch_lightning.core.lightning import LightningModule
from tests.base.datasets import MNIST, AverageDataset, TrialMNIST
class Generator(nn.Module):
def __init__(self, latent_dim: int, img_shape: tuple):
super().__init__()
self.img_shape = img_shape
def block(in_feat, out_feat, normalize=True):
layers = [nn.Linear(in_feat, out_feat)]
if normalize:
layers.append(nn.BatchNorm1d(out_feat, 0.8))
layers.append(nn.LeakyReLU(0.2, inplace=True))
return layers
self.model = nn.Sequential(
*block(latent_dim, 128, normalize=False),
*block(128, 256),
*block(256, 512),
*block(512, 1024),
nn.Linear(1024, int(np.prod(img_shape))),
nn.Tanh()
)
def forward(self, z):
img = self.model(z)
img = img.view(img.size(0), *self.img_shape)
return img
class Discriminator(nn.Module):
def __init__(self, img_shape: tuple):
super().__init__()
self.model = nn.Sequential(
nn.Linear(int(np.prod(img_shape)), 512),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(512, 256),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(256, 1),
nn.Sigmoid(),
)
def forward(self, img):
img_flat = img.view(img.size(0), -1)
validity = self.model(img_flat)
return validity
class BasicGAN(LightningModule):
"""Implements a basic GAN for the purpose of illustrating multiple optimizers."""
def __init__(self, hidden_dim: int = 128, learning_rate: float = 0.001, b1: float = 0.5, b2: float = 0.999, **kwargs):
super().__init__()
self.hidden_dim = hidden_dim
self.learning_rate = learning_rate
self.b1 = b1
self.b2 = b2
# networks
mnist_shape = (1, 28, 28)
self.generator = Generator(latent_dim=self.hidden_dim, img_shape=mnist_shape)
self.discriminator = Discriminator(img_shape=mnist_shape)
# cache for generated images
self.generated_imgs = None
self.last_imgs = None
self.example_input_array = torch.rand(2, self.hidden_dim)
def forward(self, z):
return self.generator(z)
def adversarial_loss(self, y_hat, y):
return F.binary_cross_entropy(y_hat, y)
def training_step(self, batch, batch_idx, optimizer_idx=None):
imgs, _ = batch
self.last_imgs = imgs
# train generator
if optimizer_idx == 0:
# sample noise
z = torch.randn(imgs.shape[0], self.hidden_dim)
z = z.type_as(imgs)
# generate images
self.generated_imgs = self(z)
# ground truth result (ie: all fake)
# put on GPU because we created this tensor inside training_loop
valid = torch.ones(imgs.size(0), 1)
valid = valid.type_as(imgs)
# adversarial loss is binary cross-entropy
g_loss = self.adversarial_loss(self.discriminator(self.generated_imgs), valid)
tqdm_dict = {'g_loss': g_loss}
output = OrderedDict({
'loss': g_loss,
'progress_bar': tqdm_dict,
'log': tqdm_dict
})
return output
# train discriminator
if optimizer_idx == 1:
# Measure discriminator's ability to classify real from generated samples
# how well can it label as real?
valid = torch.ones(imgs.size(0), 1)
valid = valid.type_as(imgs)
real_loss = self.adversarial_loss(self.discriminator(imgs), valid)
# how well can it label as fake?
fake = torch.zeros(imgs.size(0), 1)
fake = fake.type_as(imgs)
fake_loss = self.adversarial_loss(self.discriminator(self.generated_imgs.detach()), fake)
# discriminator loss is the average of these
d_loss = (real_loss + fake_loss) / 2
tqdm_dict = {'d_loss': d_loss}
output = OrderedDict({
'loss': d_loss,
'progress_bar': tqdm_dict,
'log': tqdm_dict
})
return output
def configure_optimizers(self):
lr = self.learning_rate
b1 = self.b1
b2 = self.b2
opt_g = torch.optim.Adam(self.generator.parameters(), lr=lr, betas=(b1, b2))
opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=(b1, b2))
return [opt_g, opt_d], []
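# Note (behavior assumed from the Lightning multi-optimizer API used here): with two
# optimizers returned, training_step runs once per optimizer for every batch, with
# optimizer_idx 0 selecting the generator branch and 1 the discriminator branch above.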
def train_dataloader(self):
return DataLoader(TrialMNIST(train=True, download=True), batch_size=16)
class ParityModuleRNN(LightningModule):
def __init__(self):
super().__init__()
self.rnn = nn.LSTM(10, 20, batch_first=True)
self.linear_out = nn.Linear(in_features=20, out_features=5)
self.example_input_array = torch.rand(2, 3, 10)
def forward(self, x):
seq, last = self.rnn(x)
return self.linear_out(seq)
def training_step(self, batch, batch_nb):
x, y = batch
y_hat = self(x)
loss = F.mse_loss(y_hat, y)
return {'loss': loss}
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=0.02)
def train_dataloader(self):
return DataLoader(AverageDataset(), batch_size=30)
class ParityModuleMNIST(LightningModule):
def __init__(self):
super().__init__()
self.c_d1 = nn.Linear(in_features=28 * 28, out_features=128)
self.c_d1_bn = nn.BatchNorm1d(128)
self.c_d1_drop = nn.Dropout(0.3)
self.c_d2 = nn.Linear(in_features=128, out_features=10)
self.example_input_array = torch.rand(2, 1, 28, 28)
def forward(self, x):
x = x.view(x.size(0), -1)
x = self.c_d1(x)
x = torch.tanh(x)
x = self.c_d1_bn(x)
x = self.c_d1_drop(x)
x = self.c_d2(x)
return x
def training_step(self, batch, batch_nb):
x, y = batch
y_hat = self(x)
loss = F.cross_entropy(y_hat, y)
return {'loss': loss}
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=0.02)
def train_dataloader(self):
return DataLoader(MNIST(train=True, download=True,), batch_size=128, num_workers=1)
| [
"torch.nn.Linear",
"torch.rand",
"torch.nn.Dropout",
"torch.nn.LSTM",
"torch.nn.Sigmoid",
"torch.nn.Tanh",
"torch.nn.LeakyReLU",
"torch.nn.functional.mse_loss",
"torch.nn.functional.cross_entropy",
"torch.nn.BatchNorm1d",
"torch.tanh",
"torch.randn",
"torch.nn.functional.binary_cross_entropy"
] | 1.3 | nightlessbaron/pytorch-lightning | 239bea5c29cef0d1a0cfb319de5dbc9227aa2a53 |
1.5 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
from torch.autograd import grad
def velocity_verlet(z, r, potential_fn, kinetic_grad, step_size, num_steps=1, z_grads=None):
r"""
Second order symplectic integrator that uses the velocity verlet algorithm.
:param dict z: dictionary of sample site names and their current values
(type :class:`~torch.Tensor`).
:param dict r: dictionary of sample site names and corresponding momenta
(type :class:`~torch.Tensor`).
:param callable potential_fn: function that returns potential energy given z
for each sample site. The negative gradient of the function with respect
to ``z`` determines the rate of change of the corresponding sites'
momenta ``r``.
:param callable kinetic_grad: a function calculating gradient of kinetic energy
w.r.t. momentum variable.
:param float step_size: step size for each time step iteration.
:param int num_steps: number of discrete time steps over which to integrate.
:param torch.Tensor z_grads: optional gradients of potential energy at current ``z``.
:return tuple (z_next, r_next, z_grads, potential_energy): next position and momenta,
together with the potential energy and its gradient w.r.t. ``z_next``.
"""
z_next = z.copy()
r_next = r.copy()
for _ in range(num_steps):
z_next, r_next, z_grads, potential_energy = _single_step_verlet(z_next,
r_next,
potential_fn,
kinetic_grad,
step_size,
z_grads)
return z_next, r_next, z_grads, potential_energy
def _single_step_verlet(z, r, potential_fn, kinetic_grad, step_size, z_grads=None):
r"""
Single step velocity verlet that modifies the `z`, `r` dicts in place.
"""
z_grads = potential_grad(potential_fn, z)[0] if z_grads is None else z_grads
for site_name in r:
r[site_name] = r[site_name] + 0.5 * step_size * (-z_grads[site_name]) # r(n+1/2)
r_grads = kinetic_grad(r)
for site_name in z:
z[site_name] = z[site_name] + step_size * r_grads[site_name] # z(n+1)
z_grads, potential_energy = potential_grad(potential_fn, z)
for site_name in r:
r[site_name] = r[site_name] + 0.5 * step_size * (-z_grads[site_name]) # r(n+1)
return z, r, z_grads, potential_energy
def potential_grad(potential_fn, z):
"""
Gradient of `potential_fn` w.r.t. parameters z.
:param potential_fn: python callable that takes in a dictionary of parameters
and returns the potential energy.
:param dict z: dictionary of parameter values keyed by site name.
:return: tuple of `(z_grads, potential_energy)`, where `z_grads` is a dictionary
with the same keys as `z` containing gradients and potential_energy is a
torch scalar.
"""
z_keys, z_nodes = zip(*z.items())
for node in z_nodes:
node.requires_grad_(True)
try:
potential_energy = potential_fn(z)
# deal with singular matrices
except RuntimeError as e:
if "singular U" in str(e):
grads = {k: v.new_zeros(v.shape) for k, v in z.items()}
return grads, z_nodes[0].new_tensor(float('nan'))
else:
raise e
grads = grad(potential_energy, z_nodes)
for node in z_nodes:
node.requires_grad_(False)
return dict(zip(z_keys, grads)), potential_energy.detach()
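# Usage sketch (hypothetical, not part of this module): with a standard-normal potential
# potential_fn = lambda z: 0.5 * (z['x'] ** 2).sum()
# and unit-mass kinetic energy kinetic_grad = lambda r: r, a call such as
# velocity_verlet({'x': torch.zeros(3)}, {'x': torch.ones(3)}, potential_fn,
#                 lambda r: r, step_size=0.1, num_steps=10)
# integrates Hamiltonian dynamics and approximately conserves 0.5 * (|x|^2 + |r|^2).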
| [
"torch.autograd.grad"
] | 1.5.0 | ashishfarmer/pyro | 11a96cde05756def826c232d76f9cff66f6e6d4f |
1.5 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import operator
from functools import partial, reduce
import torch
from torch.distributions import constraints
from torch.distributions.utils import _sum_rightmost
from pyro.distributions.conditional import ConditionalTransformModule
from pyro.distributions.torch_transform import TransformModule
from pyro.distributions.transforms.utils import clamp_preserve_gradients
from pyro.distributions.util import copy_docs_from
from pyro.nn import ConditionalDenseNN, DenseNN
@copy_docs_from(TransformModule)
class AffineCoupling(TransformModule):
r"""
An implementation of the affine coupling layer of RealNVP (Dinh et al., 2017)
that uses the bijective transform,
:math:`\mathbf{y}_{1:d} = \mathbf{x}_{1:d}`
:math:`\mathbf{y}_{(d+1):D} = \mu + \sigma\odot\mathbf{x}_{(d+1):D}`
where :math:`\mathbf{x}` are the inputs, :math:`\mathbf{y}` are the outputs,
e.g. :math:`\mathbf{x}_{1:d}` represents the first :math:`d` elements of the
inputs, and :math:`\mu,\sigma` are shift and scale parameters calculated
as the output of a function inputting only :math:`\mathbf{x}_{1:d}`.
That is, the first :math:`d` components remain unchanged, and the subsequent
:math:`D-d` are shifted and translated by a function of the previous components.
Together with :class:`~pyro.distributions.TransformedDistribution` this provides
a way to create richer variational approximations.
Example usage:
>>> from pyro.nn import DenseNN
>>> input_dim = 10
>>> split_dim = 6
>>> base_dist = dist.Normal(torch.zeros(input_dim), torch.ones(input_dim))
>>> param_dims = [input_dim-split_dim, input_dim-split_dim]
>>> hypernet = DenseNN(split_dim, [10*input_dim], param_dims)
>>> transform = AffineCoupling(split_dim, hypernet)
>>> pyro.module("my_transform", transform) # doctest: +SKIP
>>> flow_dist = dist.TransformedDistribution(base_dist, [transform])
>>> flow_dist.sample() # doctest: +SKIP
The inverse of the Bijector is required when, e.g., scoring the log density of a
sample with :class:`~pyro.distributions.TransformedDistribution`. This
implementation caches the inverse of the Bijector when its forward operation is
called, e.g., when sampling from
:class:`~pyro.distributions.TransformedDistribution`. However, if the cached
value isn't available, either because it was overwritten during sampling a new
value or an arbitary value is being scored, it will calculate it manually.
This is an operation that scales as O(1), i.e. constant in the input dimension.
So in general, it is cheap to sample *and* score (an arbitrary value) from
:class:`~pyro.distributions.transforms.AffineCoupling`.
:param split_dim: Zero-indexed dimension :math:`d` upon which to perform input/
output split for transformation.
:type split_dim: int
:param hypernet: an autoregressive neural network whose forward call returns a
real-valued mean and logit-scale as a tuple. The input should have final
dimension split_dim and the output final dimension input_dim-split_dim for
each member of the tuple.
:type hypernet: callable
:param dim: the tensor dimension on which to split. This value must be negative
and defines the event dim as `abs(dim)`.
:type dim: int
:param log_scale_min_clip: The minimum value for clipping the log(scale) from
the autoregressive NN
:type log_scale_min_clip: float
:param log_scale_max_clip: The maximum value for clipping the log(scale) from
the autoregressive NN
:type log_scale_max_clip: float
References:
[1] Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. Density estimation
using Real NVP. ICLR 2017.
"""
domain = constraints.real
codomain = constraints.real
bijective = True
def __init__(self, split_dim, hypernet, *, dim=-1, log_scale_min_clip=-5., log_scale_max_clip=3.):
super().__init__(cache_size=1)
if dim >= 0:
raise ValueError("'dim' keyword argument must be negative")
self.split_dim = split_dim
self.nn = hypernet
self.dim = dim
self.event_dim = -dim
self._cached_log_scale = None
self.log_scale_min_clip = log_scale_min_clip
self.log_scale_max_clip = log_scale_max_clip
def _call(self, x):
"""
:param x: the input into the bijection
:type x: torch.Tensor
Invokes the bijection x=>y; in the prototypical context of a
:class:`~pyro.distributions.TransformedDistribution` `x` is a sample from
the base distribution (or the output of a previous transform)
"""
x1, x2 = x.split([self.split_dim, x.size(self.dim) - self.split_dim], dim=self.dim)
# Now that we can split on an arbitrary dimension, we have to do a bit of reshaping...
mean, log_scale = self.nn(x1.reshape(x1.shape[:-self.event_dim] + (-1,)))
mean = mean.reshape(mean.shape[:-1] + x2.shape[-self.event_dim:])
log_scale = log_scale.reshape(log_scale.shape[:-1] + x2.shape[-self.event_dim:])
log_scale = clamp_preserve_gradients(log_scale, self.log_scale_min_clip, self.log_scale_max_clip)
self._cached_log_scale = log_scale
y1 = x1
y2 = torch.exp(log_scale) * x2 + mean
return torch.cat([y1, y2], dim=self.dim)
def _inverse(self, y):
"""
:param y: the output of the bijection
:type y: torch.Tensor
Inverts y => x. Uses a previously cached inverse if available, otherwise
performs the inversion afresh.
"""
y1, y2 = y.split([self.split_dim, y.size(self.dim) - self.split_dim], dim=self.dim)
x1 = y1
# Now that we can split on an arbitrary dimension, we have to do a bit of reshaping...
mean, log_scale = self.nn(x1.reshape(x1.shape[:-self.event_dim] + (-1,)))
mean = mean.reshape(mean.shape[:-1] + y2.shape[-self.event_dim:])
log_scale = log_scale.reshape(log_scale.shape[:-1] + y2.shape[-self.event_dim:])
log_scale = clamp_preserve_gradients(log_scale, self.log_scale_min_clip, self.log_scale_max_clip)
self._cached_log_scale = log_scale
x2 = (y2 - mean) * torch.exp(-log_scale)
return torch.cat([x1, x2], dim=self.dim)
def log_abs_det_jacobian(self, x, y):
"""
Calculates the log absolute determinant of the Jacobian (summed over the event dimensions)
"""
x_old, y_old = self._cached_x_y
if self._cached_log_scale is not None and x is x_old and y is y_old:
log_scale = self._cached_log_scale
else:
x1, x2 = x.split([self.split_dim, x.size(self.dim) - self.split_dim], dim=self.dim)
_, log_scale = self.nn(x1.reshape(x1.shape[:-self.event_dim] + (-1,)))
log_scale = log_scale.reshape(log_scale.shape[:-1] + x2.shape[-self.event_dim:])
log_scale = clamp_preserve_gradients(log_scale, self.log_scale_min_clip, self.log_scale_max_clip)
return _sum_rightmost(log_scale, self.event_dim)
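# --- Editor's sketch (not part of the original pyro source; assumes the imports above) ---
# A quick round trip through the coupling layer should recover the input up to
# numerical precision, since y2 = exp(log_scale) * x2 + mean is inverted exactly:
# hypernet = DenseNN(6, [100], [4, 4])
# coupling = AffineCoupling(6, hypernet)
# x = torch.randn(3, 10)
# torch.allclose(coupling._inverse(coupling(x)), x, atol=1e-4)  # expected: True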
@copy_docs_from(ConditionalTransformModule)
class ConditionalAffineCoupling(ConditionalTransformModule):
r"""
An implementation of the affine coupling layer of RealNVP (Dinh et al., 2017)
that conditions on an additional context variable and uses the bijective
transform,
:math:`\mathbf{y}_{1:d} = \mathbf{x}_{1:d}`
:math:`\mathbf{y}_{(d+1):D} = \mu + \sigma\odot\mathbf{x}_{(d+1):D}`
where :math:`\mathbf{x}` are the inputs, :math:`\mathbf{y}` are the outputs,
e.g. :math:`\mathbf{x}_{1:d}` represents the first :math:`d` elements of the
inputs, and :math:`\mu,\sigma` are shift and scale parameters calculated as
the output of a function of :math:`\mathbf{x}_{1:d}` and a context
variable :math:`\mathbf{z}\in\mathbb{R}^M`.
That is, the first :math:`d` components remain unchanged, and the subsequent
:math:`D-d` are scaled and shifted by a function of the first :math:`d` components.
Together with :class:`~pyro.distributions.ConditionalTransformedDistribution`
this provides a way to create richer variational approximations.
Example usage:
>>> from pyro.nn import ConditionalDenseNN
>>> input_dim = 10
>>> split_dim = 6
>>> context_dim = 4
>>> batch_size = 3
>>> base_dist = dist.Normal(torch.zeros(input_dim), torch.ones(input_dim))
>>> param_dims = [input_dim-split_dim, input_dim-split_dim]
>>> hypernet = ConditionalDenseNN(split_dim, context_dim, [10*input_dim],
... param_dims)
>>> transform = ConditionalAffineCoupling(split_dim, hypernet)
>>> pyro.module("my_transform", transform) # doctest: +SKIP
>>> z = torch.rand(batch_size, context_dim)
>>> flow_dist = dist.ConditionalTransformedDistribution(base_dist,
... [transform]).condition(z)
>>> flow_dist.sample(sample_shape=torch.Size([batch_size])) # doctest: +SKIP
The inverse of the Bijector is required when, e.g., scoring the log density of a
sample with :class:`~pyro.distributions.ConditionalTransformedDistribution`.
This implementation caches the inverse of the Bijector when its forward
operation is called, e.g., when sampling from
:class:`~pyro.distributions.ConditionalTransformedDistribution`. However, if the
cached value isn't available, either because it was overwritten while sampling
a new value or because an arbitrary value is being scored, it will calculate it manually.
This is an operation that scales as O(1), i.e. constant in the input dimension.
So in general, it is cheap to sample *and* score (an arbitrary value) from
:class:`~pyro.distributions.transforms.ConditionalAffineCoupling`.
:param split_dim: Zero-indexed dimension :math:`d` upon which to perform input/
output split for transformation.
:type split_dim: int
:param hypernet: A neural network whose forward call returns a real-valued mean
and logit-scale as a tuple. The input should have final dimension split_dim
and the output final dimension input_dim-split_dim for each member of the
tuple. The network also inputs a context variable as a keyword argument in
order to condition the output upon it.
:type hypernet: callable
:param log_scale_min_clip: The minimum value for clipping the log(scale) from
the NN
:type log_scale_min_clip: float
:param log_scale_max_clip: The maximum value for clipping the log(scale) from
the NN
:type log_scale_max_clip: float
References:
Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. Density estimation using
Real NVP. ICLR 2017.
"""
domain = constraints.real
codomain = constraints.real
bijective = True
event_dim = 1
def __init__(self, split_dim, hypernet, **kwargs):
super().__init__()
self.split_dim = split_dim
self.nn = hypernet
self.kwargs = kwargs
def condition(self, context):
cond_nn = partial(self.nn, context=context)
return AffineCoupling(self.split_dim, cond_nn, **self.kwargs)
def affine_coupling(input_dim, hidden_dims=None, split_dim=None, dim=-1, **kwargs):
"""
A helper function to create an
:class:`~pyro.distributions.transforms.AffineCoupling` object that takes care of
constructing a dense network with the correct input/output dimensions.
:param input_dim: Dimension(s) of the input variable. Note that when
`dim < -1` this must be a tuple corresponding to the event shape.
:type input_dim: int
:param hidden_dims: The desired hidden dimensions of the dense network. Defaults
to using [10*input_dim]
:type hidden_dims: list[int]
:param split_dim: The dimension to split the input on for the coupling
transform. Defaults to using input_dim // 2
:type split_dim: int
:param dim: the tensor dimension on which to split. This value must be negative
and defines the event dim as `abs(dim)`.
:type dim: int
:param log_scale_min_clip: The minimum value for clipping the log(scale) from
the autoregressive NN
:type log_scale_min_clip: float
:param log_scale_max_clip: The maximum value for clipping the log(scale) from
the autoregressive NN
:type log_scale_max_clip: float
"""
if not isinstance(input_dim, int):
if len(input_dim) != -dim:
raise ValueError('event shape {} must have same length as event_dim {}'.format(input_dim, -dim))
event_shape = input_dim
extra_dims = reduce(operator.mul, event_shape[(dim + 1):], 1)
else:
event_shape = [input_dim]
extra_dims = 1
event_shape = list(event_shape)
if split_dim is None:
split_dim = event_shape[dim] // 2
if hidden_dims is None:
hidden_dims = [10 * event_shape[dim] * extra_dims]
hypernet = DenseNN(split_dim * extra_dims,
hidden_dims,
[(event_shape[dim] - split_dim) * extra_dims,
(event_shape[dim] - split_dim) * extra_dims])
return AffineCoupling(split_dim, hypernet, dim=dim, **kwargs)
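# --- Editor's usage sketch (not part of the original pyro source) ---
# With an integer input_dim the helper defaults to split_dim=input_dim//2 and a
# single hidden layer of width 10*input_dim, mirroring the class docstring above:
# t = affine_coupling(10)         # AffineCoupling(5, DenseNN(5, [100], [5, 5]))
# t(torch.randn(7, 10)).shape     # -> torch.Size([7, 10])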
def conditional_affine_coupling(input_dim, context_dim, hidden_dims=None, split_dim=None, dim=-1, **kwargs):
"""
A helper function to create an
:class:`~pyro.distributions.transforms.ConditionalAffineCoupling` object that
takes care of constructing a dense network with the correct input/output
dimensions.
:param input_dim: Dimension of input variable
:type input_dim: int
:param context_dim: Dimension of context variable
:type context_dim: int
:param hidden_dims: The desired hidden dimensions of the dense network. Defaults
to using [10*input_dim]
:type hidden_dims: list[int]
:param split_dim: The dimension to split the input on for the coupling
transform. Defaults to using input_dim // 2
:type split_dim: int
:param dim: the tensor dimension on which to split. This value must be negative
and defines the event dim as `abs(dim)`.
:type dim: int
:param log_scale_min_clip: The minimum value for clipping the log(scale) from
the autoregressive NN
:type log_scale_min_clip: float
:param log_scale_max_clip: The maximum value for clipping the log(scale) from
the autoregressive NN
:type log_scale_max_clip: float
"""
if not isinstance(input_dim, int):
if len(input_dim) != -dim:
raise ValueError('event shape {} must have same length as event_dim {}'.format(input_dim, -dim))
event_shape = input_dim
extra_dims = reduce(operator.mul, event_shape[(dim + 1):], 1)
else:
event_shape = [input_dim]
extra_dims = 1
event_shape = list(event_shape)
if split_dim is None:
split_dim = event_shape[dim] // 2
if hidden_dims is None:
hidden_dims = [10 * event_shape[dim] * extra_dims]
nn = ConditionalDenseNN(split_dim * extra_dims, context_dim, hidden_dims,
[(event_shape[dim] - split_dim) * extra_dims, (event_shape[dim] - split_dim) * extra_dims])
return ConditionalAffineCoupling(split_dim, nn, dim=dim, **kwargs)
| [
"torch.distributions.utils._sum_rightmost",
"torch.cat",
"torch.exp"
] | 1.5.0 | ashishfarmer/pyro | 54d48627a7c5c0575c2fe69d5b6c80f3c47b287b |
1.2 | import torch
from .functional import auxiliary_classification_loss
from .loss import DiscriminatorLoss, GeneratorLoss
__all__ = [
"AuxiliaryClassifierGeneratorLoss",
"AuxiliaryClassifierDiscriminatorLoss",
]
class AuxiliaryClassifierGeneratorLoss(GeneratorLoss):
r"""Auxiliary Classifier GAN (ACGAN) loss based on a from
`"Conditional Image Synthesis With Auxiliary Classifier GANs
by Odena et. al. " <https://arxiv.org/abs/1610.09585>`_ paper
Args:
reduction (str, optional): Specifies the reduction to apply to the output.
If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged over batch size.
If ``sum`` the elements of the output are summed.
override_train_ops (function, optional): A function is passed to this argument,
if the default ``train_ops`` is not to be used.
"""
def forward(self, logits, labels):
return auxiliary_classification_loss(logits, labels, self.reduction)
def train_ops(
self,
generator,
discriminator,
optimizer_generator,
device,
batch_size,
labels=None,
):
r"""Defines the standard ``train_ops`` used by the Auxiliary Classifier generator loss.
The ``standard optimization algorithm`` for the ``discriminator`` defined in this train_ops
is as follows (label_g and label_d both could be either real labels or generated labels):
1. :math:`fake = generator(noise, label_g)`
2. :math:`value_1 = classifier(fake, label_g)`
3. :math:`value_2 = classifier(real, label_d)`
4. :math:`loss = loss\_function(value_1, label_g) + loss\_function(value_2, label_d)`
5. Backpropagate by computing :math:`\nabla loss`
6. Run a step of the optimizer for discriminator
Args:
generator (torchgan.models.Generator): The model to be optimized. For ACGAN, it must require
labels for training
discriminator (torchgan.models.Discriminator): The discriminator which judges the
performance of the generator.
optimizer_generator (torch.optim.Optimizer): Optimizer which updates the ``parameters``
of the ``generator``.
device (torch.device): Device on which the ``generator`` and ``discriminator`` is present.
batch_size (int): Batch Size of the data inferred from the ``DataLoader`` by the ``Trainer``.
labels (torch.Tensor, optional): Labels for the data.
Returns:
Scalar value of the loss.
"""
if self.override_train_ops is not None:
return self.override_train_ops(
generator,
discriminator,
optimizer_generator,
device,
batch_size,
labels,
)
if generator.label_type == "required" and labels is None:
raise Exception("GAN model requires label for training")
noise = torch.randn(batch_size, generator.encoding_dims, device=device)
optimizer_generator.zero_grad()
if generator.label_type == "none":
raise Exception(
"Incorrect Model: ACGAN generator must require labels"
)
if generator.label_type == "required":
fake = generator(noise, labels)
elif generator.label_type == "generated":
label_gen = torch.randint(
0, generator.num_classes, (batch_size,), device=device
)
fake = generator(noise, label_gen)
cgz = discriminator(fake, mode="classifier")
if generator.label_type == "required":
loss = self.forward(cgz, labels)
else:
label_gen = label_gen.type(torch.LongTensor).to(device)
loss = self.forward(cgz, label_gen)
loss.backward()
optimizer_generator.step()
return loss.item()
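# --- Editor's note (sketch, not part of the torchgan source) ---
# The generator is updated only through the classifier head on the fake batch.
# auxiliary_classification_loss is imported from .functional above; as a rough
# stand-in for intuition (an assumption, not torchgan's exact implementation),
# an ordinary cross-entropy over class logits plays the same role:
# logits = torch.randn(8, 10); labels = torch.randint(0, 10, (8,))
# torch.nn.functional.cross_entropy(logits, labels)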
class AuxiliaryClassifierDiscriminatorLoss(DiscriminatorLoss):
r"""Auxiliary Classifier GAN (ACGAN) loss based on a from
`"Conditional Image Synthesis With Auxiliary Classifier GANs
by Odena et. al. " <https://arxiv.org/abs/1610.09585>`_ paper
Args:
reduction (str, optional): Specifies the reduction to apply to the output.
If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged over batch size.
If ``sum`` the elements of the output are summed.
override_train_ops (function, optional): A function is passed to this argument,
if the default ``train_ops`` is not to be used.
"""
def forward(self, logits, labels):
return auxiliary_classification_loss(logits, labels, self.reduction)
def train_ops(
self,
generator,
discriminator,
optimizer_discriminator,
real_inputs,
device,
labels=None,
):
r"""Defines the standard ``train_ops`` used by the Auxiliary Classifier discriminator loss.
The ``standard optimization algorithm`` for the ``discriminator`` defined in this train_ops
is as follows (label_g and label_d both could be either real labels or generated labels):
1. :math:`fake = generator(noise, label_g)`
2. :math:`value_1 = classifier(fake, label_g)`
3. :math:`value_2 = classifier(real, label_d)`
4. :math:`loss = loss\_function(value_1, label_g) + loss\_function(value_2, label_d)`
5. Backpropagate by computing :math:`\nabla loss`
6. Run a step of the optimizer for discriminator
Args:
generator (torchgan.models.Generator): The model to be optimized. For ACGAN, it must require labels
for training
discriminator (torchgan.models.Discriminator): The discriminator which judges the
performance of the generator.
optimizer_discriminator (torch.optim.Optimizer): Optimizer which updates the ``parameters``
of the ``discriminator``.
real_inputs (torch.Tensor): The real data to be fed to the ``discriminator``.
device (torch.device): Device on which the ``generator`` and ``discriminator`` is present.
batch_size (int): Batch Size of the data, inferred from ``real_inputs``.
labels (torch.Tensor, optional): Labels for the data.
Returns:
Scalar value of the loss.
"""
if self.override_train_ops is not None:
return self.override_train_ops(
generator,
discriminator,
optimizer_discriminator,
real_inputs,
device,
labels,
)
if labels is None:
raise Exception("ACGAN Discriminator requires labels for training")
if generator.label_type == "none":
raise Exception(
"Incorrect Model: ACGAN generator must require labels for training"
)
batch_size = real_inputs.size(0)
noise = torch.randn(batch_size, generator.encoding_dims, device=device)
optimizer_discriminator.zero_grad()
cx = discriminator(real_inputs, mode="classifier")
if generator.label_type == "required":
fake = generator(noise, labels)
elif generator.label_type == "generated":
label_gen = torch.randint(
0, generator.num_classes, (batch_size,), device=device
)
fake = generator(noise, label_gen)
cgz = discriminator(fake, mode="classifier")
if generator.label_type == "required":
loss = self.forward(cgz, labels) + self.forward(cx, labels)
else:
label_gen = label_gen.type(torch.LongTensor).to(device)
loss = self.forward(cgz, label_gen) + self.forward(cx, labels)
loss.backward()
optimizer_discriminator.step()
return loss.item()
| [
"torch.randint",
"torch.randn"
] | 1.2 | torchgan/torchgan | cfd5da4b7ffcec544c6cc4a22257edf40fd31f9d |
1.6 | import transformers
from transformers.models.auto.configuration_auto import AutoConfig
from transformers import AutoModel, AutoModelForSequenceClassification
from transformers import AutoTokenizer
from src.dataset.wic_dataset import *
from transformers import AutoTokenizer
from src.models.modeling import BaseEncoderModel, TransformerWrapper
from src.modules.model_compression import prune_huggingface, prune_rewire
from src.modules.modules import *
from src.utils.metrics import AccuracyMeter, AverageMeter, EmbeddingSimilarityMeter
from src.dataset.sts_dataset import StsDataset
from src.dataset.dataset import SmartParaphraseDataloader
import argparse
from src.dataset.parallel_dataset import *
from src.configurations import config
import torch
from torch.cuda import amp
from tqdm import tqdm
def eval(args, model, eval_dataloader):
nb_eval_steps = 0
preds = None
eval_dataloader = tqdm(eval_dataloader, desc="Computing Head Importance...")
tot_tokens = 0.0
accuracy = AccuracyMeter()
model.to(args.device)
model.eval()
for batch in eval_dataloader:
if isinstance(batch, dict):
for key in batch:
el = batch[key]
if isinstance(el, torch.Tensor):
batch[key] = el.to(args.device)
else:
batch.to(args.device)
if isinstance(model, BaseEncoderModel):
if args.mixed_precision:
with amp.autocast():
outputs = model(features=batch)
else:
outputs = model(features=batch)
tmp_eval_loss = outputs.loss
logits = outputs.predictions
labels = batch.labels
else:
if not isinstance(batch, dict):
feats = batch.to_dict()
labels = batch.labels
else:
feats = {key: value for key, value in batch.items() if key != "labels"}
labels = batch["labels"]
if args.mixed_precision:
with amp.autocast():
outputs = model(**feats, labels=labels)
else:
outputs = model(**feats, labels=labels)
tmp_eval_loss = outputs[0]
logits = outputs[1]
preds = logits.detach().cpu().numpy()
labels = labels.detach().cpu().numpy()
accuracy.update(preds, labels, n=args.batch_size)
eval_dataloader.set_postfix({"accuracy": "{:.2f}".format(accuracy.avg)})
nb_eval_steps += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--ep', type=int, dest="epochs", default=1)
parser.add_argument('--name', type=str, dest="config_name")
parser.add_argument('--bs', type=int, dest="batch_size", default=16)
parser.add_argument('--fp16', type=bool, dest="mixed_precision", default=True)
parser.add_argument('--embed_dim', type=int, dest="embed_dim", default=768)
parser.add_argument('--seq_len', type=int, dest="seq_len", default=128)
parser.add_argument('--device', type=str, dest="device", default="cuda")
parser.add_argument('--model', type=str, dest="model", default="distilbert-base-multilingual-cased")
parser.add_argument('--pretrained', type=str, dest="pretrained_model_path", default="../training/trained_models/distilbert-multi-seq-class-nikkei")
parser.add_argument('--target_num_heads', type=int, dest="target_num_heads", default=6)
parser.add_argument('--target_ffn_dim', type=int, dest="target_ffn_dim", default=1536)
parser.add_argument('--output_dir', dest="output_dir", type=str, default="./output")
parser.add_argument('--normalize', type=bool, dest="normalize_layers", default=False)
parser.add_argument(
"--masking_threshold",
default=0.97,
type=float,
help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
)
parser.add_argument(
"--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
)
parser.add_argument(
"--dont_normalize_importance_by_layer",
dest = "dont_normalize_importance_by_layer",
action="store_true",
help="don't normalize importance score by layers",
default=False)
parser.add_argument(
"--dont_normalize_global_importance",
dest = "dont_normalize_global_importance",
action="store_true",
help="don't normalize importance score by layers",
default=False)
parser.add_argument("--use_huggingface", type=bool, dest="use_huggingface", default=False)
args = parser.parse_args()
random.seed(43)
tokenizer = AutoTokenizer.from_pretrained(args.model, use_fast=True)
dataset = utils.load_file("../dataset/cached/nikkei_dataset")
train_split, valid_split = dataset.split_dataset(test_perc=0.1)
#train_dataset = Dataset(train_split)
valid_dataset = Dataset(valid_split)
LABELS_TO_ID = dataset.label_to_id
model_config = config.SenseModelParameters(
model_name = args.config_name,
hidden_size = args.embed_dim,
num_classes = len(LABELS_TO_ID),
freeze_weights = False,
context_layers = (-1,)
)
configuration = config.Configuration(
model_parameters=model_config,
model = args.model,
save_path = args.output_dir,
sequence_max_len = args.seq_len,
batch_size = args.batch_size,
epochs = args.epochs,
device = torch.device(args.device),
tokenizer = tokenizer,
)
valid_data_loader = SmartParaphraseDataloader.build_batches(valid_dataset, 16, mode="sequence", config=configuration)
autoconfig = AutoConfig.from_pretrained(args.pretrained_model_path, output_attentions=True,)
autoconfig.num_labels = len(LABELS_TO_ID)
model = AutoModelForSequenceClassification.from_pretrained(args.pretrained_model_path, config=autoconfig)
"""
model = TransformerWrapper.load_pretrained(
args.pretrained_model_path,
params=configuration,
pooler = BertPoolingStrategy(configuration),
loss = SoftmaxLoss(configuration))
model_config = config.ModelParameters(
model_name = args.config_name,
hidden_size = args.embed_dim,
num_classes=3,
freeze_weights = False,
context_layers = (-1,)
)
configuration = config.ParallelConfiguration(
model_parameters=model_config,
model = args.model,
sequence_max_len=args.seq_len,
save_path = args.output_dir,
batch_size = args.batch_size,
epochs = args.epochs,
device = torch.device(args.device),
tokenizer = tokenizer,
)
"""
"""
valid_dataset = EntailmentDataset.build_dataset('../data/nli/AllNLI.tsv', mode="test")
print()
print(f"########## Number of examples {len(valid_dataset)} ##################")
print()
dataloader = SmartParaphraseDataloader.build_batches(valid_dataset, args.batch_size, mode="standard", config=configuration, sentence_pairs=False)
sentence_model = SentenceTransformerWrapper.load_pretrained(
path=args.model,
params=configuration,
merge_strategy=SentenceBertCombineStrategy(),
loss = SoftmaxLoss(params=configuration)
)
"""
if args.use_huggingface:
metrics = {"validation": AccuracyMeter}
prune_huggingface(args, model, valid_data_loader)
else:
model = prune_rewire(args, model, valid_data_loader, tokenizer, is_distilbert=True)
print(f"Evaluating Pruned Model...")
eval(args, model, valid_data_loader)
| [
"torch.device",
"torch.cuda.amp.autocast"
] | 1.6.0 | cr1m5onk1ng/text_similarity | 2123621bf153683b35e9433835237812605bd42f |
1.7 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
Original Author: Wei Yang
"""
__all__ = ['wrn']
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
self.equalInOut = (in_planes == out_planes)
self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False) or None
def forward(self, x):
if not self.equalInOut:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
out = self.conv2(out)
return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
layers = []
for i in range(nb_layers):
layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
super(WideResNet, self).__init__()
nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
n = (depth - 4) // 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
# 2nd block
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
# 3rd block
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(nChannels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], num_classes)
self.nChannels = nChannels[3]
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def get_feat_modules(self):
feat_m = nn.ModuleList([])
feat_m.append(self.conv1)
feat_m.append(self.block1)
feat_m.append(self.block2)
feat_m.append(self.block3)
return feat_m
def get_bn_before_relu(self):
bn1 = self.block2.layer[0].bn1
bn2 = self.block3.layer[0].bn1
bn3 = self.bn1
return [bn1, bn2, bn3]
def forward(self, x, is_feat=False, preact=False):
out = self.conv1(x)
f0 = out
out = self.block1(out)
f1 = out
out = self.block2(out)
f2 = out
out = self.block3(out)
f3 = out
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
f4 = out
out = self.fc(out)
if is_feat:
if preact:
f1 = self.block2.layer[0].bn1(f1)
f2 = self.block3.layer[0].bn1(f2)
f3 = self.bn1(f3)
return [f0, f1, f2, f3, f4], out
else:
return out
def wrn(**kwargs):
"""
Constructs a Wide Residual Network.
"""
model = WideResNet(**kwargs)
return model
def wrn_40_2(**kwargs):
model = WideResNet(depth=40, widen_factor=2, **kwargs)
return model
def wrn_40_1(**kwargs):
model = WideResNet(depth=40, widen_factor=1, **kwargs)
return model
def wrn_16_2(**kwargs):
model = WideResNet(depth=16, widen_factor=2, **kwargs)
return model
def wrn_16_1(**kwargs):
model = WideResNet(depth=16, widen_factor=1, **kwargs)
return model
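# --- Editor's shape sketch (not part of the original source) ---
# wrn_<depth>_<k> builds WideResNet(depth=depth, widen_factor=k); on CIFAR-sized inputs:
# net = wrn_16_2(num_classes=10)
# net(torch.randn(2, 3, 32, 32)).shape   # -> torch.Size([2, 10])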
if __name__ == '__main__':
import torch
x = torch.randn(2, 3, 32, 32)
net = wrn_40_2(num_classes=100)
feats, logit = net(x, is_feat=True, preact=True)
for f in feats:
print(f.shape, f.min().item())
print(logit.shape)
for m in net.get_bn_before_relu():
if isinstance(m, nn.BatchNorm2d):
print('pass')
else:
print('warning') | [
"torch.nn.Linear",
"torch.nn.functional.avg_pool2d",
"torch.nn.ModuleList",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.functional.dropout",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.randn"
] | 1.7.0 | hanseungwook/SimSiam | ff363f2cfdee07ecfee6c25ae3e920fdb9302e57 |
1.0 | #!/usr/bin/env python3
import torch
def inv_softplus(x):
return torch.log(torch.exp(x) - 1)
def inv_sigmoid(x):
return torch.log(x / (1 - x))
def _get_inv_param_transform(param_transform, inv_param_transform=None):
reg_inv_tf = TRANSFORM_REGISTRY.get(param_transform, None)
if reg_inv_tf is None:
if inv_param_transform is None:
raise RuntimeError("Must specify inv_param_transform for custom param_transforms")
return inv_param_transform
elif inv_param_transform is not None and reg_inv_tf != inv_param_transform:
raise RuntimeError("TODO")
return reg_inv_tf
TRANSFORM_REGISTRY = {
torch.exp: torch.log,
torch.nn.functional.softplus: inv_softplus,
torch.nn.functional.sigmoid: inv_sigmoid,
}
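# --- Editor's sanity-check sketch (not part of the original gpytorch source) ---
# Each registered pair is a mutual inverse on its valid range:
# x = torch.linspace(-3., 3., steps=5)
# torch.allclose(inv_softplus(torch.nn.functional.softplus(x)), x, atol=1e-5)  # True
# p = torch.tensor([0.1, 0.5, 0.9])
# torch.allclose(torch.sigmoid(inv_sigmoid(p)), p, atol=1e-6)                  # True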
| [
"torch.log",
"torch.exp"
] | 1.0.0 | beyucel/gpytorch | a5394937495756945b831d83035349579d8fac31 |
1.0 | #!/usr/bin/env python3
import torch
import unittest
from gpytorch.lazy import BlockDiagLazyTensor, NonLazyTensor
from test.lazy._lazy_tensor_test_case import LazyTensorTestCase
class TestBlockDiagLazyTensor(LazyTensorTestCase, unittest.TestCase):
seed = 0
should_test_sample = True
def create_lazy_tensor(self):
blocks = torch.randn(8, 4, 4)
blocks = blocks.matmul(blocks.transpose(-1, -2))
blocks.add_(torch.eye(4, 4).unsqueeze_(0))
return BlockDiagLazyTensor(NonLazyTensor(blocks))
def evaluate_lazy_tensor(self, lazy_tensor):
blocks = lazy_tensor.base_lazy_tensor.tensor
actual = torch.zeros(32, 32)
for i in range(8):
actual[i * 4 : (i + 1) * 4, i * 4 : (i + 1) * 4] = blocks[i]
return actual
class TestBlockDiagLazyTensorBatch(LazyTensorTestCase, unittest.TestCase):
seed = 0
should_test_sample = True
def create_lazy_tensor(self):
blocks = torch.randn(2, 6, 4, 4)
blocks = blocks.matmul(blocks.transpose(-1, -2))
blocks.add_(torch.eye(4, 4))
return BlockDiagLazyTensor(NonLazyTensor(blocks), block_dim=2)
def evaluate_lazy_tensor(self, lazy_tensor):
blocks = lazy_tensor.base_lazy_tensor.tensor
actual = torch.zeros(2, 24, 24)
for i in range(2):
for j in range(6):
actual[i, j * 4 : (j + 1) * 4, j * 4 : (j + 1) * 4] = blocks[i, j]
return actual
class TestBlockDiagLazyTensorMultiBatch(LazyTensorTestCase, unittest.TestCase):
seed = 0
# Because these LTs are large, we'll skip the big tests
should_test_sample = False
skip_slq_tests = True
def create_lazy_tensor(self):
blocks = torch.randn(2, 6, 5, 4, 4)
blocks = blocks.matmul(blocks.transpose(-1, -2))
blocks.add_(torch.eye(4, 4))
blocks.detach_()
return BlockDiagLazyTensor(NonLazyTensor(blocks), block_dim=1)
def evaluate_lazy_tensor(self, lazy_tensor):
blocks = lazy_tensor.base_lazy_tensor.tensor
actual = torch.zeros(2, 5, 24, 24)
for i in range(2):
for j in range(6):
for k in range(5):
actual[i, k, j * 4 : (j + 1) * 4, j * 4 : (j + 1) * 4] = blocks[i, k, j]
return actual
if __name__ == "__main__":
unittest.main()
| [
"torch.zeros",
"torch.eye",
"torch.randn"
] | 1.0.0 | beyucel/gpytorch | a5394937495756945b831d83035349579d8fac31 |
1.3 | from torch.utils.tensorboard import SummaryWriter
def log(*args):
iteration, loss, accuracy = args
writer = SummaryWriter()
writer.add_scalar("Loss", loss, iteration)
writer.add_scalar("Accuracy", accuracy, iteration)
| [
"torch.utils.tensorboard.SummaryWriter"
] | 1.3.0 | Mrityunjay2668/ObjectDetection | d3582311e5cf563c4f2ba7fdd87d8f56b60cccb1 |
1.5 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Sequence, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from monai.networks.blocks.segresnet_block import ResBlock, get_conv_layer, get_norm_layer, get_upsample_layer
from monai.networks.layers.factories import Act, Dropout
from monai.utils import UpsampleMode
class SegResNet(nn.Module):
"""
SegResNet based on `3D MRI brain tumor segmentation using autoencoder regularization
<https://arxiv.org/pdf/1810.11654.pdf>`_.
The module does not include the variational autoencoder (VAE).
The model supports 2D or 3D inputs.
Args:
spatial_dims: spatial dimension of the input data. Defaults to 3.
init_filters: number of output channels for initial convolution layer. Defaults to 8.
in_channels: number of input channels for the network. Defaults to 1.
out_channels: number of output channels for the network. Defaults to 2.
dropout_prob: probability of an element to be zero-ed. Defaults to ``None``.
norm_name: feature normalization type, this module only supports group norm,
batch norm and instance norm. Defaults to ``group``.
num_groups: number of groups to separate the channels into. Defaults to 8.
use_conv_final: if add a final convolution block to output. Defaults to ``True``.
blocks_down: number of down sample blocks in each layer. Defaults to ``[1,2,2,4]``.
blocks_up: number of up sample blocks in each layer. Defaults to ``[1,1,1]``.
upsample_mode: [``"transpose"``, ``"nontrainable"``, ``"pixelshuffle"``]
The mode of upsampling manipulations.
Using the ``nontrainable`` modes cannot guarantee the model's reproducibility. Defaults to ``nontrainable``.
- ``transpose``, uses transposed convolution layers.
- ``nontrainable``, uses non-trainable `linear` interpolation.
- ``pixelshuffle``, uses :py:class:`monai.networks.blocks.SubpixelUpsample`.
"""
def __init__(
self,
spatial_dims: int = 3,
init_filters: int = 8,
in_channels: int = 1,
out_channels: int = 2,
dropout_prob: Optional[float] = None,
norm_name: str = "group",
num_groups: int = 8,
use_conv_final: bool = True,
blocks_down: tuple = (1, 2, 2, 4),
blocks_up: tuple = (1, 1, 1),
upsample_mode: Union[UpsampleMode, str] = UpsampleMode.NONTRAINABLE,
):
super().__init__()
assert spatial_dims == 2 or spatial_dims == 3, "spatial_dims can only be 2 or 3."
self.spatial_dims = spatial_dims
self.init_filters = init_filters
self.blocks_down = blocks_down
self.blocks_up = blocks_up
self.dropout_prob = dropout_prob
self.norm_name = norm_name
self.num_groups = num_groups
self.upsample_mode = UpsampleMode(upsample_mode)
self.use_conv_final = use_conv_final
self.convInit = get_conv_layer(spatial_dims, in_channels, init_filters)
self.down_layers = self._make_down_layers()
self.up_layers, self.up_samples = self._make_up_layers()
self.relu = Act[Act.RELU](inplace=True)
self.conv_final = self._make_final_conv(out_channels)
if dropout_prob is not None:
self.dropout = Dropout[Dropout.DROPOUT, spatial_dims](dropout_prob)
def _make_down_layers(self):
down_layers = nn.ModuleList()
blocks_down, spatial_dims, filters, norm_name, num_groups = (
self.blocks_down,
self.spatial_dims,
self.init_filters,
self.norm_name,
self.num_groups,
)
for i in range(len(blocks_down)):
layer_in_channels = filters * 2 ** i
pre_conv = (
get_conv_layer(spatial_dims, layer_in_channels // 2, layer_in_channels, stride=2)
if i > 0
else nn.Identity()
)
down_layer = nn.Sequential(
pre_conv,
*[
ResBlock(spatial_dims, layer_in_channels, norm_name=norm_name, num_groups=num_groups)
for _ in range(blocks_down[i])
],
)
down_layers.append(down_layer)
return down_layers
def _make_up_layers(self):
up_layers, up_samples = nn.ModuleList(), nn.ModuleList()
upsample_mode, blocks_up, spatial_dims, filters, norm_name, num_groups = (
self.upsample_mode,
self.blocks_up,
self.spatial_dims,
self.init_filters,
self.norm_name,
self.num_groups,
)
n_up = len(blocks_up)
for i in range(n_up):
sample_in_channels = filters * 2 ** (n_up - i)
up_layers.append(
nn.Sequential(
*[
ResBlock(spatial_dims, sample_in_channels // 2, norm_name=norm_name, num_groups=num_groups)
for _ in range(blocks_up[i])
]
)
)
up_samples.append(
nn.Sequential(
*[
get_conv_layer(spatial_dims, sample_in_channels, sample_in_channels // 2, kernel_size=1),
get_upsample_layer(spatial_dims, sample_in_channels // 2, upsample_mode=upsample_mode),
]
)
)
return up_layers, up_samples
def _make_final_conv(self, out_channels: int):
return nn.Sequential(
get_norm_layer(self.spatial_dims, self.init_filters, norm_name=self.norm_name, num_groups=self.num_groups),
self.relu,
get_conv_layer(self.spatial_dims, self.init_filters, out_channels=out_channels, kernel_size=1, bias=True),
)
def forward(self, x):
x = self.convInit(x)
if self.dropout_prob is not None:
x = self.dropout(x)
down_x = []
for down in self.down_layers:
x = down(x)
down_x.append(x)
down_x.reverse()
for i, (up, upl) in enumerate(zip(self.up_samples, self.up_layers)):
x = up(x) + down_x[i + 1]
x = upl(x)
if self.use_conv_final:
x = self.conv_final(x)
return x
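# --- Editor's usage sketch (not part of the original MONAI source) ---
# With the default blocks_down=(1, 2, 2, 4) there are three stride-2 downsamplings,
# so spatial sizes divisible by 8 are preserved end to end:
# net = SegResNet(spatial_dims=3, in_channels=1, out_channels=2)
# net(torch.randn(1, 1, 64, 64, 64)).shape   # -> torch.Size([1, 2, 64, 64, 64])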
class SegResNetVAE(SegResNet):
"""
SegResNetVAE based on `3D MRI brain tumor segmentation using autoencoder regularization
<https://arxiv.org/pdf/1810.11654.pdf>`_.
The module contains the variational autoencoder (VAE).
The model supports 2D or 3D inputs.
Args:
spatial_dims: spatial dimension of the input data. Defaults to 3.
init_filters: number of output channels for initial convolution layer. Defaults to 8.
in_channels: number of input channels for the network. Defaults to 1.
out_channels: number of output channels for the network. Defaults to 2.
dropout_prob: probability of an element to be zero-ed. Defaults to ``None``.
norm_name: feature normalization type, this module only supports group norm,
batch norm and instance norm. Defaults to ``group``.
num_groups: number of groups to separate the channels into. Defaults to 8.
use_conv_final: if add a final convolution block to output. Defaults to ``True``.
blocks_down: number of down sample blocks in each layer. Defaults to ``[1,2,2,4]``.
blocks_up: number of up sample blocks in each layer. Defaults to ``[1,1,1]``.
upsample_mode: [``"transpose"``, ``"nontrainable"``, ``"pixelshuffle"``]
The mode of upsampling manipulations.
Using the ``nontrainable`` modes cannot guarantee the model's reproducibility. Defaults to ``nontrainable``.
- ``transpose``, uses transposed convolution layers.
- ``nontrainable``, uses non-trainable `linear` interpolation.
- ``pixelshuffle``, uses :py:class:`monai.networks.blocks.SubpixelUpsample`.
input_image_size: the size of images to input into the network. It is used to
determine the in_features of the fc layer in VAE. Since the VAE branch is always
used by this class, this parameter is required and has no default.
vae_estimate_std: whether to estimate the standard deviations in VAE. Defaults to ``False``.
vae_default_std: if not to estimate the std, use the default value. Defaults to 0.3.
vae_nz: number of latent variables in VAE. Defaults to 256.
Where, 128 to represent mean, and 128 to represent std.
"""
def __init__(
self,
input_image_size: Sequence[int],
vae_estimate_std: bool = False,
vae_default_std: float = 0.3,
vae_nz: int = 256,
spatial_dims: int = 3,
init_filters: int = 8,
in_channels: int = 1,
out_channels: int = 2,
dropout_prob: Optional[float] = None,
norm_name: str = "group",
num_groups: int = 8,
use_conv_final: bool = True,
blocks_down: tuple = (1, 2, 2, 4),
blocks_up: tuple = (1, 1, 1),
upsample_mode: Union[UpsampleMode, str] = "nontrainable",
):
super(SegResNetVAE, self).__init__(
spatial_dims=spatial_dims,
init_filters=init_filters,
in_channels=in_channels,
out_channels=out_channels,
dropout_prob=dropout_prob,
norm_name=norm_name,
num_groups=num_groups,
use_conv_final=use_conv_final,
blocks_down=blocks_down,
blocks_up=blocks_up,
upsample_mode=upsample_mode,
)
self.input_image_size = input_image_size
self.smallest_filters = 16
zoom = 2 ** (len(self.blocks_down) - 1)
self.fc_insize = [s // (2 * zoom) for s in self.input_image_size]
self.vae_estimate_std = vae_estimate_std
self.vae_default_std = vae_default_std
self.vae_nz = vae_nz
self._prepare_vae_modules()
self.vae_conv_final = self._make_final_conv(in_channels)
def _prepare_vae_modules(self):
zoom = 2 ** (len(self.blocks_down) - 1)
v_filters = self.init_filters * zoom
total_elements = int(self.smallest_filters * np.prod(self.fc_insize))
self.vae_down = nn.Sequential(
get_norm_layer(self.spatial_dims, v_filters, norm_name=self.norm_name, num_groups=self.num_groups),
self.relu,
get_conv_layer(self.spatial_dims, v_filters, self.smallest_filters, stride=2, bias=True),
get_norm_layer(
self.spatial_dims, self.smallest_filters, norm_name=self.norm_name, num_groups=self.num_groups
),
self.relu,
)
self.vae_fc1 = nn.Linear(total_elements, self.vae_nz)
self.vae_fc2 = nn.Linear(total_elements, self.vae_nz)
self.vae_fc3 = nn.Linear(self.vae_nz, total_elements)
self.vae_fc_up_sample = nn.Sequential(
get_conv_layer(self.spatial_dims, self.smallest_filters, v_filters, kernel_size=1),
get_upsample_layer(self.spatial_dims, v_filters, upsample_mode=self.upsample_mode),
get_norm_layer(self.spatial_dims, v_filters, norm_name=self.norm_name, num_groups=self.num_groups),
self.relu,
)
def _get_vae_loss(self, net_input: torch.Tensor, vae_input: torch.Tensor):
"""
Args:
net_input: the original input of the network.
vae_input: the input of VAE module, which is also the output of the network's encoder.
"""
x_vae = self.vae_down(vae_input)
x_vae = x_vae.view(-1, self.vae_fc1.in_features)
z_mean = self.vae_fc1(x_vae)
z_mean_rand = torch.randn_like(z_mean)
z_mean_rand.requires_grad_(False)
if self.vae_estimate_std:
z_sigma = self.vae_fc2(x_vae)
z_sigma = F.softplus(z_sigma)
vae_reg_loss = 0.5 * torch.mean(z_mean ** 2 + z_sigma ** 2 - torch.log(1e-8 + z_sigma ** 2) - 1)
x_vae = z_mean + z_sigma * z_mean_rand
else:
z_sigma = self.vae_default_std
vae_reg_loss = torch.mean(z_mean ** 2)
x_vae = z_mean + z_sigma * z_mean_rand
x_vae = self.vae_fc3(x_vae)
x_vae = self.relu(x_vae)
x_vae = x_vae.view([-1, self.smallest_filters] + self.fc_insize)
x_vae = self.vae_fc_up_sample(x_vae)
for up, upl in zip(self.up_samples, self.up_layers):
x_vae = up(x_vae)
x_vae = upl(x_vae)
x_vae = self.vae_conv_final(x_vae)
vae_mse_loss = F.mse_loss(net_input, x_vae)
vae_loss = vae_reg_loss + vae_mse_loss
return vae_loss
def forward(self, x):
net_input = x
x = self.convInit(x)
if self.dropout_prob is not None:
x = self.dropout(x)
down_x = []
for down in self.down_layers:
x = down(x)
down_x.append(x)
down_x.reverse()
vae_input = x
for i, (up, upl) in enumerate(zip(self.up_samples, self.up_layers)):
x = up(x) + down_x[i + 1]
x = upl(x)
if self.use_conv_final:
x = self.conv_final(x)
if self.training:
vae_loss = self._get_vae_loss(net_input, vae_input)
return x, vae_loss
return x, None
| [
"torch.nn.Linear",
"torch.nn.Identity",
"torch.nn.functional.softplus",
"torch.nn.ModuleList",
"torch.nn.functional.mse_loss",
"torch.randn_like",
"torch.log",
"torch.mean"
] | 1.5 | JoHof/MONAI | 70483b648fba92f0a8346e53dc14d686e56120a3 |
1.5 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional, Sequence, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
from monai.networks.blocks.convolutions import Convolution
from monai.networks.layers.factories import Act, Norm, split_args
class UnetResBlock(nn.Module):
"""
A skip-connection based module that can be used for DynUNet, based on:
`Automated Design of Deep Learning Methods for Biomedical Image Segmentation <https://arxiv.org/abs/1904.08128>`_.
`nnU-Net: Self-adapting Framework for U-Net-Based Medical Image Segmentation <https://arxiv.org/abs/1809.10486>`_.
Args:
spatial_dims: number of spatial dimensions.
in_channels: number of input channels.
out_channels: number of output channels.
kernel_size: convolution kernel size.
stride: convolution stride.
norm_name: [``"batch"``, ``"instance"``, ``"group"``]
feature normalization type and arguments. In this module, if using ``"group"``,
`in_channels` should be divisible by 16 (default value for ``num_groups``).
"""
def __init__(
self,
spatial_dims: int,
in_channels: int,
out_channels: int,
kernel_size: Union[Sequence[int], int],
stride: Union[Sequence[int], int],
norm_name: str,
):
super(UnetResBlock, self).__init__()
self.conv1 = get_conv_layer(
spatial_dims,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
conv_only=True,
)
self.conv2 = get_conv_layer(
spatial_dims,
out_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
conv_only=True,
)
self.conv3 = get_conv_layer(
spatial_dims,
in_channels,
out_channels,
kernel_size=1,
stride=stride,
conv_only=True,
)
self.lrelu = get_acti_layer(("leakyrelu", {"inplace": True, "negative_slope": 0.01}))
self.norm1 = get_norm_layer(spatial_dims, out_channels, norm_name)
self.norm2 = get_norm_layer(spatial_dims, out_channels, norm_name)
self.norm3 = get_norm_layer(spatial_dims, out_channels, norm_name)
self.downsample = in_channels != out_channels
stride_np = np.atleast_1d(stride)
if not np.all(stride_np == 1):
self.downsample = True
def forward(self, inp):
residual = inp
out = self.conv1(inp)
out = self.norm1(out)
out = self.lrelu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample:
residual = self.conv3(residual)
residual = self.norm3(residual)
out += residual
out = self.lrelu(out)
return out
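# --- Editor's shape sketch (not part of the original MONAI source) ---
# A stride-2 residual block halves each spatial dimension and switches channel width;
# the 1x1 shortcut (conv3/norm3) keeps the residual addition shape-compatible:
# blk = UnetResBlock(3, 32, 64, kernel_size=3, stride=2, norm_name="instance")
# blk(torch.randn(1, 32, 16, 16, 16)).shape   # -> torch.Size([1, 64, 8, 8, 8])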
class UnetBasicBlock(nn.Module):
"""
A CNN module that can be used for DynUNet, based on:
`Automated Design of Deep Learning Methods for Biomedical Image Segmentation <https://arxiv.org/abs/1904.08128>`_.
`nnU-Net: Self-adapting Framework for U-Net-Based Medical Image Segmentation <https://arxiv.org/abs/1809.10486>`_.
Args:
spatial_dims: number of spatial dimensions.
in_channels: number of input channels.
out_channels: number of output channels.
kernel_size: convolution kernel size.
stride: convolution stride.
norm_name: [``"batch"``, ``"instance"``, ``"group"``]
feature normalization type and arguments. In this module, if using ``"group"``,
`in_channels` should be divisible by 16 (default value for ``num_groups``).
"""
def __init__(
self,
spatial_dims: int,
in_channels: int,
out_channels: int,
kernel_size: Union[Sequence[int], int],
stride: Union[Sequence[int], int],
norm_name: str,
):
super(UnetBasicBlock, self).__init__()
self.conv1 = get_conv_layer(
spatial_dims,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
conv_only=True,
)
self.conv2 = get_conv_layer(
spatial_dims,
out_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
conv_only=True,
)
self.lrelu = get_acti_layer(("leakyrelu", {"inplace": True, "negative_slope": 0.01}))
self.norm1 = get_norm_layer(spatial_dims, out_channels, norm_name)
self.norm2 = get_norm_layer(spatial_dims, out_channels, norm_name)
def forward(self, inp):
out = self.conv1(inp)
out = self.norm1(out)
out = self.lrelu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.lrelu(out)
return out
class UnetUpBlock(nn.Module):
"""
An upsampling module that can be used for DynUNet, based on:
`Automated Design of Deep Learning Methods for Biomedical Image Segmentation <https://arxiv.org/abs/1904.08128>`_.
`nnU-Net: Self-adapting Framework for U-Net-Based Medical Image Segmentation <https://arxiv.org/abs/1809.10486>`_.
Args:
spatial_dims: number of spatial dimensions.
in_channels: number of input channels.
out_channels: number of output channels.
kernel_size: convolution kernel size.
stride: convolution stride.
upsample_kernel_size: convolution kernel size for transposed convolution layers.
norm_name: [``"batch"``, ``"instance"``, ``"group"``]
feature normalization type and arguments. In this module, if using ``"group"``,
`in_channels` should be divisible by 16 (default value for ``num_groups``).
"""
def __init__(
self,
spatial_dims: int,
in_channels: int,
out_channels: int,
kernel_size: Union[Sequence[int], int],
stride: Union[Sequence[int], int],
upsample_kernel_size: Union[Sequence[int], int],
norm_name: str,
):
super(UnetUpBlock, self).__init__()
upsample_stride = upsample_kernel_size
self.transp_conv = get_conv_layer(
spatial_dims,
in_channels,
out_channels,
kernel_size=upsample_kernel_size,
stride=upsample_stride,
conv_only=True,
is_transposed=True,
)
self.conv_block = UnetBasicBlock(
spatial_dims,
out_channels + out_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
norm_name=norm_name,
)
def forward(self, inp, skip):
# number of channels for skip should equal out_channels
out = self.transp_conv(inp)
out = torch.cat((out, skip), dim=1)
out = self.conv_block(out)
return out
class UnetOutBlock(nn.Module):
def __init__(self, spatial_dims: int, in_channels: int, out_channels: int):
super(UnetOutBlock, self).__init__()
self.conv = get_conv_layer(
spatial_dims, in_channels, out_channels, kernel_size=1, stride=1, bias=True, conv_only=True
)
def forward(self, inp):
out = self.conv(inp)
return out
def get_acti_layer(act: Union[Tuple[str, Dict], str]):
act_name, act_args = split_args(act)
act_type = Act[act_name]
return act_type(**act_args)
def get_norm_layer(spatial_dims: int, out_channels: int, norm_name: str, num_groups: int = 16):
if norm_name not in ["batch", "instance", "group"]:
raise ValueError(f"Unsupported normalization mode: {norm_name}")
else:
if norm_name == "group":
assert out_channels % num_groups == 0, "out_channels should be divisible by num_groups."
norm = Norm[norm_name](num_groups=num_groups, num_channels=out_channels, affine=True)
else:
norm = Norm[norm_name, spatial_dims](out_channels, affine=True)
return norm
def get_conv_layer(
spatial_dims: int,
in_channels: int,
out_channels: int,
kernel_size: Union[Sequence[int], int] = 3,
stride: Union[Sequence[int], int] = 1,
act: Optional[Union[Tuple, str]] = Act.PRELU,
norm: Union[Tuple, str] = Norm.INSTANCE,
bias: bool = False,
conv_only: bool = True,
is_transposed: bool = False,
):
padding = get_padding(kernel_size, stride)
output_padding = None
if is_transposed:
output_padding = get_output_padding(kernel_size, stride, padding)
return Convolution(
spatial_dims,
in_channels,
out_channels,
strides=stride,
kernel_size=kernel_size,
act=act,
norm=norm,
bias=bias,
conv_only=conv_only,
is_transposed=is_transposed,
padding=padding,
output_padding=output_padding,
)
def get_padding(
kernel_size: Union[Sequence[int], int],
stride: Union[Sequence[int], int],
) -> Union[Tuple[int, ...], int]:
kernel_size_np = np.atleast_1d(kernel_size)
stride_np = np.atleast_1d(stride)
padding_np = (kernel_size_np - stride_np + 1) / 2
error_msg = "padding value should not be negative, please change the kernel size and/or stride."
assert np.min(padding_np) >= 0, error_msg
padding = tuple(int(p) for p in padding_np)
return padding if len(padding) > 1 else padding[0]
def get_output_padding(
kernel_size: Union[Sequence[int], int],
stride: Union[Sequence[int], int],
padding: Union[Sequence[int], int],
) -> Union[Tuple[int, ...], int]:
kernel_size_np = np.atleast_1d(kernel_size)
stride_np = np.atleast_1d(stride)
padding_np = np.atleast_1d(padding)
out_padding_np = 2 * padding_np + stride_np - kernel_size_np
error_msg = "out_padding value should not be negative, please change the kernel size and/or stride."
assert np.min(out_padding_np) >= 0, error_msg
out_padding = tuple(int(p) for p in out_padding_np)
return out_padding if len(out_padding) > 1 else out_padding[0]
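# --- Editor's worked example (not part of the original MONAI source) ---
# For kernel_size=3 and stride=2: get_padding(3, 2) == 1 and get_output_padding(3, 2, 1) == 1,
# so (in - 1) * 2 - 2 * 1 + 3 + 1 == 2 * in and the transposed convolution built by
# get_conv_layer exactly doubles each spatial dimension.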
| [
"torch.cat"
] | 1.5 | JoHof/MONAI | 70483b648fba92f0a8346e53dc14d686e56120a3 |
0.4 | # coding: utf-8
import torch
from torch import nn
from torch.autograd import Variable
import numpy as np
from nnmnkwii.autograd import unit_variance_mlpg
class AbstractModel(object):
"""Interface for VC and TTS models
"""
def include_parameter_generation(self):
"""Whether model includes parameter generation or not.
"""
return False
class In2OutHighwayNet(AbstractModel, nn.Module):
"""Input-to-Output Highway Networks for voice conversion.
Trying to replicate the model described in the following paper:
https://www.jstage.jst.go.jp/article/transinf/E100.D/8/E100.D_2017EDL8034/
.. note::
Since model architecture itself includes parameter generation, we cannot
simply use the model for multi-stream features (e.g., in TTS, acoustic
features often consist multiple features; mgc, f0, vuv and bap.)
"""
def __init__(self, in_dim=118, out_dim=118, static_dim=118 // 2,
num_hidden=3, hidden_dim=512, dropout=0.5):
super(In2OutHighwayNet, self).__init__()
self.static_dim = static_dim
self.relu = nn.LeakyReLU(inplace=True)
self.sigmoid = nn.Sigmoid()
# Transform gate (can be deep?)
self.T = nn.Linear(static_dim, static_dim)
# Hidden layers
in_sizes = [in_dim] + [hidden_dim] * (num_hidden - 1)
out_sizes = [hidden_dim] * num_hidden
self.H = nn.ModuleList(
[nn.Linear(in_size, out_size) for (in_size, out_size)
in zip(in_sizes, out_sizes)])
self.last_linear = nn.Linear(hidden_dim, out_dim)
self.dropout = nn.Dropout(dropout)
def include_parameter_generation(self):
return True
def forward(self, x, R, lengths=None):
# Add batch axis
x = x.unsqueeze(0) if x.dim() == 2 else x
x_static = x[:, :, :self.static_dim]
# T(x)
Tx = self.sigmoid(self.T(x_static))
# G(x)
for layer in self.H:
x = self.dropout(self.relu(layer(x)))
x = self.last_linear(x)
Gx = unit_variance_mlpg(R, x)
# y^ = x + T(x) * G(x)
return x, x_static + Tx * Gx
class In2OutRNNHighwayNet(AbstractModel, nn.Module):
def __init__(self, in_dim=118, out_dim=118, static_dim=118 // 2,
num_hidden=3, hidden_dim=512, bidirectional=False, dropout=0.5):
super(In2OutRNNHighwayNet, self).__init__()
self.static_dim = static_dim
self.num_direction = 2 if bidirectional else 1
self.relu = nn.LeakyReLU(inplace=True)
self.sigmoid = nn.Sigmoid()
# Transform gate (can be deep?)
self.T = nn.Linear(static_dim, static_dim)
# Recurrent hidden layers
self.lstm = nn.LSTM(in_dim, hidden_dim, num_hidden, batch_first=True,
bidirectional=bidirectional, dropout=dropout)
self.hidden2out = nn.Linear(hidden_dim * self.num_direction, out_dim)
self.dropout = nn.Dropout(dropout)
def include_parameter_generation(self):
return True
def forward(self, x, R, lengths=None):
# Add batch axis
x = x.unsqueeze(0) if x.dim() == 2 else x
x_static = x[:, :, :self.static_dim]
# T(x)
Tx = self.sigmoid(self.T(x_static))
# Pack padded sequence for CuDNN
if isinstance(lengths, Variable):
lengths = lengths.data.cpu().long().numpy()
if lengths is not None:
inputs = nn.utils.rnn.pack_padded_sequence(
x, lengths, batch_first=True)
else:
inputs = x
# G(x)
output, _ = self.lstm(inputs)
if lengths is not None:
output, _ = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)
output = self.hidden2out(output)
Gx = unit_variance_mlpg(R, output)
# y^ = x + T(x) * G(x)
return x, x_static + Tx * Gx
class MLP(AbstractModel, nn.Module):
def __init__(self, in_dim=118, out_dim=1, num_hidden=2, hidden_dim=256,
dropout=0.5, last_sigmoid=True, bidirectional=None):
# bidirectional is dummy
super(MLP, self).__init__()
in_sizes = [in_dim] + [hidden_dim] * (num_hidden - 1)
out_sizes = [hidden_dim] * num_hidden
self.layers = nn.ModuleList(
[nn.Linear(in_size, out_size) for (in_size, out_size)
in zip(in_sizes, out_sizes)])
self.last_linear = nn.Linear(hidden_dim, out_dim)
self.relu = nn.LeakyReLU(inplace=True)
self.sigmoid = nn.Sigmoid()
self.dropout = nn.Dropout(dropout)
self.last_sigmoid = last_sigmoid
def forward(self, x, lengths=None):
for layer in self.layers:
x = self.dropout(self.relu(layer(x)))
x = self.last_linear(x)
return self.sigmoid(x) if self.last_sigmoid else x
# needs https://github.com/taolei87/sru
class SRURNN(AbstractModel, nn.Module):
def __init__(self, in_dim=118, out_dim=118, num_hidden=2, hidden_dim=256,
bidirectional=False, dropout=0, last_sigmoid=False,
use_relu=0, rnn_dropout=0.0):
super(SRURNN, self).__init__()
from cuda_functional import SRU
self.num_direction = 2 if bidirectional else 1
self.gru = SRU(in_dim, hidden_dim, num_hidden,
bidirectional=bidirectional, dropout=dropout,
use_relu=use_relu, rnn_dropout=rnn_dropout)
self.hidden2out = nn.Linear(hidden_dim * self.num_direction, out_dim)
self.sigmoid = nn.Sigmoid()
self.last_sigmoid = last_sigmoid
def forward(self, sequence, lengths):
# Batch first -> Time first
sequence = sequence.transpose(0, 1)
output, _ = self.gru(sequence)
# Time first -> Batch first
output = output.transpose(0, 1)
output = self.hidden2out(output)
return self.sigmoid(output) if self.last_sigmoid else output
class GRURNN(AbstractModel, nn.Module):
def __init__(self, in_dim=118, out_dim=118, num_hidden=2, hidden_dim=256,
bidirectional=False, dropout=0, last_sigmoid=False):
super(GRURNN, self).__init__()
self.num_direction = 2 if bidirectional else 1
self.gru = nn.LSTM(in_dim, hidden_dim, num_hidden, batch_first=True,
bidirectional=bidirectional, dropout=dropout)
self.hidden2out = nn.Linear(hidden_dim * self.num_direction, out_dim)
self.sigmoid = nn.Sigmoid()
self.last_sigmoid = last_sigmoid
def forward(self, sequence, lengths):
if isinstance(lengths, Variable):
lengths = lengths.data.cpu().long().numpy()
sequence = nn.utils.rnn.pack_padded_sequence(
sequence, lengths, batch_first=True)
output, _ = self.gru(sequence)
output, _ = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)
output = self.hidden2out(output)
return self.sigmoid(output) if self.last_sigmoid else output
class LSTMRNN(AbstractModel, nn.Module):
def __init__(self, in_dim=118, out_dim=118, num_hidden=2, hidden_dim=256,
bidirectional=False, dropout=0, last_sigmoid=False):
super(LSTMRNN, self).__init__()
self.num_direction = 2 if bidirectional else 1
self.lstm = nn.LSTM(in_dim, hidden_dim, num_hidden, batch_first=True,
bidirectional=bidirectional, dropout=dropout)
self.hidden2out = nn.Linear(hidden_dim * self.num_direction, out_dim)
self.sigmoid = nn.Sigmoid()
self.last_sigmoid = last_sigmoid
def forward(self, sequence, lengths):
if isinstance(lengths, Variable):
lengths = lengths.data.cpu().long().numpy()
sequence = nn.utils.rnn.pack_padded_sequence(
sequence, lengths, batch_first=True)
output, _ = self.lstm(sequence)
output, _ = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)
output = self.hidden2out(output)
return self.sigmoid(output) if self.last_sigmoid else output
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.LSTM",
"torch.nn.Sigmoid",
"torch.nn.LeakyReLU",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.nn.utils.rnn.pack_padded_sequence"
] | 0.4.1 | karkirowle/gantts | f61d2b1ecb9493980338c9f598d74fc46120afe2 |
1.8 | import torch
import torch.nn as nn
#import torch.nn.functional as F
import math
class PositionalEmbedding(nn.Module):
'''
Encode position index to d_model dimension feature
'''
def __init__(self, d_model, max_len=5000):
super(PositionalEmbedding, self).__init__()
# buffer placeholder
pe = torch.zeros((max_len, d_model), requires_grad=False).float()
position = torch.arange(max_len).unsqueeze(-1).float() # (max_len, 1)
div_term = (torch.arange(0, d_model, 2) * (-math.log(10000.0)/d_model)).exp() # = 1/(10000)^(i/d_model)
pe[:, 0::2] = torch.sin(position * div_term) # (max_len, d_model/2), sth like sin(nx) where n is the position index
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0) # (1, max_len, d_model)
self.register_buffer('pe', pe) # add pe as the state of the class instead of parameter
def forward(self, x):
return self.pe[:, :x.size(1)] # (1, actual_len, d_model)
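# Illustrative usage sketch (assumption, not in the source file): the module returns a
# (1, seq_len, d_model) slice of the precomputed sinusoid table, independent of the
# batch size, ready to be broadcast-added to token embeddings.
#   pos = PositionalEmbedding(d_model=16)
#   out = pos(torch.zeros(8, 96, 16))   # -> torch.Size([1, 96, 16])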
class TokenEmbedding(nn.Module):
'''
    Use Conv1d to embed the c_in-dimensional input into a d_model-dimensional feature.
Initialize weights using kaiming_normal()
'''
def __init__(self, c_in, d_model):
super(TokenEmbedding, self).__init__()
v1,v2 = torch.__version__.split('.')[0:2]
if (int(v1)==1 and int(v2)>=5) or int(v1)>1:
padding = 1
else:
padding = 2
self.tokenConv = nn.Conv1d(in_channels=c_in,
out_channels=d_model,
kernel_size=3,
padding=padding,
padding_mode='circular')
# initialize the weights
for m in self.modules():
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight,
mode='fan_in',
nonlinearity='leaky_relu')
def forward(self, x):
# x shape (B,L,D)
        # permute and transpose are similar, but transpose can only switch two dims
x = self.tokenConv(x.permute(0,2,1)).transpose(1,2) # permute to make D into channel dim for Conv
return x
class FixedEmbedding(nn.Module):
'''
Fix nn.Embedding weights which are initialized based on Positional Embedding
'''
def __init__(self, c_in, d_model):
super(FixedEmbedding, self).__init__()
w = torch.zeros((c_in, d_model), requires_grad=False).float()
# Positional Embedding
position = torch.arange(c_in).unsqueeze(-1).float()
div_term = (torch.arange(0, d_model, 2).float() * (-math.log(10000.0)/d_model)).exp()
w[:,0::2] = torch.sin(position * div_term)
w[:,1::2] = torch.cos(position * div_term)
self.emb = nn.Embedding(c_in, d_model) # Embedding is just a lookup table
self.emb.weight = nn.Parameter(w, requires_grad=False) # Fixed embedding, no need to do back propagation
def forward(self, x):
        return self.emb(x).detach() # detach to make the output a leaf node since no back-propagation is required
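# Note (added for clarity, not in the original file): FixedEmbedding behaves like
# nn.Embedding with frozen sinusoidal weights, e.g. FixedEmbedding(24, 16)(torch.arange(24))
# yields a (24, 16) lookup result that never receives gradient updates.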
class TemporalEmbedding(nn.Module):
'''
Encode temporal info based on FixedEmbedding or normal Embedding layer
Order of temporal info is [month, day, weekday, hour, minute(optional)]
'''
def __init__(self, d_model, embed_type='fixed', freq='h', minute_size=4):
# freq: h or t
super(TemporalEmbedding, self).__init__()
#minute_size = 4 # 15min interval
hour_size = 24
weekday_size = 7
day_size = 32
month_size = 13
self.month_idx = 0
self.day_idx = 1
self.weekday_idx = 2
self.hour_idx = 3
self.minute_idx = 4
Embed = FixedEmbedding if embed_type=='fixed' else nn.Embedding
if freq=='t':
self.minute_embed = Embed(minute_size, d_model)
self.hour_embed = Embed(hour_size, d_model)
self.weekday_embed = Embed(weekday_size, d_model)
self.day_embed = Embed(day_size, d_model)
self.month_embed = Embed(month_size, d_model)
def forward(self, x):
x = x.long()
minute_x = self.minute_embed(x[:,:,self.minute_idx]) if hasattr(self, 'minute_embed') else 0.
hour_x = self.hour_embed(x[:,:,self.hour_idx])
weekday_x = self.weekday_embed(x[:,:,self.weekday_idx])
day_x = self.day_embed(x[:,:,self.day_idx])
month_x = self.month_embed(x[:,:,self.month_idx])
return hour_x + weekday_x + day_x + month_x + minute_x
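# Illustrative usage sketch (assumption): x holds integer calendar features in the order
# [month, day, weekday, hour(, minute)], so for freq='h' a (B, L, 4) tensor of valid
# indices produces a (B, L, d_model) sum of four embeddings.
#   temb = TemporalEmbedding(d_model=16, freq='h')
#   marks = torch.zeros(2, 24, 4, dtype=torch.long)
#   temb(marks).shape                    # -> torch.Size([2, 24, 16])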
class TimeFeatureEmbedding(nn.Module):
'''
Use nn.Linear to do embedding.
    freq refers to utils/timefeatures.py
features_by_offsets = {
offsets.YearEnd: [],
offsets.QuarterEnd: [MonthOfYear],
offsets.MonthEnd: [MonthOfYear],
offsets.Week: [DayOfMonth, WeekOfYear],
offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],
offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],
offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],
offsets.Minute: [
MinuteOfHour,
HourOfDay,
DayOfWeek,
DayOfMonth,
DayOfYear,
],
offsets.Second: [
SecondOfMinute,
MinuteOfHour,
HourOfDay,
DayOfWeek,
DayOfMonth,
DayOfYear,
],
}
'''
def __init__(self, d_model, embed_type='timeF', freq='h'):
super(TimeFeatureEmbedding, self).__init__()
        freq_map = {'h':4, 't':5, 's':6, 'm':1, 'a':1, 'w':2, 'd':3, 'b':3} # refer to utils/timefeatures.py
d_inp = freq_map[freq]
self.embed = nn.Linear(d_inp, d_model)
def forward(self, x):
return self.embed(x)
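# Note (added for clarity): with freq='h' the linear layer maps the 4 real-valued time
# features per step to d_model, i.e. a (B, L, 4) input becomes (B, L, d_model).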
class DataEmbedding(nn.Module):
def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1):
super(DataEmbedding, self).__init__()
self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model)
self.position_embedding = PositionalEmbedding(d_model=d_model)
self.temporal_embedding = TemporalEmbedding(d_model=d_model, embed_type=embed_type, freq=freq) if embed_type!='timeF' else TimeFeatureEmbedding(d_model=d_model, embed_type=embed_type, freq=freq)
self.dropout = nn.Dropout(p=dropout)
def forward(self, x, x_mark):
x = self.value_embedding(x) + self.position_embedding(x) + self.temporal_embedding(x_mark)
return self.dropout(x) | [
"torch.nn.Linear",
"torch.cos",
"torch.nn.Dropout",
"torch.__version__.split",
"torch.zeros",
"torch.sin",
"torch.nn.Conv1d",
"torch.arange",
"torch.nn.init.kaiming_normal_",
"torch.nn.Parameter",
"torch.nn.Embedding"
] | 1.8.0 | macul99/Informer2020 | 7e1e3979ea912879e16194e3bf93062458f2cb9e |
1.0 | import torch
from terra.io import reader, writer
@writer(torch.Tensor)
def write_tensor(out, path):
torch.save(out, path)
return path
@reader(torch.Tensor)
def read_tensor(path):
return torch.load(path)
class TerraModule:
def __terra_write__(self, path):
torch.save({"state_dict": self.state_dict(), "config": self.config}, path)
@classmethod
def __terra_read__(cls, path):
# TODO: make it possible to specify a gpu
dct = torch.load(path, map_location="cpu")
model = cls(dct["config"])
model.load_state_dict(dct["state_dict"])
return model
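# Illustrative usage sketch (assumption; Net and the paths below are hypothetical):
#   class Net(TerraModule, torch.nn.Module): ...
#   net = Net(config); net.__terra_write__("model.pt")
#   restored = Net.__terra_read__("model.pt")   # rebuilds from the saved config + state_dict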
| [
"torch.save",
"torch.load"
] | 1.0.0 | seyuboglu/terra | 7d5f8d8cdfbf819b52fb997b5b9746746d86b295 |
1.9 | """Evaluate training/validation set using models in checkpoints"""
import logging
import torch
from mlbench_core.aggregation.pytorch.centralized import AllReduceAggregation
from mlbench_core.controlflow.pytorch.helpers import iterate_dataloader
from mlbench_core.utils.pytorch.distributed import global_average
logger = logging.getLogger("mlbench")
class CheckpointsEvaluationControlFlow(object):
"""Evaluate models on training / validation dataset.
Args:
ckpt_dir (str): Path to checkpoints.
rank (int): The rank of the current process
world_size (int): The total number of workers
checkpointer (:obj:`Checkpointer`): Used to load checkpoints.
        model (:obj:`torch.nn.Module`): The model to evaluate.
        epochs (int): Number of checkpointed epochs to evaluate.
loss_function (:obj:`torch.nn.modules.loss._Loss`): loss function.
metrics (:obj:`list` of :obj:`mlbench_core.evaluation.pytorch.*`): metrics like TopKAccuracy.
use_cuda (bool): Whether to train on GPU or not. Default: `False`
dtype (str): The datatype to use for the dataloader data
max_batch_per_epoch (int): Maximum number of batches per epoch. Whole dataset
is used if not specified. Default: `None`
"""
def __init__(
self,
ckpt_dir,
rank,
world_size,
checkpointer,
model,
epochs,
loss_function,
metrics,
use_cuda=False,
dtype=None,
max_batch_per_epoch=None,
):
self.ckpt_dir = ckpt_dir
self.rank = rank
self.checkpointer = checkpointer
self.model = model
self.epochs = epochs
self.loss_function = loss_function
self.metrics = metrics
self.dtype = dtype
self.max_batch_per_epoch = max_batch_per_epoch
self.use_cuda = use_cuda
self.model_agg_fn = AllReduceAggregation(world_size=world_size).agg_model()
self._check_checkpoints()
def _check_checkpoints(self):
for epoch in range(self.epochs):
self.checkpointer.checkpoint_exists(self.ckpt_dir, self.rank, epoch)
def _load_model(self, epoch):
# Load epoch-rank model
model = self.checkpointer.load_model_by_epoch(
self.ckpt_dir, self.rank, epoch, self.model
)
# aggregate models
self.model_agg_fn(model, op="avg_world")
return model
def evaluate_by_epochs(self, dataloader):
"""Evaluate dataset using the averaged models.
In each epoch each process loads models and averages them. The averaged model is
used to evaluate train / validation dataset.
Args:
dataloader (:obj:`torch.utils.data.DataLoader`): The dataset to be evaluated.
Returns:
list: list of stats of models in each epoch.
"""
stats_list = []
for epoch in range(self.epochs):
# Same model for all workers.
model = self._load_model(epoch)
model.eval()
stats = {"epoch": epoch, "count": 0, "total_loss": 0}
for metric in self.metrics:
stats["total_" + metric.name] = 0
data_iter = iterate_dataloader(
dataloader, self.dtype, self.max_batch_per_epoch, self.use_cuda
)
with torch.no_grad():
for i, (data, target) in enumerate(data_iter):
output = model(data)
# Compute loss and metrics.
count = len(target)
stats["count"] += count
stats["total_loss"] += self.loss_function(output, target) * count
for metric in self.metrics:
stats["total_" + metric.name] += metric(output, target) * count
logger.info(
"E{:4}B{:4}: total loss={:10.3e}".format(
epoch, i, stats["total_loss"] / stats["count"]
)
)
# Keep globally averaged loss / metrics, etc.
stats["loss"] = global_average(stats["total_loss"], stats["count"]).item()
for metric in self.metrics:
stats[metric.name] = global_average(
stats["total_" + metric.name], stats["count"]
).item()
del stats["total_" + metric.name]
del stats["count"], stats["total_loss"]
stats_list.append(stats)
return stats_list
| [
"torch.no_grad"
] | 1.9.0 | mlbench/mlbench-core | 4fd3c7e6f1a5be69e52383ab2eb64cad257218c2 |
1.9 | import torch
@torch.jit.script
def orthogonalize(matrix, eps=torch.FloatTensor([1e-16])):
"""Function used to orthogonalize a matrix.
Args:
matrix (torch.Tensor): Matrix to orthogonalize
eps (torch.FloatTensor): Used to avoid division by zero (default: 1e-16)
"""
n, m = matrix.shape
for i in range(m):
# Normalize the i'th column
col = matrix[:, i : i + 1]
col /= torch.sqrt(torch.sum(col ** 2)) + eps
# Project it on the rest and remove it
if i + 1 < m:
rest = matrix[:, i + 1 :]
# rest -= torch.matmul(col.t(), rest) * col
rest -= torch.sum(col * rest, dim=0) * col
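# Illustrative usage sketch (assumption, not in the original file): the function works
# column-by-column in place, so the columns below end up nearly orthonormal.
#   m = torch.randn(5, 3)
#   orthogonalize(m)
#   m.t() @ m                            # -> close to the 3x3 identity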
def pack_tensors(tensors, use_cuda=False):
"""
Packs a list of tensors into one 1-dimensional tensor.
Args:
tensors (list[torch.Tensor]): The tensors to pack
use_cuda (bool): Whether the resulting tensor should be on cuda
Returns:
(torch.Tensor, list[int], list[(int, int)]):
            The flattened tensor, the list of start indices of each packed tensor,
            and the original shape of each tensor.
            Those values are later used to unpack the tensor.
"""
indices = [0]
for tensor in tensors:
new_end = indices[-1] + tensor.nelement()
indices.append(new_end)
tensor_sizes = [t.size() for t in tensors]
vec = torch.empty(
indices[-1],
device=tensors[0].device if tensors[0].is_cuda and use_cuda else "cpu",
dtype=tensors[0].dtype,
)
for tensor, start_idx, end_idx in zip(tensors, indices[:-1], indices[1:]):
vec[start_idx:end_idx] = tensor.data.view(-1)
return vec, indices, tensor_sizes
def unpack_tensors(aggregated, indices, sizes):
"""
Unpacks a 1-dimensional tensor into a list of tensors
Args:
aggregated (torch.Tensor): The 1-dimensional tensor
indices (List[Int]): The start index of each tensor
sizes (List[(Int, Int)]): The size of each resulting tensor
Returns:
List[torch.Tensor]: The unpacked tensors
"""
start_index = indices[:-1]
end_index = indices[1:]
tensors = []
for i, (start, end) in enumerate(zip(start_index, end_index)):
tensors.append(aggregated[start:end].view(sizes[i]))
return tensors
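# Illustrative round-trip sketch (assumption): packing and then unpacking recovers the
# original tensors with their shapes.
#   ts = [torch.randn(2, 3), torch.randn(4)]
#   flat, idx, sizes = pack_tensors(ts)
#   restored = unpack_tensors(flat, idx, sizes)   # shapes (2, 3) and (4,) again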
| [
"torch.FloatTensor",
"torch.empty",
"torch.sum"
] | 1.9.0 | mlbench/mlbench-core | 4fd3c7e6f1a5be69e52383ab2eb64cad257218c2 |
1.6 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import BertPreTrainedModel, BertModel
class BiEncoder(BertPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.bert = kwargs['bert']
def forward(self, context_input_ids=None, context_input_masks=None,
responses_input_ids=None, responses_input_masks=None, labels=None, mod='train'):
## only select the first response (whose lbl==1)
if labels is not None or mod=='get_base': #
responses_input_ids = responses_input_ids[:, 0, :].unsqueeze(1)
responses_input_masks = responses_input_masks[:, 0, :].unsqueeze(1)
#context
if context_input_ids is not None:
context_vec = self.bert(context_input_ids, context_input_masks)[0][:,0,:] # [bs,dim]
if mod == 'inference':
return context_vec
        # candidates
batch_size, res_cnt, seq_length = responses_input_ids.shape
responses_input_ids = responses_input_ids.view(-1, seq_length)
responses_input_masks = responses_input_masks.view(-1, seq_length)
if mod == 'inference2':
cand_emb = responses_input_ids
else:
responses_input_ids = responses_input_ids.view(-1, seq_length)
responses_input_masks = responses_input_masks.view(-1, seq_length)
cand_emb = self.bert(responses_input_ids, responses_input_masks)[0][:,0,:] # [bs, dim]
if mod == 'get_base':
return cand_emb
responses_vec = cand_emb.view(batch_size, res_cnt, -1) # [bs, res_cnt, dim]
if labels is not None:
responses_vec = responses_vec.squeeze(1)
#logits = torch.matmul(context_vec, pt_candidates.t()) # [bs, bs]
#labels = torch.arange(batch_size, dtype=torch.long).to(logits.device)
#loss = nn.CrossEntropyLoss()(logits, labels)
#responses_vec = responses_vec.squeeze(1)
#dot_product = torch.matmul(context_vec, responses_vec.t()) # [bs, bs]
#mask = torch.eye(context_input_ids.size(0)).to(context_input_ids.device)
#loss = F.log_softmax(dot_product, dim=-1)
#loss = (-loss.sum(dim=1)).mean()
logits = torch.cdist(context_vec, responses_vec)
#logits = torch.cosine_similarity(context_vec, pt_candidates)
mask = torch.eye(logits.size(0)).to(context_input_ids.device)
loss = 1/(logits * torch.abs(mask-1)).sum(dim=-1) + (logits*mask).sum(dim=-1)
loss = loss.mean()
return loss
else:
context_vec = context_vec.unsqueeze(1)#.expand(responses_vec.size())
dot_product = torch.cdist(context_vec, responses_vec).squeeze()
#print(dot_product.size())
#print('context_vec', context_vec.size(), 'responses_vec', responses_vec.permute(0, 2, 1).size())
#dot_product = torch.matmul(context_vec, responses_vec.permute(0, 2, 1)).squeeze()
#print(dot_product.size())
return dot_product
class CrossEncoder(BertPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.bert = kwargs['bert']
self.linear = nn.Linear(config.hidden_size, 1)
def forward(self, text_input_ids, text_input_masks, text_input_segments, labels=None):
batch_size, neg, dim = text_input_ids.shape
text_input_ids = text_input_ids.reshape(-1, dim)
text_input_masks = text_input_masks.reshape(-1, dim)
text_input_segments = text_input_segments.reshape(-1, dim)
text_vec = self.bert(text_input_ids, text_input_masks, text_input_segments)[0][:,0,:] # [bs,dim]
score = self.linear(text_vec)
score = score.view(-1, neg)
        if labels is not None:
            loss = -F.log_softmax(score, -1)[:, 0].mean()
            return loss
        else:
            return score
class PolyEncoder(BertPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.bert = kwargs['bert']
self.poly_m = kwargs['poly_m']
self.poly_code_embeddings = nn.Embedding(self.poly_m, config.hidden_size)
# https://github.com/facebookresearch/ParlAI/blob/master/parlai/agents/transformer/polyencoder.py#L355
torch.nn.init.normal_(self.poly_code_embeddings.weight, config.hidden_size ** -0.5)
def dot_attention(self, q, k, v):
# q: [bs, poly_m, dim] or [bs, res_cnt, dim]
# k=v: [bs, length, dim] or [bs, poly_m, dim]
attn_weights = torch.matmul(q, k.transpose(2, 1)) # [bs, poly_m, length]
attn_weights = F.softmax(attn_weights, -1)
output = torch.matmul(attn_weights, v) # [bs, poly_m, dim]
return output
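    # Shape note (added for clarity): with q of shape [bs, poly_m, dim] and k = v of
    # shape [bs, length, dim], dot_attention returns [bs, poly_m, dim], i.e. one
    # attended context vector per poly code.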
def forward(self, context_input_ids=None, context_input_masks=None,
responses_input_ids=None, responses_input_masks=None, labels=None, mod='train'):
# during training, only select the first response
# we are using other instances in a batch as negative examples
if labels is not None or mod == 'get_base' or mod == 'inference2':
responses_input_ids = responses_input_ids[:, 0, :].unsqueeze(1)
responses_input_masks = responses_input_masks[:, 0, :].unsqueeze(1)
batch_size, res_cnt, seq_length = responses_input_ids.shape # res_cnt is 1 during training
# context encoder
if context_input_ids is not None:
            if mod == 'inference2':  # feed a batch containing the user's sentence and expand it to the batch size (= number of candidates)
context_input_ids = context_input_ids.expand(batch_size, context_input_ids.shape[-1])
context_input_masks = context_input_masks.expand(batch_size, context_input_ids.shape[-1])
ctx_out = self.bert(context_input_ids, context_input_masks)[0] # [bs, length, dim]
poly_code_ids = torch.arange(self.poly_m, dtype=torch.long).to(context_input_ids.device)
poly_code_ids = poly_code_ids.unsqueeze(0).expand(batch_size, self.poly_m)
poly_codes = self.poly_code_embeddings(poly_code_ids) # [bs, poly_m, dim]
embs = self.dot_attention(poly_codes, ctx_out, ctx_out) # [bs, poly_m, dim]
# response encoder
if mod == 'inference':
cand_emb = responses_input_ids
else:
responses_input_ids = responses_input_ids.view(-1, seq_length)
responses_input_masks = responses_input_masks.view(-1, seq_length)
cand_emb = self.bert(responses_input_ids, responses_input_masks)[0][:,0,:] # [bs, dim]
if mod == 'get_base':
return cand_emb
cand_emb = cand_emb.view(batch_size, res_cnt, -1) # [bs, res_cnt, dim]
# merge
if labels is not None or mod == 'inference2':
# we are recycling responses for faster training
# we repeat responses for batch_size times to simulate test phase
# so that every context is paired with batch_size responses
cand_emb = cand_emb.permute(1, 0, 2) # [1, bs, dim]
cand_emb = cand_emb.expand(batch_size, batch_size, cand_emb.shape[2]) # [bs, bs, dim]
ctx_emb = self.dot_attention(cand_emb, embs, embs).squeeze() # [bs, bs, dim]
dot_product = (ctx_emb*cand_emb).sum(-1) # [bs, bs]
if mod == 'inference2':
return dot_product
mask = torch.eye(batch_size).to(context_input_ids.device) # [bs, bs]
loss = F.log_softmax(dot_product, dim=-1) * mask
loss = (-loss.sum(dim=1)).mean()
return loss
else:
ctx_emb = self.dot_attention(cand_emb, embs, embs) # [bs, res_cnt, dim]
dot_product = (ctx_emb*cand_emb).sum(-1)
return dot_product
| [
"torch.nn.Linear",
"torch.cdist",
"torch.arange",
"torch.nn.functional.log_softmax",
"torch.abs",
"torch.nn.init.normal_",
"torch.eye",
"torch.nn.functional.softmax",
"torch.matmul",
"torch.nn.Embedding"
] | 1.6.0 | Anpopaicoconat/Poly-Encoder | 779a6ec19bd6477947fcf44199fa06fc6353e18a |
1.5 | import transformers
import torch
MAX_LEN = 512
TRAIN_BATCH_SIZE = 4
VALID_BATCH_SIZE = 8
NUM_CLASSES = 5
DEVICE = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
EPOCHS = 2
BERT_PATH = './input/prunebert-base-uncased-6-finepruned-w-distil-squad'
MODEL_PATH = './model/pytorch_model.bin'
TRAINING_FILE = './input/processed.csv'
CLASS_NAME = ['food', 'transport', 'shopping', 'bills', 'credit']
TOKENIZER = transformers.BertTokenizer.from_pretrained(
BERT_PATH,
do_lower_case=True
)
| [
"torch.device",
"torch.cuda.is_available"
] | 1.5.0 | robmarkcole/BERT_as_serverless_service | fbc4004677ae3811b08f89d577b5a45ce0bfbbd0 |
1.7 | #!/usr/bin/env python3
#
# Copyright 2020 Xiaomi Corporation (authors: Fangjun Kuang)
#
# See ../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To run this single test, use
#
# ctest --verbose -R ctc_loss_test_py
from typing import List
import unittest
import k2
import torch
def _visualize_ctc_topo():
'''See https://git.io/JtqyJ
for what the resulting ctc_topo looks like.
'''
symbols = k2.SymbolTable.from_str('''
<blk> 0
a 1
b 2
''')
aux_symbols = k2.SymbolTable.from_str('''
a 1
b 2
''')
ctc_topo = k2.ctc_topo(2)
ctc_topo.labels_sym = symbols
ctc_topo.aux_labels_sym = aux_symbols
ctc_topo.draw('ctc_topo.pdf')
# Test cases are modified from
# https://github.com/baidu-research/warp-ctc/blob/master/torch_binding/TUTORIAL.md
#
#
# The CTC losses computed by warp-ctc, PyTorch, and k2 are identical.
#
# The gradients with respect to network outputs are also identical
# for PyTorch and k2.
class TestCtcLoss(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.devices = [torch.device('cpu')]
if torch.cuda.is_available() and k2.with_cuda:
cls.devices.append(torch.device('cuda', 0))
if torch.cuda.device_count() > 1:
torch.cuda.set_device(1)
cls.devices.append(torch.device('cuda', 1))
def test_case1(self):
for device in self.devices:
            # suppose we have a blank plus four symbols: <blk>, a, b, c, d
torch_activation = torch.tensor([0.2, 0.2, 0.2, 0.2,
0.2]).to(device)
k2_activation = torch_activation.detach().clone()
# (T, N, C)
torch_activation = torch_activation.reshape(
1, 1, -1).requires_grad_(True)
# (N, T, C)
k2_activation = k2_activation.reshape(1, 1,
-1).requires_grad_(True)
torch_log_probs = torch.nn.functional.log_softmax(
torch_activation, dim=-1) # (T, N, C)
# we have only one sequence and its label is `a`
targets = torch.tensor([1]).to(device)
input_lengths = torch.tensor([1]).to(device)
target_lengths = torch.tensor([1]).to(device)
torch_loss = torch.nn.functional.ctc_loss(
log_probs=torch_log_probs,
targets=targets,
input_lengths=input_lengths,
target_lengths=target_lengths,
reduction='mean')
assert torch.allclose(torch_loss,
torch.tensor([1.6094379425049]).to(device))
# (N, T, C)
k2_log_probs = torch.nn.functional.log_softmax(k2_activation,
dim=-1)
supervision_segments = torch.tensor([[0, 0, 1]], dtype=torch.int32)
dense_fsa_vec = k2.DenseFsaVec(k2_log_probs,
supervision_segments).to(device)
ctc_topo = k2.ctc_topo(4)
linear_fsa = k2.linear_fsa([1])
decoding_graph = k2.compose(ctc_topo, linear_fsa).to(device)
k2_loss = k2.ctc_loss(decoding_graph,
dense_fsa_vec,
reduction='mean',
target_lengths=target_lengths)
assert torch.allclose(torch_loss, k2_loss)
torch_loss.backward()
k2_loss.backward()
assert torch.allclose(torch_activation.grad, k2_activation.grad)
def test_case2(self):
for device in self.devices:
# (T, N, C)
torch_activation = torch.arange(1, 16).reshape(1, 3, 5).permute(
1, 0, 2).to(device)
torch_activation = torch_activation.to(torch.float32)
torch_activation.requires_grad_(True)
k2_activation = torch_activation.detach().clone().requires_grad_(
True)
torch_log_probs = torch.nn.functional.log_softmax(
torch_activation, dim=-1) # (T, N, C)
# we have only one sequence and its labels are `c,c`
targets = torch.tensor([3, 3]).to(device)
input_lengths = torch.tensor([3]).to(device)
target_lengths = torch.tensor([2]).to(device)
torch_loss = torch.nn.functional.ctc_loss(
log_probs=torch_log_probs,
targets=targets,
input_lengths=input_lengths,
target_lengths=target_lengths,
reduction='mean')
act = k2_activation.permute(1, 0, 2) # (T, N, C) -> (N, T, C)
k2_log_probs = torch.nn.functional.log_softmax(act, dim=-1)
supervision_segments = torch.tensor([[0, 0, 3]], dtype=torch.int32)
dense_fsa_vec = k2.DenseFsaVec(k2_log_probs,
supervision_segments).to(device)
ctc_topo = k2.ctc_topo(4)
linear_fsa = k2.linear_fsa([3, 3])
decoding_graph = k2.compose(ctc_topo, linear_fsa).to(device)
k2_loss = k2.ctc_loss(decoding_graph,
dense_fsa_vec,
reduction='mean',
target_lengths=target_lengths)
expected_loss = torch.tensor([7.355742931366],
device=device) / target_lengths
assert torch.allclose(torch_loss, k2_loss)
assert torch.allclose(torch_loss, expected_loss)
torch_loss.backward()
k2_loss.backward()
assert torch.allclose(torch_activation.grad, k2_activation.grad)
def test_case3(self):
for device in self.devices:
# (T, N, C)
torch_activation = torch.tensor([[
[-5, -4, -3, -2, -1],
[-10, -9, -8, -7, -6],
[-15, -14, -13, -12, -11.],
]]).permute(1, 0, 2).to(device).requires_grad_(True)
torch_activation = torch_activation.to(torch.float32)
torch_activation.requires_grad_(True)
k2_activation = torch_activation.detach().clone().requires_grad_(
True)
torch_log_probs = torch.nn.functional.log_softmax(
torch_activation, dim=-1) # (T, N, C)
# we have only one sequence and its labels are `b,c`
targets = torch.tensor([2, 3]).to(device)
input_lengths = torch.tensor([3]).to(device)
target_lengths = torch.tensor([2]).to(device)
torch_loss = torch.nn.functional.ctc_loss(
log_probs=torch_log_probs,
targets=targets,
input_lengths=input_lengths,
target_lengths=target_lengths,
reduction='mean')
act = k2_activation.permute(1, 0, 2) # (T, N, C) -> (N, T, C)
k2_log_probs = torch.nn.functional.log_softmax(act, dim=-1)
supervision_segments = torch.tensor([[0, 0, 3]], dtype=torch.int32)
dense_fsa_vec = k2.DenseFsaVec(k2_log_probs,
supervision_segments).to(device)
ctc_topo = k2.ctc_topo(4)
linear_fsa = k2.linear_fsa([2, 3])
decoding_graph = k2.compose(ctc_topo, linear_fsa).to(device)
k2_loss = k2.ctc_loss(decoding_graph,
dense_fsa_vec,
reduction='mean',
target_lengths=target_lengths)
expected_loss = torch.tensor([4.938850402832],
device=device) / target_lengths
assert torch.allclose(torch_loss, k2_loss)
assert torch.allclose(torch_loss, expected_loss)
torch_loss.backward()
k2_loss.backward()
assert torch.allclose(torch_activation.grad, k2_activation.grad)
def test_case4(self):
for device in self.devices:
# put case3, case2 and case1 into a batch
torch_activation_1 = torch.tensor(
[[0., 0., 0., 0., 0.]]).to(device).requires_grad_(True)
torch_activation_2 = torch.arange(1, 16).reshape(3, 5).to(
torch.float32).to(device).requires_grad_(True)
torch_activation_3 = torch.tensor([
[-5, -4, -3, -2, -1],
[-10, -9, -8, -7, -6],
[-15, -14, -13, -12, -11.],
]).to(device).requires_grad_(True)
k2_activation_1 = torch_activation_1.detach().clone(
).requires_grad_(True)
k2_activation_2 = torch_activation_2.detach().clone(
).requires_grad_(True)
k2_activation_3 = torch_activation_3.detach().clone(
).requires_grad_(True)
# [T, N, C]
torch_activations = torch.nn.utils.rnn.pad_sequence(
[torch_activation_3, torch_activation_2, torch_activation_1],
batch_first=False,
padding_value=0)
# [N, T, C]
k2_activations = torch.nn.utils.rnn.pad_sequence(
[k2_activation_3, k2_activation_2, k2_activation_1],
batch_first=True,
padding_value=0)
# [[b,c], [c,c], [a]]
targets = torch.tensor([2, 3, 3, 3, 1]).to(device)
input_lengths = torch.tensor([3, 3, 1]).to(device)
target_lengths = torch.tensor([2, 2, 1]).to(device)
torch_log_probs = torch.nn.functional.log_softmax(
torch_activations, dim=-1) # (T, N, C)
torch_loss = torch.nn.functional.ctc_loss(
log_probs=torch_log_probs,
targets=targets,
input_lengths=input_lengths,
target_lengths=target_lengths,
reduction='sum')
expected_loss = torch.tensor(
[4.938850402832, 7.355742931366, 1.6094379425049]).sum()
assert torch.allclose(torch_loss, expected_loss.to(device))
k2_log_probs = torch.nn.functional.log_softmax(k2_activations,
dim=-1)
supervision_segments = torch.tensor(
[[0, 0, 3], [1, 0, 3], [2, 0, 1]], dtype=torch.int32)
dense_fsa_vec = k2.DenseFsaVec(k2_log_probs,
supervision_segments).to(device)
ctc_topo = k2.ctc_topo(4)
# [ [b, c], [c, c], [a]]
linear_fsa = k2.linear_fsa([[2, 3], [3, 3], [1]])
decoding_graph = k2.compose(ctc_topo, linear_fsa).to(device)
k2_loss = k2.ctc_loss(decoding_graph,
dense_fsa_vec,
reduction='sum',
target_lengths=target_lengths)
assert torch.allclose(torch_loss, k2_loss)
scale = torch.tensor([1., -2, 3.5]).to(device)
(torch_loss * scale).sum().backward()
(k2_loss * scale).sum().backward()
assert torch.allclose(torch_activation_1.grad,
k2_activation_1.grad)
assert torch.allclose(torch_activation_2.grad,
k2_activation_2.grad)
assert torch.allclose(torch_activation_3.grad,
k2_activation_3.grad)
def test_random_case1(self):
# 1 sequence
for device in self.devices:
T = torch.randint(10, 100, (1,)).item()
C = torch.randint(20, 30, (1,)).item()
torch_activation = torch.rand((1, T + 10, C),
dtype=torch.float32,
device=device).requires_grad_(True)
k2_activation = torch_activation.detach().clone().requires_grad_(
True)
# [N, T, C] -> [T, N, C]
torch_log_probs = torch.nn.functional.log_softmax(
torch_activation.permute(1, 0, 2), dim=-1)
input_lengths = torch.tensor([T]).to(device)
target_lengths = torch.randint(1, T, (1,)).to(device)
targets = torch.randint(1, C - 1,
(target_lengths.item(),)).to(device)
torch_loss = torch.nn.functional.ctc_loss(
log_probs=torch_log_probs,
targets=targets,
input_lengths=input_lengths,
target_lengths=target_lengths,
reduction='mean')
k2_log_probs = torch.nn.functional.log_softmax(k2_activation,
dim=-1)
supervision_segments = torch.tensor([[0, 0, T]], dtype=torch.int32)
dense_fsa_vec = k2.DenseFsaVec(k2_log_probs,
supervision_segments).to(device)
ctc_topo = k2.ctc_topo(C - 1)
linear_fsa = k2.linear_fsa([targets.tolist()])
decoding_graph = k2.compose(ctc_topo, linear_fsa).to(device)
k2_loss = k2.ctc_loss(decoding_graph,
dense_fsa_vec,
reduction='mean',
target_lengths=target_lengths)
assert torch.allclose(torch_loss, k2_loss)
scale = torch.rand_like(torch_loss) * 100
(torch_loss * scale).sum().backward()
(k2_loss * scale).sum().backward()
assert torch.allclose(torch_activation.grad,
k2_activation.grad,
atol=1e-2)
def test_random_case2(self):
# 2 sequences
for device in self.devices:
T1 = torch.randint(10, 200, (1,)).item()
T2 = torch.randint(9, 100, (1,)).item()
C = torch.randint(20, 30, (1,)).item()
if T1 < T2:
T1, T2 = T2, T1
torch_activation_1 = torch.rand((T1, C),
dtype=torch.float32,
device=device).requires_grad_(True)
torch_activation_2 = torch.rand((T2, C),
dtype=torch.float32,
device=device).requires_grad_(True)
k2_activation_1 = torch_activation_1.detach().clone(
).requires_grad_(True)
k2_activation_2 = torch_activation_2.detach().clone(
).requires_grad_(True)
# [T, N, C]
torch_activations = torch.nn.utils.rnn.pad_sequence(
[torch_activation_1, torch_activation_2],
batch_first=False,
padding_value=0)
# [N, T, C]
k2_activations = torch.nn.utils.rnn.pad_sequence(
[k2_activation_1, k2_activation_2],
batch_first=True,
padding_value=0)
target_length1 = torch.randint(1, T1, (1,)).item()
target_length2 = torch.randint(1, T2, (1,)).item()
target_lengths = torch.tensor([target_length1,
target_length2]).to(device)
targets = torch.randint(1, C - 1,
(target_lengths.sum(),)).to(device)
# [T, N, C]
torch_log_probs = torch.nn.functional.log_softmax(
torch_activations, dim=-1)
input_lengths = torch.tensor([T1, T2]).to(device)
torch_loss = torch.nn.functional.ctc_loss(
log_probs=torch_log_probs,
targets=targets,
input_lengths=input_lengths,
target_lengths=target_lengths,
reduction='mean')
assert T1 >= T2
supervision_segments = torch.tensor([[0, 0, T1], [1, 0, T2]],
dtype=torch.int32)
k2_log_probs = torch.nn.functional.log_softmax(k2_activations,
dim=-1)
dense_fsa_vec = k2.DenseFsaVec(k2_log_probs,
supervision_segments).to(device)
ctc_topo = k2.ctc_topo(C - 1)
linear_fsa = k2.linear_fsa([
targets[:target_length1].tolist(),
targets[target_length1:].tolist()
])
decoding_graph = k2.compose(ctc_topo, linear_fsa).to(device)
k2_loss = k2.ctc_loss(decoding_graph,
dense_fsa_vec,
reduction='mean',
target_lengths=target_lengths)
assert torch.allclose(torch_loss, k2_loss)
scale = torch.rand_like(torch_loss) * 100
(torch_loss * scale).sum().backward()
(k2_loss * scale).sum().backward()
assert torch.allclose(torch_activation_1.grad,
k2_activation_1.grad,
atol=1e-2)
assert torch.allclose(torch_activation_2.grad,
k2_activation_2.grad,
atol=1e-2)
if __name__ == '__main__':
torch.manual_seed(20210109)
unittest.main()
| [
"torch.device",
"torch.rand",
"torch.rand_like",
"torch.arange",
"torch.nn.utils.rnn.pad_sequence",
"torch.nn.functional.log_softmax",
"torch.manual_seed",
"torch.cuda.device_count",
"torch.cuda.set_device",
"torch.cuda.is_available",
"torch.tensor",
"torch.randint",
"torch.allclose",
"torch.nn.functional.ctc_loss"
] | 1.7.1 | EmreOzkose/k2 | 818b138b33eabe440601df8910a2b97ac088594b |
1.8 | import xitorch
import torch
import pytest
from xitorch._core.pure_function import get_pure_function, PureFunction
def func1(x, a, b):
return x * a + b
@torch.jit.script
def jitfunc1(x, a, b):
return x * a + b
class TorchModule(torch.nn.Module):
def __init__(self, a, b):
super().__init__()
self.a = a
self.b = b
def forward(self, x):
return x * self.a + self.b
class MyModule(xitorch.EditableModule):
def __init__(self, a, b):
self.a = a
self.b = b
def forward(self, x):
return x * self.a + self.b
def forward2(self, x):
return x + self.a
def getparamnames(self, methodname, prefix=""):
if methodname == "forward":
return [prefix + "a", prefix + "b"]
elif methodname == "forward2":
return [prefix + "a"]
else:
raise KeyError()
@pytest.mark.parametrize("fcn", [func1, jitfunc1])
def test_pure_function(fcn):
pfunc1 = get_pure_function(fcn)
assert isinstance(pfunc1, PureFunction)
assert len(pfunc1.objparams()) == 0
a = torch.tensor(2.0)
b = torch.tensor(1.0)
x = torch.tensor(1.5)
res = x * a + b
expr = lambda x, a, b: x * a + b
runtest_pfunc(pfunc1, (x, a, b), expr)
@pytest.mark.parametrize("clss", [TorchModule, MyModule])
def test_module_pfunc(clss):
a = torch.nn.Parameter(torch.tensor(2.0))
b = torch.nn.Parameter(torch.tensor(1.0))
x = torch.tensor(1.5)
module = clss(a, b)
pfunc = get_pure_function(module.forward)
expr = lambda x, a, b: x * a + b
runtest_pfunc(pfunc, (x,), expr)
@pytest.mark.parametrize("fcn", [func1, jitfunc1])
def test_make_sibling_fcn(fcn):
a = torch.tensor(2.0)
b = torch.tensor(1.0)
x = torch.tensor(1.5)
@xitorch.make_sibling(fcn)
def fminusx(x, a, b):
return fcn(x, a, b) - x
assert isinstance(fminusx, PureFunction)
assert len(fminusx.objparams()) == 0
expr = lambda x, a, b: x * a + b - x
runtest_pfunc(fminusx, (x, a, b), expr)
@pytest.mark.parametrize("clss", [TorchModule, MyModule])
def test_make_sibling_method(clss):
a = torch.nn.Parameter(torch.tensor(2.0))
b = torch.nn.Parameter(torch.tensor(1.0))
x = torch.tensor(1.5)
module = clss(a, b)
@xitorch.make_sibling(module.forward)
def fminusx(x):
return module.forward(x) - x
assert isinstance(fminusx, PureFunction)
assert len(fminusx.objparams()) == 2
expr = lambda x, a, b: x * a + b - x
runtest_pfunc(fminusx, (x,), expr)
def test_make_sibling_multiple():
a = torch.nn.Parameter(torch.tensor(2.0))
b = torch.nn.Parameter(torch.tensor(1.0))
x = torch.tensor(1.5)
module1 = TorchModule(a, b)
module2 = MyModule(a, b * 2)
@xitorch.make_sibling(module1.forward, module2.forward)
def newfcn(x):
return module1.forward(x) + module2.forward(x) * 2
assert isinstance(newfcn, PureFunction)
assert len(newfcn.objparams()) == 3 # not 4, because a is identical
expr = lambda x, a, b, b2: (x * a + b) + (x * a + b2) * 2
runtest_pfunc(newfcn, (x,), expr)
def test_make_sibling_multiple_nonunique_objs():
a = torch.nn.Parameter(torch.tensor(2.0))
b = torch.nn.Parameter(torch.tensor(1.0))
x = torch.tensor(1.5)
module = MyModule(a, b)
@xitorch.make_sibling(module.forward, module.forward2)
def newfcn(x):
return module.forward(x) + module.forward2(x) * 2
assert isinstance(newfcn, PureFunction)
assert len(newfcn.objparams()) == 2
expr = lambda x, a, b: (x * a + b) + (x + a) * 2
runtest_pfunc(newfcn, (x,), expr)
def runtest_pfunc(pfunc, params, expr):
objparams = pfunc.objparams()
# test original values
res0 = pfunc(*params)
res0_true = expr(*params, *objparams)
assert torch.allclose(res0, res0_true)
# test changing obj params
objparams1 = [p + 1.0 for p in objparams]
res1_true = expr(*params, *objparams1)
with pfunc.useobjparams(objparams1):
res1 = pfunc(*params)
assert torch.allclose(res1, res1_true)
# test recovery
res2 = pfunc(*params)
res2_true = res0_true
assert torch.allclose(res2, res2_true)
# test nested
objparams3 = [p * 2.0 for p in objparams]
res3_true = expr(*params, *objparams3)
with pfunc.useobjparams(objparams1):
assert torch.allclose(pfunc(*params), res1_true)
with pfunc.useobjparams(objparams3):
assert torch.allclose(pfunc(*params), res3_true)
assert torch.allclose(pfunc(*params), res1_true)
assert torch.allclose(pfunc(*params), res0_true)
| [
"torch.allclose",
"torch.tensor"
] | 1.8 | Jaikinator/xitorch | 053db8d27a7777baa7f572c2d37004e788ff4cb8 |
1.8 | import torch
import numpy as np
################### metropolis hastings ###################
def mh(logpfcn, x0, pparams, nsamples=10000, nburnout=5000, step_size=1.0, **unused):
"""
    Perform Metropolis-Hastings steps to collect samples
Keyword arguments
-----------------
nsamples: int
The number of samples to be collected
nburnout: int
The number of initial steps to be performed before collecting samples
step_size: float
The size of the steps to be taken
"""
x, dtype, device = _mh_sample(logpfcn, x0, pparams, nburnout, step_size, False)
samples = _mh_sample(logpfcn, x, pparams, nsamples, step_size, True)
weights = torch.zeros((samples.shape[0],), dtype=dtype, device=device) + (1. / samples.shape[0])
return samples, weights
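# Illustrative usage sketch (assumption; the log-density is hypothetical): draw samples
# from a standard normal with plain Metropolis-Hastings.
#   logp = lambda x: -0.5 * (x ** 2).sum()
#   samples, weights = mh(logp, torch.zeros(1), pparams=(), nsamples=2000, nburnout=500)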
def _mh_sample(logpfcn, x0, pparams, nsamples, step_size, collect_samples):
x = x0
logpx = logpfcn(x0, *pparams)
dtype = logpx.dtype
device = logpx.device
log_rand = torch.log(torch.rand((nsamples,), dtype=dtype, device=device))
if collect_samples:
samples = torch.empty((nsamples, *x0.shape), dtype=x.dtype, device=x.device)
for i in range(nsamples):
xnext = x + step_size * torch.randn_like(x)
logpnext = logpfcn(xnext, *pparams)
logpratio = logpnext - logpx
# decide if we should accept the next point
if logpratio > 0:
accept = True
else:
accept = log_rand[i] < logpratio
# if accept, move the x into the new points
if accept:
logpx = logpnext
x = xnext
if collect_samples:
samples[i] = x
# return the samples if collect_samples, otherwise just return the last x
if collect_samples:
return samples
else:
return x, dtype, device
def mhcustom(logpfcn, x0, pparams, nsamples=10000, nburnout=5000, custom_step=None, **unused):
"""
Perform Metropolis sampling using custom_step
Keyword arguments
-----------------
nsamples: int
The number of samples to be collected
nburnout: int
The number of initial steps to be performed before collecting samples
custom_step: callable or None
Callable with call signature ``custom_step(x, *pparams)`` to produce the
next samples (already decided whether to accept or not).
This argument is **required**. If ``None``, it will raise an error
"""
if custom_step is None:
raise RuntimeError("custom_step must be specified for mhcustom method")
if not hasattr(custom_step, "__call__"):
raise RuntimeError("custom_step option for mhcustom must be callable")
x, dtype, device = _mhcustom_sample(logpfcn, x0, pparams, nburnout, custom_step, False)
    xsamples = _mhcustom_sample(logpfcn, x, pparams, nsamples, custom_step, True)
wsamples = torch.zeros((xsamples.shape[0],), dtype=dtype, device=device) + (1. / xsamples.shape[0])
return xsamples, wsamples
def _mhcustom_sample(logpfcn, x0, pparams, nsamples, custom_step, collect_samples):
x = x0
logpx = logpfcn(x0, *pparams)
dtype = logpx.dtype
device = logpx.device
if collect_samples:
samples = torch.empty((nsamples, *x0.shape), dtype=x.dtype, device=x.device)
samples[0] = x
for i in range(1, nsamples):
x = custom_step(x, *pparams)
if collect_samples:
samples[i] = x
if collect_samples:
return samples
else:
return x, dtype, device
################### dummy sampler just for 1D ###################
def dummy1d(logpfcn, x0, pparams, nsamples=100, lb=-np.inf, ub=np.inf, **unused):
dtype = x0.dtype
device = x0.device
# convert the bound to finite range
ub = torch.tensor(ub, dtype=dtype, device=device)
lb = torch.tensor(lb, dtype=dtype, device=device)
tu = torch.atan(ub)
tl = torch.atan(lb)
assert torch.numel(x0) == 1, "This dummy operation can only be done in 1D space"
tlg, wlg = np.polynomial.legendre.leggauss(nsamples)
tlg = torch.tensor(tlg, dtype=dtype, device=device)
wlg = torch.tensor(wlg, dtype=dtype, device=device)
wlg *= 0.5 * (tu - tl)
tsamples = tlg * (0.5 * (tu - tl)) + (0.5 * (tu + tl)) # (n, *nx)
xsamples = torch.tan(tsamples)
wt = torch.cos(tsamples)**(-2.)
wp = torch.empty_like(wt)
for i in range(nsamples):
wp[i] = torch.exp(logpfcn(xsamples[i], *pparams))
wsamples = wt * wlg * wp
wsamples = wsamples / wsamples.sum()
return xsamples, wsamples
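# Note (added for clarity): dummy1d is deterministic quadrature rather than sampling; it
# maps Gauss-Legendre nodes through tan() to cover (lb, ub) and returns the node
# locations together with normalized weights, e.g.
#   xs, ws = dummy1d(lambda x: -0.5 * x ** 2, torch.tensor(0.0), pparams=(), nsamples=64)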
| [
"torch.zeros",
"torch.rand",
"torch.numel",
"torch.cos",
"torch.tan",
"torch.atan",
"torch.randn_like",
"torch.tensor",
"torch.empty",
"torch.empty_like"
] | 1.8 | Jaikinator/xitorch | 053db8d27a7777baa7f572c2d37004e788ff4cb8 |
1.3 | from __future__ import print_function, division
import sys
sys.path.append('core')
import argparse
import os
import cv2
import time
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from logger import TBLogger
from pathlib import Path
from torch.utils.data import DataLoader
from core.raft_v2_0 import RAFT
import core.datasets as datasets
from core.utils.flow_viz import flow_to_image
from core.utils.utils import dump_args_to_text
# exclude extremely large displacements
MAX_FLOW = 1000
SUM_FREQ = 100
CHKPT_FREQ = 5000
EVAL_FREQ = 1000
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def admm_loss(flow_preds, aux_vars, flow_gt, valid, fidelity_func = 'l1', rho = 0.0, params_dict = {}):
""" ADMM dervied Loss function defined over F,Q,C,beta of all iterations."""
n_predictions = len(flow_preds)
fidelity_loss = 0.0
reg_loss = 0.0
# extract admm auxiliary vars
q,c,betas = aux_vars
    # exclude invalid pixels and extremely large displacements
valid = (valid >= 0.5) & (flow_gt.abs().sum(dim=1) < MAX_FLOW)
for i in range(n_predictions):
i_weight = 0.8**(n_predictions - i - 1)
if fidelity_func == 'l1':
i_loss = (flow_preds[i] - flow_gt).abs()
elif fidelity_func == 'l2':
i_loss = (flow_preds[i] - flow_gt)**2
if rho > 0.0:
i_reg = 0.5 * rho * (q[i] - c[i] + betas[i])**2
else:
            i_reg = torch.zeros_like(i_loss)  # keep a tensor so .mean()/.item() below stay valid when rho == 0
fidelity_loss += (valid[:, None] * i_weight * i_loss).mean()
reg_loss += i_reg.mean()
flow_loss = fidelity_loss + reg_loss
epe = torch.sum((flow_preds[-1] - flow_gt)**2, dim=1).sqrt()
tv = total_variation(flow_preds[-1]).sum(dim=1)
epe = epe.view(-1)[valid.view(-1)]
tv = tv.view(-1)[valid.view(-1)]
metrics = {
'loss': flow_loss.item(),
'fid': fidelity_loss.item(),
'reg': reg_loss.item(),
'epe': epe.mean().item(),
'tv': tv.mean().item(),
'1px': (epe < 1).float().mean().item(),
'3px': (epe < 3).float().mean().item(),
'5px': (epe < 5).float().mean().item(),
}
return flow_loss, {**metrics,**params_dict}
def triplet_sequence_loss(flow_preds, q_preds, flow_gt, valid, fidelity_func = 'l1', q_weight = 0.0):
""" Loss function defined over sequence of flow predictions """
n_predictions = len(flow_preds)
flow_loss = 0.0
    # exclude invalid pixels and extremely large displacements
valid = (valid >= 0.5) & (flow_gt.abs().sum(dim=1) < MAX_FLOW)
for i in range(n_predictions):
i_weight = 0.8**(n_predictions - i - 1)
if fidelity_func == 'l1':
i_loss = (flow_preds[i] - flow_gt).abs()
elif fidelity_func == 'l2':
i_loss = (flow_preds[i] - flow_gt)**2
if q_weight > 0.0:
i_reg = q_weight * (flow_preds[i] - q_preds[i])**2
else:
i_reg = 0.0
flow_loss += i_weight * (valid[:, None] * (i_loss + i_reg)).mean()
epe = torch.sum((flow_preds[-1] - flow_gt)**2, dim=1).sqrt()
reg = torch.sum((flow_preds[-1] - q_preds[-1])**2, dim=1).sqrt()
epe = epe.view(-1)[valid.view(-1)]
reg = reg.view(-1)[valid.view(-1)]
metrics = {
'loss': flow_loss.item(),
'epe': epe.mean().item(),
'reg': reg.mean().item(),
'1px': (epe < 1).float().mean().item(),
'3px': (epe < 3).float().mean().item(),
'5px': (epe < 5).float().mean().item(),
}
return flow_loss, metrics
def sequence_loss(flow_preds, flow_gt, valid, sup_loss = 'l1', tv_weight = 0.0):
""" Loss function defined over sequence of flow predictions """
n_predictions = len(flow_preds)
flow_loss = 0.0
    # exclude invalid pixels and extremely large displacements
valid = (valid >= 0.5) & (flow_gt.abs().sum(dim=1) < MAX_FLOW)
for i in range(n_predictions):
i_weight = 0.8**(n_predictions - i - 1)
if sup_loss == 'l1':
i_loss = (flow_preds[i] - flow_gt).abs()
elif sup_loss == 'l2':
i_loss = (flow_preds[i] - flow_gt)**2
if tv_weight > 0.0:
i_tv = tv_weight * total_variation(flow_preds[i])
else:
i_tv = 0.0
flow_loss += i_weight * (valid[:, None] * (i_loss + i_tv)).mean()
epe = torch.sum((flow_preds[-1] - flow_gt)**2, dim=1).sqrt()
epe = epe.view(-1)[valid.view(-1)]
metrics = {
'loss': flow_loss.item(),
'epe': epe.mean().item(),
'1px': (epe < 1).float().mean().item(),
'3px': (epe < 3).float().mean().item(),
'5px': (epe < 5).float().mean().item(),
}
return flow_loss, metrics
def total_variation(flow):
Dx = torch.tensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]], dtype = torch.float, requires_grad = False).view(1,1,3,3).cuda()
Dy = torch.tensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], dtype = torch.float, requires_grad = False).view(1,1,3,3).cuda()
D = torch.cat((Dx, Dy), dim = 0)
u,v = torch.split(flow, 1, dim = 1)
Du = F.conv2d(u, D, padding = 1)
Dv = F.conv2d(v, D, padding = 1)
return torch.cat((Du.abs().sum(dim = 1, keepdim = True), Dv.sum(dim = 1, keepdim = True)), dim = 1)
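# Shape note (added for clarity): total_variation convolves each flow channel (u, v)
# with Sobel-style derivative filters, so a CUDA tensor of shape (B, 2, H, W) yields a
# (B, 2, H, W) map with one variation image per channel; the filters are built with
# .cuda(), so CPU inputs would fail here.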
def fetch_dataloader(args):
""" Create the data loader for the corresponding trainign set """
if args.dataset == 'chairs':
train_dataset = datasets.FlyingChairs(args, root=args.data_dir, image_size=args.curr_image_size)
elif args.dataset == 'things':
clean_dataset = datasets.SceneFlow(args, root=args.data_dir, image_size=args.curr_image_size, dstype='frames_cleanpass')
final_dataset = datasets.SceneFlow(args, root=args.data_dir, image_size=args.curr_image_size, dstype='frames_finalpass')
train_dataset = clean_dataset + final_dataset
elif args.dataset == 'sintel':
clean_dataset = datasets.MpiSintel(args, image_size=args.curr_image_size, dstype='clean')
final_dataset = datasets.MpiSintel(args, image_size=args.curr_image_size, dstype='final')
train_dataset = clean_dataset + final_dataset
elif args.dataset == 'kitti':
train_dataset = datasets.KITTI(args, image_size=args.curr_image_size, is_val=False)
gpuargs = {'num_workers': 4, 'drop_last' : True}
train_loader = DataLoader(train_dataset, batch_size=args.batch_size,
pin_memory=True, shuffle=True, **gpuargs)
if args.run_eval:
if args.eval_dataset == 'sintel':
valid_dataset = datasets.MpiSintel(args, image_size=args.curr_image_size, dstype='clean', root=args.eval_dir)
valid_loader = DataLoader(valid_dataset, batch_size=args.batch_size,
pin_memory=True, shuffle=True, **gpuargs)
else:
valid_dataset = None
valid_loader = None
print('Training with %d image pairs' % len(train_dataset))
if args.run_eval:
print('Validating with %d image pairs' % len(valid_dataset))
return train_loader, valid_loader
def fetch_optimizer(args, model):
""" Create the optimizer and learning rate scheduler """
optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.wdecay, eps=args.epsilon)
scheduler = optim.lr_scheduler.OneCycleLR(optimizer, args.lr, args.num_steps,
pct_start=args.pct_start, cycle_momentum=False, anneal_strategy='linear', final_div_factor=1.0)
return optimizer, scheduler
class Logger:
def __init__(self, initial_step, model, scheduler, name):
self.model = model
self.scheduler = scheduler
self.name = name
self.total_steps = initial_step
self.running_loss = {}
def _print_training_status(self):
metrics_data = [self.running_loss[k]/SUM_FREQ for k in sorted(self.running_loss.keys())]
training_str = "[{:6d}, {:10.7f}] ".format(self.total_steps+1, self.scheduler.get_lr()[0])
metrics_str = ("{:10.4f}, "*len(metrics_data)).format(*metrics_data)
name_str = self.name + " : "
# print the training status
print(name_str + training_str + metrics_str)
#for key in self.running_loss:
# self.running_loss[key] = 0.0
def push(self, metrics):
self.total_steps += 1
if self.total_steps % SUM_FREQ == 0:
self.running_loss = {}
for key in metrics:
if key not in self.running_loss:
self.running_loss[key] = 0.0
self.running_loss[key] += metrics[key]
if self.total_steps % SUM_FREQ == SUM_FREQ-1:
self._print_training_status()
def validate(args,model,valid_loader,tb_logger,step):
print('Evaluating...')
model.eval()
epe_list = []
with torch.no_grad():
for i_batch, data_blob in tqdm(enumerate(valid_loader)):
image1, image2, flow_gt, valid = [x.cuda() for x in data_blob]
flow_preds,_,_ = model(image1, image2, iters=args.eval_iters)
# measure epe in batch
valid = (valid >= 0.5) & (flow_gt.abs().sum(dim=1) < MAX_FLOW)
epe = torch.sum((flow_preds[-1] - flow_gt)**2, dim=1).sqrt()
epe = epe.view(-1)[valid.view(-1)].mean().item()
epe_list.append(epe)
# Save and print eval results
print('Eval Summary - dataset: {} | step: {} | av. epe: {}'.format(args.eval_dataset, step, np.mean(epe_list)))
tb_logger.scalar_summary('Eval EPE', np.mean(epe_list), step)
B = args.batch_size
# Eval Images vs. Pred vs. GT
gt_list = [np.array(x) for x in np.array(flow_gt.detach().cpu()).transpose(0,2,3,1).tolist()]
pr_list = [np.array(x) for x in np.array(flow_preds[-1].detach().cpu()).transpose(0,2,3,1).tolist()]
gt_list = list(map(flow_to_image, gt_list))
pr_list = list(map(flow_to_image, pr_list))
tb_logger.image_summary('Eval - src & tgt, pred, gt',
[np.concatenate([np.concatenate([i.squeeze(0), j.squeeze(0)], axis = 1), np.concatenate([k, l], axis = 1)], axis=0)
for i,j,k,l in zip( np.split(np.array(image1.data.cpu()).astype(np.uint8).transpose(0,2,3,1), B, axis = 0),
np.split(np.array(image2.data.cpu()).astype(np.uint8).transpose(0,2,3,1), B, axis = 0),
gt_list,
pr_list)
],
step)
# Eval Error
pred_batch = [np.array(x) for x in np.array(flow_preds[-1].detach().cpu()).transpose(0,2,3,1).tolist()]
gt_batch = [np.array(x) for x in np.array(flow_gt.detach().cpu()).transpose(0,2,3,1).tolist()]
err_batch = [(np.sum(np.abs(pr - gt)**2, axis=2,keepdims=True)**0.5).astype(np.uint8) for pr,gt in zip(pred_batch, gt_batch)]
err_vis = [np.concatenate([gt, pr, np.tile(err,(1,1,3))], axis=0) for gt, pr, err in zip(gt_list, pr_list,err_batch )]
tb_logger.image_summary(f'Eval - Error', err_vis, step)
return
def train(args):
model = RAFT(args)
model = nn.DataParallel(model)
print("Parameter Count: %d" % count_parameters(model))
if args.restore_ckpt is not None:
model.load_state_dict(torch.load(args.restore_ckpt))
if args.image_size != args.curr_image_size:
model.module.admm_init.update_matrices_for_eval(shape=[sh // 8 for sh in args.curr_image_size])
model.module.admm_block.u_solver.update_matrices_for_eval(shape=[sh // 8 for sh in args.curr_image_size])
model.module.admm_block.v_solver.update_matrices_for_eval(shape=[sh // 8 for sh in args.curr_image_size])
print('Updated D matrices. Train image size is {}, Eval image size is {}'.format(args.image_size, args.curr_image_size))
model.cuda()
if 'chairs' not in args.dataset:
model.module.freeze_bn()
train_loader, valid_loader = fetch_dataloader(args)
optimizer, scheduler = fetch_optimizer(args, model)
total_steps = args.initial_step
logger = Logger(args.initial_step, model, scheduler, args.name)
tb_logger = TBLogger(args.log_dir)
should_keep_training = True
while should_keep_training:
for i_batch, data_blob in enumerate(train_loader):
image1, image2, flow, valid = [x.cuda() for x in data_blob]
model.train()
optimizer.zero_grad()
# forward
flow_predictions, aux_vars, _ = model(image1, image2, iters=args.iters)
            # keep track of specific admm params (kept as an empty dict by default so
            # the 'admm' loss branch below always has a defined params_dict)
            admm_params_dict = {}
            #admm_params_dict = {'lamb': model.module.admm_block.SoftThresh.lamb.item(),
            #                    'eta': model.module.admm_block.UpdateMul.eta.item()}
# loss function
if args.loss_func == 'sequence':
loss, metrics = sequence_loss(flow_predictions, flow, valid, sup_loss=args.sup_loss, tv_weight = args.tv_weight)
elif args.loss_func == 'triplet':
loss, metrics = triplet_sequence_loss(flow_predictions, aux_vars, flow, valid, fidelity_func=args.sup_loss, q_weight = args.q_weight)
elif args.loss_func == 'admm':
loss, metrics = admm_loss(flow_predictions, aux_vars, flow, valid, fidelity_func=args.sup_loss, rho=args.admm_rho, params_dict=admm_params_dict)
# backward
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
optimizer.step()
scheduler.step()
total_steps += 1
logger.push(metrics)
if total_steps % SUM_FREQ == SUM_FREQ-1:
# Scalar Summaries
# ============================================================
tb_logger.scalar_summary('lr', optimizer.param_groups[0]['lr'], total_steps)
for key, value in logger.running_loss.items():
tb_logger.scalar_summary(key, value/SUM_FREQ, total_steps)
# Image Summaries
# ============================================================
if not args.run_eval:
B = args.batch_size
# Images vs. Pred vs. GT
gt_list = [np.array(x) for x in np.array(flow.detach().cpu()).transpose(0,2,3,1).tolist()]
pr_list = [np.array(x) for x in np.array(flow_predictions[-1].detach().cpu()).transpose(0,2,3,1).tolist()]
gt_list = list(map(flow_to_image, gt_list))
pr_list = list(map(flow_to_image, pr_list))
tb_logger.image_summary('src & tgt, pred, gt',
[np.concatenate([np.concatenate([i.squeeze(0), j.squeeze(0)], axis = 1), np.concatenate([k, l], axis = 1)], axis=0)
for i,j,k,l in zip( np.split(np.array(image1.data.cpu()).astype(np.uint8).transpose(0,2,3,1), B, axis = 0),
np.split(np.array(image2.data.cpu()).astype(np.uint8).transpose(0,2,3,1), B, axis = 0),
gt_list,
pr_list)
],
total_steps)
# Error
pred_batch = [np.array(x) for x in np.array(flow_predictions[-1].detach().cpu()).transpose(0,2,3,1).tolist()]
gt_batch = [np.array(x) for x in np.array(flow.detach().cpu()).transpose(0,2,3,1).tolist()]
err_batch = [(np.sum(np.abs(pr - gt)**2, axis=2,keepdims=True)**0.5).astype(np.uint8) for pr,gt in zip(pred_batch, gt_batch)]
err_vis = [np.concatenate([gt, pr, np.tile(err,(1,1,3))], axis=0) for gt, pr, err in zip(gt_list, pr_list,err_batch )]
tb_logger.image_summary(f'Error', err_vis, total_steps)
# Masks
Mx, My = aux_vars[1]
masks = [(255*np.concatenate([mx,my],axis=2)).astype(np.uint8).squeeze() for mx,my in zip(np.array(Mx.detach().cpu()).tolist(), np.array(My.detach().cpu()).tolist())]
tb_logger.image_summary(f'Masks', masks, total_steps)
if total_steps % EVAL_FREQ == EVAL_FREQ-1 and args.run_eval:
validate(args,model,valid_loader,tb_logger,total_steps)
if (total_steps % CHKPT_FREQ == CHKPT_FREQ-1 and args.save_checkpoints) is True:
PATH = args.log_dir + '/%d_%s.pth' % (total_steps+1, args.name)
torch.save(model.state_dict(), PATH)
if total_steps == args.num_steps:
should_keep_training = False
break
PATH = args.log_dir +'/%s.pth' % args.name
torch.save(model.state_dict(), PATH)
return PATH
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cuda_devices', default="0,1", help="choose which GPUs are available")
parser.add_argument('--name', default='bla', help="name your experiment")
parser.add_argument('--dataset', help="which dataset to use for training")
parser.add_argument('--data_dir', help='path to dataset')
parser.add_argument('--restore_ckpt', help="restore checkpoint")
parser.add_argument('--small', action='store_true', help='use small model')
parser.add_argument('--save_checkpoints', action='store_true', help='save checkpoints during training')
parser.add_argument('--log_dir', default = os.path.join(os.getcwd(), 'checkpoints', datetime.now().strftime('%Y%m%d-%H%M%S')))
parser.add_argument('--run_eval', action='store_true')
parser.add_argument('--eval_dataset', default='sintel', help='which dataset to use for eval')
parser.add_argument('--eval_dir', help='path to eval dataset')
parser.add_argument('--eval_iters',type=int, default=12)
parser.add_argument('--lr', type=float, default=0.00002)
parser.add_argument('--pct_start', type=float, default=0.2)
parser.add_argument('--final_div_factor', type=float, default=1.0)
parser.add_argument('--sup_loss', help='supervised loss term', default='l1')
parser.add_argument('--loss_func', default='sequence')
parser.add_argument('--q_weight', type=float, help='q term weight', default=0.4)
parser.add_argument('--tv_weight', type=float, help='total variation term weight', default=0.0)
parser.add_argument('--num_steps', type=int, default=100000)
parser.add_argument('--initial_step', type=int, default=0)
parser.add_argument('--batch_size', type=int, default=6)
parser.add_argument('--image_size', type=int, nargs='+', default=[384, 512])
parser.add_argument('--curr_image_size', type=int, nargs='+', default=[384, 512])
parser.add_argument('--admm_solver', action='store_true', help='apply admm block')
parser.add_argument('--admm_iters',type=int,default=1)
parser.add_argument('--admm_mask', action='store_true', help='apply mask within admm block')
parser.add_argument('--admm_lamb', type=float, default=0.4)
parser.add_argument('--learn_lamb', action='store_true')
parser.add_argument('--admm_rho', type=float, default=0.01)
parser.add_argument('--admm_eta', type=float, default=0.01)
parser.add_argument('--learn_eta', action='store_true')
parser.add_argument('--iters', type=int, default=12)
parser.add_argument('--wdecay', type=float, default=.00005)
parser.add_argument('--epsilon', type=float, default=1e-8)
parser.add_argument('--clip', type=float, default=1.0)
parser.add_argument('--dropout', type=float, default=0.0)
args = parser.parse_args()
#torch.manual_seed(1234)
#np.random.seed(1234)
# scale learning rate and batch size by number of GPUs
os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda_devices
num_gpus = torch.cuda.device_count()
args.batch_size = args.batch_size * num_gpus
args.lr = args.lr * num_gpus
args.num_gpus = num_gpus
if not os.path.isdir(args.log_dir) and args.save_checkpoints:
os.mkdir(args.log_dir)
print("Checkpoints will be saved to " + args.log_dir)
dump_args_to_text(args, args.log_dir)
train(args)
| [
"torch.cat",
"torch.optim.lr_scheduler.OneCycleLR",
"torch.split",
"torch.no_grad",
"torch.cuda.device_count",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.load",
"torch.nn.functional.conv2d",
"torch.nn.DataParallel",
"torch.sum"
] | 1.3.1 | gallif/raft | 11a35ff5ede31918a360eca2f1481bc5fec9b5e5 |
1.8 | import torch
import pandas as pd
import sys
import numpy as np
from tqdm import tqdm
from model_arch.discriminator import DiscriminatorLaw
from dfencoder.autoencoder import AutoEncoder
from dfencoder.dataframe import EncoderDataFrame
from utils.evaluate_func import evaluate_pred, evaluate_distribution, evaluate_fairness
from utils.helpers import preprocess_dataset
from utils.helpers import setup_logging
from utils.helpers import load_config
from utils.helpers import features_setting
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import GradientBoostingRegressor
import argparse
# def load_aemodel(model, path, df):
# print("Path {}".format(path))
# print("Model ", model)
# print(df)
# model.build_model(df.copy())
# model.load_state_dict(path)
# model.eval()
# return model
def get_predict(ae_model, generator, discriminator, df, normal_features, full_features, l = ''):
GD_prediction = 'GD_prediction' + l
df_generator = df[normal_features].copy()
df_autoencoder = df[full_features].copy()
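"""Autoencoder + Linear regression"""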
Z = ae_model.get_representation(df_autoencoder)
Z = Z.cpu().detach().numpy()
reg = LinearRegression()
reg.fit(Z, df['ZFYA'].values)
y_pred = reg.predict(Z)
df["AL_prediction"] = y_pred
"""Generator + Linear regression"""
Z = generator.custom_forward(df_generator)
Z = Z.cpu().detach().numpy()
reg = LinearRegression()
reg.fit(Z, df['ZFYA'].values)
y_pred = reg.predict(Z)
df["GL_prediction"] = y_pred
"""Generator + Discriminator"""
Z = generator.custom_forward(df_generator)
predictor_agnostic = discriminator(Z)
y_pred = predictor_agnostic.cpu().detach().numpy().reshape(-1)
df[GD_prediction] = y_pred
return df
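# --- Hedged illustration (not part of the original evaluation script) ---
# Each baseline in get_predict follows the same recipe: obtain a latent representation Z,
# then fit and score a simple predictor on it. The toy sketch below shows that recipe on
# random data; Z and y are placeholders, not the real law-school features.
def _example_fit_on_representation():
    rng = np.random.default_rng(0)
    Z = rng.normal(size=(100, 8))                             # stand-in for a learned representation
    y = Z @ rng.normal(size=8) + 0.1 * rng.normal(size=100)   # stand-in target (e.g. ZFYA)
    reg = LinearRegression().fit(Z, y)
    return reg.predict(Z)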
if __name__ == "__main__":
"""Parsing argument"""
# parser = argparse.ArgumentParser()
# parser.add_argument('--lambda_weight', type=str, default="0.1 0.5 1 1.5 2 2.5 3 3.5 4 4.5 5 5.5 6 6.5 7 7.5 8 8.5 9 9.5 10 20 30 40 50")
# parser.add_argument('--run_lambda', action='store_true')
# args = parser.parse_args()
# run_lambda = args.run_lambda
# lambda_weight = args.lambda_weight
# print(lambda_weight)
# print(lambda_weight.split(" "))
# lambda_weight = [float(x) for x in lambda_weight.split(' ')]
# lambda_weight = [str(x) for x in lambda_weight]
# if run_lambda:
# print("Run lambda with lambda ", lambda_weight)
# else:
# print("Run normal flow")
"""Device"""
if torch.cuda.is_available():
dev = "cuda:0"
else:
dev = "cpu"
device = torch.device(dev)
"""Load configuration"""
config_path = "/home/trduong/Data/counterfactual_fairness_game_theoric/configuration.yml"
conf = load_config(config_path)
"""Set up logging"""
logger = setup_logging(conf['log_train_law'])
"""Load data"""
data_path = conf['data_law']
df = pd.read_csv(data_path)
"""Setup features"""
data_name = "law"
dict_ = features_setting("law")
sensitive_features = dict_["sensitive_features"]
normal_features = dict_["normal_features"]
categorical_features = dict_["categorical_features"]
continuous_features = dict_["continuous_features"]
full_features = dict_["full_features"]
target = dict_["target"]
selected_race = ['White', 'Black']
df = df[df['race'].isin(selected_race)]
df = df.reset_index(drop = True)
"""Preprocess data"""
df = preprocess_dataset(df, [], categorical_features)
# df['ZFYA'] = (df['ZFYA']-df['ZFYA'].mean())/df['ZFYA'].std()
df = df[['LSAT', 'UGPA', 'sex', 'race', 'ZFYA']]
_, df_test = train_test_split(df, test_size=0.2, random_state=0)
"""Load auto encoder"""
df_autoencoder = df_test[full_features].copy()
emb_size = 128
ae_model = AutoEncoder(
input_shape=df_test[full_features].shape[1],
encoder_layers=[512, 512, emb_size], # model architecture
decoder_layers=[], # decoder optional - you can create bottlenecks if you like
activation='relu',
swap_p=0.2, # noise parameter
lr=0.01,
lr_decay=.99,
batch_size=512, # 512
verbose=False,
optimizer='sgd',
scaler='gauss_rank', # gauss rank scaling forces your numeric features into standard normal distributions
)
ae_model.to(device)
# ae_model = load_aemodel(ae_model, conf['law_encoder'], df_test_autoencoder)
ae_model.build_model(df_test[full_features].copy())
ae_model.load_state_dict(torch.load(conf['law_encoder']))
ae_model.eval()
"""Load generator"""
emb_size = 128
df_generator = df_test[normal_features]
generator = AutoEncoder(
input_shape = df_generator.shape[1],
encoder_layers=[256, 256, emb_size], # model architecture
decoder_layers=[], # decoder optional - you can create bottlenecks if you like
encoder_dropout = 0.5,
decoder_dropout = 0.5,
activation='tanh',
swap_p=0.2, # noise parameter
lr=0.0001,
lr_decay=.99,
batch_size=512, # 512
verbose=False,
optimizer='sgd',
scaler='gauss_rank', # gauss rank scaling forces your numeric features into standard normal distributions
)
generator.to(device)
generator.build_model(df[normal_features])
generator.eval()
"""Load discriminator"""
discriminator = DiscriminatorLaw(emb_size)
discriminator.to(device)
discriminator.load_state_dict(torch.load(conf['law_discriminator']))
discriminator.eval()
# if run_lambda:
# for l in lambda_weight:
# print("Lambda ", l)
# generator.load_state_dict(torch.load(conf["lambda_law_generator"].format(l)))
# discriminator.load_state_dict(torch.load(conf["lambda_law_discriminator"].format(l)))
# df_test = get_predict(ae_model, generator, df_test, normal_features, full_features, l)
# else:
generator.load_state_dict(torch.load(conf['law_generator']))
discriminator.load_state_dict(torch.load(conf['law_discriminator']))
df_test = get_predict(ae_model, generator, discriminator, df_test, normal_features, full_features)
# if run_lambda:
# df_test.to_csv(conf["ivr_law_lambda"], index = False)
# else:
df_test.to_csv(conf["ivr_law"], index = False)
"""Autoencoder + Linear regression"""
# Z = ae_model.get_representation(df_autoencoder)
# Z = Z.cpu().detach().numpy()
# reg = LinearRegression()
# reg.fit(Z, df['ZFYA'].values)
# y_pred = reg.predict(Z)
# df["AL_prediction"] = y_pred
"""Generator + Linear regression"""
# Z = generator.custom_forward(df_generator)
# Z = Z.cpu().detach().numpy()
# reg = LinearRegression()
# reg.fit(Z, df['ZFYA'].values)
# y_pred = reg.predict(Z)
# df["GL_prediction"] = y_pred
"""Generator + Discriminator"""
# Z = generator.custom_forward(df_generator)
# predictor_agnostic = discriminator_agnostic(Z)
# y_pred = predictor_agnostic.cpu().detach().numpy().reshape(-1)
# df["GD_prediction"] = y_pred
| [
"torch.device",
"torch.cuda.is_available",
"torch.load"
] | 1.8.1 | tridungduong16/counterfactual_fairness_game_theoretic | 794d5224f9c656c06e5eb197ebbe1875f1856e7e |
1.8 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 4 20:55:24 2021
@author: trduong
"""
import pandas as pd
import numpy as np
import logging
import yaml
import pyro
import torch
import pyro.distributions as dist
import sys
from sklearn.linear_model import LinearRegression
from sklearn import preprocessing
from torch import nn
from pyro.nn import PyroModule
from tqdm import tqdm
def GroundTruthModel():
exo_dist = {
'Nrace': dist.Bernoulli(torch.tensor(0.75)),
'Nsex': dist.Bernoulli(torch.tensor(0.5)),
'Nknowledge': dist.Normal(torch.tensor(0.), torch.tensor(1.))
}
R = pyro.sample("Race", exo_dist['Nrace'])
S = pyro.sample("Sex", exo_dist['Nsex'])
K = pyro.sample("Knowledge", exo_dist['Nknowledge'])
G = pyro.sample("UGPA", dist.Normal(K + R + S, 0.1))
L = pyro.sample("LSAT", dist.Normal(K + R + S, 0.1))
F = pyro.sample("ZFYA", dist.Normal(K + R + S, 0.1))
def infer_knowledge(df, loop):
"""
:param df: DESCRIPTION
:type df: TYPE
:return: DESCRIPTION
:rtype: TYPE
"""
knowledge = []
for i in tqdm(range(len(df))):
conditioned = pyro.condition(GroundTruthModel, data={"UGPA": df["UGPA"][i], "LSAT": df["LSAT"][i]})
posterior = pyro.infer.Importance(conditioned, num_samples=loop).run()
post_marginal = pyro.infer.EmpiricalMarginal(posterior, "Knowledge")
post_samples = [post_marginal().item() for _ in range(loop)]
post_unique, post_counts = np.unique(post_samples, return_counts=True)
mean = np.mean(post_samples)
knowledge.append(mean)
return knowledge
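# --- Hedged illustration (not part of the original script) ---
# Single-row version of the inference loop above: condition the ground-truth SCM on one
# made-up observation and estimate the posterior mean of 'Knowledge' with importance sampling.
def _example_single_inference(num_samples=50):
    observed = {"UGPA": torch.tensor(0.3), "LSAT": torch.tensor(-0.1)}   # illustrative values only
    conditioned = pyro.condition(GroundTruthModel, data=observed)
    posterior = pyro.infer.Importance(conditioned, num_samples=num_samples).run()
    marginal = pyro.infer.EmpiricalMarginal(posterior, "Knowledge")
    return np.mean([marginal().item() for _ in range(num_samples)])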
# def infer_knowledge_test(df):
# """
# :param df: DESCRIPTION
# :type df: TYPE
# :return: DESCRIPTION
# :rtype: TYPE
# """
# knowledge = []
# for i in tqdm(range(len(df))):
# conditioned = pyro.condition(GroundTruthModel, data={"UGPA": df["UGPA"][i], "LSAT": df["LSAT"][i]})
# posterior = pyro.infer.Importance(conditioned, num_samples=200).run()
# post_marginal = pyro.infer.EmpiricalMarginal(posterior, "Knowledge")
# post_samples = [post_marginal().item() for _ in range(200)]
# post_unique, post_counts = np.unique(post_samples, return_counts=True)
# mean = np.mean(post_samples)
# knowledge.append(mean)
# return knowledge
if __name__ == "__main__":
"""Load configuration"""
with open("/home/trduong/Data/counterfactual_fairness_game_theoric/configuration.yml", 'r') as stream:
try:
conf = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
"""Set up logging"""
logger = logging.getLogger('genetic')
file_handler = logging.FileHandler(filename=conf['log_law'])
stdout_handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
file_handler.setFormatter(formatter)
stdout_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(stdout_handler)
logger.setLevel(logging.DEBUG)
"""Load data"""
data_path = conf['data_law']
df = pd.read_csv(data_path)
"""Setup features"""
sensitive_feature = ['race', 'sex']
normal_feature = ['LSAT', 'UGPA']
categorical_feature = ['race', 'sex']
full_feature = sensitive_feature + normal_feature
target = 'ZFYA'
selected_race = ['White', 'Black']
df = df[df['race'].isin(selected_race)]
df = df.reset_index(drop = True)
"""Preprocess data"""
df['LSAT'] = (df['LSAT']-df['LSAT'].mean())/df['LSAT'].std()
df['UGPA'] = (df['UGPA']-df['UGPA'].mean())/df['UGPA'].std()
df['ZFYA'] = (df['ZFYA']-df['ZFYA'].mean())/df['ZFYA'].std()
le = preprocessing.LabelEncoder()
df['race'] = le.fit_transform(df['race'])
df['sex'] = le.fit_transform(df['sex'])
"""Full model"""
logger.debug('Full model')
reg = LinearRegression().fit(df[full_feature], df['ZFYA'])
y_pred = reg.predict(df[full_feature].values)
df['full_prediction'] = y_pred.reshape(-1)
"""Unaware model"""
logger.debug('Unware model')
reg = LinearRegression().fit(df[normal_feature], df['ZFYA'])
y_pred = reg.predict(df[normal_feature].values)
df['unaware_prediction'] = y_pred.reshape(-1)
"""Counterfactual fairness model"""
for i in ['LSAT', 'UGPA', 'ZFYA']:
df[i] = [torch.tensor(x) for x in df[i].values]
logger.debug('Counterfactual fairness model')
df_sample = df.sample(frac=0.2, replace=True, random_state=1).reset_index()
knowledged = infer_knowledge(df_sample, loop=5)
knowledged = np.array(knowledged).reshape(-1,1)
reg = LinearRegression().fit(knowledged, df_sample['ZFYA'])
knowledged = infer_knowledge(df, loop =5)
knowledged = np.array(knowledged).reshape(-1,1)
y_pred = reg.predict(knowledged)
df['cf_prediction'] = y_pred.reshape(-1)
df['ZFYA'] = [x.detach().numpy() for x in df['ZFYA']]
df['LSAT'] = [x.detach().numpy() for x in df['LSAT']]
df['UGPA'] = [x.detach().numpy() for x in df['UGPA']]
"""Output the result"""
df = df[['race', 'sex', 'LSAT', 'UGPA', 'ZFYA', 'full_prediction', 'unaware_prediction', 'cf_prediction']]
df.to_csv(conf['result_law'], index = False)
sys.modules[__name__].__dict__.clear()
| [
"torch.tensor"
] | 1.8.1 | tridungduong16/counterfactual_fairness_game_theoretic | 794d5224f9c656c06e5eb197ebbe1875f1856e7e |
1.6 | # Copyright (c) Facebook, Inc. and its affiliates.
# Initial version was taken from https://github.com/ChenRocks/UNITER/
# and adapted for MMF.
import copy
import logging
import random
from collections import MutableMapping, namedtuple
from dataclasses import asdict, dataclass, field
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from mmf.common.registry import registry
from mmf.models import BaseModel
from mmf.modules.losses import MMFLoss
from mmf.utils.general import retry_n
from omegaconf import MISSING, DictConfig, OmegaConf
from torch import Tensor, nn
from transformers.modeling_bert import BertConfig, BertEmbeddings, BertModel
NUM_RETRIES = 6
EMPTY_CONFIG = OmegaConf.create({})
DEFAULT_PRETRAINING_HEAD_CONFIGS = {
"mlm": {"type": "mlm"},
"itm": {"type": "itm"},
"mrc": {"type": "mrc"},
"mrfr": {"type": "mrfr"},
"wra": {"type": "wra"},
}
DEFAULT_PRETRAINING_TASKS = "mlm,itm,mrc,mrfr,wra"
logger = logging.getLogger()
class UNITERImageEmbeddings(nn.Module):
"""
Image Embeddings used by UNITER.
Code modified from https://github.com/ChenRocks/UNITER/blob/master/model/model.py
Performs a linear projection then normalization over image and position features.
"""
def __init__(
self,
img_dim: int = 2048,
hidden_size: int = 768,
eps: float = 1e-12,
hidden_dropout_prob: float = 0,
pos_dim: int = 7,
):
super().__init__()
self.img_linear = nn.Linear(img_dim, hidden_size)
self.img_layer_norm = nn.LayerNorm(hidden_size, eps=eps)
self.pos_layer_norm = nn.LayerNorm(hidden_size, eps=eps)
self.pos_linear = nn.Linear(pos_dim, hidden_size)
self.mask_embedding = nn.Embedding(2, img_dim, padding_idx=0)
self.final_layer_norm = nn.LayerNorm(hidden_size, eps=eps)
self.dropout = nn.Dropout(hidden_dropout_prob)
def forward(
self,
img_feat: Tensor,
img_pos_feat: Tensor,
type_embeddings: Tensor,
img_masks: Optional[Tensor] = None,
) -> Tensor:
if img_masks is not None:
self.mask_embedding.weight.data[0, :].fill_(0)
mask = self.mask_embedding(img_masks.long())
img_feat = img_feat + mask
transformed_im = self.img_layer_norm(self.img_linear(img_feat))
transformed_pos = self.pos_layer_norm(self.pos_linear(img_pos_feat))
embeddings = transformed_im + transformed_pos + type_embeddings
embeddings = self.final_layer_norm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
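# --- Hedged illustration (not in the upstream file) ---
# Quick shape check for UNITERImageEmbeddings with its default sizes. All tensors below are
# random placeholders; `type_embeddings` would normally come from BERT's token-type embeddings.
def _example_image_embedding_shapes():
    emb = UNITERImageEmbeddings(img_dim=2048, hidden_size=768)
    img_feat = torch.randn(2, 10, 2048)          # (batch, num_regions, img_dim)
    img_pos_feat = torch.randn(2, 10, 7)         # (batch, num_regions, pos_dim)
    type_embeddings = torch.zeros(2, 10, 768)    # placeholder token-type embeddings
    out = emb(img_feat, img_pos_feat, type_embeddings)
    return out.shape                             # torch.Size([2, 10, 768])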
class UNITERModelBase(nn.Module):
"""UNITER embedding and transformer trunk for V-L modeling.
Modified from https://github.com/ChenRocks/UNITER/ for MMF.
https://arxiv.org/pdf/1909.11740.pdf
By default, this model uses the pretrained bert-base-uncased
transformer trunk from huggingface.
To train on this model through MMF, look at the UNITER model,
which supports pretraining and finetuning of UNITERModelBase
with configurable heads.
For an example of using this model standalone,
take a look at its unit test in `test_uniter.py`.
"""
def __init__(
self,
random_init: bool = False,
bert_model_name: str = "bert-base-uncased",
img_dim: int = 2048,
hidden_size: int = 768,
hidden_dropout_prob: float = 0,
text_embeddings: DictConfig = EMPTY_CONFIG,
encoder: DictConfig = EMPTY_CONFIG,
):
super().__init__()
bert_config = retry_n(
NUM_RETRIES,
BertConfig.from_pretrained,
bert_model_name,
**OmegaConf.to_container(text_embeddings),
)
self.text_embeddings = BertEmbeddings(bert_config)
self.img_embeddings = UNITERImageEmbeddings(
img_dim=img_dim,
hidden_size=hidden_size,
hidden_dropout_prob=hidden_dropout_prob,
)
bert_model_name = bert_model_name
hf_config = retry_n(
NUM_RETRIES,
BertConfig.from_pretrained,
bert_model_name,
**OmegaConf.to_container(encoder),
)
if random_init:
bert_model = BertModel(hf_config)
else:
bert_model = retry_n(
NUM_RETRIES,
BertModel.from_pretrained,
bert_model_name,
config=hf_config,
)
self.encoder = bert_model.encoder
self.pooler = bert_model.pooler
def _compute_txt_embeddings(
self,
input_ids: Tensor,
position_ids: Tensor,
token_type_ids: Optional[Tensor] = None,
) -> Tensor:
output = self.text_embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
)
return output
def _compute_img_embeddings(
self,
img_feat: Tensor,
img_pos_feat: Tensor,
img_masks: Optional[Tensor] = None,
img_type_ids: Optional[Tensor] = None,
) -> Tensor:
if img_type_ids is None:
img_type_ids = torch.ones_like(img_feat[:, :, 0].long())
img_type_embeddings = self.text_embeddings.token_type_embeddings(img_type_ids)
output = self.img_embeddings(
img_feat, img_pos_feat, img_type_embeddings, img_masks
)
return output
def _compute_img_txt_embeddings(
self,
input_ids: Tensor,
position_ids: Tensor,
img_feat: Tensor,
img_pos_feat: Tensor,
img_masks: Optional[Tensor] = None,
txt_type_ids: Optional[Tensor] = None,
img_type_ids: Optional[Tensor] = None,
) -> Tensor:
txt_emb = self._compute_txt_embeddings(input_ids, position_ids, txt_type_ids)
img_emb = self._compute_img_embeddings(
img_feat, img_pos_feat, img_masks, img_type_ids
)
embedding_output = torch.cat([txt_emb, img_emb], dim=1)
return embedding_output
def forward(
self,
input_ids: Tensor,
position_ids: Tensor,
img_feat: Tensor,
img_pos_feat: Tensor,
attention_mask: Tensor,
img_masks: Optional[Tensor] = None,
txt_type_ids: Optional[Tensor] = None,
img_type_ids: Optional[Tensor] = None,
input_modality: str = "image-text",
) -> Tuple[Tensor, Tensor]:
# compute self-attention mask
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
# https://github.com/huggingface/transformers/issues/542 for details
# on why we add very negative values to attention scores
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# embedding layer
if input_modality == "image":
# image only
embedding_output = self._compute_img_embeddings(
img_feat, img_pos_feat, img_masks, img_type_ids
)
elif input_modality == "text":
# text only
embedding_output = self._compute_txt_embeddings(
input_ids, position_ids, txt_type_ids
)
else:
embedding_output = self._compute_img_txt_embeddings(
input_ids,
position_ids,
img_feat,
img_pos_feat,
img_masks,
txt_type_ids,
img_type_ids,
)
encoded_layers = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
output_hidden_states=True,
)
layers = namedtuple("TransformerOutput", ["final_layer", "hidden_layers"])
return layers(encoded_layers[0], encoded_layers[1])
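# --- Hedged illustration (not in the upstream file) ---
# Behaviour of the additive attention mask built in UNITERModelBase.forward: positions kept
# get 0 added to their attention scores, padded positions get -10000 (effectively -inf before
# the softmax). The mask values below are toy inputs.
def _example_extended_attention_mask():
    attention_mask = torch.tensor([[1, 1, 1, 0, 0]], dtype=torch.float)
    extended = attention_mask.unsqueeze(1).unsqueeze(2)
    extended = (1.0 - extended) * -10000.0
    return extended   # shape (1, 1, 1, 5): [0, 0, 0, -10000, -10000]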
def _infer_with_heads(
processed_sample_list: Dict[str, Tensor],
uniter_model: Any,
heads: Dict[str, Any],
losses: Dict[str, Any],
) -> Dict[str, Tensor]:
sequence_output = uniter_model(
processed_sample_list["input_ids"],
processed_sample_list["position_ids"],
processed_sample_list["image_feat"],
processed_sample_list["img_pos_feat"],
processed_sample_list["attention_mask"],
img_masks=processed_sample_list["image_mask"],
).final_layer
dataset_name = processed_sample_list["dataset_name"]
task = processed_sample_list.get("task", dataset_name)
outputs = heads[task](sequence_output, processed_sample_list=processed_sample_list)
if isinstance(outputs, MutableMapping) and "losses" in outputs:
return outputs
logits = outputs
if isinstance(outputs, MutableMapping) and "scores" in outputs:
logits = outputs["scores"]
logits = logits.contiguous().view(-1, logits.size(-1))
output = losses[dataset_name](processed_sample_list, {"scores": logits})
return {"losses": output, "scores": logits}
class UNITERForClassification(nn.Module):
"""UNITER wrapper for classification
Example params:
head_configs = {"vqa2": {"type": "mlp", "num_labels": 3129}}
losses_configs = {"vqa2": "logit_bce"}
tasks = "vqa2"
"""
def __init__(
self,
head_configs: Dict,
loss_configs: Dict,
tasks: Union[str, List],
random_init: bool = False,
bert_model_name: str = "bert-base-uncased",
img_dim: int = 2048,
hidden_size: int = 768,
hidden_dropout_prob: float = 0,
text_embeddings: Any = EMPTY_CONFIG,
encoder: Any = EMPTY_CONFIG,
):
super().__init__()
self.loss_configs = loss_configs
self.uniter = UNITERModelBase(
random_init=random_init,
bert_model_name=bert_model_name,
img_dim=img_dim,
hidden_size=hidden_size,
hidden_dropout_prob=hidden_dropout_prob,
text_embeddings=text_embeddings,
encoder=encoder,
)
self.heads = nn.ModuleDict()
self.tasks = tasks
if isinstance(self.tasks, str):
self.tasks = self.tasks.split(",")
for task in self.tasks:
assert task in head_configs, (
f"Task {task} is specified in your model configs"
+ " but there is no head configured for the task. "
+ "Head configs can be added under model_config.heads "
+ "in your yaml configs. Either remove this task if UNITER"
+ " is not meant to run on a dataset named {task}"
+ " or add a head config."
)
head_config = head_configs[task]
head_type = head_config.get("type", "mlp")
head_class = registry.get_transformer_head_class(head_type)
self.heads[task] = head_class(head_config)
self.init_losses()
def init_losses(self):
self.losses = nn.ModuleDict()
for task in self.tasks:
if task not in self.loss_configs:
logger.warning(
f"No loss defined for {task}. Head is expected "
+ "to return dict with 'losses'"
)
continue
loss_config = self.loss_configs[task]
self.losses[task] = MMFLoss(loss_config)
def forward(self, processed_sample_list: Dict[str, Tensor]) -> Dict[str, Tensor]:
return _infer_with_heads(
processed_sample_list, self.uniter, self.heads, self.losses
)
class UNITERForPretraining(nn.Module):
"""UNITER wrapper for pretraining"""
def __init__(
self,
head_configs: Optional[Dict] = None,
loss_configs: Optional[Dict] = None,
tasks: Union[List, str] = DEFAULT_PRETRAINING_TASKS,
mask_probability: float = 0,
random_init: bool = False,
bert_model_name: str = "bert-base-uncased",
img_dim: int = 2048,
hidden_size: int = 768,
hidden_dropout_prob: float = 0,
text_embeddings: Any = EMPTY_CONFIG,
encoder: Any = EMPTY_CONFIG,
):
super().__init__()
if head_configs is None:
head_configs = copy.deepcopy(DEFAULT_PRETRAINING_HEAD_CONFIGS)
if loss_configs is None:
loss_configs = {}
self.loss_configs = loss_configs
self.mask_probability = mask_probability
self.uniter = UNITERModelBase(
random_init=random_init,
bert_model_name=bert_model_name,
img_dim=img_dim,
hidden_size=hidden_size,
hidden_dropout_prob=hidden_dropout_prob,
text_embeddings=text_embeddings,
encoder=encoder,
)
self.heads = nn.ModuleDict()
self.tasks = tasks
if isinstance(self.tasks, str):
self.tasks = self.tasks.split(",")
for task in self.tasks:
head_config = head_configs[task]
head_type = head_config.get("type", "mlp")
head_class = registry.get_transformer_head_class(head_type)
if head_type == "mrfr":
self.heads[task] = head_class(
self.uniter.img_embeddings.img_linear.weight, **head_config
)
elif head_type in ("itm", "mlm", "mlp"):
self.heads[task] = head_class(head_config)
else:
self.heads[task] = head_class(**head_config)
self.init_losses()
def init_losses(self):
self.losses = nn.ModuleDict()
for task in self.tasks:
if task not in self.loss_configs:
logger.warning(
f"No loss defined for {task}. Head is expected "
+ "to return dict with 'losses'"
)
continue
loss_config = self.loss_configs[task]
self.losses[task] = MMFLoss(loss_config)
def forward(self, processed_sample_list: Dict[str, Tensor]) -> Dict[str, Tensor]:
assert "is_correct" in processed_sample_list, (
"UNITER pretraining requires mismatched captions."
+ " Please add 'false_caption': true under dataset_config in your "
+ "yaml configs."
)
self._process_sample_list_for_pretraining(processed_sample_list)
task = processed_sample_list["task"]
if task == "mlm":
self._preprocess_mlm(processed_sample_list)
elif task == "itm":
self._preprocess_itm(processed_sample_list)
elif task == "mrc":
self._preprocess_mrc(processed_sample_list)
elif task == "mrfr":
self._preprocess_mrfr(processed_sample_list)
elif task == "wra":
self._preprocess_wra(processed_sample_list)
else:
raise ValueError(f"Task {task} is not supported for pretraining!")
return _infer_with_heads(
processed_sample_list, self.uniter, self.heads, self.losses
)
def _process_sample_list_for_pretraining(
self, processed_sample_list: Dict[str, Tensor]
):
task = processed_sample_list["task"]
if task in ("mrfr", "mrc"):
self._add_image_feat_masked(processed_sample_list)
# mrc assumes cls prob is a key in sample list,
# having cls prob as a key in sample list makes it easier to
# mask negative pairs due to mismatched captions
processed_sample_list["cls_prob"] = torch.tensor(
processed_sample_list["image_info_0"]["cls_prob"]
)
if task not in ("wra", "itm"):
self._remove_mismatched_captions(processed_sample_list)
def _add_image_feat_masked(self, processed_sample_list: Dict[str, Tensor]):
img_feat_masked = torch.clone(processed_sample_list["image_feat"])
num_feat = img_feat_masked.size(1)
img_masks = [
self._get_img_mask(self.mask_probability, num_feat)
for _ in range(img_feat_masked.size(0))
]
img_masks = torch.tensor(img_masks).bool().to(img_feat_masked.device)
img_masks_ext = img_masks.unsqueeze(-1).expand_as(img_feat_masked)
processed_sample_list["image_feat_masked"] = img_feat_masked.data.masked_fill(
img_masks_ext, 0
)
processed_sample_list["image_mask"] = img_masks
def _get_img_mask(self, mask_prob: float, num_bb: int) -> Tensor:
img_mask = list(map(bool, np.random.binomial(1, mask_prob, num_bb)))
if not any(img_mask):
# at least mask 1
img_mask[random.choice(range(num_bb))] = True
return img_mask
def _preprocess_mlm(self, processed_sample_list: Dict[str, Tensor]):
assert "lm_label_ids" in processed_sample_list
assert "input_ids_masked" in processed_sample_list
ignore_index = self.heads["mlm"].config.ignore_index
mlm_labels = {}
mlm_labels["text"] = processed_sample_list["lm_label_ids"]
mlm_labels["image"] = torch.full(
processed_sample_list["image_feat"].shape[:2],
fill_value=ignore_index,
dtype=torch.long,
device=mlm_labels["text"].device,
)
mlm_labels["combined_labels"] = torch.cat(
[mlm_labels["text"], mlm_labels["image"]], dim=-1
)
processed_sample_list["mlm_labels"] = mlm_labels
processed_sample_list["input_ids"] = processed_sample_list["input_ids_masked"]
def _preprocess_itm(self, processed_sample_list: Dict[str, Tensor]):
assert "is_correct" in processed_sample_list
processed_sample_list["itm_labels"] = {
"is_correct": processed_sample_list["is_correct"]
}
def _get_feature_mask(self, image_mask, sentence_len):
bs = image_mask.size(0)
padding_for_txt = torch.zeros((bs, sentence_len)).to(image_mask)
concat_mask = torch.cat([padding_for_txt, image_mask], dim=-1)
return concat_mask
def _mask_inputs_in_sample_list(self, processed_sample_list, mask_key):
assert "image_feat_masked" in processed_sample_list
sentence_len = processed_sample_list["input_ids"].size(1)
processed_sample_list[mask_key] = self._get_feature_mask(
processed_sample_list["image_mask"], sentence_len
)
processed_sample_list["image_feat"] = processed_sample_list["image_feat_masked"]
def _preprocess_mrc(self, processed_sample_list: Dict[str, Tensor]):
assert "cls_prob" in processed_sample_list
assert "image_mask" in processed_sample_list
assert "image_feat_masked" in processed_sample_list
mrc_label_key = self.heads["mrc"].mrc_label_key
mrc_mask_key = self.heads["mrc"].mrc_mask_key
image_mask = processed_sample_list["image_mask"]
cls_prob = processed_sample_list["cls_prob"].to(image_mask.device)
img_masks_ext = image_mask.unsqueeze(-1).expand_as(cls_prob) # (n, m, d)
cls_dim = cls_prob.size(2)
cls_prob = cls_prob[img_masks_ext].contiguous().view(-1, cls_dim)
processed_sample_list[mrc_label_key] = cls_prob
self._mask_inputs_in_sample_list(processed_sample_list, mrc_mask_key)
def _preprocess_mrfr(self, processed_sample_list: Dict[str, Tensor]):
assert "image_mask" in processed_sample_list
assert "image_feat_masked" in processed_sample_list
mrfr_target_key = self.heads["mrfr"].mrfr_target_key
mrfr_mask_key = self.heads["mrfr"].mrfr_mask_key
image_mask = processed_sample_list["image_mask"]
image_feat = processed_sample_list["image_feat"]
img_masks_ext = image_mask.unsqueeze(-1).expand_as(image_feat) # (n, m, d)
feat_dim = image_feat.size(2)
feat_targets = image_feat[img_masks_ext].contiguous().view(-1, feat_dim)
processed_sample_list[mrfr_target_key] = feat_targets
self._mask_inputs_in_sample_list(processed_sample_list, mrfr_mask_key)
def _preprocess_wra(self, processed_sample_list: Dict[str, Tensor]):
assert "is_correct" in processed_sample_list
ot_inputs_key = self.heads["wra"].ot_inputs_key
wra_label_key = self.heads["wra"].wra_label_key
txt_lens = [i.size(0) for i in processed_sample_list["input_ids"]]
num_bbs = [f.size(0) for f in processed_sample_list["image_feat"]]
def _compute_pad(lens: List[int]):
max_len = max(lens)
pad = torch.zeros(len(lens), max_len)
for i, l in enumerate(lens):
pad.data[i, l:].fill_(1)
return pad
device = processed_sample_list["input_ids"].device
txt_pad = _compute_pad(txt_lens).to(device).bool()
img_pad = _compute_pad(num_bbs).to(device).bool()
ot_inputs = {"txt_pad": txt_pad, "img_pad": img_pad}
processed_sample_list[ot_inputs_key] = ot_inputs
processed_sample_list[wra_label_key] = processed_sample_list["is_correct"]
def _remove_mismatched_captions(self, processed_sample_list: Dict[str, Tensor]):
assert "is_correct" in processed_sample_list
pos_pairs = processed_sample_list["is_correct"].ne(0)
pos_pairs_mask = torch.where(pos_pairs.any(), pos_pairs, pos_pairs.new([True]))
tensor_names = [
"input_ids",
"input_mask",
"image_feat",
"img_pos_feat",
"attention_mask",
"image_mask",
"image_feat_masked",
"lm_label_ids",
"cls_prob",
]
for name in tensor_names:
x = processed_sample_list.get(name)
if x is None:
continue
if x.dim() == 1:
assert x.size(0) == pos_pairs_mask.size(0), (
f"tensor {name} has shape {x.shape} but expected "
+ f"{pos_pairs_mask.size(0)} at dim 0."
)
# write the filtered tensor back so the masking takes effect
processed_sample_list[name] = x[pos_pairs_mask]
else:
processed_sample_list[name] = x[pos_pairs_mask, ::]
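# --- Hedged illustration (not in the upstream file) ---
# How the positive-pair mask in _remove_mismatched_captions selects a batch. If every pair is
# mismatched, torch.where falls back to an all-True mask so nothing is dropped. All values
# below are toy inputs.
def _example_positive_pair_filter():
    is_correct = torch.tensor([1, 0, 1])                 # 1 = matched caption, 0 = mismatched
    pos_pairs = is_correct.ne(0)
    pos_pairs_mask = torch.where(pos_pairs.any(), pos_pairs, pos_pairs.new([True]))
    batch = torch.arange(6).view(3, 2)
    return batch[pos_pairs_mask, ::]                     # keeps rows 0 and 2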
@registry.register_model("uniter")
class UNITER(BaseModel):
"""Modification for Joint Vision-Language Encoding"""
@dataclass
class Config:
random_init: bool = False
bert_model_name: str = "bert-base-uncased"
img_dim: int = 2048
hidden_size: int = 768
hidden_dropout_prob: float = 0
text_embeddings: Any = field(default_factory=lambda: {})
encoder: Any = field(default_factory=lambda: {})
heads: Any = MISSING
losses: Any = field(default_factory=lambda: {})
tasks: Any = MISSING
do_pretraining: bool = False
def __init__(self, config):
super().__init__(config)
self.config = OmegaConf.create({**asdict(self.Config()), **config})
self.do_pretraining = self.config.do_pretraining
@classmethod
def config_path(cls):
return "configs/models/uniter/defaults.yaml"
def build(self):
configs = dict(**self.config)
configs["head_configs"] = configs.pop("heads")
configs["loss_configs"] = configs.pop("losses")
params_keys = [
"head_configs",
"loss_configs",
"tasks",
"random_init",
"bert_model_name",
"img_dim",
"hidden_size",
"hidden_dropout_prob",
"text_embeddings",
"encoder",
]
if self.do_pretraining:
# take value from config when the key exists,
# otherwise use constructor defaults
params_keys += ["mask_probability"]
params = {key: configs[key] for key in params_keys if key in configs}
self.uniter = UNITERForPretraining(**params)
else:
params = {key: configs[key] for key in params_keys if key in configs}
self.uniter = UNITERForClassification(**params)
self.tasks = self.config.tasks
if isinstance(self.tasks, str):
self.tasks = self.tasks.split(",")
def init_losses(self):
"""
Defer loss management to submodels,
do nothing when called by build_model.
"""
pass
def add_pos_feat(self, sample_list: Dict[str, Tensor]):
assert "image_info_0" in sample_list
assert "bbox" in sample_list["image_info_0"]
# (x1, y1, x2, y2), dim = (bs, num_feats, 4)
bboxs = torch.tensor(sample_list["image_info_0"]["bbox"])[:, :, :4]
norm_xy = torch.clone(bboxs)
# if bboxs are not normalized, just do it here
if norm_xy[0, 0, 0] < 1:
img_h = (
torch.tensor(sample_list["image_info_0"]["image_height"])
.unsqueeze(1)
.unsqueeze(1)
) # (bs,)
img_w = (
torch.tensor(sample_list["image_info_0"]["image_width"])
.unsqueeze(1)
.unsqueeze(1)
) # (bs,)
max_image_size = torch.cat([img_w, img_h, img_w, img_h], dim=-1)
max_image_size = max_image_size.to(norm_xy.device)
norm_xy /= max_image_size
bbox_w = (norm_xy[:, :, 2] - norm_xy[:, :, 0]).unsqueeze(-1)
bbox_h = (norm_xy[:, :, 3] - norm_xy[:, :, 1]).unsqueeze(-1)
area = bbox_w * bbox_h
# normalized (x1, y1, x2, y2, w, h, area)
pos_feat = torch.cat([norm_xy, bbox_w, bbox_h, area], dim=-1).to(
sample_list["image_feature_0"]
)
sample_list["img_pos_feat"] = pos_feat
def add_custom_params(self, sample_list: Dict[str, Tensor]) -> Dict[str, Tensor]:
image_feat = sample_list["image_feat"] = sample_list["image_feature_0"]
image_info = getattr(sample_list, "image_info_0", {})
image_dim = getattr(image_info, "max_features", None)
sample_list["image_dim"] = image_dim
image_mask = torch.arange(image_feat.size(-2), device=image_feat.device).expand(
image_feat.size()[:-1]
)
if len(image_dim.size()) < len(image_mask.size()):
image_dim = image_dim.unsqueeze(-1)
assert len(image_dim.size()) == len(image_mask.size())
image_mask = image_mask < image_dim
sample_list["image_mask"] = image_mask.long()
sample_list["attention_mask"] = torch.cat(
(sample_list["input_mask"], sample_list["image_mask"]), dim=-1
)
task_index = torch.randint(len(self.tasks), (1,)).item()
sample_list["task"] = self.tasks[task_index]
sample_list["position_ids"] = torch.arange(
0,
sample_list["input_ids"].size(1),
dtype=torch.long,
device=image_feat.device,
).unsqueeze(0)
self.add_pos_feat(sample_list)
return sample_list
def forward(self, sample_list: Dict[str, Tensor]) -> Dict[str, Tensor]:
sample_list = self.add_custom_params(sample_list)
return self.uniter(sample_list)
def get_attention_mask(
self,
sample_list: Dict[str, Tensor],
text_embedding: Tensor,
image_embedding: Tensor,
) -> Tensor:
image_mask = getattr(sample_list, "image_mask", None)
if image_mask is not None and sample_list.input_mask is not None:
attention_mask = torch.cat((sample_list.input_mask, image_mask), dim=-1)
elif image_mask is not None:
text_mask = torch.ones(
text_embedding.size()[:-1],
dtype=text_embedding.dtype,
device=text_embedding.device,
)
attention_mask = torch.cat((image_mask, text_mask), dim=-1)
elif sample_list.input_mask is not None:
image_mask = torch.ones(
image_embedding.size()[:-1],
dtype=image_embedding.dtype,
device=image_embedding.device,
)
attention_mask = torch.cat((image_mask, sample_list.input_mask), dim=-1)
else:
attention_mask = None
return attention_mask
| [
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.nn.ModuleDict",
"torch.full",
"torch.tensor",
"torch.clone",
"torch.nn.Embedding"
] | 1.6.0 | sisilmehta2000/mmf | ac1bb736f281ffbde367cfe9cf6f4f78fc890fc4 |
1.6 | import torch
import numpy as np
from sklearn import model_selection
from torchvision import datasets
from torchvision import transforms
from torch.utils.data.sampler import SubsetRandomSampler, SequentialSampler
from cutout import Cutout
from autoaugment import CIFAR10Policy
import pickle
def get_train_valid_loader(data_dir,
batch_size,
augment=True,
shuffle=False,
show_sample=False,
num_workers=4,
pin_memory=True,
cutout=False,
cutout_length=16,
auto_augment=False,
resize=False,
datasetType='Full',
resizelength=300):
"""
Utility function for loading and returning train and valid
multi-process iterators over the CIFAR-100 dataset. A sample
9x9 grid of the images can be optionally displayed.
If using CUDA, num_workers should be set to 1 and pin_memory to True.
Params
------
- data_dir: path directory to the dataset.
- batch_size: how many samples per batch to load.
- augment: whether to apply the data augmentation scheme
mentioned in the paper. Only applied on the train split.
- cutout: whether to apply Cutout to the training images.
- cutout_length: side length (in pixels) of each Cutout hole.
- auto_augment: whether to apply the CIFAR10 AutoAugment policy.
- resize: whether to resize images to `resizelength` before cropping.
- resizelength: target side length used when `resize` is True.
- datasetType: which split to build ('Full', 'Partial', 'PartialFly' or 'TrainEntire').
- shuffle: whether to shuffle the train/validation indices.
- show_sample: plot 9x9 sample grid of the dataset.
- num_workers: number of subprocesses to use when loading the dataset.
- pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
True if using GPU.
Returns
-------
- train_loader: training set iterator.
- valid_loader: validation set iterator.
- n_classes: number of target classes (100).
"""
CIFAR_MEAN = [0.5071, 0.4865, 0.4409]
CIFAR_STD = [0.2673, 0.2564, 0.2762]
normalize = transforms.Normalize(
mean=CIFAR_MEAN,
std=CIFAR_STD,
)
# define transforms
if resize:
valid_transform = transforms.Compose([
transforms.Resize(resizelength),
transforms.ToTensor(),
normalize,
])
else:
valid_transform = transforms.Compose([
transforms.ToTensor(),
normalize,
])
if resize:
train_transform = [
transforms.Resize(resizelength),
transforms.RandomCrop(resizelength, padding=4),
]
else:
train_transform = [
transforms.RandomCrop(32, padding=4),
]
train_transform.extend([
transforms.RandomHorizontalFlip(),
])
if auto_augment:
train_transform.extend([
CIFAR10Policy(),
])
train_transform.extend([
transforms.ToTensor(),
normalize,
])
if cutout:
train_transform.extend([
Cutout(cutout_length),
])
train_transform = transforms.Compose(train_transform)
'''
if resize:
train_transform = transforms.Compose([
transforms.Resize(resizelength),
transforms.RandomCrop(resizelength, padding=4),
transforms.RandomHorizontalFlip(), CIFAR10Policy(),
transforms.ToTensor(),
normalize,
])
else:
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(), CIFAR10Policy(),
transforms.ToTensor(),
normalize,
])
if cutout:
train_transform.transforms.append(Cutout(cutout_length)) #can be changed
'''
'''
else:
train_transform = transforms.Compose([
transforms.ToTensor(),
normalize,
])
'''
# load the dataset
train_dataset = datasets.CIFAR100(
root=data_dir, train=True,
download=True, transform=train_transform,
)
valid_dataset = datasets.CIFAR100(
root=data_dir, train=True,
download=True, transform=valid_transform,
)
# Generate stratified splits, and store indexes
'''
targets = train_dataset.targets
print (len(targets))
train_idx, valid_idx = model_selection.train_test_split(
np.arange(len(targets)), test_size=0.02, train_size=0.08, random_state=42, shuffle=True, stratify=targets)
# Check stratification
print(np.unique(np.array(targets)[train_idx], return_counts=True))
print(np.unique(np.array(targets)[valid_idx], return_counts=True))
with open('./data/cifar-100/trainPartial10Cifar100Indexes', 'wb') as f:
pickle.dump(train_idx, f)
with open('./data/cifar-100/valPartial10Cifar100Indexes', 'wb') as f:
pickle.dump(valid_idx, f)
'''
if datasetType.lower() == 'full':
with open(data_dir+'trainFullCifar100Indexes', 'rb') as f:
train_idx = pickle.load(f)
with open(data_dir+'valFullCifar100Indexes', 'rb') as f:
valid_idx = pickle.load(f)
elif "trainentire" in datasetType.lower():
return (get_entire_train(train_dataset, batch_size, shuffle, num_workers, pin_memory, data_dir,resize,resizelength))
elif "partial" in datasetType.lower() and "fly" in datasetType.lower():
targets = train_dataset.targets
train_idx, valid_idx = model_selection.train_test_split(
np.arange(len(targets)), test_size=0.02, train_size=0.08, random_state=42, shuffle=True, stratify=targets)
#print(len(set(train_idx)-set(valid_idx)))
else:#Partial
with open(data_dir+'trainPartial10Cifar100Indexes', 'rb') as f:
train_idx = pickle.load(f)
with open(data_dir+'valPartial10Cifar100Indexes', 'rb') as f:
valid_idx = pickle.load(f)
# Datasets are already shuffled using scikit to create the indexes
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, shuffle=shuffle,
sampler=SequentialSampler(train_idx),
num_workers=num_workers, pin_memory=pin_memory,
)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=batch_size, shuffle=shuffle,
sampler=SequentialSampler(valid_idx),
num_workers=num_workers, pin_memory=pin_memory,
)
'''
# Full Dataset, normal
num_train = len(train_dataset)
indices = list(range(num_train))
split = int(np.floor(valid_size * num_train))
if shuffle:
np.random.seed(random_seed)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, sampler=train_sampler,
num_workers=num_workers, pin_memory=pin_memory,
)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=batch_size, sampler=valid_sampler,
num_workers=num_workers, pin_memory=pin_memory,
)
'''
n_classes = 100
return (train_loader, valid_loader, n_classes)
def get_entire_train(train_dataset, batch_size, shuffle, num_workers, pin_memory, data_dir, resize=False, resizelength=300):
CIFAR_MEAN = [0.5071, 0.4865, 0.4409]
CIFAR_STD = [0.2673, 0.2564, 0.2762]
normalize = transforms.Normalize(
mean=CIFAR_MEAN,
std=CIFAR_STD,
)
valid_transform = []
# define transforms
if resize:
valid_transform = [
transforms.Resize(resizelength),
]
valid_transform.extend([
transforms.ToTensor(),
normalize,
])
valid_transform = transforms.Compose(valid_transform)
valid_dataset = datasets.CIFAR100(
root=data_dir, train=False,
download=True, transform=valid_transform,
)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=batch_size, shuffle=False,
num_workers=num_workers, pin_memory=pin_memory,
)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, shuffle=shuffle,
num_workers=num_workers, pin_memory=pin_memory,
)
#print(len(train_loader) * batch_size)
#print(len(valid_loader)* batch_size)
n_classes = 100
return (train_loader, valid_loader, n_classes)
def get_test_loader(data_dir,
batch_size,
shuffle=False,
num_workers=4,
pin_memory=False,
resize=False,
resizelength=300):
"""
Utility function for loading and returning a multi-process
test iterator over the CIFAR-100 dataset.
If using CUDA, num_workers should be set to 1 and pin_memory to True.
Params
------
- data_dir: path directory to the dataset.
- batch_size: how many samples per batch to load.
- shuffle: whether to shuffle the dataset after every epoch.
- num_workers: number of subprocesses to use when loading the dataset.
- pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
True if using GPU.
Returns
-------
- data_loader: test set iterator.
"""
CIFAR_MEAN = [0.5071, 0.4865, 0.4409]
CIFAR_STD = [0.2673, 0.2564, 0.2762]
normalize = transforms.Normalize(
mean=CIFAR_MEAN,
std=CIFAR_STD,
)
# define transform
if resize:
transform = transforms.Compose([
transforms.Resize(resizelength),
transforms.ToTensor(),
normalize,])
else:
transform = transforms.Compose([
transforms.ToTensor(),
normalize,])
dataset = datasets.CIFAR100(
root=data_dir, train=False,
download=True, transform=transform,
)
'''
# Generate partial test dataset
targets = dataset.targets
_, test_idx = model_selection.train_test_split(
np.arange(len(targets)), test_size=0.1, random_state=42, shuffle=True, stratify=targets)
with open('./data/testPartial10Cifar100Indexes', 'wb') as f:
pickle.dump(test_idx, f)
print(np.unique(np.array(targets)[test_idx], return_counts=True))
'''
# Full test dataset
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=shuffle,
num_workers=num_workers, pin_memory=pin_memory
)
return data_loader
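# --- Hedged usage sketch (not part of the original module) ---
# Pull one batch from the CIFAR-100 test loader defined above. Note this downloads the dataset
# into data_dir on first use; the path below mirrors the __main__ test at the bottom of the file.
def _example_test_batch():
    loader = get_test_loader("./data/cifar-100/", batch_size=8)
    images, labels = next(iter(loader))
    return images.shape, labels.shape   # torch.Size([8, 3, 32, 32]), torch.Size([8])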
'''
def get_train_valid_loader(data_dir,
batch_size,
augment,
random_seed,
valid_size=0.1,
shuffle=True,
show_sample=False,
num_workers=4,
pin_memory=False):
"""
Utility function for loading and returning train and valid
multi-process iterators over the CIFAR-10 dataset. A sample
9x9 grid of the images can be optionally displayed.
If using CUDA, num_workers should be set to 1 and pin_memory to True.
Params
------
- data_dir: path directory to the dataset.
- batch_size: how many samples per batch to load.
- augment: whether to apply the data augmentation scheme
mentioned in the paper. Only applied on the train split.
- random_seed: fix seed for reproducibility.
- valid_size: percentage split of the training set used for
the validation set. Should be a float in the range [0, 1].
- shuffle: whether to shuffle the train/validation indices.
- show_sample: plot 9x9 sample grid of the dataset.
- num_workers: number of subprocesses to use when loading the dataset.
- pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
True if using GPU.
Returns
-------
- train_loader: training set iterator.
- valid_loader: validation set iterator.
"""
error_msg = "[!] valid_size should be in the range [0, 1]."
assert ((valid_size >= 0) and (valid_size <= 1)), error_msg
normalize = transforms.Normalize(
mean=[0.4914, 0.4822, 0.4465],
std=[0.2023, 0.1994, 0.2010],
)
# define transforms
valid_transform = transforms.Compose([
transforms.ToTensor(),
normalize,
])
if augment:
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
else:
train_transform = transforms.Compose([
transforms.ToTensor(),
normalize,
])
# load the dataset
train_dataset = datasets.CIFAR100(
root=data_dir, train=True,
download=True, transform=train_transform,
)
valid_dataset = datasets.CIFAR100(
root=data_dir, train=True,
download=True, transform=valid_transform,
)
num_train = len(train_dataset)
indices = list(range(num_train))
split = int(np.floor(valid_size * num_train))
if shuffle:
np.random.seed(random_seed)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, sampler=train_sampler,
num_workers=num_workers, pin_memory=pin_memory,
)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=batch_size, sampler=valid_sampler,
num_workers=num_workers, pin_memory=pin_memory,
)
# visualize some images
if show_sample:
sample_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=9, shuffle=shuffle,
num_workers=num_workers, pin_memory=pin_memory,
)
data_iter = iter(sample_loader)
images, labels = data_iter.next()
X = images.numpy().transpose([0, 2, 3, 1])
plot_images(X, labels)
return (train_loader, valid_loader)
'''
if __name__ == "__main__":
# Tests
trainloader, valloader, n_classes = get_train_valid_loader("./data/cifar-100/", 10, \
datasetType="PartialFly")
| [
"torch.utils.data.sampler.SequentialSampler",
"torch.utils.data.DataLoader"
] | 1.6.0 | VascoLopes/LCMNAS | f5a5707c3bd6306a5831d1c78a30a1fd2d7c9912 |
1.6 | import torch
from torch import nn
class RunningStats(nn.Module):
def __init__(self, shape, eps = 1e-5):
super().__init__()
shape = shape if isinstance(shape, tuple) else (shape,)
self.shape = shape
self.eps = eps
self.n = 0
self.register_buffer('old_mean', torch.zeros(shape), persistent = False)
self.register_buffer('new_mean', torch.zeros(shape), persistent = False)
self.register_buffer('old_std', torch.zeros(shape), persistent = False)
self.register_buffer('new_std', torch.zeros(shape), persistent = False)
def clear(self):
self.n = 0
def push(self, x):
self.n += 1
if self.n == 1:
self.old_mean.copy_(x.data)
self.new_mean.copy_(x.data)
self.old_std.zero_()
self.new_std.zero_()
return
self.new_mean.copy_(self.old_mean + (x - self.old_mean) / self.n)
self.new_std.copy_(self.old_std + (x - self.old_mean) * (x - self.new_mean))
self.old_mean.copy_(self.new_mean)
self.old_std.copy_(self.new_std)
def mean(self):
return self.new_mean if self.n else torch.zeros_like(self.new_mean)
def variance(self):
return (self.new_std / (self.n - 1)) if self.n > 1 else torch.zeros_like(self.new_std)
def rstd(self):
return torch.rsqrt(self.variance() + self.eps)
def norm(self, x):
return (x - self.mean()) * self.rstd()
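# --- Hedged usage sketch (not part of the original module) ---
# Push a stream of vectors and compare the Welford-style running estimates with batch statistics.
def _example_running_stats():
    stats = RunningStats(4)
    xs = torch.randn(100, 4)
    for x in xs:
        stats.push(x)
    assert torch.allclose(stats.mean(), xs.mean(dim=0), atol=1e-4)
    assert torch.allclose(stats.variance(), xs.var(dim=0, unbiased=True), atol=1e-4)
    return stats.norm(xs[0])   # normalised with the running mean and reciprocal std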
| [
"torch.zeros",
"torch.zeros_like"
] | 1.6 | lucidrains/anymal-belief-state-encoder-decoder-pytorch | e8d4acfa2c81073a88980832185212ba2802287b |
1.4 | """ Vision Transformer (ViT) in PyTorch
A PyTorch implementation of Vision Transformers, as described in:
'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale'
- https://arxiv.org/abs/2010.11929
`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers`
- https://arxiv.org/abs/2106.10270
The official jax code is released and available at https://github.com/google-research/vision_transformer
Acknowledgments:
* The paper authors for releasing code and weights, thanks!
* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out
for some einops/einsum fun
* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT
* Bert reference code checks against Huggingface Transformers and Tensorflow Bert
Hacked together by / Copyright 2020, Ross Wightman
"""
import math
import logging
from functools import partial
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from .helpers import build_model_with_cfg, resolve_pretrained_cfg, named_apply, adapt_input_conv, checkpoint_seq
from .layers import PatchEmbed, Mlp, DropPath, trunc_normal_, lecun_normal_
from .registry import register_model
_logger = logging.getLogger(__name__)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = {
# patch models (weights from official Google JAX impl)
'vit_tiny_patch16_224': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/'
'Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'),
'vit_tiny_patch16_384': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/'
'Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz',
input_size=(3, 384, 384), crop_pct=1.0),
'vit_small_patch32_224': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/'
'S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'),
'vit_small_patch32_384': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/'
'S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz',
input_size=(3, 384, 384), crop_pct=1.0),
'vit_small_patch16_224': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/'
'S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'),
'vit_small_patch16_384': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/'
'S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz',
input_size=(3, 384, 384), crop_pct=1.0),
'vit_base_patch32_224': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/'
'B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'),
'vit_base_patch32_384': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/'
'B_32-i21k-300ep-lr_0.001-aug_light1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz',
input_size=(3, 384, 384), crop_pct=1.0),
'vit_base_patch16_224': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/'
'B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz'),
'vit_base_patch16_384': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/'
'B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz',
input_size=(3, 384, 384), crop_pct=1.0),
'vit_base_patch8_224': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/'
'B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz'),
'vit_large_patch32_224': _cfg(
url='', # no official model weights for this combo, only for in21k
),
'vit_large_patch32_384': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p32_384-9b920ba8.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'vit_large_patch16_224': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/'
'L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz'),
'vit_large_patch16_384': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/'
'L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz',
input_size=(3, 384, 384), crop_pct=1.0),
'vit_large_patch14_224': _cfg(url=''),
'vit_huge_patch14_224': _cfg(url=''),
'vit_giant_patch14_224': _cfg(url=''),
'vit_gigantic_patch14_224': _cfg(url=''),
'vit_base2_patch32_256': _cfg(url='', input_size=(3, 256, 256), crop_pct=0.95),
# patch models, imagenet21k (weights from official Google JAX impl)
'vit_tiny_patch16_224_in21k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz',
num_classes=21843),
'vit_small_patch32_224_in21k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz',
num_classes=21843),
'vit_small_patch16_224_in21k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz',
num_classes=21843),
'vit_base_patch32_224_in21k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0.npz',
num_classes=21843),
'vit_base_patch16_224_in21k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz',
num_classes=21843),
'vit_base_patch8_224_in21k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz',
num_classes=21843),
'vit_large_patch32_224_in21k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth',
num_classes=21843),
'vit_large_patch16_224_in21k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1.npz',
num_classes=21843),
'vit_huge_patch14_224_in21k': _cfg(
url='https://storage.googleapis.com/vit_models/imagenet21k/ViT-H_14.npz',
hf_hub_id='timm/vit_huge_patch14_224_in21k',
num_classes=21843),
# SAM trained models (https://arxiv.org/abs/2106.01548)
'vit_base_patch32_224_sam': _cfg(
url='https://storage.googleapis.com/vit_models/sam/ViT-B_32.npz'),
'vit_base_patch16_224_sam': _cfg(
url='https://storage.googleapis.com/vit_models/sam/ViT-B_16.npz'),
# DINO pretrained - https://arxiv.org/abs/2104.14294 (no classifier head, for fine-tune only)
'vit_small_patch16_224_dino': _cfg(
url='https://dl.fbaipublicfiles.com/dino/dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0),
'vit_small_patch8_224_dino': _cfg(
url='https://dl.fbaipublicfiles.com/dino/dino_deitsmall8_pretrain/dino_deitsmall8_pretrain.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0),
'vit_base_patch16_224_dino': _cfg(
url='https://dl.fbaipublicfiles.com/dino/dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0),
'vit_base_patch8_224_dino': _cfg(
url='https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0),
    # ViT ImageNet-21K-P pretraining by MIIL
'vit_base_patch16_224_miil_in21k': _cfg(
url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/vit_base_patch16_224_in21k_miil.pth',
mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear', num_classes=11221,
),
'vit_base_patch16_224_miil': _cfg(
url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm'
'/vit_base_patch16_224_1k_miil_84_4.pth',
mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear',
),
}
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
super().__init__()
assert dim % num_heads == 0, 'dim should be divisible by num_heads'
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
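# A minimal shape-flow sketch for the Attention module above. The batch size,
# token count and width below are assumptions chosen only for illustration;
# they are not values the library itself uses anywhere.
def _attention_shape_sketch():
    attn = Attention(dim=64, num_heads=8)
    x = torch.randn(2, 197, 64)           # (B, N, C): 2 images, 196 patches + 1 cls token
    out = attn(x)                          # qkv -> (3, B, heads, N, C//heads) -> weighted sum of values
    assert out.shape == (2, 197, 64)       # attention preserves the (B, N, C) shape
    return out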
class Block(nn.Module):
def __init__(
self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
x = x + self.drop_path1(self.attn(self.norm1(x)))
x = x + self.drop_path2(self.mlp(self.norm2(x)))
return x
class VisionTransformer(nn.Module):
""" Vision Transformer
A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
- https://arxiv.org/abs/2010.11929
"""
def __init__(
self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, global_pool='token',
embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0., weight_init='',
embed_layer=PatchEmbed, norm_layer=None, act_layer=None):
"""
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_chans (int): number of input channels
num_classes (int): number of classes for classification head
global_pool (str): type of global pooling for final sequence (default: 'token')
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
            mlp_ratio (float): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
            weight_init (str): weight init scheme
drop_rate (float): dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
embed_layer (nn.Module): patch embedding layer
            norm_layer (nn.Module): normalization layer
            act_layer (nn.Module): MLP activation layer
"""
super().__init__()
assert global_pool in ('', 'avg', 'token')
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
act_layer = act_layer or nn.GELU
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.num_tokens = 1
self.grad_checkpointing = False
self.patch_embed = embed_layer(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.Sequential(*[
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)
for i in range(depth)])
use_fc_norm = self.global_pool == 'avg'
self.norm = norm_layer(embed_dim) if not use_fc_norm else nn.Identity()
# Representation layer. Used for original ViT models w/ in21k pretraining.
self.representation_size = representation_size
self.pre_logits = nn.Identity()
if representation_size:
self._reset_representation(representation_size)
# Classifier Head
self.fc_norm = norm_layer(embed_dim) if use_fc_norm else nn.Identity()
final_chs = self.representation_size if self.representation_size else self.embed_dim
self.head = nn.Linear(final_chs, num_classes) if num_classes > 0 else nn.Identity()
if weight_init != 'skip':
self.init_weights(weight_init)
def _reset_representation(self, representation_size):
self.representation_size = representation_size
if self.representation_size:
self.pre_logits = nn.Sequential(OrderedDict([
('fc', nn.Linear(self.embed_dim, self.representation_size)),
('act', nn.Tanh())
]))
else:
self.pre_logits = nn.Identity()
def init_weights(self, mode=''):
assert mode in ('jax', 'jax_nlhb', 'moco', '')
head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.
trunc_normal_(self.pos_embed, std=.02)
nn.init.normal_(self.cls_token, std=1e-6)
named_apply(get_init_weights_vit(mode, head_bias), self)
def _init_weights(self, m):
# this fn left here for compat with downstream users
init_weights_vit_timm(m)
@torch.jit.ignore()
def load_pretrained(self, checkpoint_path, prefix=''):
_load_weights(self, checkpoint_path, prefix)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token', 'dist_token'}
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^cls_token|pos_embed|patch_embed', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes: int, global_pool=None, representation_size=None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('', 'avg', 'token')
self.global_pool = global_pool
if representation_size is not None:
self._reset_representation(representation_size)
final_chs = self.representation_size if self.representation_size else self.embed_dim
self.head = nn.Linear(final_chs, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
x = self.pos_drop(x + self.pos_embed)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool:
x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0]
x = self.fc_norm(x)
x = self.pre_logits(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
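# A minimal usage sketch for the VisionTransformer class above. The tiny depth/width
# below are assumptions chosen to keep the example light and do not correspond to any
# registered timm variant.
def _vision_transformer_usage_sketch():
    model = VisionTransformer(img_size=224, patch_size=16, embed_dim=192, depth=2,
                              num_heads=3, num_classes=10)
    x = torch.randn(2, 3, 224, 224)                     # a batch of two RGB images
    logits = model(x)                                    # (2, 10) classification logits
    feats = model.forward_head(model.forward_features(x), pre_logits=True)  # (2, 192) pooled features
    return logits, feats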
def init_weights_vit_timm(module: nn.Module, name: str = ''):
""" ViT weight initialization, original timm impl (for reproducibility) """
if isinstance(module, nn.Linear):
trunc_normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
def init_weights_vit_jax(module: nn.Module, name: str = '', head_bias: float = 0.):
""" ViT weight initialization, matching JAX (Flax) impl """
if isinstance(module, nn.Linear):
if name.startswith('head'):
nn.init.zeros_(module.weight)
nn.init.constant_(module.bias, head_bias)
elif name.startswith('pre_logits'):
lecun_normal_(module.weight)
nn.init.zeros_(module.bias)
else:
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
nn.init.normal_(module.bias, std=1e-6) if 'mlp' in name else nn.init.zeros_(module.bias)
elif isinstance(module, nn.Conv2d):
lecun_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
def init_weights_vit_moco(module: nn.Module, name: str = ''):
""" ViT weight initialization, matching moco-v3 impl minus fixed PatchEmbed """
if isinstance(module, nn.Linear):
if 'qkv' in name:
# treat the weights of Q, K, V separately
val = math.sqrt(6. / float(module.weight.shape[0] // 3 + module.weight.shape[1]))
nn.init.uniform_(module.weight, -val, val)
else:
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
def get_init_weights_vit(mode='jax', head_bias: float = 0.):
if 'jax' in mode:
return partial(init_weights_vit_jax, head_bias=head_bias)
elif 'moco' in mode:
return init_weights_vit_moco
else:
return init_weights_vit_timm
@torch.no_grad()
def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''):
""" Load weights from .npz checkpoints for official Google Brain Flax implementation
"""
import numpy as np
def _n2p(w, t=True):
if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:
w = w.flatten()
if t:
if w.ndim == 4:
w = w.transpose([3, 2, 0, 1])
elif w.ndim == 3:
w = w.transpose([2, 0, 1])
elif w.ndim == 2:
w = w.transpose([1, 0])
return torch.from_numpy(w)
w = np.load(checkpoint_path)
if not prefix and 'opt/target/embedding/kernel' in w:
prefix = 'opt/target/'
if hasattr(model.patch_embed, 'backbone'):
# hybrid
backbone = model.patch_embed.backbone
stem_only = not hasattr(backbone, 'stem')
stem = backbone if stem_only else backbone.stem
stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel'])))
stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale']))
stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias']))
if not stem_only:
for i, stage in enumerate(backbone.stages):
for j, block in enumerate(stage.blocks):
bp = f'{prefix}block{i + 1}/unit{j + 1}/'
for r in range(3):
getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel']))
getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale']))
getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias']))
if block.downsample is not None:
block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel']))
block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale']))
block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias']))
embed_conv_w = _n2p(w[f'{prefix}embedding/kernel'])
else:
embed_conv_w = adapt_input_conv(
model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel']))
model.patch_embed.proj.weight.copy_(embed_conv_w)
model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias']))
model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False))
pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False)
if pos_embed_w.shape != model.pos_embed.shape:
pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights
pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)
model.pos_embed.copy_(pos_embed_w)
model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale']))
model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias']))
if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]:
model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel']))
model.head.bias.copy_(_n2p(w[f'{prefix}head/bias']))
if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w:
model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel']))
model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias']))
for i, block in enumerate(model.blocks.children()):
block_prefix = f'{prefix}Transformer/encoderblock_{i}/'
mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/'
block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale']))
block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias']))
block.attn.qkv.weight.copy_(torch.cat([
_n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')]))
block.attn.qkv.bias.copy_(torch.cat([
_n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')]))
block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1))
block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias']))
for r in range(2):
getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel']))
getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias']))
block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale']))
block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias']))
def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()):
# Rescale the grid of position embeddings when loading from state_dict. Adapted from
# https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
_logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape)
ntok_new = posemb_new.shape[1]
if num_tokens:
posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:]
ntok_new -= num_tokens
else:
posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
gs_old = int(math.sqrt(len(posemb_grid)))
if not len(gs_new): # backwards compatibility
gs_new = [int(math.sqrt(ntok_new))] * 2
assert len(gs_new) >= 2
_logger.info('Position embedding grid-size from %s to %s', [gs_old, gs_old], gs_new)
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode='bicubic', align_corners=False)
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
return posemb
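# A small sketch of the resize above, with assumed sizes: a 1-token + 14x14-grid embedding
# resized for a 24x24 patch grid, i.e. adapting a 224px/16 checkpoint to 384px/16 input.
def _resize_pos_embed_sketch():
    posemb = torch.randn(1, 1 + 14 * 14, 768)        # pretrained: cls token + 196 grid positions
    posemb_new = torch.zeros(1, 1 + 24 * 24, 768)     # target: cls token + 576 grid positions
    resized = resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=(24, 24))
    assert resized.shape == posemb_new.shape          # grid part is bicubically interpolated
    return resized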
def checkpoint_filter_fn(state_dict, model):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
if 'model' in state_dict:
# For deit models
state_dict = state_dict['model']
for k, v in state_dict.items():
if 'patch_embed.proj.weight' in k and len(v.shape) < 4:
# For old models that I trained prior to conv based patchification
O, I, H, W = model.patch_embed.proj.weight.shape
v = v.reshape(O, -1, H, W)
elif k == 'pos_embed' and v.shape != model.pos_embed.shape:
# To resize pos embedding when using model at different size from pretrained weights
v = resize_pos_embed(
v, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)
out_dict[k] = v
return out_dict
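# A small sketch of the patch-embed weight conversion handled above, using assumed
# ViT-B/16 sizes: an old flat projection weight (embed_dim, in_chans * patch * patch)
# is refolded into the (out_ch, in_ch, kH, kW) layout of the conv-based PatchEmbed.
def _patch_embed_weight_reshape_sketch():
    old_w = torch.randn(768, 3 * 16 * 16)       # linear-projection layout
    new_w = old_w.reshape(768, -1, 16, 16)       # conv layout: (768, 3, 16, 16)
    assert new_w.shape == (768, 3, 16, 16)
    return new_w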
def _create_vision_transformer(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
# NOTE this extra code to support handling of repr size for in21k pretrained models
pretrained_cfg = resolve_pretrained_cfg(variant, kwargs=kwargs)
default_num_classes = pretrained_cfg['num_classes']
num_classes = kwargs.get('num_classes', default_num_classes)
repr_size = kwargs.pop('representation_size', None)
if repr_size is not None and num_classes != default_num_classes:
        # Remove representation layer if fine-tuning. This may not always be the desired action,
        # but it feels better than doing nothing by default when fine-tuning. Perhaps a better interface?
_logger.warning("Removing representation layer for fine-tuning.")
repr_size = None
model = build_model_with_cfg(
VisionTransformer, variant, pretrained,
pretrained_cfg=pretrained_cfg,
representation_size=repr_size,
pretrained_filter_fn=checkpoint_filter_fn,
pretrained_custom_load='npz' in pretrained_cfg['url'],
**kwargs)
return model
@register_model
def vit_tiny_patch16_224(pretrained=False, **kwargs):
""" ViT-Tiny (Vit-Ti/16)
"""
model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
model = _create_vision_transformer('vit_tiny_patch16_224', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_tiny_patch16_384(pretrained=False, **kwargs):
""" ViT-Tiny (Vit-Ti/16) @ 384x384.
"""
model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
model = _create_vision_transformer('vit_tiny_patch16_384', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_small_patch32_224(pretrained=False, **kwargs):
""" ViT-Small (ViT-S/32)
"""
model_kwargs = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = _create_vision_transformer('vit_small_patch32_224', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_small_patch32_384(pretrained=False, **kwargs):
""" ViT-Small (ViT-S/32) at 384x384.
"""
model_kwargs = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = _create_vision_transformer('vit_small_patch32_384', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_small_patch16_224(pretrained=False, **kwargs):
""" ViT-Small (ViT-S/16)
NOTE I've replaced my previous 'small' model definition and weights with the small variant from the DeiT paper
"""
model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = _create_vision_transformer('vit_small_patch16_224', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_small_patch16_384(pretrained=False, **kwargs):
""" ViT-Small (ViT-S/16)
NOTE I've replaced my previous 'small' model definition and weights with the small variant from the DeiT paper
"""
model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = _create_vision_transformer('vit_small_patch16_384', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_base_patch32_224(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer('vit_base_patch32_224', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_base2_patch32_256(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/32)
# FIXME experiment
"""
model_kwargs = dict(patch_size=32, embed_dim=896, depth=12, num_heads=14, **kwargs)
model = _create_vision_transformer('vit_base2_patch32_256', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_base_patch32_384(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer('vit_base_patch32_384', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_base_patch16_224(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer('vit_base_patch16_224', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_base_patch16_384(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer('vit_base_patch16_384', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_base_patch8_224(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/8) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=8, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer('vit_base_patch8_224', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_large_patch32_224(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights.
"""
model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = _create_vision_transformer('vit_large_patch32_224', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_large_patch32_384(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = _create_vision_transformer('vit_large_patch32_384', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_large_patch16_224(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = _create_vision_transformer('vit_large_patch16_224', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_large_patch16_384(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = _create_vision_transformer('vit_large_patch16_384', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_large_patch14_224(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/14)
"""
model_kwargs = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = _create_vision_transformer('vit_large_patch14_224', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_huge_patch14_224(pretrained=False, **kwargs):
""" ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929).
"""
model_kwargs = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, **kwargs)
model = _create_vision_transformer('vit_huge_patch14_224', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_giant_patch14_224(pretrained=False, **kwargs):
""" ViT-Giant model (ViT-g/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560
"""
model_kwargs = dict(patch_size=14, embed_dim=1408, mlp_ratio=48/11, depth=40, num_heads=16, **kwargs)
model = _create_vision_transformer('vit_giant_patch14_224', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_gigantic_patch14_224(pretrained=False, **kwargs):
""" ViT-Gigantic model (ViT-G/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560
"""
model_kwargs = dict(patch_size=14, embed_dim=1664, mlp_ratio=64/13, depth=48, num_heads=16, **kwargs)
model = _create_vision_transformer('vit_gigantic_patch14_224', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_tiny_patch16_224_in21k(pretrained=False, **kwargs):
""" ViT-Tiny (Vit-Ti/16).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer
"""
model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
model = _create_vision_transformer('vit_tiny_patch16_224_in21k', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_small_patch32_224_in21k(pretrained=False, **kwargs):
""" ViT-Small (ViT-S/16)
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer
"""
model_kwargs = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = _create_vision_transformer('vit_small_patch32_224_in21k', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_small_patch16_224_in21k(pretrained=False, **kwargs):
""" ViT-Small (ViT-S/16)
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer
"""
model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = _create_vision_transformer('vit_small_patch16_224_in21k', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_base_patch32_224_in21k(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer
"""
model_kwargs = dict(
patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer('vit_base_patch32_224_in21k', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_base_patch16_224_in21k(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer
"""
model_kwargs = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer('vit_base_patch16_224_in21k', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_base_patch8_224_in21k(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/8) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer
"""
model_kwargs = dict(
patch_size=8, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer('vit_base_patch8_224_in21k', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_large_patch32_224_in21k(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
NOTE: this model has a representation layer but the 21k classifier head is zero'd out in original weights
"""
model_kwargs = dict(
patch_size=32, embed_dim=1024, depth=24, num_heads=16, representation_size=1024, **kwargs)
model = _create_vision_transformer('vit_large_patch32_224_in21k', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_large_patch16_224_in21k(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer
"""
model_kwargs = dict(
patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = _create_vision_transformer('vit_large_patch16_224_in21k', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_huge_patch14_224_in21k(pretrained=False, **kwargs):
""" ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
NOTE: this model has a representation layer but the 21k classifier head is zero'd out in original weights
"""
model_kwargs = dict(
patch_size=14, embed_dim=1280, depth=32, num_heads=16, representation_size=1280, **kwargs)
model = _create_vision_transformer('vit_huge_patch14_224_in21k', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_base_patch16_224_sam(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/16) w/ SAM pretrained weights. Paper: https://arxiv.org/abs/2106.01548
"""
# NOTE original SAM weights release worked with representation_size=768
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer('vit_base_patch16_224_sam', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_base_patch32_224_sam(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/32) w/ SAM pretrained weights. Paper: https://arxiv.org/abs/2106.01548
"""
# NOTE original SAM weights release worked with representation_size=768
model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer('vit_base_patch32_224_sam', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_small_patch16_224_dino(pretrained=False, **kwargs):
""" ViT-Small (ViT-S/16) w/ DINO pretrained weights (no head) - https://arxiv.org/abs/2104.14294
"""
model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = _create_vision_transformer('vit_small_patch16_224_dino', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_small_patch8_224_dino(pretrained=False, **kwargs):
""" ViT-Small (ViT-S/8) w/ DINO pretrained weights (no head) - https://arxiv.org/abs/2104.14294
"""
model_kwargs = dict(patch_size=8, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = _create_vision_transformer('vit_small_patch8_224_dino', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_base_patch16_224_dino(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/16) /w DINO pretrained weights (no head) - https://arxiv.org/abs/2104.14294
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer('vit_base_patch16_224_dino', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_base_patch8_224_dino(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/8) w/ DINO pretrained weights (no head) - https://arxiv.org/abs/2104.14294
"""
model_kwargs = dict(patch_size=8, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer('vit_base_patch8_224_dino', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_base_patch16_224_miil_in21k(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, **kwargs)
model = _create_vision_transformer('vit_base_patch16_224_miil_in21k', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_base_patch16_224_miil(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, **kwargs)
model = _create_vision_transformer('vit_base_patch16_224_miil', pretrained=pretrained, **model_kwargs)
return model
| [
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.Identity",
"torch.nn.init.constant_",
"torch.nn.Tanh",
"torch.no_grad",
"torch.nn.functional.interpolate",
"torch.nn.init.xavier_uniform_",
"torch.linspace",
"torch.from_numpy",
"torch.jit.ignore",
"torch.nn.init.normal_",
"torch.jit.is_scripting",
"torch.nn.init.uniform_",
"torch.nn.init.zeros_"
] | 1.4.0 | ares89/pytorch-image-models | dc51334cdc05757dc9004583aac8668ebd892b03 |
1.1 | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Transformer XL model.
Adapted from https://github.com/kimiyoung/transformer-xl.
In particular https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import copy
import json
import math
import logging
import collections
import sys
from io import open
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .modeling import BertLayerNorm as LayerNorm
from .modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax, sample_logits
from .file_utils import cached_path, CONFIG_NAME, WEIGHTS_NAME
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-pytorch_model.bin",
}
PRETRAINED_CONFIG_ARCHIVE_MAP = {
'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-config.json",
}
TF_WEIGHTS_NAME = 'model.ckpt'
def build_tf_to_pytorch_map(model, config):
""" A map of modules from TF to PyTorch.
This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible.
"""
tf_to_pt_map = {}
if hasattr(model, 'transformer'):
        # We are loading a TransfoXLLMHeadModel => we will also load the Adaptive Softmax
tf_to_pt_map.update({
"transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight,
"transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias})
for i, (out_l, proj_l, tie_proj) in enumerate(zip(
model.crit.out_layers,
model.crit.out_projs,
config.tie_projs)):
layer_str = "transformer/adaptive_softmax/cutoff_%d/" % i
if config.tie_weight:
tf_to_pt_map.update({
layer_str + 'b': out_l.bias})
else:
raise NotImplementedError
# I don't think this is implemented in the TF code
tf_to_pt_map.update({
layer_str + 'lookup_table': out_l.weight,
layer_str + 'b': out_l.bias})
if not tie_proj:
tf_to_pt_map.update({
layer_str + 'proj': proj_l
})
# Now load the rest of the transformer
model = model.transformer
# Embeddings
for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)):
layer_str = "transformer/adaptive_embed/cutoff_%d/" % i
tf_to_pt_map.update({
layer_str + 'lookup_table': embed_l.weight,
layer_str + 'proj_W': proj_l
})
# Transformer blocks
for i, b in enumerate(model.layers):
layer_str = "transformer/layer_%d/" % i
tf_to_pt_map.update({
layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight,
layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias,
layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight,
layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight,
layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight,
layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight,
layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias,
layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight,
layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias,
layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight,
layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias,
})
# Relative positioning biases
if config.untie_r:
r_r_list = []
r_w_list = []
for b in model.layers:
r_r_list.append(b.dec_attn.r_r_bias)
r_w_list.append(b.dec_attn.r_w_bias)
else:
r_r_list = [model.r_r_bias]
r_w_list = [model.r_w_bias]
tf_to_pt_map.update({
'transformer/r_r_bias': r_r_list,
'transformer/r_w_bias': r_w_list})
return tf_to_pt_map
def load_tf_weights_in_transfo_xl(model, config, tf_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
# Build TF to PyTorch weights loading map
tf_to_pt_map = build_tf_to_pytorch_map(model, config)
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
tf_weights = {}
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
tf_weights[name] = array
for name, pointer in tf_to_pt_map.items():
assert name in tf_weights
array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
if 'kernel' in name or 'proj' in name:
array = np.transpose(array)
if ('r_r_bias' in name or 'r_w_bias' in name) and len(pointer) > 1:
            # Here we will split the TF weights
assert len(pointer) == array.shape[0]
for i, p_i in enumerate(pointer):
arr_i = array[i, ...]
try:
assert p_i.shape == arr_i.shape
except AssertionError as e:
e.args += (p_i.shape, arr_i.shape)
raise
print("Initialize PyTorch weight {} for layer {}".format(name, i))
p_i.data = torch.from_numpy(arr_i)
else:
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
tf_weights.pop(name, None)
tf_weights.pop(name + '/Adam', None)
tf_weights.pop(name + '/Adam_1', None)
print("Weights not copied to PyTorch model: {}".format(', '.join(tf_weights.keys())))
return model
class TransfoXLConfig(object):
"""Configuration class to store the configuration of a `TransfoXLModel`.
"""
def __init__(self,
vocab_size_or_config_json_file=267735,
cutoffs=[20000, 40000, 200000],
d_model=1024,
d_embed=1024,
n_head=16,
d_head=64,
d_inner=4096,
div_val=4,
pre_lnorm=False,
n_layer=18,
tgt_len=128,
ext_len=0,
mem_len=1600,
clamp_len=1000,
same_length=True,
proj_share_all_but_first=True,
attn_type=0,
sample_softmax=-1,
adaptive=True,
tie_weight=True,
dropout=0.1,
dropatt=0.0,
untie_r=True,
init="normal",
init_range=0.01,
proj_init_std=0.01,
init_std=0.02):
"""Constructs TransfoXLConfig.
Args:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `TransfoXLModel` or a configuration json file.
cutoffs: cutoffs for the adaptive softmax
d_model: Dimensionality of the model's hidden states.
d_embed: Dimensionality of the embeddings
d_head: Dimensionality of the model's heads.
            div_val: divisor value for adaptive input and softmax
pre_lnorm: apply LayerNorm to the input instead of the output
d_inner: Inner dimension in FF
n_layer: Number of hidden layers in the Transformer encoder.
n_head: Number of attention heads for each attention layer in
the Transformer encoder.
tgt_len: number of tokens to predict
ext_len: length of the extended context
            mem_len: length of the retained previous hidden states (memory length)
same_length: use the same attn length for all tokens
proj_share_all_but_first: True to share all but first projs, False not to share.
attn_type: attention type. 0 for Transformer-XL, 1 for Shaw et al, 2 for Vaswani et al, 3 for Al Rfou et al.
clamp_len: use the same pos embeddings after clamp_len
sample_softmax: number of samples in sampled softmax
adaptive: use adaptive softmax
tie_weight: tie the word embedding and softmax weights
            dropout: The dropout probability for all fully connected
                layers in the embeddings and encoder.
dropatt: The dropout ratio for the attention probabilities.
untie_r: untie relative position biases
init: parameter initializer to use
init_range: parameters initialized by U(-init_range, init_range).
            proj_init_std: projection parameters initialized by N(0, proj_init_std)
init_std: parameters initialized by N(0, init_std)
"""
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.n_token = vocab_size_or_config_json_file
self.cutoffs = []
self.cutoffs.extend(cutoffs)
self.tie_weight = tie_weight
if proj_share_all_but_first:
self.tie_projs = [False] + [True] * len(self.cutoffs)
else:
self.tie_projs = [False] + [False] * len(self.cutoffs)
self.d_model = d_model
self.d_embed = d_embed
self.d_head = d_head
self.d_inner = d_inner
self.div_val = div_val
self.pre_lnorm = pre_lnorm
self.n_layer = n_layer
self.n_head = n_head
self.tgt_len = tgt_len
self.ext_len = ext_len
self.mem_len = mem_len
self.same_length = same_length
self.attn_type = attn_type
self.clamp_len = clamp_len
self.sample_softmax = sample_softmax
self.adaptive = adaptive
self.dropout = dropout
self.dropatt = dropatt
self.untie_r = untie_r
self.init = init
self.init_range = init_range
self.proj_init_std = proj_init_std
self.init_std = init_std
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `TransfoXLConfig` from a Python dictionary of parameters."""
config = TransfoXLConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `TransfoXLConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string())
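# A minimal sketch of building and round-tripping a config with the class above. The
# reduced sizes are assumptions for illustration and are much smaller than the wt103 defaults.
def _transfo_xl_config_sketch():
    config = TransfoXLConfig(vocab_size_or_config_json_file=1000, d_model=128, d_embed=128,
                             n_head=4, d_head=32, d_inner=256, n_layer=2)
    json_str = config.to_json_string()                            # serialize to JSON ...
    restored = TransfoXLConfig.from_dict(json.loads(json_str))    # ... and rebuild from the dict
    assert restored.n_layer == config.n_layer
    return restored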
class PositionalEmbedding(nn.Module):
def __init__(self, demb):
super(PositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq, bsz=None):
sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
if bsz is not None:
return pos_emb[:,None,:].expand(-1, bsz, -1)
else:
return pos_emb[:,None,:]
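# A sketch of the sinusoidal embedding above with assumed sizes: for demb=8 the inverse
# frequencies are 1 / 10000^(2i/8), and each position p yields [sin(p*f_0..f_3), cos(p*f_0..f_3)],
# i.e. a (klen, 1, demb) tensor when bsz is None.
def _positional_embedding_sketch():
    pos_emb_layer = PositionalEmbedding(demb=8)
    pos_seq = torch.arange(15.0, -1.0, -1.0)      # Transformer-XL feeds positions in reverse order
    pos_emb = pos_emb_layer(pos_seq)
    assert pos_emb.shape == (16, 1, 8)
    return pos_emb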
class PositionwiseFF(nn.Module):
def __init__(self, d_model, d_inner, dropout, pre_lnorm=False):
super(PositionwiseFF, self).__init__()
self.d_model = d_model
self.d_inner = d_inner
self.dropout = dropout
self.CoreNet = nn.Sequential(
nn.Linear(d_model, d_inner), nn.ReLU(inplace=True),
nn.Dropout(dropout),
nn.Linear(d_inner, d_model),
nn.Dropout(dropout),
)
self.layer_norm = LayerNorm(d_model)
self.pre_lnorm = pre_lnorm
def forward(self, inp):
if self.pre_lnorm:
##### layer normalization + positionwise feed-forward
core_out = self.CoreNet(self.layer_norm(inp))
##### residual connection
output = core_out + inp
else:
##### positionwise feed-forward
core_out = self.CoreNet(inp)
##### residual connection + layer normalization
output = self.layer_norm(inp + core_out)
return output
class MultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
pre_lnorm=False, r_r_bias=None, r_w_bias=None):
super(MultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.q_net = nn.Linear(d_model, n_head * d_head, bias=False)
self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = LayerNorm(d_model)
self.scale = 1 / (d_head ** 0.5)
self.pre_lnorm = pre_lnorm
if r_r_bias is None or r_w_bias is None: # Biases are not shared
self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
else:
self.r_r_bias = r_r_bias
self.r_w_bias = r_w_bias
def forward(self, h, attn_mask=None, mems=None):
##### multihead attention
# [hlen x bsz x n_head x d_head]
if mems is not None:
c = torch.cat([mems, h], 0)
else:
c = h
if self.pre_lnorm:
##### layer normalization
c = self.layer_norm(c)
head_q = self.q_net(h)
head_k, head_v = torch.chunk(self.kv_net(c), 2, -1)
head_q = head_q.view(h.size(0), h.size(1), self.n_head, self.d_head)
head_k = head_k.view(c.size(0), c.size(1), self.n_head, self.d_head)
head_v = head_v.view(c.size(0), c.size(1), self.n_head, self.d_head)
# [qlen x klen x bsz x n_head]
attn_score = torch.einsum('ibnd,jbnd->ijbn', (head_q, head_k))
attn_score.mul_(self.scale)
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score.masked_fill_(attn_mask[None,:,:,None], -float('inf'))
elif attn_mask.dim() == 3:
attn_score.masked_fill_(attn_mask[:,:,:,None], -float('inf'))
# [qlen x klen x bsz x n_head]
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
# [qlen x klen x bsz x n_head] + [klen x bsz x n_head x d_head] -> [qlen x bsz x n_head x d_head]
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, head_v))
attn_vec = attn_vec.contiguous().view(
attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
##### linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
##### residual connection
output = h + attn_out
else:
##### residual connection + layer normalization
output = self.layer_norm(h + attn_out)
return output
class RelMultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
tgt_len=None, ext_len=None, mem_len=None, pre_lnorm=False,
r_r_bias=None, r_w_bias=None):
super(RelMultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = LayerNorm(d_model)
self.scale = 1 / (d_head ** 0.5)
self.pre_lnorm = pre_lnorm
if r_r_bias is None or r_w_bias is None: # Biases are not shared
self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
else:
self.r_r_bias = r_r_bias
self.r_w_bias = r_w_bias
def _parallelogram_mask(self, h, w, left=False):
mask = torch.ones((h, w)).bool()
m = min(h, w)
mask[:m,:m] = torch.triu(mask[:m,:m])
mask[-m:,-m:] = torch.tril(mask[-m:,-m:])
if left:
return mask
else:
return mask.flip(0)
def _shift(self, x, qlen, klen, mask, left=False):
if qlen > 1:
zero_pad = torch.zeros((x.size(0), qlen-1, x.size(2), x.size(3)),
device=x.device, dtype=x.dtype)
else:
zero_pad = torch.zeros(0, device=x.device, dtype=x.dtype)
if left:
mask = mask.flip(1)
x_padded = torch.cat([zero_pad, x], dim=1).expand(qlen, -1, -1, -1)
else:
x_padded = torch.cat([x, zero_pad], dim=1).expand(qlen, -1, -1, -1)
x = x_padded.masked_select(mask[:,:,None,None]) \
.view(qlen, klen, x.size(2), x.size(3))
return x
def _rel_shift(self, x, zero_triu=False):
zero_pad_shape = (x.size(0), 1) + x.size()[2:]
zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=1)
x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:]
x_padded = x_padded.view(*x_padded_shape)
x = x_padded[1:].view_as(x)
if zero_triu:
ones = torch.ones((x.size(0), x.size(1)))
x = x * torch.tril(ones, x.size(1) - x.size(0))[:,:,None,None]
return x
def forward(self, w, r, attn_mask=None, mems=None):
raise NotImplementedError
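# A standalone sketch of the _rel_shift trick above on a tiny (qlen=3, klen=3) score
# matrix with singleton batch/head dims (sizes assumed only for illustration). Padding a
# zero column, viewing the buffer as (klen+1, qlen, ...) and dropping the first row shifts
# row i left by (klen - 1 - i); the entries that wrap past the row end land on positions
# that the causal attention mask removes anyway.
def _rel_shift_sketch():
    x = torch.arange(9.0).view(3, 3, 1, 1)                      # x[i, k]: query i, relative index k
    zero_pad = torch.zeros((x.size(0), 1) + x.size()[2:], dtype=x.dtype)
    x_padded = torch.cat([zero_pad, x], dim=1)                  # (3, 4, 1, 1)
    x_padded = x_padded.view((x.size(1) + 1, x.size(0)) + x.size()[2:])
    shifted = x_padded[1:].view_as(x)                           # row 2 unchanged, row 1 shifted by 1, row 0 by 2
    return shifted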
class RelPartialLearnableMultiHeadAttn(RelMultiHeadAttn):
def __init__(self, *args, **kwargs):
super(RelPartialLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)
def forward(self, w, r, attn_mask=None, mems=None):
qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
if mems is not None:
cat = torch.cat([mems, w], 0)
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(cat))
else:
w_heads = self.qkv_net(cat)
r_head_k = self.r_net(r)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
w_head_q = w_head_q[-qlen:]
else:
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(w))
else:
w_heads = self.qkv_net(w)
r_head_k = self.r_net(r)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
klen = w_head_k.size(0)
w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
r_head_k = r_head_k.view(rlen, self.n_head, self.d_head) # qlen x n_head x d_head
#### compute attention score
rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head
AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head
rr_head_q = w_head_q + self.r_r_bias
BD = torch.einsum('ibnd,jnd->ijbn', (rr_head_q, r_head_k)) # qlen x klen x bsz x n_head
BD = self._rel_shift(BD)
# [qlen x klen x bsz x n_head]
attn_score = AC + BD
attn_score.mul_(self.scale)
#### compute attention probability
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score = attn_score.float().masked_fill(
attn_mask[None,:,:,None], -1e30).type_as(attn_score)
elif attn_mask.dim() == 3:
attn_score = attn_score.float().masked_fill(
attn_mask[:,:,:,None], -1e30).type_as(attn_score)
# [qlen x klen x bsz x n_head]
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
#### compute attention vector
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
# [qlen x bsz x n_head x d_head]
attn_vec = attn_vec.contiguous().view(
attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
##### linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
##### residual connection
output = w + attn_out
else:
##### residual connection + layer normalization
output = self.layer_norm(w + attn_out)
return output
class RelLearnableMultiHeadAttn(RelMultiHeadAttn):
def __init__(self, *args, **kwargs):
super(RelLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
def forward(self, w, r_emb, r_w_bias, r_bias, attn_mask=None, mems=None):
# r_emb: [klen, n_head, d_head], used for term B
# r_w_bias: [n_head, d_head], used for term C
# r_bias: [klen, n_head], used for term D
qlen, bsz = w.size(0), w.size(1)
if mems is not None:
cat = torch.cat([mems, w], 0)
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(cat))
else:
w_heads = self.qkv_net(cat)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
w_head_q = w_head_q[-qlen:]
else:
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(w))
else:
w_heads = self.qkv_net(w)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
klen = w_head_k.size(0)
w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)
w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)
w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)
if klen > r_emb.size(0):
r_emb_pad = r_emb[0:1].expand(klen-r_emb.size(0), -1, -1)
r_emb = torch.cat([r_emb_pad, r_emb], 0)
r_bias_pad = r_bias[0:1].expand(klen-r_bias.size(0), -1)
r_bias = torch.cat([r_bias_pad, r_bias], 0)
else:
r_emb = r_emb[-klen:]
r_bias = r_bias[-klen:]
#### compute attention score
rw_head_q = w_head_q + r_w_bias[None] # qlen x bsz x n_head x d_head
AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head
B_ = torch.einsum('ibnd,jnd->ijbn', (w_head_q, r_emb)) # qlen x klen x bsz x n_head
D_ = r_bias[None, :, None] # 1 x klen x 1 x n_head
BD = self._rel_shift(B_ + D_)
# [qlen x klen x bsz x n_head]
attn_score = AC + BD
attn_score.mul_(self.scale)
#### compute attention probability
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score.masked_fill_(attn_mask[None,:,:,None], -float('inf'))
elif attn_mask.dim() == 3:
attn_score.masked_fill_(attn_mask[:,:,:,None], -float('inf'))
# [qlen x klen x bsz x n_head]
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
#### compute attention vector
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
# [qlen x bsz x n_head x d_head]
attn_vec = attn_vec.contiguous().view(
attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
##### linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
##### residual connection
output = w + attn_out
else:
##### residual connection + layer normalization
output = self.layer_norm(w + attn_out)
return output
class DecoderLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs):
super(DecoderLayer, self).__init__()
self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs)
self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
pre_lnorm=kwargs.get('pre_lnorm'))
def forward(self, dec_inp, dec_attn_mask=None, mems=None):
output = self.dec_attn(dec_inp, attn_mask=dec_attn_mask,
mems=mems)
output = self.pos_ff(output)
return output
class RelLearnableDecoderLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout,
**kwargs):
super(RelLearnableDecoderLayer, self).__init__()
self.dec_attn = RelLearnableMultiHeadAttn(n_head, d_model, d_head, dropout,
**kwargs)
self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
pre_lnorm=kwargs.get('pre_lnorm'))
def forward(self, dec_inp, r_emb, r_w_bias, r_bias, dec_attn_mask=None, mems=None):
output = self.dec_attn(dec_inp, r_emb, r_w_bias, r_bias,
attn_mask=dec_attn_mask,
mems=mems)
output = self.pos_ff(output)
return output
class RelPartialLearnableDecoderLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout,
**kwargs):
super(RelPartialLearnableDecoderLayer, self).__init__()
self.dec_attn = RelPartialLearnableMultiHeadAttn(n_head, d_model,
d_head, dropout, **kwargs)
self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
pre_lnorm=kwargs.get('pre_lnorm'))
def forward(self, dec_inp, r, dec_attn_mask=None, mems=None):
output = self.dec_attn(dec_inp, r,
attn_mask=dec_attn_mask,
mems=mems)
output = self.pos_ff(output)
return output
class AdaptiveEmbedding(nn.Module):
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
sample_softmax=False):
super(AdaptiveEmbedding, self).__init__()
self.n_token = n_token
self.d_embed = d_embed
self.cutoffs = cutoffs + [n_token]
self.div_val = div_val
self.d_proj = d_proj
self.emb_scale = d_proj ** 0.5
self.cutoff_ends = [0] + self.cutoffs
self.emb_layers = nn.ModuleList()
self.emb_projs = nn.ParameterList()
if div_val == 1:
self.emb_layers.append(
nn.Embedding(n_token, d_embed, sparse=sample_softmax>0)
)
if d_proj != d_embed:
self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_embed)))
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
d_emb_i = d_embed // (div_val ** i)
self.emb_layers.append(nn.Embedding(r_idx-l_idx, d_emb_i))
self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_emb_i)))
def forward(self, inp):
if self.div_val == 1:
embed = self.emb_layers[0](inp)
if self.d_proj != self.d_embed:
embed = F.linear(embed, self.emb_projs[0])
else:
param = next(self.parameters())
inp_flat = inp.view(-1)
emb_flat = torch.zeros([inp_flat.size(0), self.d_proj],
dtype=param.dtype, device=param.device)
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
indices_i = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
inp_i = inp_flat.index_select(0, indices_i) - l_idx
emb_i = self.emb_layers[i](inp_i)
emb_i = F.linear(emb_i, self.emb_projs[i])
emb_flat.index_copy_(0, indices_i, emb_i)
embed_shape = inp.size() + (self.d_proj,)
embed = emb_flat.view(embed_shape)
embed.mul_(self.emb_scale)
return embed
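def _adaptive_embedding_sketch():
    # Illustrative sketch (not part of the original file): with div_val > 1 the
    # vocabulary is split at the cutoffs and rarer clusters get progressively
    # smaller embeddings that are projected back up to d_proj. All sizes below
    # are hypothetical. The projection parameters are created uninitialized, so
    # they are given values here before use.
    emb = AdaptiveEmbedding(n_token=1000, d_embed=64, d_proj=64,
                            cutoffs=[100, 500], div_val=2)
    for proj in emb.emb_projs:
        nn.init.normal_(proj, 0.0, 0.02)
    tokens = torch.tensor([[3, 250, 900], [7, 450, 999]])  # ids drawn from every cluster
    return emb(tokens).shape                                # -> torch.Size([2, 3, 64])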
class TransfoXLPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(TransfoXLPreTrainedModel, self).__init__()
if not isinstance(config, TransfoXLConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `TransfoXLConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_weight(self, weight):
if self.config.init == 'uniform':
nn.init.uniform_(weight, -self.config.init_range, self.config.init_range)
elif self.config.init == 'normal':
nn.init.normal_(weight, 0.0, self.config.init_std)
def init_bias(self, bias):
nn.init.constant_(bias, 0.0)
def init_weights(self, m):
""" Initialize the weights.
"""
classname = m.__class__.__name__
if classname.find('Linear') != -1:
if hasattr(m, 'weight') and m.weight is not None:
self.init_weight(m.weight)
if hasattr(m, 'bias') and m.bias is not None:
self.init_bias(m.bias)
elif classname.find('AdaptiveEmbedding') != -1:
if hasattr(m, 'emb_projs'):
for i in range(len(m.emb_projs)):
if m.emb_projs[i] is not None:
nn.init.normal_(m.emb_projs[i], 0.0, self.config.proj_init_std)
elif classname.find('Embedding') != -1:
if hasattr(m, 'weight'):
self.init_weight(m.weight)
elif classname.find('ProjectedAdaptiveLogSoftmax') != -1:
if hasattr(m, 'cluster_weight') and m.cluster_weight is not None:
self.init_weight(m.cluster_weight)
if hasattr(m, 'cluster_bias') and m.cluster_bias is not None:
self.init_bias(m.cluster_bias)
if hasattr(m, 'out_projs'):
for i in range(len(m.out_projs)):
if m.out_projs[i] is not None:
nn.init.normal_(m.out_projs[i], 0.0, self.config.proj_init_std)
elif classname.find('LayerNorm') != -1:
if hasattr(m, 'weight'):
nn.init.normal_(m.weight, 1.0, self.config.init_std)
if hasattr(m, 'bias') and m.bias is not None:
self.init_bias(m.bias)
elif classname.find('TransformerLM') != -1:
if hasattr(m, 'r_emb'):
self.init_weight(m.r_emb)
if hasattr(m, 'r_w_bias'):
self.init_weight(m.r_w_bias)
if hasattr(m, 'r_r_bias'):
self.init_weight(m.r_r_bias)
if hasattr(m, 'r_bias'):
self.init_bias(m.r_bias)
def set_num_special_tokens(self, num_special_tokens):
pass
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
"""
Instantiate a TransfoXLPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `transfo-xl-wt103`
- a path or url to a pretrained model archive containing:
. `transfo_xl_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a TransfoXLModel instance
- a path or url to a pretrained model archive containing:
. `transfo_xl_config.json` a configuration file for the model
. `model.chkpt` a TensorFlow checkpoint
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
*inputs, **kwargs: additional input for the specific TransformerXL class
"""
state_dict = kwargs.get('state_dict', None)
kwargs.pop('state_dict', None)
cache_dir = kwargs.get('cache_dir', None)
kwargs.pop('cache_dir', None)
from_tf = kwargs.get('from_tf', False)
kwargs.pop('from_tf', None)
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except EnvironmentError:
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
logger.error(
"Couldn't reach server at '{}' to download pretrained weights.".format(
archive_file))
else:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find file {} "
"at this path or url.".format(
pretrained_model_name_or_path, ", ".join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), pretrained_model_name_or_path,
archive_file
)
)
return None
try:
resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
except EnvironmentError:
if pretrained_model_name_or_path in PRETRAINED_CONFIG_ARCHIVE_MAP:
logger.error(
"Couldn't reach server at '{}' to download pretrained model configuration file.".format(
config_file))
else:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find file {} "
"at this path or url.".format(
pretrained_model_name_or_path, ", ".join(PRETRAINED_CONFIG_ARCHIVE_MAP.keys()), pretrained_model_name_or_path,
config_file
)
)
return None
if resolved_archive_file == archive_file and resolved_config_file == config_file:
logger.info("loading weights file {}".format(archive_file))
logger.info("loading configuration file {}".format(config_file))
else:
logger.info("loading weights file {} from cache at {}".format(
archive_file, resolved_archive_file))
logger.info("loading configuration file {} from cache at {}".format(
config_file, resolved_config_file))
# Load config
config = TransfoXLConfig.from_json_file(resolved_config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
state_dict = torch.load(resolved_archive_file, map_location='cpu')
if from_tf:
# Directly load from a TensorFlow checkpoint
return load_tf_weights_in_transfo_xl(model, config, pretrained_model_name_or_path)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'transformer') and any(s.startswith('transformer.') for s in state_dict.keys()):
start_prefix = 'transformer.'
load(model, prefix=start_prefix)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
model.__class__.__name__, "\n\t".join(error_msgs)))
# Make sure we are still sharing the input and output embeddings
if hasattr(model, 'tie_weights'):
model.tie_weights()
return model
class TransfoXLModel(TransfoXLPreTrainedModel):
"""Transformer XL model ("Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context").
Transformer-XL uses relative positioning (with sinusoidal patterns) and adaptive softmax inputs, which means that:
- you don't need to specify positional embedding indices
- the tokens in the vocabulary have to be sorted by decreasing frequency.
Params:
config: a TransfoXLConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the token indices selected in the range [0, self.config.n_token[
`mems`: optional memory of hidden states from previous forward passes
as a list (num layers) of hidden states at the entry of each layer
each hidden state has shape [self.config.mem_len, bsz, self.config.d_model]
Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
Outputs:
A tuple of (last_hidden_state, new_mems)
`last_hidden_state`: the encoded-hidden-states at the top of the model
as a torch.FloatTensor of size [batch_size, sequence_length, self.config.d_model]
`new_mems`: list (num layers) of updated mem states at the entry of each layer
each mem state is a torch.FloatTensor of size [self.config.mem_len, batch_size, self.config.d_model]
Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_ids_next = torch.LongTensor([[53, 21, 1], [64, 23, 100]])
config = TransfoXLConfig()
model = TransfoXLModel(config)
last_hidden_state, new_mems = model(input_ids)
# Another time on input_ids_next using the memory:
last_hidden_state, new_mems = model(input_ids_next, new_mems)
```
"""
def __init__(self, config):
super(TransfoXLModel, self).__init__(config)
self.n_token = config.n_token
self.d_embed = config.d_embed
self.d_model = config.d_model
self.n_head = config.n_head
self.d_head = config.d_head
self.word_emb = AdaptiveEmbedding(config.n_token, config.d_embed, config.d_model, config.cutoffs,
div_val=config.div_val)
self.drop = nn.Dropout(config.dropout)
self.n_layer = config.n_layer
self.tgt_len = config.tgt_len
self.mem_len = config.mem_len
self.ext_len = config.ext_len
self.max_klen = config.tgt_len + config.ext_len + config.mem_len
self.attn_type = config.attn_type
if not config.untie_r:
self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.layers = nn.ModuleList()
if config.attn_type == 0: # the default attention
for i in range(config.n_layer):
self.layers.append(
RelPartialLearnableDecoderLayer(
config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout,
tgt_len=config.tgt_len, ext_len=config.ext_len, mem_len=config.mem_len,
dropatt=config.dropatt, pre_lnorm=config.pre_lnorm,
r_w_bias=None if config.untie_r else self.r_w_bias,
r_r_bias=None if config.untie_r else self.r_r_bias)
)
elif config.attn_type == 1: # learnable embeddings
for i in range(config.n_layer):
self.layers.append(
RelLearnableDecoderLayer(
config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout,
tgt_len=config.tgt_len, ext_len=config.ext_len, mem_len=config.mem_len,
dropatt=config.dropatt, pre_lnorm=config.pre_lnorm,
r_w_bias=None if config.untie_r else self.r_w_bias,
r_r_bias=None if config.untie_r else self.r_r_bias)
)
elif config.attn_type in [2, 3]: # absolute embeddings
for i in range(config.n_layer):
self.layers.append(
DecoderLayer(
config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout,
dropatt=config.dropatt, pre_lnorm=config.pre_lnorm,
r_w_bias=None if config.untie_r else self.r_w_bias,
r_r_bias=None if config.untie_r else self.r_r_bias)
)
self.same_length = config.same_length
self.clamp_len = config.clamp_len
if self.attn_type == 0: # default attention
self.pos_emb = PositionalEmbedding(self.d_model)
elif self.attn_type == 1: # learnable
self.r_emb = nn.Parameter(torch.Tensor(
self.n_layer, self.max_klen, self.n_head, self.d_head))
self.r_bias = nn.Parameter(torch.Tensor(
self.n_layer, self.max_klen, self.n_head))
elif self.attn_type == 2: # absolute standard
self.pos_emb = PositionalEmbedding(self.d_model)
elif self.attn_type == 3: # absolute deeper SA
self.r_emb = nn.Parameter(torch.Tensor(
self.n_layer, self.max_klen, self.n_head, self.d_head))
self.apply(self.init_weights)
def backward_compatible(self):
self.sample_softmax = -1
def reset_length(self, tgt_len, ext_len, mem_len):
self.tgt_len = tgt_len
self.mem_len = mem_len
self.ext_len = ext_len
def init_mems(self, data):
if self.mem_len > 0:
mems = []
param = next(self.parameters())
for i in range(self.n_layer):
empty = torch.zeros(self.mem_len, data.size(1), self.config.d_model,
dtype=param.dtype, device=param.device)
mems.append(empty)
return mems
else:
return None
def _update_mems(self, hids, mems, qlen, mlen):
# does not deal with None
if mems is None: return None
# mems is not None
assert len(hids) == len(mems), 'len(hids) != len(mems)'
# There are `mlen + qlen` steps that can be cached into mems
# For the next step, the last `ext_len` of the `qlen` tokens
# will be used as the extended context. Hence, we only cache
# the tokens from `mlen + qlen - self.ext_len - self.mem_len`
# to `mlen + qlen - self.ext_len`.
with torch.no_grad():
new_mems = []
end_idx = mlen + max(0, qlen - 0 - self.ext_len)
beg_idx = max(0, end_idx - self.mem_len)
for i in range(len(hids)):
cat = torch.cat([mems[i], hids[i]], dim=0)
new_mems.append(cat[beg_idx:end_idx].detach())
return new_mems
def _forward(self, dec_inp, mems=None):
qlen, bsz = dec_inp.size()
word_emb = self.word_emb(dec_inp)
mlen = mems[0].size(0) if mems is not None else 0
klen = mlen + qlen
if self.same_length:
all_ones = word_emb.new_ones(qlen, klen)
mask_len = klen - self.mem_len
if mask_len > 0:
mask_shift_len = qlen - mask_len
else:
mask_shift_len = qlen
dec_attn_mask = (torch.triu(all_ones, 1+mlen)
+ torch.tril(all_ones, -mask_shift_len)).bool()[:, :, None] # -1
else:
dec_attn_mask = torch.triu(
word_emb.new_ones(qlen, klen), diagonal=1+mlen).bool()[:,:,None]
hids = []
if self.attn_type == 0: # default
pos_seq = torch.arange(klen-1, -1, -1.0, device=word_emb.device,
dtype=word_emb.dtype)
if self.clamp_len > 0:
pos_seq.clamp_(max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq)
core_out = self.drop(word_emb)
pos_emb = self.drop(pos_emb)
for i, layer in enumerate(self.layers):
hids.append(core_out)
mems_i = None if mems is None else mems[i]
core_out = layer(core_out, pos_emb, dec_attn_mask=dec_attn_mask, mems=mems_i)
elif self.attn_type == 1: # learnable
core_out = self.drop(word_emb)
for i, layer in enumerate(self.layers):
hids.append(core_out)
if self.clamp_len > 0:
r_emb = self.r_emb[i][-self.clamp_len :]
r_bias = self.r_bias[i][-self.clamp_len :]
else:
r_emb, r_bias = self.r_emb[i], self.r_bias[i]
mems_i = None if mems is None else mems[i]
core_out = layer(core_out, r_emb, self.r_w_bias[i],
r_bias, dec_attn_mask=dec_attn_mask, mems=mems_i)
elif self.attn_type == 2: # absolute
pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device,
dtype=word_emb.dtype)
if self.clamp_len > 0:
pos_seq.clamp_(max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq)
core_out = self.drop(word_emb + pos_emb[-qlen:])
for i, layer in enumerate(self.layers):
hids.append(core_out)
mems_i = None if mems is None else mems[i]
if mems_i is not None and i == 0:
mems_i += pos_emb[:mlen]
core_out = layer(core_out, dec_attn_mask=dec_attn_mask,
mems=mems_i)
elif self.attn_type == 3:
core_out = self.drop(word_emb)
for i, layer in enumerate(self.layers):
hids.append(core_out)
mems_i = None if mems is None else mems[i]
if mems_i is not None and mlen > 0:
cur_emb = self.r_emb[i][:-qlen]
cur_size = cur_emb.size(0)
if cur_size < mlen:
cur_emb_pad = cur_emb[0:1].expand(mlen-cur_size, -1, -1)
cur_emb = torch.cat([cur_emb_pad, cur_emb], 0)
else:
cur_emb = cur_emb[-mlen:]
mems_i += cur_emb.view(mlen, 1, -1)
core_out += self.r_emb[i][-qlen:].view(qlen, 1, -1)
core_out = layer(core_out, dec_attn_mask=dec_attn_mask,
mems=mems_i)
core_out = self.drop(core_out)
new_mems = self._update_mems(hids, mems, mlen, qlen)
return core_out, new_mems
def forward(self, input_ids, mems=None):
""" Params:
input_ids :: [bsz, len]
mems :: optional mems from previous forward passes (or init_mems)
list (num layers) of mem states at the entry of each layer
shape :: [self.config.mem_len, bsz, self.config.d_model]
Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
Returns:
tuple (last_hidden, new_mems) where:
new_mems: list (num layers) of mem states at the entry of each layer
shape :: [self.config.mem_len, bsz, self.config.d_model]
last_hidden: output of the last layer:
shape :: [bsz, len, self.config.d_model]
"""
# the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
# so we transpose here from shape [bsz, len] to shape [len, bsz]
input_ids = input_ids.transpose(0, 1).contiguous()
if mems is None:
mems = self.init_mems(input_ids)
last_hidden, new_mems = self._forward(input_ids, mems=mems)
# We transpose back here to shape [bsz, len, hidden_dim]
last_hidden = last_hidden.transpose(0, 1).contiguous()
return (last_hidden, new_mems)
class TransfoXLLMHeadModel(TransfoXLPreTrainedModel):
"""Transformer XL model ("Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context").
This model adds an (adaptive) softmax head on top of the TransfoXLModel.
Transformer-XL uses relative positioning (with sinusoidal patterns) and adaptive softmax inputs, which means that:
- you don't need to specify positional embedding indices
- the tokens in the vocabulary have to be sorted by decreasing frequency.
Call self.tie_weights() if you update/load the weights of the transformer to keep the weights tied.
Params:
config: a TransfoXLConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the token indices selected in the range [0, self.config.n_token[
`target`: an optional torch.LongTensor of shape [batch_size, sequence_length]
with the target token indices selected in the range [0, self.config.n_token[
`mems`: an optional memory of hidden states from previous forward passes
as a list (num layers) of hidden states at the entry of each layer
each hidden state has shape [self.config.mem_len, bsz, self.config.d_model]
Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
Outputs:
A tuple of (softmax_output, new_mems)
`softmax_output`: output of the (adaptive) softmax:
if target is None:
Negative log likelihood of shape [batch_size, sequence_length]
else:
log probabilities of tokens, shape [batch_size, sequence_length, n_tokens]
`new_mems`: list (num layers) of updated mem states at the entry of each layer
each mem state is a torch.FloatTensor of size [self.config.mem_len, batch_size, self.config.d_model]
Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_ids_next = torch.LongTensor([[53, 21, 1], [64, 23, 100]])
config = TransfoXLConfig()
model = TransfoXLLMHeadModel(config)
softmax_output, new_mems = model(input_ids)
# Another time on input_ids_next using the memory:
softmax_output, new_mems = model(input_ids_next, mems=new_mems)
```
"""
def __init__(self, config):
super(TransfoXLLMHeadModel, self).__init__(config)
self.transformer = TransfoXLModel(config)
self.sample_softmax = config.sample_softmax
# use sampled softmax
if config.sample_softmax > 0:
self.out_layer = nn.Linear(config.d_model, config.n_token)
self.sampler = LogUniformSampler(config.n_token, config.sample_softmax)
# use adaptive softmax (including standard softmax)
else:
self.crit = ProjectedAdaptiveLogSoftmax(config.n_token, config.d_embed, config.d_model,
config.cutoffs, div_val=config.div_val)
self.apply(self.init_weights)
self.tie_weights()
def tie_weights(self):
""" Run this to be sure output and input (adaptive) softmax weights are tied """
# sampled softmax
if self.sample_softmax > 0:
if self.config.tie_weight:
self.out_layer.weight = self.transformer.word_emb.weight
# adaptive softmax (including standard softmax)
else:
if self.config.tie_weight:
for i in range(len(self.crit.out_layers)):
self.crit.out_layers[i].weight = self.transformer.word_emb.emb_layers[i].weight
if self.config.tie_projs:
for i, tie_proj in enumerate(self.config.tie_projs):
if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
elif tie_proj and self.config.div_val != 1:
self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
def reset_length(self, tgt_len, ext_len, mem_len):
self.transformer.reset_length(tgt_len, ext_len, mem_len)
def init_mems(self, data):
return self.transformer.init_mems(data)
def forward(self, input_ids, target=None, mems=None):
""" Params:
input_ids :: [bsz, len]
target :: [bsz, len]
Returns:
tuple(softmax_output, new_mems) where:
new_mems: list (num layers) of hidden states at the entry of each layer
shape :: [mem_len, bsz, self.config.d_model] :: Warning: shapes are transposed here w. regards to input_ids
softmax_output: output of the (adaptive) softmax:
if target is None:
Negative log likelihood of shape :: [bsz, len]
else:
log probabilities of tokens, shape :: [bsz, len, n_tokens]
"""
bsz = input_ids.size(0)
tgt_len = input_ids.size(1)
last_hidden, new_mems = self.transformer(input_ids, mems)
pred_hid = last_hidden[:, -tgt_len:]
if self.sample_softmax > 0 and self.training:
assert self.config.tie_weight
logit = sample_logits(self.transformer.word_emb, self.out_layer.bias, target, pred_hid, self.sampler)
softmax_output = -F.log_softmax(logit, -1)[:, :, 0]
else:
softmax_output = self.crit(pred_hid.view(-1, pred_hid.size(-1)), target)
if target is None:
softmax_output = softmax_output.view(bsz, tgt_len, -1)
else:
softmax_output = softmax_output.view(bsz, tgt_len)
# We transpose back
return (softmax_output, new_mems)
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.ParameterList",
"torch.einsum",
"torch.nn.ModuleList",
"torch.ones",
"torch.load",
"torch.chunk",
"torch.ger",
"torch.nn.init.constant_",
"torch.tril",
"torch.nn.init.normal_",
"torch.Tensor",
"torch.zeros",
"torch.nn.functional.log_softmax",
"torch.nn.ReLU",
"torch.nn.functional.linear",
"torch.nn.functional.softmax",
"torch.nn.init.uniform_",
"torch.nn.Dropout",
"torch.arange",
"torch.no_grad",
"torch.from_numpy",
"torch.triu",
"torch.nn.Embedding"
] | 1.1.0 | 9173860/WMSeg | 526d3ad0bf17bc657d9100cbcb7a0d8682b10643 |
1.6 | import numpy as np
import torch
class FeaturesLinear(torch.nn.Module):
def __init__(self, field_dims, output_dim=1):
super().__init__()
self.fc = torch.nn.Embedding(sum(field_dims), output_dim)
self.bias = torch.nn.Parameter(torch.zeros((output_dim, )))
self.offsets = np.array((0, *np.cumsum(field_dims)[:-1]),
dtype=np.long)
def forward(self, x):
"""
:param x: Long tensor of size ``(batch_size, num_fields)``
"""
x = x + x.new_tensor(self.offsets).unsqueeze(0)
return torch.sum(self.fc(x), dim=1) + self.bias
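def _features_linear_sketch():
    # Illustrative sketch (not part of the original module): the per-field
    # offsets turn (field, raw category id) pairs into indices of one flat
    # embedding table, so a single nn.Embedding covers all fields. The field
    # sizes below are hypothetical.
    field_dims = [3, 5]                    # two categorical fields
    linear = FeaturesLinear(field_dims)    # offsets become [0, 3]
    x = torch.tensor([[0, 1], [2, 4]])     # (batch_size=2, num_fields=2)
    return linear(x)                       # shape (2, 1): summed field weights + bias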
| [
"torch.zeros"
] | 1.6 | jianzhnie/AutoTabular | d630c78290a52f8c73885afb16884e18135c34f6 |
1.7 | import io
import PIL.Image
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import torch
import torchvision.utils as vutils
from torchvision.transforms import ToTensor
from utils.utils import enforce_orthog, get_inverse_tf, get_T_ba
from utils.utils import getApproxTimeStamps, wrapto2pi
def convert_plt_to_img():
buf = io.BytesIO()
plt.savefig(buf, format='png')
plt.close()
buf.seek(0)
return PIL.Image.open(buf)
def convert_plt_to_tensor():
return ToTensor()(convert_plt_to_img())
def draw_batch(batch, out, config):
"""Creates an image of the radar scan, scores, and keypoint matches for a single batch."""
# Draw radar image
radar = batch['data'][0].squeeze().numpy()
plt.subplots()
plt.imshow(radar, cmap='gray')
radar_img = convert_plt_to_tensor()
# Draw keypoint matches
src = out['src'][0].squeeze().detach().cpu().numpy()
tgt = out['tgt'][0].squeeze().detach().cpu().numpy()
match_weights = out['match_weights'][0].squeeze().detach().cpu().numpy()
nms = config['vis_keypoint_nms']
max_w = np.max(match_weights)
plt.imshow(radar, cmap='gray')
for i in range(src.shape[0]):
if match_weights[i] < nms * max_w:
continue
plt.plot([src[i, 0], tgt[i, 0]], [src[i, 1], tgt[i, 1]], c='w', linewidth=2, zorder=2)
plt.scatter(src[i, 0], src[i, 1], c='g', s=5, zorder=3)
plt.scatter(tgt[i, 0], tgt[i, 1], c='r', s=5, zorder=4)
match_img = convert_plt_to_tensor()
# Draw scores
scores = out['scores'][0].squeeze().detach().cpu().numpy()
plt.subplots()
plt.imshow(scores, cmap='inferno')
score_img = convert_plt_to_tensor()
return vutils.make_grid([radar_img, score_img, match_img])
def draw_matches(batch, out, config, solver):
azimuth_step = (2 * np.pi) / 400
cart_pixel_width = config['cart_pixel_width']
cart_resolution = config['cart_resolution']
if (cart_pixel_width % 2) == 0:
cart_min_range = (cart_pixel_width / 2 - 0.5) * cart_resolution
else:
cart_min_range = cart_pixel_width // 2 * cart_resolution
T_met_pix = np.array([[0, -cart_resolution, 0, cart_min_range],
[cart_resolution, 0, 0, -cart_min_range],
[0, 0, 1, 0],
[0, 0, 0, 1]])
T_pix_met = np.linalg.inv(T_met_pix)
keypoint_ints = out['keypoint_ints']
ids = torch.nonzero(keypoint_ints[0, 0] > 0, as_tuple=False).squeeze(1)
src = out['src_rc'][0, ids].squeeze().detach().cpu().numpy()
tgt = out['tgt_rc'][0, ids].squeeze().detach().cpu().numpy()
radar = batch['data'][0].squeeze().numpy()
_, axs = plt.subplots(1, 3, tight_layout=True)
# Raw locations overlayed, no transforms
axs[0].imshow(radar, cmap='gray', extent=(0, 640, 640, 0), interpolation='none')
axs[0].set_axis_off()
axs[0].set_title('raw')
for i in range(src.shape[0]):
axs[0].plot([src[i, 0], tgt[i, 0]], [src[i, 1], tgt[i, 1]], c='w', linewidth=1, zorder=2)
axs[0].scatter(src[i, 0], src[i, 1], c='limegreen', s=2, zorder=3)
axs[0].scatter(tgt[i, 0], tgt[i, 1], c='r', s=2, zorder=4)
src = out['src'][0, ids].squeeze().detach().cpu().numpy()
tgt = out['tgt'][0, ids].squeeze().detach().cpu().numpy()
# Use Rigid Transform
axs[1].imshow(radar, cmap='gray', extent=(0, 640, 640, 0), interpolation='none')
axs[1].set_axis_off()
axs[1].set_title('rigid')
T_tgt_src = get_T_ba(out, a=0, b=1)
error = np.zeros((src.shape[0], 2))
for i in range(src.shape[0]):
x1 = np.array([src[i, 0], src[i, 1], 0, 1]).reshape(4, 1)
x2 = np.array([tgt[i, 0], tgt[i, 1], 0, 1]).reshape(4, 1)
x1 = T_tgt_src @ x1
e = x1 - x2
error[i, 1] = np.sqrt(e.T @ e)
error[i, 0] = int(wrapto2pi(np.arctan2(x2[1, 0], x2[0, 0])) // azimuth_step)
x1 = T_pix_met @ x1
x2 = T_pix_met @ x2
axs[1].plot([x1[0, 0], x2[0, 0]], [x1[1, 0], x2[1, 0]], c='w', linewidth=1, zorder=2)
axs[1].scatter(x1[0, 0], x1[1, 0], c='limegreen', s=2, zorder=3)
axs[1].scatter(x2[0, 0], x2[1, 0], c='r', s=2, zorder=4)
# Use Interpolated Poses
t1 = batch['timestamps'][0].numpy().squeeze()
t2 = batch['timestamps'][1].numpy().squeeze()
times1 = getApproxTimeStamps([src], [t1])[0]
times2 = getApproxTimeStamps([tgt], [t2])[0]
t_refs = batch['t_ref'].numpy()
T_1a = np.identity(4, dtype=np.float32)
T_1b = np.identity(4, dtype=np.float32)
axs[2].imshow(radar, cmap='gray', extent=(0, 640, 640, 0), interpolation='none')
axs[2].set_axis_off()
axs[2].set_title('interp')
error2 = np.zeros((src.shape[0], 2))
for i in range(src.shape[0]):
solver.getPoseBetweenTimes(T_1a, times1[i], t_refs[1, 0, 0])
solver.getPoseBetweenTimes(T_1b, times2[i], t_refs[1, 0, 0])
x1 = np.array([src[i, 0], src[i, 1], 0, 1]).reshape(4, 1)
x2 = np.array([tgt[i, 0], tgt[i, 1], 0, 1]).reshape(4, 1)
x1 = T_1a @ x1
x2 = T_1b @ x2
e = x1 - x2
error2[i, 1] = np.sqrt(e.T @ e)
error2[i, 0] = int(wrapto2pi(np.arctan2(x2[1, 0], x2[0, 0])) // azimuth_step)
x1 = T_pix_met @ x1
x2 = T_pix_met @ x2
axs[2].plot([x1[0, 0], x2[0, 0]], [x1[1, 0], x2[1, 0]], c='w', linewidth=1, zorder=2)
axs[2].scatter(x1[0, 0], x1[1, 0], c='limegreen', s=2, zorder=3)
axs[2].scatter(x2[0, 0], x2[1, 0], c='r', s=2, zorder=4)
plt.savefig('matches.pdf', bbox_inches='tight', pad_inches=0.0)
plt.figure()
idx = np.argsort(error[:, 0])
error = error[idx, :]
idx = np.argsort(error2[:, 0])
error2 = error2[idx, :]
plt.plot(error[:, 0], error[:, 1], color='b', label='raw error', linewidth=1)
plt.plot(error2[:, 0], error2[:, 1], color='r', label='interp error', linewidth=1)
plt.title('raw error')
plt.legend()
plt.savefig('matches2.pdf', bbox_inches='tight', pad_inches=0.0)
def draw_batch_steam(batch, out, config):
"""Creates an image of the radar scan, scores, and keypoint matches for a single batch."""
# Draw radar image
radar = batch['data'][0].squeeze().numpy()
radar_tgt = batch['data'][-1].squeeze().numpy()
plt.imshow(np.concatenate((radar, radar_tgt), axis=1), cmap='gray')
plt.title('radar src-tgt pair')
radar_img = convert_plt_to_tensor()
# Draw keypoint matches
src = out['src_rc'][-1].squeeze().detach().cpu().numpy()
tgt = out['tgt_rc'][-1].squeeze().detach().cpu().numpy()
keypoint_ints = out['keypoint_ints']
ids = torch.nonzero(keypoint_ints[-1, 0] > 0, as_tuple=False).squeeze(1)
ids_cpu = ids.cpu()
plt.imshow(np.concatenate((radar, radar_tgt), axis=1), cmap='gray')
delta = radar.shape[1]
for i in range(src.shape[0]):
if i in ids_cpu:
custom_colour = 'g'
plt.plot([src[i, 0], tgt[i, 0] + delta], [src[i, 1], tgt[i, 1]], c='y', linewidth=0.5, zorder=2)
plt.scatter(src[i, 0], src[i, 1], c=custom_colour, s=5, zorder=3)
plt.scatter(tgt[i, 0] + delta, tgt[i, 1], c=custom_colour, s=5, zorder=4)
plt.title('matches')
match_img = convert_plt_to_tensor()
plt.imshow(np.concatenate((radar, radar_tgt), axis=0), cmap='gray')
delta = radar.shape[1]
for i in range(src.shape[0]):
if i in ids_cpu:
custom_colour = 'g'
plt.plot([src[i, 0], tgt[i, 0]], [src[i, 1], tgt[i, 1] + delta], c='y', linewidth=0.5, zorder=2)
plt.scatter(src[i, 0], src[i, 1], c=custom_colour, s=5, zorder=3)
plt.scatter(tgt[i, 0], tgt[i, 1] + delta, c=custom_colour, s=5, zorder=4)
plt.title('matches')
match_img2 = convert_plt_to_tensor()
# Draw scores
scores = out['scores'][-1]
if scores.size(0) == 3:
scores = scores[1] + scores[2]
scores = scores.squeeze().detach().cpu().numpy()
plt.imshow(scores, cmap='inferno')
plt.colorbar()
plt.title('log det weight (weight score vis)')
score_img = convert_plt_to_tensor()
# Draw detector scores
detector_scores = out['detector_scores'][-1].squeeze().detach().cpu().numpy()
plt.imshow(detector_scores, cmap='inferno')
plt.colorbar()
plt.title('detector score')
dscore_img = convert_plt_to_tensor()
# Draw point-to-point error
src_p = out['src'][-1].squeeze().T
tgt_p = out['tgt'][-1].squeeze().T
R_tgt_src = out['R'][0, -1, :2, :2]
t_st_in_t = out['t'][0, -1, :2, :]
error = tgt_p - (R_tgt_src @ src_p + t_st_in_t)
error2_sqrt = torch.sqrt(torch.sum(error * error, dim=0).squeeze())
error2_sqrt = error2_sqrt[ids_cpu].detach().cpu().numpy()
plt.imshow(radar, cmap='gray')
plt.scatter(src[ids_cpu, 0], src[ids_cpu, 1], c=error2_sqrt, s=5, zorder=2, cmap='rainbow')
plt.clim(0.0, 1)
plt.colorbar()
plt.title('P2P error')
p2p_img = convert_plt_to_tensor()
return vutils.make_grid([dscore_img, score_img, radar_img]), vutils.make_grid([match_img, match_img2]), \
vutils.make_grid([p2p_img])
def plot_sequences(T_gt, T_pred, seq_lens, returnTensor=True, T_icra=None, savePDF=False, fnames=None, flip=True):
"""Creates a top-down plot of the predicted odometry results vs. ground truth."""
seq_indices = []
idx = 0
for s in seq_lens:
seq_indices.append(list(range(idx, idx + s - 1)))
idx += (s - 1)
matplotlib.rcParams.update({'font.size': 16, 'xtick.labelsize': 16, 'ytick.labelsize': 16,
'axes.linewidth': 1.5, 'font.family': 'serif', 'pdf.fonttype': 42})
T_flip = np.identity(4)
T_flip[1, 1] = -1
T_flip[2, 2] = -1
imgs = []
for seq_i, indices in enumerate(seq_indices):
T_gt_ = np.identity(4)
T_pred_ = np.identity(4)
T_icra_ = np.identity(4)
if flip:
T_gt_ = np.matmul(T_flip, T_gt_)
T_pred_ = np.matmul(T_flip, T_pred_)
x_gt = []
y_gt = []
x_pred = []
y_pred = []
x_icra = []
y_icra = []
for i in indices:
T_gt_ = np.matmul(T_gt[i], T_gt_)
T_pred_ = np.matmul(T_pred[i], T_pred_)
enforce_orthog(T_gt_)
enforce_orthog(T_pred_)
T_gt_temp = get_inverse_tf(T_gt_)
T_pred_temp = get_inverse_tf(T_pred_)
x_gt.append(T_gt_temp[0, 3])
y_gt.append(T_gt_temp[1, 3])
x_pred.append(T_pred_temp[0, 3])
y_pred.append(T_pred_temp[1, 3])
if T_icra is not None:
T_icra_ = np.matmul(T_icra[i], T_icra_)
enforce_orthog(T_icra_)
T_icra_temp = get_inverse_tf(T_icra_)
x_icra.append(T_icra_temp[0, 3])
y_icra.append(T_icra_temp[1, 3])
plt.figure(figsize=(10, 10), tight_layout=True)
plt.grid(color='k', which='both', linestyle='--', alpha=0.75, dashes=(8.5, 8.5))
plt.axes().set_aspect('equal')
plt.plot(x_gt, y_gt, 'k', linewidth=2.5, label='GT')
if x_icra and y_icra:
plt.plot(x_icra, y_icra, 'r', linewidth=2.5, label='MC-RANSAC')
plt.plot(x_pred, y_pred, 'b', linewidth=2.5, label='HERO')
plt.xlabel('x (m)', fontsize=16)
plt.ylabel('y (m)', fontsize=16)
plt.legend(loc="upper left", edgecolor='k', fancybox=False)
if savePDF and fnames is not None:
plt.savefig(fnames[seq_i], bbox_inches='tight', pad_inches=0.0)
if returnTensor:
imgs.append(convert_plt_to_tensor())
else:
imgs.append(convert_plt_to_img())
return imgs
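def _plot_sequences_sketch():
    # Illustrative sketch (not part of the original repo): plot_sequences
    # consumes lists of 4x4 frame-to-frame transforms. Here the ground truth
    # is a constant 1 m/frame forward motion and the "prediction" drifts
    # slightly; both motions are hypothetical.
    T_gt, T_pred = [], []
    for _ in range(10):
        T = np.identity(4)
        T[0, 3] = 1.0
        T_gt.append(T.copy())
        T[0, 3] = 1.05
        T_pred.append(T.copy())
    # seq_lens counts frames, so 10 relative transforms form an 11-frame sequence
    return plot_sequences(T_gt, T_pred, seq_lens=[11], returnTensor=False)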
| [
"torch.sum",
"torch.nonzero"
] | 1.7.1 | MPieter/hero_radar_odometry | 107c1a07b22784fec54c22e5f8bb03251cc9f786 |
1.7 | from networks.twobranch import twobranch
import torch
import utils
from copy import deepcopy
class twobranch_sum(twobranch):
def __init__(self, pretrained=False, backbone='resnet18', num_out=100, togray="mean", scramble=False, select_kernels='all', remove_batchnorm=None):
self.backbone = backbone
self.num_out = num_out
self.togray = togray
self.scramble = scramble
self.remove_batchnorm = remove_batchnorm
self.select_kernels=select_kernels
#Module init
super(twobranch_sum, self).__init__(pretrained=pretrained, backbone=backbone, num_out=num_out,
                                    togray=togray, scramble=scramble,
                                    remove_batchnorm=remove_batchnorm, select_kernels=select_kernels)
len_fe=len(list(self.feature_extractor.parameters())[-1])
self.fc=torch.nn.Linear(in_features=len_fe, out_features=self.num_out, bias=True)
self.head_var = 'fc'
def forward(self,x):
x1=deepcopy(x)
if self.scramble is True:
x1=utils.batch2scramble(deepcopy(x))
x2=utils.batch2gray(deepcopy(x),transform_type=self.togray)
return self.forward_sum(x1,x2)
| [
"torch.nn.Linear"
] | 1.7.1 | dberga/FACIL | c11dd157bc53cfac91814a52c57bddc385365c61 |
1.4 | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import tempfile
import shutil
from glob import glob
import logging
import nibabel as nib
import numpy as np
import torch
from ignite.engine import Engine
from torch.utils.data import DataLoader
from monai import config
from monai.handlers import CheckpointLoader, SegmentationSaver, StatsHandler, MeanDice
from monai.data import NiftiDataset, create_test_image_3d, sliding_window_inference
from monai.transforms import Compose, AddChannel, ScaleIntensity, ToTensor
from monai.networks.nets import UNet
from monai.networks.utils import predict_segmentation
def main():
config.print_config()
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
tempdir = tempfile.mkdtemp()
print(f"generating synthetic data to {tempdir} (this may take a while)")
for i in range(5):
im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1)
n = nib.Nifti1Image(im, np.eye(4))
nib.save(n, os.path.join(tempdir, f"im{i:d}.nii.gz"))
n = nib.Nifti1Image(seg, np.eye(4))
nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))
images = sorted(glob(os.path.join(tempdir, "im*.nii.gz")))
segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))
# define transforms for image and segmentation
imtrans = Compose([ScaleIntensity(), AddChannel(), ToTensor()])
segtrans = Compose([AddChannel(), ToTensor()])
ds = NiftiDataset(images, segs, transform=imtrans, seg_transform=segtrans, image_only=False)
device = torch.device("cuda:0")
net = UNet(
dimensions=3,
in_channels=1,
out_channels=1,
channels=(16, 32, 64, 128, 256),
strides=(2, 2, 2, 2),
num_res_units=2,
)
net.to(device)
# define sliding window size and batch size for windows inference
roi_size = (96, 96, 96)
sw_batch_size = 4
def _sliding_window_processor(engine, batch):
net.eval()
with torch.no_grad():
val_images, val_labels = batch[0].to(device), batch[1].to(device)
seg_probs = sliding_window_inference(val_images, roi_size, sw_batch_size, net)
return seg_probs, val_labels
evaluator = Engine(_sliding_window_processor)
# add evaluation metric to the evaluator engine
MeanDice(add_sigmoid=True, to_onehot_y=False).attach(evaluator, "Mean_Dice")
# StatsHandler prints loss at every iteration and print metrics at every epoch,
# we don't need to print loss for evaluator, so just print metrics, user can also customize print functions
val_stats_handler = StatsHandler(
name="evaluator",
output_transform=lambda x: None, # no need to print loss value, so disable per iteration output
)
val_stats_handler.attach(evaluator)
# for the array data format, assume the 3rd item of batch data is the meta_data
file_saver = SegmentationSaver(
output_dir="tempdir",
output_ext=".nii.gz",
output_postfix="seg",
name="evaluator",
batch_transform=lambda x: x[2],
output_transform=lambda output: predict_segmentation(output[0]),
)
file_saver.attach(evaluator)
# the model was trained by "unet_training_array" example
ckpt_saver = CheckpointLoader(load_path="./runs/net_checkpoint_50.pth", load_dict={"net": net})
ckpt_saver.attach(evaluator)
# sliding window inference for one image at every iteration
loader = DataLoader(ds, batch_size=1, num_workers=1, pin_memory=torch.cuda.is_available())
state = evaluator.run(loader)
shutil.rmtree(tempdir)
if __name__ == "__main__":
main()
| [
"torch.device",
"torch.no_grad",
"torch.cuda.is_available"
] | 1.4 | murraycutforth/MONAI | ad06dff7f85711048690b2e85c99d51001612708 |
1.5 | """
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch
import torch.nn as nn
from examples.common.sample_config import SampleConfig
from examples.object_detection.layers.modules.ssd_head import MultiOutputSequential, SSDDetectionOutput
from nncf.torch.checkpoint_loading import load_state
def conv_bn(inp, oup, kernel, stride, padding):
return nn.Sequential(
nn.Conv2d(inp, oup, kernel, stride, padding, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True)
)
def conv_dw(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
nn.BatchNorm2d(inp),
nn.ReLU(inplace=True),
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True),
)
def mobilenet(start_input_channels=3):
model = MultiOutputSequential(
[11, 13],
[
conv_bn(start_input_channels, 32, 3, 2, 1),
conv_dw(32, 64, 1),
conv_dw(64, 128, 2),
conv_dw(128, 128, 1),
conv_dw(128, 256, 2),
conv_dw(256, 256, 1),
conv_dw(256, 512, 2),
conv_dw(512, 512, 1),
conv_dw(512, 512, 1),
conv_dw(512, 512, 1),
conv_dw(512, 512, 1),
conv_dw(512, 512, 1),
conv_dw(512, 1024, 2),
conv_dw(1024, 1024, 1)
]
)
return model
def extra_layers(start_input_channels):
return MultiOutputSequential(
[1, 3, 5, 7],
[
conv_bn(start_input_channels, 256, 1, 1, 0),
conv_bn(256, 512, 3, 2, 1),
conv_bn(512, 128, 1, 1, 0),
conv_bn(128, 256, 3, 2, 1),
conv_bn(256, 128, 1, 1, 0),
conv_bn(128, 256, 3, 2, 1),
conv_bn(256, 64, 1, 1, 0),
conv_bn(64, 128, 3, 2, 1)
]
)
class MobileNetSSD(nn.Module):
def __init__(self, num_classes, cfg):
super().__init__()
self.cfg = cfg
self.num_classes = num_classes
self.basenet = mobilenet()
self.extras = extra_layers(1024)
NUM_INPUT_FEATURES = [512, 1024, 512, 256, 256, 128]
self.detection_head = SSDDetectionOutput(NUM_INPUT_FEATURES, num_classes, cfg)
def forward(self, x):
img_tensor = x[0].clone().unsqueeze(0)
sources, x = self.basenet(x)
extra_sources, x = self.extras(x)
return self.detection_head(sources + extra_sources, img_tensor)
def build_ssd_mobilenet(cfg, size, num_classes, config):
if size != 300:
raise ValueError("Only Mobilenet-SSD with input size 300 is supported")
mobilenet_ssd = MobileNetSSD(num_classes, cfg)
if config.basenet and (config.resuming_checkpoint_path is None) and (config.weights is None):
print('Loading base network...')
basenet_weights = torch.load(config.basenet)['state_dict']
new_weights = {}
for wn, wv in basenet_weights.items():
wn = wn.replace('model.', '')
new_weights[wn] = wv
load_state(mobilenet_ssd.basenet, new_weights, is_resume=False)
return mobilenet_ssd
def ssd_mobilenet():
ssd_params = SampleConfig({
"variance": [0.1, 0.1, 0.2, 0.2],
"max_sizes": [60, 111, 162, 213, 264, 315],
"min_sizes": [30, 60, 111, 162, 213, 264],
"steps": [16, 32, 64, 100, 150, 300],
"aspect_ratios": [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
"clip": False,
"flip": True,
"top_k": 200
})
return MobileNetSSD(21, ssd_params)
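def _depthwise_separable_sketch():
    # Illustrative sketch (not part of NNCF): compares the parameter count of a
    # standard 3x3 conv block with the depthwise-separable conv_dw block used
    # throughout the MobileNet backbone, for a hypothetical 64 -> 128 channel layer.
    standard = conv_bn(64, 128, 3, 1, 1)
    separable = conv_dw(64, 128, 1)
    n_params = lambda m: sum(p.numel() for p in m.parameters())
    return n_params(standard), n_params(separable)  # the separable block is roughly 8x smaller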
| [
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.load"
] | 1.5.0 | xiao1228/nncf | 307262119ee3f50eec2fa4022b2ef96693fd8448 |
1.3 | #!/usr/bin/env python
# original all-but-the-top code:
# https://gist.github.com/lgalke/febaaa1313d9c11f3bc8240defed8390
import sys, os
import logging
import argparse
logger = logging.getLogger(__name__)
import numpy as np
from sklearn.decomposition import PCA
from gensim.models import KeyedVectors
import torch
def all_but_the_top(v, D):
"""
Arguments:
:v: word vectors of shape (n_words, n_dimensions)
:D: number of principal components to subtract
"""
# 1. Subtract mean vector
v_tilde = v - np.mean(v, axis=0)
# 2. Compute the first `D` principal components
# on centered embedding vectors
pca = PCA(n_components=D)
pca = pca.fit(v_tilde)
# Subtract first `D` principal components
# [vocab_size, emb_size] @ [emb_size, D] @ [D, emb_size] -> [vocab_size, emb_size]
emb_pca = pca.transform(v_tilde).reshape(-1, D, 1) * \
pca.components_.reshape(1, D, -1)
emb_pca = emb_pca.sum(axis=-2)
v_hat = v_tilde - emb_pca
return v_hat
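def _all_but_the_top_sketch():
    # Illustrative sketch (not part of the original script): after removing the
    # mean and the top-D principal components, the post-processed vectors have
    # (numerically) zero mean and the same shape as the input; the vocabulary
    # size and dimensionality below are hypothetical.
    rng = np.random.RandomState(0)
    v = rng.randn(100, 20)              # 100 fake word vectors of dimension 20
    v_hat = all_but_the_top(v, D=3)
    assert np.allclose(v_hat.mean(axis=0), 0.0, atol=1e-8)
    return v_hat.shape                  # unchanged: (100, 20)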
# main
def apply_all_but_the_top(input_file: str, output_file: str, n_comp: int):
vectors = np.load(input_file)
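    # NOTE: `args.exclude_eos` / `args.skip_norm` below refer to the module-level
    # `args` parsed in the __main__ block, so this function only works when the
    # script is run directly.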
slice_id = 3 if args.exclude_eos else 2
special_embs = vectors[:slice_id]
word_embs = vectors[slice_id:]
word_embs = all_but_the_top(word_embs, n_comp)
v_hat = np.vstack((special_embs, word_embs))
# norm
# use torch to avoid "divide by zero" error
if not args.skip_norm:
v_hat = torch.from_numpy(v_hat)
v_hat = torch.functional.F.normalize(v_hat)
v_hat = v_hat.numpy()
np.save(output_file, v_hat)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", required=True,
help="Path to input file in npy format")
parser.add_argument("-o", "--output", required=True,
help="Path to output file")
parser.add_argument("-d", "--n-components", required=False,
type=int, default=3, help="Num of PCA components to substruct.")
parser.add_argument("--exclude-eos", required=False,
action="store_true", help="")
parser.add_argument("--skip-norm", required=False,
action="store_true", help="")
args = parser.parse_args()
print(args, file=sys.stderr)
apply_all_but_the_top(args.input, args.output, args.n_components)
| [
"torch.from_numpy",
"torch.functional.F.normalize"
] | 1.3.1 | toshohirasawa/mmt-with-monolingual-data | 3f80f3a1807e1a837ef82d75917c1cf581270b84 |
1.2 | # -*- coding: utf-8 -*-
import importlib
import re
import torch
import torch.nn as nn
import attacut
from attacut import logger
log = logger.get_logger(__name__)
def get_device():
if torch.cuda.is_available():
return "cuda"
else:
return "cpu"
class ConvolutionBatchNorm(nn.Module):
def __init__(self, channels, filters, kernel_size, stride=1, dilation=1):
super(ConvolutionBatchNorm, self).__init__()
padding = kernel_size // 2
padding += padding * (dilation-1)
self.conv = nn.Conv1d(
channels,
filters,
kernel_size,
stride=stride,
dilation=dilation,
padding=padding
)
self.bn = nn.BatchNorm1d(filters)
def forward(self, x):
return self.bn(self.conv(x))
class ConvolutionLayer(nn.Module):
def __init__(self, channels, filters, kernel_size, stride=1, dilation=1):
super(ConvolutionLayer, self).__init__()
padding = kernel_size // 2
padding += padding * (dilation-1)
self.conv = nn.Conv1d(
channels,
filters,
kernel_size,
stride=stride,
dilation=dilation,
padding=padding
)
def forward(self, x):
return self.conv(x)
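def _same_length_padding_sketch():
    # Illustrative sketch (not part of AttaCut): with an odd kernel size the
    # padding rule above (kernel_size // 2, scaled by the dilation) keeps the
    # sequence length unchanged, so convolution layers can be stacked without
    # shrinking the character sequence. All sizes are hypothetical.
    layer = ConvolutionBatchNorm(channels=8, filters=16, kernel_size=5, dilation=2)
    x = torch.zeros(2, 8, 30)   # (batch, channels, sequence length)
    return layer(x).shape       # -> torch.Size([2, 16, 30])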
class BaseModel(nn.Module):
dataset = None
@classmethod
def load(cls, path, data_config, model_config, with_eval=True):
model = cls(data_config, model_config)
model_path = "%s/model.pth" % path
model.load_state_dict(torch.load(model_path, map_location="cpu"))
log.info("loaded: %s|%s (variables %d)" % (
model_path,
model_config,
model.total_trainable_params()
))
if with_eval:
log.info("setting model to eval mode")
model.eval()
return model
def total_trainable_params(self):
return sum(p.numel() for p in self.parameters() if p.requires_grad)
def get_model(model_name) -> BaseModel:
module_path = "attacut.models.%s" % model_name
log.info("Taking %s" % module_path)
model_mod = importlib.import_module(module_path)
return model_mod.Model | [
"torch.nn.BatchNorm1d",
"torch.cuda.is_available",
"torch.load",
"torch.nn.Conv1d"
] | 1.2.0 | huak95/attacut | 100333931023cd009daeddec0cba4cdfce3d0b68 |
1.6 | import logging
import os
from dataclasses import dataclass
from typing import Any, Dict, Optional, Union
import torch.optim.lr_scheduler
from allennlp.common import Registrable
from allennlp.common.checks import ConfigurationError, check_for_gpu
from allennlp.common.util import int_to_device
logger = logging.getLogger(__name__)
@dataclass
class TrainerCheckpoint:
model_state: Dict[str, Any]
trainer_state: Dict[str, Any]
class Trainer(Registrable):
"""
The base class for an AllenNLP trainer. It can do pretty much
anything you want. Your subclass should implement `train`
and also probably `from_params`.
"""
default_implementation = "gradient_descent"
def __init__(
self,
serialization_dir: str = None,
cuda_device: Optional[Union[int, torch.device]] = None,
distributed: bool = False,
local_rank: int = 0,
world_size: int = 1,
) -> None:
if cuda_device is None:
from torch import cuda
if cuda.device_count() > 0:
cuda_device = 0
else:
cuda_device = -1
check_for_gpu(cuda_device)
if serialization_dir is None:
import tempfile
self._serialization_dir = tempfile.mkdtemp()
else:
self._serialization_dir = serialization_dir
# Ensure serialization directory exists.
os.makedirs(self._serialization_dir, exist_ok=True)
if isinstance(cuda_device, list):
raise ConfigurationError(
"In AllenNLP 1.0, the Trainer can only be assigned a single `cuda_device`. "
"Instead, we use torch's DistributedDataParallel at the command level, meaning "
"our Trainer always uses a single GPU per process."
)
if distributed and world_size <= 1:
raise ConfigurationError(
"Distributed training can be performed only with more than 1 device. Check "
"`cuda_device` key in the experiment configuration."
)
self.cuda_device = int_to_device(cuda_device)
self._distributed = distributed
self._rank = local_rank
self._primary = self._rank == 0
self._world_size = world_size
def train(self) -> Dict[str, Any]:
"""
Train a model and return the results.
"""
raise NotImplementedError
def get_checkpoint_state(self) -> TrainerCheckpoint:
"""
Returns a `TrainerCheckpoint` holding the model state and the training state, where the training
state could have several internal components (e.g., an optimizer, a learning rate scheduler, etc.).
"""
raise NotImplementedError
def get_best_weights_path(self) -> Optional[str]:
"""Returns the path to file containing the current best weights."""
return None
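class _SketchTrainer(Trainer):
    # Illustrative sketch (not part of AllenNLP): `train` is the only method a
    # subclass is required to implement; serialization-directory and device
    # handling are inherited from the base class.
    def train(self) -> Dict[str, Any]:
        return {"epochs_completed": 0}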
| [
"torch.cuda.device_count"
] | 1.6.0 | alle-pawols/allennlp | 7d4a67263d7a210aca22d4f2b03e8568d3c34a48 |
1.6 | """
A suite of differentiable methods to compute the bias direction
or concept subspace representing binary protected variables.
"""
import torch
import sklearn
import numpy as np
from allennlp.common.checks import ConfigurationError
class BiasDirection:
"""
Parent class for bias direction classes.
# Parameters
requires_grad : `bool`, optional (default=`False`)
Option to enable gradient calculation.
"""
def __init__(self, requires_grad: bool = False):
self.requires_grad = requires_grad
def _normalize_bias_direction(self, bias_direction: torch.Tensor):
return bias_direction / torch.linalg.norm(bias_direction)
class PCABiasDirection(BiasDirection):
"""
PCA-based bias direction. Computes one-dimensional subspace that is the span
of a specific concept (e.g. gender) using PCA. This subspace minimizes the sum of
squared distances from all seed word embeddings.
!!! Note
It is uncommon to utilize more than one direction to represent a concept.
Implementation and terminology based on Rathore, A., Dev, S., Phillips, J.M., Srikumar,
V., Zheng, Y., Yeh, C.M., Wang, J., Zhang, W., & Wang, B. (2021).
[VERB: Visualizing and Interpreting Bias Mitigation Techniques for
Word Representations](https://api.semanticscholar.org/CorpusID:233168618).
ArXiv, abs/2104.02797.
"""
def __call__(self, seed_embeddings: torch.Tensor):
"""
# Parameters
!!! Note
In the examples below, we treat gender identity as binary, which does not accurately
characterize gender in real life.
seed_embeddings : `torch.Tensor`
A tensor of size (batch_size, ..., dim) containing seed word embeddings related to
a concept. For example, if the concept is gender, seed_embeddings could contain embeddings
for words like "man", "king", "brother", "woman", "queen", "sister", etc.
# Returns
bias_direction : `torch.Tensor`
A unit tensor of size (dim, ) representing the concept subspace.
"""
# Some sanity checks
if seed_embeddings.ndim < 2:
raise ConfigurationError("seed_embeddings1 must have at least two dimensions.")
with torch.set_grad_enabled(self.requires_grad):
# pca_lowrank centers the embeddings by default
# There will be two dimensions when applying PCA to
# definitionally-gendered words: 1) the gender direction,
# 2) all other directions, with the gender direction being principal.
_, _, V = torch.pca_lowrank(seed_embeddings, q=2)
# get top principal component
bias_direction = V[:, 0]
return self._normalize_bias_direction(bias_direction)
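def _pca_bias_direction_sketch():
    # Illustrative sketch (not part of AllenNLP): toy seed embeddings that vary
    # mostly along the first coordinate produce a bias direction close to that
    # axis (up to sign).
    seeds = torch.tensor([[2.0, 0.1], [-2.0, -0.1], [1.5, 0.0], [-1.5, 0.1]])
    return PCABiasDirection()(seeds)   # approximately [+/-1.0, ~0.0]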
class PairedPCABiasDirection(BiasDirection):
"""
Paired-PCA-based bias direction. Computes one-dimensional subspace that is the span
of a specific concept (e.g. gender) as the first principle component of the
difference vectors between seed word embedding pairs.
!!! Note
It is uncommon to utilize more than one direction to represent a concept.
Based on: T. Bolukbasi, K. W. Chang, J. Zou, V. Saligrama, and A. Kalai. [Man is to
computer programmer as woman is to homemaker? debiasing word embeddings]
(https://api.semanticscholar.org/CorpusID:1704893).
In ACM Transactions of Information Systems, 2016.
Implementation and terminology based on Rathore, A., Dev, S., Phillips, J.M., Srikumar,
V., Zheng, Y., Yeh, C.M., Wang, J., Zhang, W., & Wang, B. (2021).
[VERB: Visualizing and Interpreting Bias Mitigation Techniques for
Word Representations](https://api.semanticscholar.org/CorpusID:233168618).
ArXiv, abs/2104.02797.
"""
def __call__(self, seed_embeddings1: torch.Tensor, seed_embeddings2: torch.Tensor):
"""
# Parameters
!!! Note
In the examples below, we treat gender identity as binary, which does not accurately
characterize gender in real life.
seed_embeddings1 : `torch.Tensor`
A tensor of size (batch_size, ..., dim) containing seed word
embeddings related to a concept group. For example, if the concept is gender,
seed_embeddings1 could contain embeddings for linguistically masculine words, e.g.
"man", "king", "brother", etc.
seed_embeddings2: `torch.Tensor`
A tensor of the same size as seed_embeddings1 containing seed word
embeddings related to a different group for the same concept. For example,
seed_embeddings2 could contain embeddings for linguistically feminine words, e.g.
"woman", "queen", "sister", etc.
!!! Note
For Paired-PCA, the embeddings at the same positions in each of seed_embeddings1 and
seed_embeddings2 are expected to form seed word pairs. For example, if the concept
is gender, the embeddings for ("man", "woman"), ("king", "queen"), ("brother", "sister"), etc.
should be at the same positions in seed_embeddings1 and seed_embeddings2.
!!! Note
All tensors are expected to be on the same device.
# Returns
bias_direction : `torch.Tensor`
A unit tensor of size (dim, ) representing the concept subspace.
"""
# Some sanity checks
if seed_embeddings1.size() != seed_embeddings2.size():
raise ConfigurationError("seed_embeddings1 and seed_embeddings2 must be the same size.")
if seed_embeddings1.ndim < 2:
raise ConfigurationError(
"seed_embeddings1 and seed_embeddings2 must have at least two dimensions."
)
with torch.set_grad_enabled(self.requires_grad):
paired_embeddings = seed_embeddings1 - seed_embeddings2
_, _, V = torch.pca_lowrank(
paired_embeddings,
q=min(paired_embeddings.size(0), paired_embeddings.size(1)) - 1,
)
bias_direction = V[:, 0]
return self._normalize_bias_direction(bias_direction)
class TwoMeansBiasDirection(BiasDirection):
"""
Two-means bias direction. Computes one-dimensional subspace that is the span
of a specific concept (e.g. gender) as the normalized difference vector of the
averages of seed word embedding sets.
!!! Note
It is uncommon to utilize more than one direction to represent a concept.
Based on: Dev, S., & Phillips, J.M. (2019). [Attenuating Bias in Word Vectors]
(https://api.semanticscholar.org/CorpusID:59158788). AISTATS.
Implementation and terminology based on Rathore, A., Dev, S., Phillips, J.M., Srikumar,
V., Zheng, Y., Yeh, C.M., Wang, J., Zhang, W., & Wang, B. (2021).
[VERB: Visualizing and Interpreting Bias Mitigation Techniques for
Word Representations](https://api.semanticscholar.org/CorpusID:233168618).
ArXiv, abs/2104.02797.
"""
def __call__(self, seed_embeddings1: torch.Tensor, seed_embeddings2: torch.Tensor):
"""
# Parameters
!!! Note
In the examples below, we treat gender identity as binary, which does not accurately
characterize gender in real life.
seed_embeddings1 : `torch.Tensor`
A tensor of size (embeddings1_batch_size, ..., dim) containing seed word
embeddings related to a specific concept group. For example, if the concept is gender,
seed_embeddings1 could contain embeddings for linguistically masculine words, e.g.
"man", "king", "brother", etc.
seed_embeddings2: `torch.Tensor`
A tensor of size (embeddings2_batch_size, ..., dim) containing seed word
embeddings related to a different group for the same concept. For example,
seed_embeddings2 could contain embeddings for linguistically feminine words, e.g.
"woman", "queen", "sister", etc.
!!! Note
seed_embeddings1 and seed_embeddings2 need NOT be the same size. Furthermore,
the embeddings at the same positions in each of seed_embeddings1 and seed_embeddings2
are NOT expected to form seed word pairs.
!!! Note
All tensors are expected to be on the same device.
# Returns
bias_direction : `torch.Tensor`
A unit tensor of size (dim, ) representing the concept subspace.
"""
# Some sanity checks
if seed_embeddings1.ndim < 2 or seed_embeddings2.ndim < 2:
raise ConfigurationError(
"seed_embeddings1 and seed_embeddings2 must have at least two dimensions."
)
if seed_embeddings1.size(-1) != seed_embeddings2.size(-1):
raise ConfigurationError("All seed embeddings must have same dimensionality.")
with torch.set_grad_enabled(self.requires_grad):
seed_embeddings1_mean = torch.mean(seed_embeddings1, dim=0)
seed_embeddings2_mean = torch.mean(seed_embeddings2, dim=0)
bias_direction = seed_embeddings1_mean - seed_embeddings2_mean
return self._normalize_bias_direction(bias_direction)
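# A minimal usage sketch for the two-means direction above (hypothetical tensor
# names; the two seed tensors may have different batch sizes but must share `dim`):
#   direction = TwoMeansBiasDirection()(masc_embeddings, fem_embeddings)
# which is the unit-normalized masc_embeddings.mean(0) - fem_embeddings.mean(0).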
class ClassificationNormalBiasDirection(BiasDirection):
"""
Classification normal bias direction. Computes a one-dimensional subspace that is the span
of a specific concept (e.g. gender) as the direction perpendicular to the classification
boundary of a linear support vector machine fit to classify the two seed word embedding sets.
!!! Note
It is uncommon to utilize more than one direction to represent a concept.
Based on: Ravfogel, S., Elazar, Y., Gonen, H., Twiton, M., & Goldberg, Y. (2020).
[Null It Out: Guarding Protected Attributes by Iterative Nullspace Projection]
(https://api.semanticscholar.org/CorpusID:215786522). ArXiv, abs/2004.07667.
Implementation and terminology based on Rathore, A., Dev, S., Phillips, J.M., Srikumar,
V., Zheng, Y., Yeh, C.M., Wang, J., Zhang, W., & Wang, B. (2021).
[VERB: Visualizing and Interpreting Bias Mitigation Techniques for
Word Representations](https://api.semanticscholar.org/CorpusID:233168618).
ArXiv, abs/2104.02797.
"""
def __init__(self):
super().__init__()
def __call__(self, seed_embeddings1: torch.Tensor, seed_embeddings2: torch.Tensor):
"""
# Parameters
!!! Note
In the examples below, we treat gender identity as binary, which does not accurately
characterize gender in real life.
seed_embeddings1 : `torch.Tensor`
A tensor of size (embeddings1_batch_size, ..., dim) containing seed word
embeddings related to a specific concept group. For example, if the concept is gender,
seed_embeddings1 could contain embeddings for linguistically masculine words, e.g.
"man", "king", "brother", etc.
seed_embeddings2: `torch.Tensor`
A tensor of size (embeddings2_batch_size, ..., dim) containing seed word
embeddings related to a different group for the same concept. For example,
seed_embeddings2 could contain embeddings for linguistically feminine words, e.g.
"woman", "queen", "sister", etc.
!!! Note
seed_embeddings1 and seed_embeddings2 need NOT be the same size. Furthermore,
the embeddings at the same positions in each of seed_embeddings1 and seed_embeddings2
are NOT expected to form seed word pairs.
!!! Note
All tensors are expected to be on the same device.
!!! Note
This bias direction method is NOT differentiable.
# Returns
bias_direction : `torch.Tensor`
A unit tensor of size (dim, ) representing the concept subspace.
"""
# Some sanity checks
if seed_embeddings1.ndim < 2 or seed_embeddings2.ndim < 2:
raise ConfigurationError(
"seed_embeddings1 and seed_embeddings2 must have at least two dimensions."
)
if seed_embeddings1.size(-1) != seed_embeddings2.size(-1):
raise ConfigurationError("All seed embeddings must have same dimensionality.")
device = seed_embeddings1.device
seed_embeddings1 = seed_embeddings1.flatten(end_dim=-2).detach().cpu().numpy()
seed_embeddings2 = seed_embeddings2.flatten(end_dim=-2).detach().cpu().numpy()
X = np.vstack([seed_embeddings1, seed_embeddings2])
Y = np.concatenate([[0] * seed_embeddings1.shape[0], [1] * seed_embeddings2.shape[0]])
classifier = sklearn.svm.SVC(kernel="linear").fit(X, Y)
bias_direction = torch.Tensor(classifier.coef_[0]).to(device)
return self._normalize_bias_direction(bias_direction)
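# A minimal usage sketch for the classification-normal direction above (hypothetical
# tensor names; note this method is not differentiable because it round-trips
# through scikit-learn's SVC):
#   direction = ClassificationNormalBiasDirection()(masc_embeddings, fem_embeddings)
# The returned unit vector is the normal (classifier.coef_[0]) of the linear SVM
# boundary separating the two seed embedding sets.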
| [
"torch.pca_lowrank",
"torch.linalg.norm",
"torch.mean",
"torch.Tensor",
"torch.set_grad_enabled"
] | 1.6.0 | alle-pawols/allennlp | 7d4a67263d7a210aca22d4f2b03e8568d3c34a48 |
1.5 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import argparse
import logging
import numpy as np
import torch
import pyro
import pyro.distributions as dist
from pyro.contrib.examples.bart import load_bart_od
from pyro.contrib.forecast import ForecastingModel, backtest
from pyro.ops.tensor_utils import periodic_cumsum, periodic_repeat
logging.getLogger("pyro").setLevel(logging.DEBUG)
logging.getLogger("pyro").handlers[0].setLevel(logging.DEBUG)
def preprocess(args):
"""
Extract a tensor of (arrivals, departures) to Embarcadero station.
"""
print("Loading data")
dataset = load_bart_od()
# The full dataset has all station->station ridership counts for all 50
# train stations. In this simple example we will model only the aggregate
# counts to and from a single station, Embarcadero.
i = dataset["stations"].index("EMBR")
arrivals = dataset["counts"][:, :, i].sum(-1)
departures = dataset["counts"][:, i, :].sum(-1)
data = torch.stack([arrivals, departures], dim=-1)
# This simple example uses no covariates, so we will construct a
# zero-element tensor of the correct length as empty covariates.
covariates = torch.zeros(len(data), 0)
return data, covariates
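# Illustrative shapes (assuming the hourly BART origin-destination counts used by
# this example): `data` is a (T, 2) tensor of hourly (arrivals, departures) counts
# and `covariates` is an empty (T, 0) tensor, where T is the number of hours.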
# We define a model by subclassing the ForecastingModel class and implementing
# a single .model() method.
class Model(ForecastingModel):
# The .model() method inputs two tensors: a fake tensor zero_data that is
# the same size and dtype as the real data (but of course the generative
# model shouldn't depend on the value of the data it generates!), and a
# tensor of covariates. Our simple model depends on no covariates, so we
# simply pass in an empty tensor (see the preprocess() function above).
def model(self, zero_data, covariates):
period = 24 * 7
duration, dim = zero_data.shape[-2:]
assert dim == 2 # Data is bivariate: (arrivals, departures).
# Sample global parameters.
noise_scale = pyro.sample("noise_scale",
dist.LogNormal(torch.full((dim,), -3), 1).to_event(1))
assert noise_scale.shape[-1:] == (dim,)
trans_timescale = pyro.sample("trans_timescale",
dist.LogNormal(torch.zeros(dim), 1).to_event(1))
assert trans_timescale.shape[-1:] == (dim,)
trans_loc = pyro.sample("trans_loc", dist.Cauchy(0, 1 / period))
trans_loc = trans_loc.unsqueeze(-1).expand(trans_loc.shape + (dim,))
assert trans_loc.shape[-1:] == (dim,)
trans_scale = pyro.sample("trans_scale",
dist.LogNormal(torch.zeros(dim), 0.1).to_event(1))
trans_corr = pyro.sample("trans_corr",
dist.LKJCorrCholesky(dim, torch.ones(())))
trans_scale_tril = trans_scale.unsqueeze(-1) * trans_corr
assert trans_scale_tril.shape[-2:] == (dim, dim)
obs_scale = pyro.sample("obs_scale",
dist.LogNormal(torch.zeros(dim), 0.1).to_event(1))
obs_corr = pyro.sample("obs_corr",
dist.LKJCorrCholesky(dim, torch.ones(())))
obs_scale_tril = obs_scale.unsqueeze(-1) * obs_corr
assert obs_scale_tril.shape[-2:] == (dim, dim)
# Note the initial seasonality should be sampled in a plate with the
# same dim as the time_plate, dim=-1. That way we can repeat it along the
# time dim below using periodic_repeat().
with pyro.plate("season_plate", period, dim=-1):
season_init = pyro.sample("season_init",
dist.Normal(torch.zeros(dim), 1).to_event(1))
assert season_init.shape[-2:] == (period, dim)
# Sample independent noise at each time step.
with self.time_plate:
season_noise = pyro.sample("season_noise",
dist.Normal(0, noise_scale).to_event(1))
assert season_noise.shape[-2:] == (duration, dim)
# Construct a prediction. This prediction has an exactly repeated
# seasonal part plus slow seasonal drift. We use two deterministic,
# linear functions to transform our diagonal Normal noise to nontrivial
# samples from a Gaussian process.
prediction = (periodic_repeat(season_init, duration, dim=-2) +
periodic_cumsum(season_noise, period, dim=-2))
assert prediction.shape[-2:] == (duration, dim)
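# Worked sketch of the two seasonal pieces above (shapes are illustrative):
# season_init has shape (period, dim) = (168, 2); periodic_repeat(..., duration, dim=-2)
# tiles that weekly pattern along time to shape (duration, dim). season_noise has shape
# (duration, dim); periodic_cumsum(..., period, dim=-2) accumulates noise separately for
# each hour-of-week position, so every seasonal slot drifts as its own random walk.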
# Construct a joint noise model. This model is a GaussianHMM, whose
# .rsample() and .log_prob() methods are parallelized over time; thus
# this entire model is parallelized over time.
init_dist = dist.Normal(torch.zeros(dim), 100).to_event(1)
trans_mat = trans_timescale.neg().exp().diag_embed()
trans_dist = dist.MultivariateNormal(trans_loc, scale_tril=trans_scale_tril)
obs_mat = torch.eye(dim)
obs_dist = dist.MultivariateNormal(torch.zeros(dim), scale_tril=obs_scale_tril)
noise_model = dist.GaussianHMM(init_dist, trans_mat, trans_dist, obs_mat, obs_dist,
duration=duration)
assert noise_model.event_shape == (duration, dim)
# The final statement registers our noise model and prediction.
self.predict(noise_model, prediction)
def main(args):
pyro.enable_validation(__debug__)
data, covariates = preprocess(args)
# We will model positive count data by log1p-transforming it into
# real-valued data. But since we want to evaluate back in the count domain, we
# will also define a transform to apply during evaluation, transforming
# from real back to count-valued data. Truth is mapped by the log1p()
# inverse expm1(), but the prediction will be sampled from a Poisson
# distribution.
data = data.log1p()
def transform(pred, truth):
pred = torch.poisson(pred.clamp(min=1e-4).expm1())
truth = truth.expm1()
return pred, truth
# The backtest() function automatically trains and evaluates our model on
# different windows of data.
forecaster_options = {
"num_steps": args.num_steps,
"learning_rate": args.learning_rate,
"log_every": args.log_every,
"dct_gradients": args.dct,
}
metrics = backtest(data, covariates, Model,
train_window=args.train_window,
test_window=args.test_window,
stride=args.stride,
num_samples=args.num_samples,
forecaster_options=forecaster_options)
for name in ["mae", "rmse", "crps"]:
values = [m[name] for m in metrics]
mean = np.mean(values)
std = np.std(values)
print("{} = {:0.3g} +- {:0.3g}".format(name, mean, std))
return metrics
if __name__ == "__main__":
assert pyro.__version__.startswith('1.4.0')
parser = argparse.ArgumentParser(description="Bart Ridership Forecasting Example")
parser.add_argument("--train-window", default=2160, type=int)
parser.add_argument("--test-window", default=336, type=int)
parser.add_argument("--stride", default=168, type=int)
parser.add_argument("-n", "--num-steps", default=501, type=int)
parser.add_argument("-lr", "--learning-rate", default=0.05, type=float)
parser.add_argument("--dct", action="store_true")
parser.add_argument("--num-samples", default=100, type=int)
parser.add_argument("--log-every", default=50, type=int)
parser.add_argument("--seed", default=1234567890, type=int)
args = parser.parse_args()
main(args)
| [
"torch.zeros",
"torch.stack",
"torch.ones",
"torch.full",
"torch.eye"
] | 1.5.0 | ciguaran/pyro | 11a96cde05756def826c232d76f9cff66f6e6d4f |
1.8 | import json
import logging
from pathlib import Path
import random
import tarfile
import tempfile
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas_path
from PIL import Image
import torch
import fasttext
from torch.nn.utils.rnn import pad_sequence
from torchvision.transforms import transforms
from transformers import BertTokenizer
class HatefulMemesDataset(torch.utils.data.Dataset):
"""Uses jsonl data to preprocess and serve
dictionaries of multimodal tensors for model input.
"""
def __init__(
self,
data_path,
img_dir,
text_embedding_model,
text_embedding_type="fasttext",
balance=False,
num_labeled=None,
random_state=0,
):
assert text_embedding_type in ["fasttext", "bert"]
self.samples_frame = pd.read_json(data_path, lines=True)
self.num_labeled = num_labeled
if balance:
neg = self.samples_frame[self.samples_frame.label.eq(0)]
pos = self.samples_frame[self.samples_frame.label.eq(1)]
self.samples_frame = pd.concat(
[neg.sample(pos.shape[0], random_state=random_state), pos]
)
if self.num_labeled:
if self.samples_frame.shape[0] > int(self.num_labeled):
self.samples_frame = self.samples_frame.sample(
num_labeled, random_state=random_state
)
self.samples_frame = self.samples_frame.reset_index(drop=True)
self.samples_frame.img = self.samples_frame.apply(
lambda row: (Path(img_dir) / row.img), axis=1
)
self.image_transform = transforms.Compose(
[
transforms.Resize(size=(224, 224)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
self.text_embedding_type = text_embedding_type
if self.text_embedding_type == "fasttext":
self.text_transform = fasttext.load_model(text_embedding_model)
elif self.text_embedding_type == "bert":
self.text_transform = BertTokenizer.from_pretrained(text_embedding_model)
# print(self.samples_frame.img)
# # https://github.com/drivendataorg/pandas-path
# if not self.samples_frame.img.path.exists().all():
# raise FileNotFoundError
# if not self.samples_frame.img.path.is_file().all():
# raise TypeError
def __len__(self):
"""This method is called when you do len(instance)
for an instance of this class.
"""
return len(self.samples_frame)
def __getitem__(self, idx):
"""This method is called when you do instance[key]
for an instance of this class.
"""
if torch.is_tensor(idx):
idx = idx.tolist()
img_id = self.samples_frame.loc[idx, "id"]
image = Image.open(self.samples_frame.loc[idx, "img"]).convert("RGB")
image = self.image_transform(image)
text = self.transform_text(self.samples_frame.loc[idx, "text"])
if "label" in self.samples_frame.columns:
label = (
torch.Tensor([self.samples_frame.loc[idx, "label"]]).long().squeeze()
)
sample = {"id": img_id, "image": image, "text": text, "label": label}
else:
sample = {"id": img_id, "image": image, "text": text}
return sample
def transform_text(self, text_input):
if self.text_embedding_type == "fasttext":
return torch.Tensor(
self.text_transform.get_sentence_vector(text_input)
).squeeze()
else:
tokenized_text = self.text_transform(
text_input,
return_tensors="pt",
return_attention_mask=False,
return_token_type_ids=False,
)
return tokenized_text["input_ids"].squeeze()
def collate(batch):
img_tensor = pad_sequence([i["image"] for i in batch], batch_first=True)
text_tensor = pad_sequence([i["text"] for i in batch], batch_first=True)
label_tensor = torch.LongTensor([i["label"] for i in batch])
return img_tensor, text_tensor, label_tensor
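# Minimal usage sketch (hypothetical paths; assumes the Hateful Memes jsonl/img layout
# and a local fastText .bin model are available):
#   dataset = HatefulMemesDataset(
#       data_path="data/train.jsonl",
#       img_dir="data",
#       text_embedding_model="wiki.en.bin",
#       text_embedding_type="fasttext",
#   )
#   loader = torch.utils.data.DataLoader(dataset, batch_size=32, collate_fn=collate)
# Each batch is then a tuple of (image_tensor, text_tensor, label_tensor).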
| [
"torch.is_tensor",
"torch.nn.utils.rnn.pad_sequence",
"torch.LongTensor",
"torch.Tensor"
] | 1.8.1 | nav13n/multimodal-learning | 283d09989ab5e547acc881547276e03e00c0ff39 |
1.4 | from typing import List
import torch
from code_transformer.modeling.constants import SEP_TOKEN, CLS_TOKEN, MAX_NUM_TOKENS
from code_transformer.modeling.data_utils import sample_targets, permutation_attention_mask
from code_transformer.preprocessing.datamanager.base import CTBatch
from code_transformer.preprocessing.datamanager.preprocessed import CTPreprocessedDataManager
from code_transformer.preprocessing.dataset.base import CTBaseDataset, CTBaseSample
from code_transformer.preprocessing.nlp.tokenization import get_idx_no_punctuation
from code_transformer.utils.data import pad_list
from code_transformer.utils.vocab import decode_tokens
class CTLanguageModelingDataset(CTBaseDataset):
"""
Dataset implementation to be used together with PyTorch's dataloader for general language modeling.
Transforms the samples into torch tensors that fit into TransformerLanguageModel.
Shuffling is not directly provided by the dataset but by the underlying data manager.
"""
def __init__(self, data_manager: CTPreprocessedDataManager, token_distances=None, max_distance_mask=None,
num_sub_tokens=5, num_labels_per_sample=5, max_num_tokens=MAX_NUM_TOKENS, use_pointer_network=False):
"""
:param num_labels_per_sample: the number of tokens per sample to be predicted
"""
super(CTLanguageModelingDataset, self).__init__(data_manager, token_distances, max_distance_mask,
num_sub_tokens, max_num_tokens=max_num_tokens,
use_pointer_network=use_pointer_network)
self.num_labels_per_sample = num_labels_per_sample
def collate_fn(self, batch: List) -> CTBatch:
"""
Combines the given list of samples into a batch, taking care of correctly padding every tensor.
Implements dynamic padding, i.e., sequences (and thus distance matrices) are padded to the length of the longest
sequence in the batch rather than to a fixed global length.
"""
batch = super(CTLanguageModelingDataset, self).collate_fn(batch)
seq_lengths = batch.sequence_lengths
seq_tensors = batch.tokens
max_distance_masks = batch.max_distance_mask
padding_mask = batch.pad_mask
max_seq_length = seq_tensors.shape[1]
target_mapping, target_mapping_per_token = sample_targets(num_predict=self.num_labels_per_sample,
seq_len=max_seq_length,
batch_size=batch.tokens.shape[0],
pad_mask=padding_mask)
perm = permutation_attention_mask(seq_tensors, target_mapping_per_token,
max_seq_length, max_seq_length, sep_id=self.word_vocab.vocabulary[SEP_TOKEN],
cls_id=self.word_vocab.vocabulary[CLS_TOKEN])
perm_mask = perm[0].long()
perm_mask |= max_distance_masks # Merge max distance attention mask with regular attention mask
label_selected = seq_tensors.unsqueeze(-1) * target_mapping.transpose(1, 2).unsqueeze(2)
labels = label_selected.max(1)[0].transpose(1, 2).long().contiguous()
extended_vocabulary_ids = batch.extended_vocabulary_ids
if self.use_pointer_network:
extended_vocabulary_ids = []
for idx_sample in range(batch.tokens.shape[0]):
idx_func_tokens = torch.where(target_mapping_per_token[idx_sample] == 1)[0]
current_pos = 0
idx_func_sub_tokens = []
for j, mask in enumerate(batch.pointer_pad_mask[idx_sample]):
n_sub_tokens = mask.sum().item()
if j in idx_func_tokens:
idx_func_sub_tokens.extend(range(current_pos, current_pos + n_sub_tokens))
current_pos += n_sub_tokens
extended_vocabulary_ids.append([v_id.item() for j, v_id in enumerate(batch.extended_vocabulary_ids[idx_sample]) if j not in idx_func_sub_tokens and v_id.item() != self.sequence_pad_value])
batch.pointer_pad_mask[idx_sample][idx_func_tokens] = False
assert len(extended_vocabulary_ids[-1]) == batch.pointer_pad_mask[idx_sample].sum().item(), "number of sub tokens in extended_vocabulary_ids does not match number of non-masked pointer sub tokens"
seq_len_subtokens = max([len(evi) for evi in extended_vocabulary_ids])
extended_vocabulary_ids = torch.tensor([
pad_list(evi, seq_len_subtokens, self.sequence_pad_value) for evi in extended_vocabulary_ids])
return CTBatch(tokens=seq_tensors, token_types=batch.token_types, node_types=batch.node_types,
relative_distances=batch.relative_distances, distance_names=batch.distance_names,
sequence_lengths=seq_lengths, pad_mask=batch.pad_mask, labels=labels,
perm_mask=perm_mask, target_mapping=target_mapping,
target_mapping_per_token=target_mapping_per_token,
extended_vocabulary=batch.extended_vocabulary,
extended_vocabulary_ids=extended_vocabulary_ids,
pointer_pad_mask=batch.pointer_pad_mask, languages=batch.languages)
class CTLanguageModelingDatasetNoPunctuation(CTLanguageModelingDataset):
"""
Filters each sample to remove punctuation tokens like .,(): etc. as well as [INDENT]/[DEDENT] tokens.
The idea is that for the code summarization task, these tokens are hardly important but instead elongate the token
sequence unnecessarily.
"""
def __init__(self, data_manager: CTPreprocessedDataManager, token_distances=None, max_distance_mask=None,
num_sub_tokens=5, num_labels_per_sample=5, min_sequence_length=5, max_num_tokens=MAX_NUM_TOKENS,
use_pointer_network=False):
super(CTLanguageModelingDatasetNoPunctuation, self).__init__(data_manager, token_distances=token_distances,
max_distance_mask=max_distance_mask,
num_sub_tokens=num_sub_tokens,
num_labels_per_sample=num_labels_per_sample,
max_num_tokens=None,
use_pointer_network=use_pointer_network)
self.config = data_manager.load_config()
self.min_sequence_length = min_sequence_length
self.max_num_tokens_no_punctuation = max_num_tokens
def __next__(self):
sample = super(CTLanguageModelingDatasetNoPunctuation, self).__next__()
# Calculate indices of tokens that should be kept, i.e., are tokens like identifiers or types
decoded_tokens = decode_tokens(sample.tokens, word_vocab=self.word_vocab, config=self.config)
idx = get_idx_no_punctuation(decoded_tokens)
if len(idx) > self.max_num_tokens_no_punctuation + 1:
return self.__next__()
# For the distance matrices, token sequence, token and node types, only indices corresponding to non punctuation
# tokens are kept
distance_matrices_no_punctuation = []
for dist_matrix in sample.distance_matrices:
distance_matrices_no_punctuation.append(dist_matrix[idx][:, idx])
node_types_no_punctuation = sample.node_types[idx]
token_types_no_punctuation = sample.token_types[idx]
tokens_no_punctuation = sample.tokens[idx]
if len(tokens_no_punctuation) < self.num_labels_per_sample \
or len(tokens_no_punctuation) < self.min_sequence_length:
return next(self)
pointer_pad_mask = sample.pointer_pad_mask
extended_vocabulary_ids = sample.extended_vocabulary_ids
if self.use_pointer_network:
# Also remove punctuation tokens from extended_vocabulary_ids and pointer_pad_mask
idx_sub_tokens = []
current_sub_token = 0
for i, mask in enumerate(sample.pointer_pad_mask):
n_sub_tokens = mask.sum()
if i in idx:
idx_sub_tokens.extend(range(current_sub_token, current_sub_token + n_sub_tokens))
current_sub_token += n_sub_tokens
pointer_pad_mask = sample.pointer_pad_mask[idx]
extended_vocabulary_ids = [sample.extended_vocabulary_ids[i] for i in idx_sub_tokens]
assert pointer_pad_mask.sum() == len(extended_vocabulary_ids), \
f"Number of non-masked subtokens ({pointer_pad_mask.sum().item()}) does not match number of extended vocabulary ids ({len(extended_vocabulary_ids)})"
return CTBaseSample(tokens_no_punctuation, token_types_no_punctuation, node_types_no_punctuation,
distance_matrices_no_punctuation, sample.binning_vectors, sample.distance_names,
sample.func_name, sample.docstring, sample.extended_vocabulary,
extended_vocabulary_ids, pointer_pad_mask, sample.language)
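# Minimal usage sketch (hypothetical setup; constructing a CTPreprocessedDataManager
# depends on the stage-2 preprocessing pipeline and is only indicated here):
#   dataset = CTLanguageModelingDataset(data_manager, num_labels_per_sample=5)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=8, collate_fn=dataset.collate_fn)
# collate_fn performs the dynamic padding and target sampling described above.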
| [
"torch.where"
] | 1.4 | maximzubkov/code-transformer | 52600ab17d05a238f35c39a78b22c5c706fbb13c |
1.4 | import random
import signal
import sys
from abc import abstractmethod
from itertools import islice
from statistics import mean
import torch
from sacred import Experiment
from torch import optim
from torch.utils.data import DataLoader
from tqdm import tqdm
from code_transformer.configuration.transformer_lm_encoder import TransformerLMEncoderConfig
from code_transformer.experiments.log import ExperimentLogger, TensorboardLogger
from code_transformer.modeling.constants import PAD_TOKEN, UNKNOWN_TOKEN, EOS_TOKEN, NUM_SUB_TOKENS
from code_transformer.modeling.modelmanager import ModelManager
from code_transformer.modeling.modelmanager.code_transformer import CodeTransformerModelManager, \
CodeTransformerLMModelManager
from code_transformer.preprocessing.datamanager.base import batch_filter_distances, batch_to_device, \
DataLoaderWrapper, BufferedDataManager
from code_transformer.preprocessing.datamanager.preprocessed import CTBufferedDataManager
from code_transformer.preprocessing.dataset.lm import CTLanguageModelingDataset, \
CTLanguageModelingDatasetNoPunctuation
from code_transformer.preprocessing.graph.binning import ExponentialBinning, EqualBinning
from code_transformer.preprocessing.graph.distances import DistanceBinning
from code_transformer.preprocessing.graph.transform import MaxDistanceMaskTransform, TokenDistancesTransform
from code_transformer.utils.metrics import top1_accuracy, topk_accuracy, precision, recall, f1_score, \
non_trivial_words_accuracy, micro_f1_score, rouge_2, rouge_l
from code_transformer.utils.timing import Timing
from code_transformer.env import MODELS_SAVE_PATH, LOGS_PATH, DATA_PATH_STAGE_2
ex = Experiment(base_dir='../../', interactive=False)
class ExperimentSetup:
def __init__(self):
self._init_config()
self._init_data_transforms()
self._init_data()
self._init_transfer_learning()
self._init_model()
self._init_optimizer()
@ex.capture
def _init_config(self, _config):
self.config = _config
@ex.capture(prefix="data_transforms")
def _init_data_transforms(self, max_distance_mask, relative_distances, distance_binning):
self.max_distance_mask = None if max_distance_mask is None else MaxDistanceMaskTransform(max_distance_mask)
self.relative_distances = [] if relative_distances is None else relative_distances
if distance_binning['type'] == 'exponential':
trans_func = ExponentialBinning(distance_binning['growth_factor'])
else:
trans_func = EqualBinning()
self.distance_binning = {
'n_fixed_bins': distance_binning['n_fixed_bins'],
'trans_func': trans_func
}
@ex.capture(prefix="data_setup")
def _init_data(self, language, num_predict, use_validation=False, mini_dataset=False,
use_no_punctuation=False, use_pointer_network=False, sort_by_length=False, shuffle=True,
chunk_size=None, filter_language=None, dataset_imbalance=None, num_sub_tokens=NUM_SUB_TOKENS):
self.data_manager = CTBufferedDataManager(DATA_PATH_STAGE_2, language, shuffle=shuffle,
infinite_loading=True,
mini_dataset=mini_dataset, size_load_buffer=10000,
sort_by_length=sort_by_length, chunk_size=chunk_size,
filter_language=filter_language, dataset_imbalance=dataset_imbalance)
self.word_vocab, self.token_type_vocab, self.node_type_vocab = self.data_manager.load_vocabularies()
token_distances = None
if TokenDistancesTransform.name in self.relative_distances:
num_bins = self.data_manager.load_config()['binning']['num_bins']
token_distances = TokenDistancesTransform(
DistanceBinning(num_bins, self.distance_binning['n_fixed_bins'], self.distance_binning['trans_func']))
self.num_predict = num_predict
self.use_pointer_network = use_pointer_network
self.use_separate_vocab = False # For language modeling we always only operate on the method body vocabulary
if use_no_punctuation:
self.dataset_train = CTLanguageModelingDatasetNoPunctuation(self.data_manager,
token_distances=token_distances,
max_distance_mask=self.max_distance_mask,
num_labels_per_sample=num_predict,
use_pointer_network=use_pointer_network,
num_sub_tokens=num_sub_tokens)
else:
self.dataset_train = CTLanguageModelingDataset(self.data_manager, token_distances=token_distances,
max_distance_mask=self.max_distance_mask,
num_labels_per_sample=num_predict,
use_pointer_network=use_pointer_network,
num_sub_tokens=num_sub_tokens)
self.use_validation = use_validation
if self.use_validation:
data_manager_validation = CTBufferedDataManager(DATA_PATH_STAGE_2, language, partition="valid",
shuffle=True, infinite_loading=True,
mini_dataset=mini_dataset, size_load_buffer=10000,
filter_language=filter_language,
dataset_imbalance=dataset_imbalance)
if use_no_punctuation:
self.dataset_validation = CTLanguageModelingDatasetNoPunctuation(data_manager_validation,
token_distances=token_distances,
max_distance_mask=self.max_distance_mask,
num_labels_per_sample=num_predict,
use_pointer_network=use_pointer_network,
num_sub_tokens=num_sub_tokens)
else:
self.dataset_validation = CTLanguageModelingDataset(data_manager_validation,
token_distances=token_distances,
max_distance_mask=self.max_distance_mask,
num_labels_per_sample=num_predict,
use_pointer_network=use_pointer_network,
num_sub_tokens=num_sub_tokens)
self.dataset_validation_creator = \
lambda infinite_loading: self._create_validation_dataset(DATA_PATH_STAGE_2,
language,
use_no_punctuation,
token_distances,
infinite_loading,
num_predict,
use_pointer_network,
filter_language,
dataset_imbalance,
num_sub_tokens)
def _create_validation_dataset(self, data_location, language, use_no_punctuation, token_distances,
infinite_loading, num_predict, use_pointer_network, filter_language,
dataset_imbalance, num_sub_tokens):
data_manager_validation = CTBufferedDataManager(data_location, language, partition="valid",
shuffle=True, infinite_loading=infinite_loading,
size_load_buffer=10000, filter_language=filter_language,
dataset_imbalance=dataset_imbalance)
if use_no_punctuation:
return CTLanguageModelingDatasetNoPunctuation(data_manager_validation,
token_distances=token_distances,
max_distance_mask=self.max_distance_mask,
num_labels_per_sample=num_predict,
use_pointer_network=use_pointer_network,
num_sub_tokens=num_sub_tokens)
else:
return CTLanguageModelingDataset(data_manager_validation,
token_distances=token_distances,
max_distance_mask=self.max_distance_mask,
num_labels_per_sample=num_predict,
use_pointer_network=use_pointer_network,
num_sub_tokens=num_sub_tokens)
@ex.capture(prefix="transfer_learning")
def _init_transfer_learning(self, use_pretrained_model=False, model_type=None, run_id=None,
snapshot_iteration=None, cpu=False, freeze_encoder_layers=None):
assert not use_pretrained_model or (
run_id is not None
and snapshot_iteration is not None
and model_type is not None), "model_type, run_id and snapshot_iteration have to be provided if " \
"use_pretrained_model is set"
self.use_pretrained_model = use_pretrained_model
if use_pretrained_model:
print(
f"Using Transfer Learning. Loading snapshot snapshot-{snapshot_iteration} from run {run_id} in collection "
f"{model_type} ")
if model_type == 'ct_code_summarization':
model_manager = CodeTransformerModelManager()
pretrained_model = model_manager.load_model(run_id, snapshot_iteration, gpu=not cpu)
self.pretrained_model = pretrained_model
elif model_type == 'ct_lm':
model_manager = CodeTransformerLMModelManager()
pretrained_model = model_manager.load_model(run_id, snapshot_iteration, gpu=not cpu)
self.pretrained_model = pretrained_model
else:
model_manager = ModelManager(MODELS_SAVE_PATH, model_type)
self.pretrained_model_params = model_manager.load_parameters(run_id, snapshot_iteration, gpu=not cpu)
encoder_config = model_manager.load_config(run_id)['model']['transformer_lm_encoder']
self.pretrained_transformer_encoder_config = TransformerLMEncoderConfig(**encoder_config)
if freeze_encoder_layers is not None:
self.freeze_encoder_layers = freeze_encoder_layers
def generate_transformer_lm_encoder_config(self, transformer_lm_encoder: dict) -> TransformerLMEncoderConfig:
config = TransformerLMEncoderConfig(**transformer_lm_encoder)
if self.use_pretrained_model:
loaded_config = self.pretrained_transformer_encoder_config
if not config == self.pretrained_transformer_encoder_config:
print(f"pretrained configuration differs from given configuration. Pretrained: "
f"{self.pretrained_transformer_encoder_config}, Given: {config}. Try merging...")
loaded_config.input_nonlinearity = config.input_nonlinearity
loaded_config.transformer['encoder_layer']['dropout'] = config.transformer['encoder_layer']['dropout']
loaded_config.transformer['encoder_layer']['activation'] \
= config.transformer['encoder_layer']['activation']
config = loaded_config
transformer_config = dict(config.transformer)
if hasattr(self, "word_vocab"):
config.vocab_size = len(self.word_vocab)
if hasattr(self, "token_type_vocab"):
if hasattr(self, "use_only_ast") and self.use_only_ast:
config.num_token_types = None
else:
config.num_token_types = len(self.token_type_vocab)
if hasattr(self, "node_type_vocab"):
config.num_node_types = len(self.node_type_vocab)
if hasattr(self, "relative_distances"):
encoder_layer_config = dict(transformer_config['encoder_layer'])
encoder_layer_config['num_relative_distances'] = len(self.relative_distances)
transformer_config['encoder_layer'] = encoder_layer_config
if hasattr(self, "num_sub_tokens"):
config.subtokens_per_token = self.num_sub_tokens
if hasattr(self, 'num_languages'):
config.num_languages = self.num_languages
config.transformer = transformer_config
return config
@abstractmethod
def _init_model(self, *args, **kwargs):
self.model_lm = None
self.with_cuda = True
self.model_manager = None
@ex.capture(prefix="optimizer")
def _init_optimizer(self, learning_rate, reg_scale, scheduler=None, scheduler_params=None, optimizer="Adam"):
if optimizer == 'Adam':
self.optimizer = optim.Adam(self.model_lm.parameters(), lr=learning_rate, weight_decay=reg_scale)
elif optimizer == 'Momentum':
self.optimizer = optim.SGD(self.model_lm.parameters(), lr=learning_rate, weight_decay=reg_scale,
momentum=0.95, nesterov=True)
self.scheduler = None
if scheduler == 'OneCycleLR':
self.scheduler = optim.lr_scheduler.OneCycleLR(self.optimizer, **scheduler_params)
elif scheduler == 'MultiStepLR':
self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer, **scheduler_params)
def _init_metrics(self, metrics):
self.metrics = dict()
pad_id = self.word_vocab[PAD_TOKEN]
unk_id = self.word_vocab[UNKNOWN_TOKEN]
for metric in metrics:
if metric == 'top1_accuracy':
self.metrics[metric] = top1_accuracy
self.metrics[f"{metric}_no_unk"] = lambda logits, labels: top1_accuracy(logits, labels,
unk_id=unk_id, pad_id=pad_id)
elif metric == 'top5_accuracy':
self.metrics[metric] = lambda logits, labels: topk_accuracy(5, logits, labels)
self.metrics[f"{metric}_no_unk"] = lambda logits, labels: topk_accuracy(5, logits, labels,
unk_id=unk_id, pad_id=pad_id)
elif metric == 'precision':
self.metrics[metric] = lambda logits, labels: precision(logits, labels, pad_id=pad_id)
self.metrics[f"{metric}_no_unk"] = lambda logits, labels: precision(logits, labels, pad_id=pad_id,
unk_id=unk_id)
elif metric == 'recall':
self.metrics[metric] = lambda logits, labels: recall(logits, labels, pad_id=pad_id)
self.metrics[f"{metric}_no_unk"] = lambda logits, labels: recall(logits, labels, pad_id=pad_id,
unk_id=unk_id)
elif metric == 'f1_score':
self.metrics[metric] = lambda logits, labels: f1_score(logits, labels, pad_id=pad_id)
self.metrics[f"{metric}_no_unk"] = lambda logits, labels: f1_score(logits, labels, pad_id=pad_id,
unk_id=unk_id)
elif metric == 'non_trivial_accuracy':
self.metrics[metric] = lambda logits, labels: non_trivial_words_accuracy(logits, labels, pad_id)
self.metrics[f"{metric}_no_unk"] = lambda logits, labels: non_trivial_words_accuracy(logits, labels,
pad_id,
unk_id=unk_id)
elif metric == 'micro_f1_score':
self.metrics[metric] = lambda logits, labels: micro_f1_score(logits, labels, pad_id=pad_id,
unk_id=unk_id)
elif metric == 'rouge_2':
self.metrics[metric] = lambda logits, labels: rouge_2(logits, labels, pad_id=pad_id)
elif metric == 'rouge_l':
self.metrics[metric] = lambda logits, labels: rouge_l(logits, labels, pad_id=pad_id)
@ex.capture(prefix="training")
def train(self, batch_size, simulated_batch_size, random_seed, metrics,
validate_every=None,
persistent_snapshot_every=None, simulated_batch_size_valid=None, early_stopping_patience=10,
max_validation_samples=10000, accumulate_tokens_batch=False):
if self.with_cuda:
self.model_lm = self.model_lm.cuda()
self.device = "cuda"
else:
self.device = "cpu"
run_id = self.model_manager.generate_run_name()
self.logger = ExperimentLogger("experiment",
TensorboardLogger(f"{LOGS_PATH}/{self.model_manager.model_type}/{run_id}"))
self.logger.info(f"===============================================")
self.logger.info(f"Starting run {run_id}")
self.logger.info(f"===============================================")
self.model_manager.save_config(run_id, self.config)
early_stopping = EarlyStopping(self.model_manager, run_id, early_stopping_patience)
num_params = sum([len(params.view(-1)) for params in self.model_lm.parameters()])
self.logger.info(f"Start training model with {num_params} parameters")
self.logger.info(f"Model setup: {self.model_lm}")
self._init_metrics(metrics)
torch.manual_seed(random_seed)
random.seed(random_seed)
# Simulated batches
simulated_batch_size = batch_size if simulated_batch_size is None else simulated_batch_size
assert simulated_batch_size % batch_size == 0, "simulated_batch_size must be a multiple of batch_size"
num_simulated_batches = simulated_batch_size // batch_size
# Main train loop
train_step = 0
dataloader = DataLoader(self.dataset_train, batch_size=batch_size, collate_fn=self.dataset_train.collate_fn)
if self.use_validation:
if simulated_batch_size_valid is None:
simulated_batch_size_valid = simulated_batch_size
num_simulated_batches_valid = simulated_batch_size_valid // batch_size
dataloader_validation = iter(DataLoader(self.dataset_validation, batch_size=batch_size,
collate_fn=self.dataset_validation.collate_fn))
n_tokens_accumulate_batch = None
if accumulate_tokens_batch:
n_tokens_accumulate_batch = 0
epoch = 1
progress_bar = tqdm(total=int(self.data_manager.approximate_total_samples() / batch_size))
progress_bar.set_description(f"Epoch {epoch}")
# Ensure graceful shutdown when training is interrupted
signal.signal(signal.SIGINT, self._handle_shutdown)
with Timing() as t:
for it, batch in enumerate(dataloader):
self.logger.log_time(t.measure() / batch_size, "dataloader_seconds/sample",
train_step * simulated_batch_size + (it % num_simulated_batches) * batch_size)
# Calculate gradients
batch = batch_filter_distances(batch, self.relative_distances)
model_out = self._train_step(batch, num_simulated_batches)
self.logger.log_time(t.measure() / batch_size, "model_seconds/sample",
train_step * simulated_batch_size + (it % num_simulated_batches) * batch_size)
# Log actual predicted words and labels
self.logger.log_text("input/train",
str([[self.word_vocab.reverse_lookup(st.item()) for st in token
if st.item() != self.word_vocab[PAD_TOKEN]
and st.item() != self.word_vocab[EOS_TOKEN]]
for token in batch.tokens[0]]))
self.logger.log_text("predicted words/train", str(self._decode_predicted_words(model_out, batch)))
self.logger.log_text("labels/train", str(self._decode_labels(batch)))
# Calculate metrics
evaluation = self._evaluate_predictions(model_out.logits, batch.labels, loss=model_out.loss)
self.logger.log_sub_batch_metrics(evaluation)
if accumulate_tokens_batch:
n_tokens_accumulate_batch += batch.sequence_lengths.sum().item()
# Gradient accumulation: only update gradients every num_simulated_batches step
if not accumulate_tokens_batch and it % num_simulated_batches == (num_simulated_batches - 1) \
or accumulate_tokens_batch and n_tokens_accumulate_batch > simulated_batch_size:
if accumulate_tokens_batch:
n_tokens_accumulate_batch = 0
train_step += 1
total_norm = 0
for p in self.model_lm.parameters():
if p.grad is not None:
param_norm = p.grad.data.norm(2)
total_norm += param_norm.item() ** 2
total_norm = total_norm ** (1. / 2)
self.logger.log_metrics({'gradient_norm': total_norm}, train_step * simulated_batch_size)
self.optimizer.step()
self.optimizer.zero_grad()
if self.scheduler:
if not hasattr(self.scheduler,
"total_steps") or train_step < self.scheduler.total_steps - 1:
self.scheduler.step()
self.logger.log_metrics({'lr': self.scheduler.get_lr()[0]},
train_step * simulated_batch_size)
# Send train metrics to observers
self.logger.flush_batch_metrics(train_step * simulated_batch_size)
# Evaluate on validation set
if self.use_validation and validate_every and train_step % validate_every == 0:
t.measure()
self.model_lm.eval()
with torch.no_grad():
for validation_batch in islice(dataloader_validation, num_simulated_batches_valid):
validation_batch = batch_filter_distances(validation_batch, self.relative_distances)
validation_batch = batch_to_device(validation_batch, self.device)
output = self.model_lm.forward_batch(validation_batch).cpu()
validation_batch = batch_to_device(validation_batch, "cpu")
evaluation = self._evaluate_predictions(output.logits, validation_batch.labels,
loss=output.loss, partition='valid')
self.logger.log_sub_batch_metrics(evaluation)
self.logger.log_text("predicted words/validation",
str(self._decode_predicted_words(output, validation_batch)))
self.logger.log_text("labels/validation",
str(self._decode_labels(validation_batch)))
self.model_lm.train()
self.logger.flush_batch_metrics(step=train_step * simulated_batch_size)
self.logger.log_time(t.measure() / simulated_batch_size_valid, "valid_seconds/sample",
train_step * simulated_batch_size)
if persistent_snapshot_every and (it + 1) % persistent_snapshot_every == 0:
snapshot_iteration = it + 1
self.logger.info(f"Storing model params into snapshot-{snapshot_iteration}")
self.model_manager.save_snapshot(run_id, self.model_lm.state_dict(), snapshot_iteration)
dataset = self.dataset_validation_creator(False)
score = self.evaluate(islice(dataset.to_dataloader(), int(max_validation_samples / batch_size)),
train_step * simulated_batch_size, 'valid_full')
if f"micro_f1_score/valid_full" in self.logger.sub_batch_metrics:
score_name = 'micro-F1'
else:
score_name = 'F1'
self.logger.info(f"Full evaluation yielded {score} {score_name}")
if not early_stopping.evaluate(score, snapshot_iteration):
self.logger.info(f"Last {early_stopping_patience} evaluations did not improve performance. "
f"Stopping run")
break
progress_bar.update()
if progress_bar.n >= progress_bar.total:
progress_bar = tqdm(total=int(self.data_manager.approximate_total_samples() / batch_size))
epoch += 1
progress_bar.set_description(f"Epoch {epoch}")
t.measure()
self._handle_shutdown()
def _train_step(self, batch, num_simulated_batches):
batch = batch_to_device(batch, self.device)
output_gpu = self.model_lm.forward_batch(batch)
# Gradient accumulation: every batch contributes only a part of the total gradient
(output_gpu.loss / num_simulated_batches).backward()
output_cpu = output_gpu.cpu()
del output_gpu
del batch
return output_cpu
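# Worked example of the gradient accumulation above: with batch_size=8 and
# simulated_batch_size=32, num_simulated_batches = 4, so each sub-batch backpropagates
# loss / 4 and optimizer.step() in train() only runs after 4 sub-batches, making the
# update equivalent to a single batch of 32 samples.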
def _evaluate_predictions(self, logits, labels, loss=None, partition='train'):
evaluation = dict()
for metric_name, metric_fn in self.metrics.items():
evaluation[f"{metric_name}/{partition}"] = metric_fn(logits, labels)
if loss:
evaluation[f"loss/{partition}"] = loss.item()
return evaluation
def evaluate(self, dataset, step, partition='valid'):
# Evaluate on validation set
self.model_lm.eval()
predictions = []
labels = []
with torch.no_grad():
for validation_batch in dataset:
validation_batch = batch_filter_distances(validation_batch, self.relative_distances)
validation_batch = batch_to_device(validation_batch, self.device)
output = self.model_lm.forward_batch(validation_batch).cpu()
validation_batch = batch_to_device(validation_batch, "cpu")
predictions.extend(output.logits.argmax(-1))
labels.extend(validation_batch.labels)
evaluation = self._evaluate_predictions(output.logits, validation_batch.labels,
loss=output.loss, partition=partition)
self.logger.log_sub_batch_metrics(evaluation)
self.logger.log_text("predicted words/validation",
str(self._decode_predicted_words(output, validation_batch)))
self.logger.log_text("labels/validation", str(self._decode_labels(validation_batch)))
self.model_lm.train()
if f"micro_f1_score/{partition}" in self.logger.sub_batch_metrics:
score = mean(self.logger.sub_batch_metrics[f"micro_f1_score/{partition}"])
else:
score = mean(self.logger.sub_batch_metrics[f"f1_score/{partition}"])
self.logger.flush_batch_metrics(step=step)
return score
def _decode_predicted_words(self, model_out, batch):
method_name_vocab = self.method_name_vocab if self.use_separate_vocab else self.word_vocab
if hasattr(self, 'use_pointer_network') and self.use_pointer_network:
extended_vocab_reverse = {idx: word for word, idx in batch.extended_vocabulary[0].items()}
predicted_sub_tokens = ((predicted_sub_token.argmax().item(), predicted_sub_token.max().item()) for
predicted_sub_token in model_out.logits[0][0])
return [
(extended_vocab_reverse[st] if st in extended_vocab_reverse else method_name_vocab.reverse_lookup(st),
f"{value:0.2f}") for st, value in predicted_sub_tokens]
else:
return [(method_name_vocab.reverse_lookup(predicted_sub_token.argmax().item()),
f"{predicted_sub_token.max().item():0.2f}") for
predicted_sub_token in model_out.logits[0][0]]
def _decode_labels(self, batch):
method_name_vocab = self.method_name_vocab if self.use_separate_vocab else self.word_vocab
if hasattr(self, 'use_pointer_network') and self.use_pointer_network:
extended_vocab_reverse = {idx: word for word, idx in batch.extended_vocabulary[0].items()}
label_tokens = (sub_token_label.item() for sub_token_label in batch.labels[0][0])
return [extended_vocab_reverse[lt] if lt in extended_vocab_reverse else method_name_vocab.reverse_lookup(lt)
for lt in label_tokens]
else:
return [method_name_vocab.reverse_lookup(sub_token_label.item()) for sub_token_label in batch.labels[0][0]]
def get_dataloader(self, split: str, batch_size: int):
assert split == 'train' or split == 'validation'
if split == 'train':
ds = self.dataset_train
elif split == 'validation':
ds = self.dataset_validation
dl = DataLoader(ds, batch_size=batch_size, num_workers=0,
collate_fn=ds.collate_fn)
dl = DataLoaderWrapper(dl)
return BufferedDataManager(dl)
def _handle_shutdown(self, sig=None, frame=None):
self.dataset_train.data_manager.shutdown()
self.dataset_validation.data_manager.shutdown()
sys.exit(0)
class EarlyStopping:
def __init__(self, model_manager: ModelManager, run_id, patience):
self.model_manager = model_manager
self.run_id = run_id
self.patience = patience
self.evaluation_results = dict()
self._counter = 0
self._best = 0
def evaluate(self, score, snapshot_iteration):
self.evaluation_results[snapshot_iteration] = score
sorted_results = sorted(self.evaluation_results.items(), key=lambda x: x[1], reverse=True)
print(f"Current best performing snapshots: {sorted_results}")
snapshots_to_keep = sorted_results[:self.patience]
snapshots_to_keep = [x[0] for x in snapshots_to_keep]
stored_snapshots = self.model_manager.get_available_snapshots(self.run_id)
for stored_snapshot in stored_snapshots:
if stored_snapshot not in snapshots_to_keep:
self.model_manager.delete_snapshot(self.run_id, stored_snapshot)
if score > self._best:
self._best = score
self._counter = 0
else:
self._counter += 1
print(f"Counter: {self._counter}, Best: {self._best}")
if self._counter > self.patience:
return False
else:
return True
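# Behavior sketch of evaluate() above: the best `patience` snapshots are kept on disk
# (all other stored snapshots are deleted), and False is returned once more than
# `patience` consecutive evaluations fail to improve on the best score, signalling
# the training loop to stop.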
| [
"torch.optim.lr_scheduler.OneCycleLR",
"torch.no_grad",
"torch.optim.lr_scheduler.MultiStepLR",
"torch.manual_seed",
"torch.utils.data.DataLoader"
] | 1.4 | maximzubkov/code-transformer | 52600ab17d05a238f35c39a78b22c5c706fbb13c |
1.5 | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from onpolicy.algorithms.utils.cnn import CNNBase
from onpolicy.algorithms.utils.mlp import MLPBase, MLPLayer
from onpolicy.algorithms.utils.rnn import RNNLayer
from onpolicy.algorithms.utils.act import ACTLayer
from onpolicy.algorithms.utils.util import init, check
from onpolicy.utils.util import get_shape_from_obs_space
class R_Model(nn.Module):
def __init__(self, args, obs_space, share_obs_space, action_space, device=torch.device("cpu"), cat_self=True):
super(R_Model, self).__init__()
self._gain = args.gain
self._use_orthogonal = args.use_orthogonal
self._use_ReLU = args.use_ReLU
self._recurrent_N = args.recurrent_N
self._use_naive_recurrent_policy = args.use_naive_recurrent_policy
self._use_recurrent_policy = args.use_recurrent_policy
self._use_centralized_V = args.use_centralized_V
self.hidden_size = args.hidden_size
self.device = device
self.tpdv = dict(dtype=torch.float32, device=device)
init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][self._use_orthogonal]
# obs space
obs_shape = get_shape_from_obs_space(obs_space)
base = CNNBase if len(obs_shape)==3 else MLPBase
self.obs_prep = base(args, obs_shape)
# share obs space
if self._use_centralized_V:
share_obs_shape = get_shape_from_obs_space(share_obs_space)
self.share_obs_prep = base(args, share_obs_shape, cat_self)
else:
self.share_obs_prep = self.obs_prep
# common layer
self.common = MLPLayer(self.hidden_size, self.hidden_size, layer_N=0, use_orthogonal=self._use_orthogonal, use_ReLU=self._use_ReLU)
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
self.rnn = RNNLayer(self.hidden_size, self.hidden_size, self._recurrent_N, self._use_orthogonal)
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0))
# value
self.v_out = init_(nn.Linear(self.hidden_size, 1))
# action
self.act = ACTLayer(action_space, self.hidden_size, self._use_orthogonal, self._gain)
self.to(self.device)
def get_actions(self, obs, rnn_states, masks, available_actions=None, deterministic=False):
obs = check(obs).to(**self.tpdv)
rnn_states = check(rnn_states).to(**self.tpdv)
masks = check(masks).to(**self.tpdv)
if available_actions is not None:
available_actions = check(available_actions).to(**self.tpdv)
x = obs
x = self.obs_prep(x)
# common
actor_features = self.common(x)
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)
actions, action_log_probs = self.act(actor_features, available_actions, deterministic)
return actions, action_log_probs, rnn_states
def evaluate_actions(self, obs, rnn_states, action, masks, available_actions, active_masks=None):
obs = check(obs).to(**self.tpdv)
rnn_states = check(rnn_states).to(**self.tpdv)
action = check(action).to(**self.tpdv)
masks = check(masks).to(**self.tpdv)
if available_actions is not None:
available_actions = check(available_actions).to(**self.tpdv)
if active_masks is not None:
active_masks = check(active_masks).to(**self.tpdv)
x = obs
x = self.obs_prep(x)
actor_features = self.common(x)
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)
action_log_probs, dist_entropy = self.act.evaluate_actions(actor_features, action, available_actions, active_masks)
return action_log_probs, dist_entropy
def get_values(self, share_obs, rnn_states, masks):
share_obs = check(share_obs).to(**self.tpdv)
rnn_states = check(rnn_states).to(**self.tpdv)
masks = check(masks).to(**self.tpdv)
share_x = share_obs
share_x = self.share_obs_prep(share_x)
critic_features = self.common(share_x)
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
critic_features, rnn_states = self.rnn(critic_features, rnn_states, masks)
values = self.v_out(critic_features)
return values, rnn_states
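# Usage sketch within a recurrent actor-critic loop (hypothetical tensor names;
# `masks` zero out recurrent states at episode boundaries):
#   actions, action_log_probs, rnn_states = model.get_actions(obs, rnn_states, masks)
#   values, rnn_states_critic = model.get_values(share_obs, rnn_states_critic, masks)
# evaluate_actions() recomputes log-probabilities and entropy for stored actions
# during the policy update.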
| [
"torch.nn.Linear",
"torch.device",
"torch.nn.init.constant_"
] | 1.5.1 | LUMO666/Highway | 05e1ad318bd14d405bd78d612e5706f7db2b3266 |
1.5 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import warnings
from typing import TYPE_CHECKING, Callable, List, Optional
import torch
from monai.data import CSVSaver
from monai.utils import ImageMetaKey as Key
from monai.utils import (
evenly_divisible_all_gather,
exact_version,
issequenceiterable,
optional_import,
string_list_all_gather,
)
idist, _ = optional_import("ignite", "0.4.4", exact_version, "distributed")
Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events")
if TYPE_CHECKING:
from ignite.engine import Engine
else:
Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine")
class ClassificationSaver:
"""
Event handler triggered on completing every iteration to save the classification predictions as a CSV file.
If running in distributed data parallel, only saves the CSV file on the specified rank.
"""
def __init__(
self,
output_dir: str = "./",
filename: str = "predictions.csv",
overwrite: bool = True,
batch_transform: Callable = lambda x: x,
output_transform: Callable = lambda x: x,
name: Optional[str] = None,
save_rank: int = 0,
saver: Optional[CSVSaver] = None,
) -> None:
"""
Args:
output_dir: if `saver=None`, output CSV file directory.
filename: if `saver=None`, name of the saved CSV file.
overwrite: if `saver=None`, whether to overwrite existing file content; if True,
will clear the file before saving, otherwise will append new content to the file.
batch_transform: a callable that is used to transform the
ignite.engine.batch into the expected format to extract the meta_data dictionary.
output_transform: a callable that is used to transform the
ignite.engine.output into the expected model prediction data.
The first dimension of this transform's output will be treated as the
batch dimension. Each item in the batch will be saved individually.
name: identifier of logging.logger to use, defaulting to `engine.logger`.
save_rank: only the handler on the specified rank will save to the CSV file in multi-GPU validation,
defaults to 0.
saver: the saver instance to save classification results, if None, create a CSVSaver internally.
the saver must provide `save_batch(batch_data, meta_data)` and `finalize()` APIs.
"""
self.save_rank = save_rank
self.output_dir = output_dir
self.filename = filename
self.overwrite = overwrite
self.batch_transform = batch_transform
self.output_transform = output_transform
self.saver = saver
self.logger = logging.getLogger(name)
self._name = name
self._outputs: List[torch.Tensor] = []
self._filenames: List[str] = []
def attach(self, engine: Engine) -> None:
"""
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
if self._name is None:
self.logger = engine.logger
if not engine.has_event_handler(self._started, Events.EPOCH_STARTED):
engine.add_event_handler(Events.EPOCH_STARTED, self._started)
if not engine.has_event_handler(self, Events.ITERATION_COMPLETED):
engine.add_event_handler(Events.ITERATION_COMPLETED, self)
if not engine.has_event_handler(self._finalize, Events.EPOCH_COMPLETED):
engine.add_event_handler(Events.EPOCH_COMPLETED, self._finalize)
def _started(self, engine: Engine) -> None:
self._outputs = []
self._filenames = []
def __call__(self, engine: Engine) -> None:
"""
This method assumes self.batch_transform will extract metadata from the input batch.
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
filenames = self.batch_transform(engine.state.batch).get(Key.FILENAME_OR_OBJ)
if issequenceiterable(filenames):
self._filenames.extend(filenames)
outputs = self.output_transform(engine.state.output)
if outputs is not None:
if isinstance(outputs, torch.Tensor):
outputs = outputs.detach()
self._outputs.append(outputs)
def _finalize(self, engine: Engine) -> None:
"""
All-gather classification results from all ranks and save to a CSV file.
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
ws = idist.get_world_size()
if self.save_rank >= ws:
raise ValueError("target save rank is greater than the distributed group size.")
outputs = torch.cat(self._outputs, dim=0)
filenames = self._filenames
if ws > 1:
outputs = evenly_divisible_all_gather(outputs, concat=True)
filenames = string_list_all_gather(filenames)
if len(filenames) == 0:
meta_dict = None
else:
if len(filenames) != len(outputs):
warnings.warn(f"filenames length: {len(filenames)} doesn't match outputs length: {len(outputs)}.")
meta_dict = {Key.FILENAME_OR_OBJ: filenames}
# save to CSV file only in the expected rank
if idist.get_rank() == self.save_rank:
saver = self.saver or CSVSaver(self.output_dir, self.filename, self.overwrite)
saver.save_batch(outputs, meta_dict)
saver.finalize()
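# Minimal attachment sketch (hypothetical evaluator; assumes an ignite-based workflow
# whose batch_transform exposes the meta dict with filenames):
#   handler = ClassificationSaver(output_dir="./out", filename="predictions.csv")
#   handler.attach(evaluator)
# At the end of each epoch the handler gathers predictions across ranks and writes
# one CSV row per input filename on the rank given by `save_rank`.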
| [
"torch.cat"
] | 1.5 | eddyleelin/MONAI | 8e56191d344692fdfa1b9a52285b2514131061e6 |
1.4 | # Copyright 2018 Uber Technologies, Inc. All Rights Reserved.
# Modifications copyright (C) 2019 Intel Corporation
# Modifications copyright (C) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from distutils.version import LooseVersion
import inspect
import itertools
import os
import platform
import sys
import unittest
import warnings
import time
import json
from collections.abc import Iterable
import numpy as np
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
import horovod.torch as hvd
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'utils'))
from common import mpi_env_rank_and_size, skip_or_fail_gpu_test, temppath
_1_5_api = LooseVersion(torch.__version__) >= LooseVersion('1.5.0')
ccl_supported_types = set([torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
torch.IntTensor, torch.LongTensor, torch.FloatTensor,
torch.DoubleTensor])
class TorchTests(unittest.TestCase):
"""
Tests for ops in horovod.torch.
"""
def __init__(self, *args, **kwargs):
super(TorchTests, self).__init__(*args, **kwargs)
warnings.simplefilter('module')
def convert_cpu_fp16_to_fp32(self, *values):
# PyTorch doesn't support any CPU ops on FP16 tensors.
# In case we need to do ops, we convert the tensor to FP32 here.
result = []
for value in values:
if value.dtype in [torch.float16, torch.HalfTensor] and not value.is_cuda:
result.append(value.float())
else:
result.append(value)
return result
def cast_and_place(self, tensor, dtype):
if dtype.is_cuda:
return tensor.cuda(hvd.local_rank()).type(dtype)
return tensor.type(dtype)
def filter_supported_types(self, types):
if 'CCL_ROOT' in os.environ:
types = [t for t in types if t in ccl_supported_types]
return types
def test_gpu_required(self):
if not torch.cuda.is_available():
skip_or_fail_gpu_test(self, "No GPUs available")
@pytest.mark.skipif(platform.system() == 'Darwin', reason='Reinit not supported on macOS')
def test_horovod_reinit(self):
"""Test that Horovod can init -> shutdown -> init successfully."""
mpi_rank, _ = mpi_env_rank_and_size()
gloo_rank = int(os.getenv('HOROVOD_RANK', -1))
is_mpi = gloo_rank == -1
if is_mpi:
# Horovod cannot be re-initialized after shutdown when using MPI, so
# this test can only be done using the Gloo controller
self.skipTest("Gloo is not available")
hvd.init()
rank, size = hvd.rank(), hvd.size()
hvd.shutdown()
hvd.init()
rank2, size2 = hvd.rank(), hvd.size()
assert rank == rank2
assert size == size2
def test_horovod_is_initialized(self):
"""Test that is_initialized returned by hvd.is_initialized() is correct."""
hvd.init()
assert hvd.is_initialized()
gloo_rank = int(os.getenv('HOROVOD_RANK', -1))
is_mpi = gloo_rank == -1
if is_mpi:
# Only applies for Gloo
self.skipTest("Gloo is not available")
hvd.shutdown()
assert not hvd.is_initialized()
hvd.init()
def test_horovod_rank(self):
"""Test that the rank returned by hvd.rank() is correct."""
mpi_rank, _ = mpi_env_rank_and_size()
gloo_rank = int(os.getenv('HOROVOD_RANK', -1))
# The MPI rank may not match the Gloo rank, so figure out which one
# we are using to run the test.
is_mpi = gloo_rank == -1
hvd.init()
rank = hvd.rank()
if is_mpi:
assert mpi_rank == rank
else:
assert gloo_rank == rank
def test_horovod_size(self):
"""Test that the size returned by hvd.size() is correct."""
_, mpi_size = mpi_env_rank_and_size()
gloo_size = int(os.getenv('HOROVOD_SIZE', -1))
# The MPI size may not match the Gloo size, so figure out which one
# we are using to run the test.
is_mpi = gloo_size == -1
hvd.init()
size = hvd.size()
if is_mpi:
assert mpi_size == size
else:
assert gloo_size == size
def test_horovod_allreduce(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor = self.cast_and_place(tensor, dtype)
summed = hvd.allreduce(tensor, average=False)
tensor, summed = self.convert_cpu_fp16_to_fp32(tensor, summed)
multiplied = tensor * size
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
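# No tolerance is defined for floating-point types at 15 or more ranks, so the remaining dtype/dim combinations are skipped.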
assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_average(self):
"""Test that the allreduce correctly averages 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor = self.cast_and_place(tensor, dtype)
averaged = hvd.allreduce(tensor, average=True)
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert torch.allclose(averaged, tensor, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_inplace(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
multiplied = self.cast_and_place(tensor * size, dtype)
tensor = self.cast_and_place(tensor, dtype)
hvd.allreduce_(tensor, average=False)
tensor, multiplied = self.convert_cpu_fp16_to_fp32(tensor, multiplied)
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert torch.allclose(tensor, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_async_fused(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors
with Tensor Fusion."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
tests = []
is_hvd_poll_false_once = False
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor = self.cast_and_place(tensor, dtype)
handle = hvd.allreduce_async(tensor, average=False)
if not hvd.poll(handle):
is_hvd_poll_false_once = True
tensor, = self.convert_cpu_fp16_to_fp32(tensor)
multiplied = tensor * size
tests.append((dtype, multiplied, handle))
# Make sure it's an asynchronous operation.
assert is_hvd_poll_false_once, 'hvd.poll() always returns True, not an async op?'
for dtype, multiplied, handle in tests:
summed = hvd.synchronize(handle)
summed, = self.convert_cpu_fp16_to_fp32(summed)
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_multi_gpu(self):
"""Test that the allreduce works on multiple GPUs."""
# Only do this test if there are GPUs available.
if not torch.cuda.is_available():
self.skipTest("No GPUs available")
hvd.init()
local_rank = hvd.local_rank()
size = hvd.size()
# Skip the test if there are not enough GPUs.
if torch.cuda.device_count() < hvd.local_size() * 2:
self.skipTest("Not enough GPUs available")
iter = 0
dtypes = [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
iter += 1
torch.manual_seed(1234)
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
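# Alternate between this rank's two GPUs (local_rank*2 and local_rank*2 + 1) across iterations so both devices get exercised.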
device = local_rank * 2 + (iter + local_rank) % 2
tensor = tensor.cuda(device).type(dtype)
multiplied = tensor * size
hvd.allreduce_(tensor, average=False)
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert torch.allclose(tensor, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_prescale(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors with prescaling."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
int_types = [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]
half_types = [torch.HalfTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
np.random.seed(1234)
factor = np.random.uniform()
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor = self.cast_and_place(tensor, dtype)
summed = hvd.allreduce(tensor, average=False,
prescale_factor=factor)
factor = torch.tensor(factor, dtype=torch.float64)
factor = factor.cuda(hvd.local_rank()) if dtype.is_cuda else factor
if dtype.is_cuda and not int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# For integer types, scaling done in FP64
factor = factor.type(torch.float64 if dtype in int_types else dtype)
tensor = tensor.type(torch.float64 if dtype in int_types else dtype)
else:
# For integer types, scaling done in FP64, FP32 math for FP16 on CPU
factor = factor.type(torch.float32 if dtype in half_types else
torch.float64 if dtype in int_types else dtype)
tensor = tensor.type(torch.float32 if dtype in half_types else
torch.float64 if dtype in int_types else dtype)
multiplied = factor * tensor
multiplied = multiplied.type(dtype)
summed, multiplied = self.convert_cpu_fp16_to_fp32(summed, multiplied)
multiplied *= size
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in int_types:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_postscale(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors with postscaling."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
int_types = [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]
half_types = [torch.HalfTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
np.random.seed(1234)
factor = np.random.uniform()
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor = self.cast_and_place(tensor, dtype)
summed = hvd.allreduce(tensor, average=False,
postscale_factor=factor)
factor = torch.tensor(factor, dtype=torch.float64)
factor = factor.cuda(hvd.local_rank()) if dtype.is_cuda else factor
if dtype.is_cuda and not int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# For integer types, scaling done in FP64
factor = factor.type(torch.float64 if dtype in int_types else dtype)
tensor = tensor.type(torch.float64 if dtype in int_types else dtype)
else:
# For integer types, scaling done in FP64, FP32 math for FP16 on CPU
factor = factor.type(torch.float32 if dtype in half_types else
torch.float64 if dtype in int_types else dtype)
tensor = tensor.type(torch.float32 if dtype in half_types else
torch.float64 if dtype in int_types else dtype)
multiplied = size * tensor
multiplied = multiplied * factor
multiplied = multiplied.type(dtype)
summed, multiplied = self.convert_cpu_fp16_to_fp32(summed, multiplied)
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in int_types:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_error(self):
"""Test that the allreduce raises an error if different ranks try to
send tensors of different rank or dimension."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# Same rank, different dimension
torch.manual_seed(1234)
dims = [17 + rank] * 3
tensor = torch.FloatTensor(*dims).random_(-100, 100)
try:
hvd.allreduce(tensor)
assert False, 'hvd.allreduce did not throw error'
except (torch.FatalError, RuntimeError):
pass
# Same number of elements, different rank
torch.manual_seed(1234)
if rank == 0:
dims = [17, 23 * 57]
else:
dims = [17, 23, 57]
tensor = torch.FloatTensor(*dims).random_(-100, 100)
try:
hvd.allreduce(tensor)
assert False, 'hvd.allreduce did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_allreduce_type_error(self):
"""Test that the allreduce raises an error if different ranks try to
send tensors of different type."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# Same rank, different dimension
dims = [17] * 3
if rank % 2 == 0:
tensor = torch.IntTensor(*dims)
else:
tensor = torch.FloatTensor(*dims)
try:
hvd.allreduce(tensor)
assert False, 'hvd.allreduce did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_allreduce_cpu_gpu_error(self):
"""Test that the allreduce raises an error if different ranks try to
perform reduction on CPU and GPU."""
# Only do this test if there are GPUs available.
if not torch.cuda.is_available():
self.skipTest("No GPUs available")
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# Same rank, different dimension
dims = [17] * 3
if rank % 2 == 0:
tensor = torch.cuda.FloatTensor(*dims)
else:
tensor = torch.FloatTensor(*dims)
try:
hvd.allreduce(tensor)
assert False, 'hvd.allreduce did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_allreduce_duplicate_name_error(self):
"""Test that the allreduce raises an error if there are
two concurrent operations with the same name."""
hvd.init()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
dims = [17] * 3
tensor = torch.FloatTensor(*dims)
hvd.allreduce_async(tensor, name='duplicate_name')
try:
for i in range(10):
hvd.allreduce_async(tensor, name='duplicate_name')
assert False, 'hvd.allreduce_async did not throw error'
except (torch.FatalError, ValueError):
pass
def test_horovod_allreduce_grad(self):
"""Test the correctness of the allreduce gradient."""
hvd.init()
size = hvd.size()
# Only Tensors of floating point dtype can require gradients
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor = self.cast_and_place(tensor, dtype)
tensor.requires_grad_()
summed = hvd.allreduce(tensor, average=False)
summed.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
grad_out = tensor.grad.data.cpu().numpy()
expected = np.ones([17] * dim) * size
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_allreduce_grad_average(self):
"""Test the correctness of the allreduce averaged gradient."""
hvd.init()
# Only Tensors of floating point dtype can require gradients
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor = self.cast_and_place(tensor, dtype)
tensor.requires_grad_()
summed = hvd.allreduce(tensor, average=True)
summed.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
grad_out = tensor.grad.data.cpu().numpy()
expected = np.ones([17] * dim)
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_grouped_allreduce(self):
"""Test that the grouped allreduce correctly sums 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]
tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]
summed = hvd.grouped_allreduce(tensors, average=False)
tensors, summed = zip(*[self.convert_cpu_fp16_to_fp32(t, s) for t, s in zip(tensors, summed)])
multiplied = [tensor * size for tensor in tensors]
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert all([torch.allclose(t1, t2, threshold) for t1, t2 in zip(summed, multiplied)]), \
'hvd.grouped_allreduce produces incorrect results'
def test_horovod_grouped_allreduce_average(self):
"""Test that the grouped allreduce correctly averages 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]
tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]
averaged = hvd.grouped_allreduce(tensors, average=True)
tensors, averaged = zip(*[self.convert_cpu_fp16_to_fp32(t, m) for t, m in zip(tensors, averaged)])
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert all([torch.allclose(t1, t2, threshold) for t1, t2 in zip(averaged, tensors)]), \
'hvd.grouped_allreduce produces incorrect results for average'
def test_horovod_grouped_allreduce_inplace(self):
"""Test that the grouped allreduce correctly sums 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]
multiplied = [self.cast_and_place(tensor * size, dtype) for tensor in tensors]
tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]
hvd.grouped_allreduce_(tensors, average=False)
tensors, multiplied = zip(*[self.convert_cpu_fp16_to_fp32(t, m) for t, m in zip(tensors, multiplied)])
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert all([torch.allclose(t1, t2, threshold) for t1, t2 in zip(tensors, multiplied)]), \
'hvd.grouped_allreduce_ produces incorrect results'
def test_horovod_grouped_allreduce_cpu_gpu_error(self):
"""Test that the grouped allreduce raises an error if the input tensor
list contains a mix of tensors on CPU and GPU."""
# Only do this test if there are GPUs available.
if not torch.cuda.is_available():
self.skipTest("No GPUs available")
hvd.init()
tensors = [torch.FloatTensor(10) if i % 2 else torch.cuda.FloatTensor(10) for i in range(5)]
try:
hvd.grouped_allreduce(tensors, average=False)
assert False, 'hvd.allreduce did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_grouped_allreduce_grad(self):
"""Test the correctness of the grouped allreduce gradient."""
hvd.init()
size = hvd.size()
# Only Tensors of floating point dtype can require gradients
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]
tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]
for tensor in tensors:
tensor.requires_grad_()
summed = hvd.grouped_allreduce(tensors, average=False)
for s in summed:
s.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
grads_out = [tensor.grad.data.cpu().numpy() for tensor in tensors]
expected = np.ones([17] * dim) * size
for grad_out in grads_out:
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_grouped_allreduce_grad_average(self):
"""Test the correctness of the grouped allreduce averaged gradient."""
hvd.init()
# Only Tensors of floating point dtype can require gradients
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]
tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]
for tensor in tensors:
tensor.requires_grad_()
summed = hvd.grouped_allreduce(tensors, average=True)
for s in summed:
s.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
grads_out = [tensor.grad.data.cpu().numpy() for tensor in tensors]
expected = np.ones([17] * dim)
for grad_out in grads_out:
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_allgather(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,
torch.HalfTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
tensor = self.cast_and_place(tensor, dtype)
gathered = hvd.allgather(tensor)
tensor, gathered = self.convert_cpu_fp16_to_fp32(tensor, gathered)
assert list(gathered.shape) == [17 * size] + [17] * (dim - 1)
for i in range(size):
rank_tensor = gathered[i * 17:(i + 1) * 17]
assert list(rank_tensor.shape) == [17] * dim, \
'hvd.allgather produces incorrect gathered shape'
assert rank_tensor.data.min() == i, 'hvd.allgather produces incorrect gathered tensor'
assert rank_tensor.data.max() == i, 'hvd.allgather produces incorrect gathered tensor'
def test_horovod_allgather_variable_size(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors,
even if those tensors have different sizes along the first dim."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,
torch.HalfTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
# Support tests up to MPI Size of 35
if size > 35:
break
tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5
tensor_sizes = tensor_sizes[:size]
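# Each rank contributes a tensor whose first dimension comes from tensor_sizes, so the gathered result is ragged along dim 0.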
tensor = torch.FloatTensor(
*([tensor_sizes[rank]] + [17] * (dim - 1))).fill_(1).mul_(rank)
tensor = self.cast_and_place(tensor, dtype)
gathered = hvd.allgather(tensor)
tensor, gathered = self.convert_cpu_fp16_to_fp32(tensor, gathered)
expected_size = sum(tensor_sizes)
assert list(gathered.shape) == [expected_size] + [17] * (dim - 1)
for i in range(size):
rank_size = [tensor_sizes[i]] + [17] * (dim - 1)
rank_tensor = gathered[sum(
tensor_sizes[:i]):sum(tensor_sizes[:i + 1])]
assert list(rank_tensor.shape) == rank_size
assert rank_tensor.data.min() == i
assert rank_tensor.data.max() == i
def test_horovod_allgather_async_fused(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors
with Tensor Fusion."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,
torch.HalfTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
tests = []
is_hvd_poll_false_once = False
for dtype, dim in itertools.product(dtypes, dims):
rank_shape = [17] * dim
tensor = torch.FloatTensor(*(rank_shape)).fill_(1).mul_(rank)
tensor = self.cast_and_place(tensor, dtype)
handle = hvd.allgather_async(tensor)
if not hvd.poll(handle):
is_hvd_poll_false_once = True
tests.append((handle, rank_shape))
# Make sure it's an asynchronous operation.
assert is_hvd_poll_false_once, 'hvd.poll() always returns True, not an async op?'
for handle, rank_shape in tests:
gathered = hvd.synchronize(handle)
gathered, = self.convert_cpu_fp16_to_fp32(gathered)
for i in range(size):
rank_tensor = gathered[i * 17:(i + 1) * 17]
assert list(rank_tensor.shape) == rank_shape, \
'hvd.allgather produces incorrect gathered shape'
assert rank_tensor.data.min() == i, 'hvd.allgather produces incorrect gathered tensor'
assert rank_tensor.data.max() == i, 'hvd.allgather produces incorrect gathered tensor'
def test_horovod_allgather_error(self):
"""Test that the allgather returns an error if any dimension besides
the first is different among the tensors being gathered."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
tensor_size = [17] * 3
tensor_size[1] = 10 * (rank + 1)
tensor = torch.FloatTensor(*tensor_size).fill_(1).mul_(rank)
try:
hvd.allgather(tensor)
assert False, 'hvd.allgather did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_allgather_type_error(self):
"""Test that the allgather returns an error if the types being gathered
differ among the processes"""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
tensor_size = [17] * 3
if rank % 2 == 0:
tensor = torch.IntTensor(*tensor_size)
else:
tensor = torch.FloatTensor(*tensor_size)
try:
hvd.allgather(tensor)
assert False, 'hvd.allgather did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_allgather_duplicate_name_error(self):
"""Test that the allgather raises an error if there are
two concurrent operations with the same name."""
hvd.init()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
dims = [17] * 3
tensor = torch.FloatTensor(*dims)
hvd.allgather_async(tensor, name='duplicate_name')
try:
for i in range(10):
hvd.allgather_async(tensor, name='duplicate_name')
assert False, 'hvd.allgather_async did not throw error'
except (torch.FatalError, ValueError):
pass
def test_horovod_allgather_grad(self):
"""Test the correctness of the allgather gradient."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# Only Tensors of floating point dtype can require gradients
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
# Support tests up to MPI Size of 35
if size > 35:
break
tensor_sizes = [3, 2, 7, 4, 6, 8, 10] * 5
tensor_sizes = tensor_sizes[:size]
tensor = torch.FloatTensor(
*([tensor_sizes[rank]] + [17] * (dim - 1))).fill_(1).mul_(rank)
tensor = self.cast_and_place(tensor, dtype)
tensor.requires_grad_()
grad_list = []
for r, tensor_size in enumerate(tensor_sizes):
grad_list.append(self.cast_and_place(
torch.ones([tensor_size] + [17] * (dim - 1)), dtype) * r)
grad_ys = torch.cat(grad_list, dim=0)
gathered = hvd.allgather(tensor)
gathered.backward(grad_ys)
grad_out = tensor.grad.data.cpu().numpy()
expected = np.ones(
[tensor_sizes[rank]] + [17] * (dim - 1)
) * rank * size
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_broadcast(self):
"""Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,
torch.HalfTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
root_ranks = list(range(size))
for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
root_tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(root_rank)
tensor = self.cast_and_place(tensor, dtype)
root_tensor = self.cast_and_place(root_tensor, dtype)
broadcasted_tensor = hvd.broadcast(tensor, root_rank)
tensor, root_tensor, broadcasted_tensor = \
self.convert_cpu_fp16_to_fp32(tensor, root_tensor, broadcasted_tensor)
if rank != root_rank:
assert (tensor == root_tensor).max() == 0, \
'hvd.broadcast modifies source tensor'
assert (broadcasted_tensor.data == root_tensor).min() == 1, \
'hvd.broadcast produces incorrect broadcasted tensor'
def test_horovod_broadcast_inplace(self):
"""Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,
torch.HalfTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
root_ranks = list(range(size))
for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
root_tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(root_rank)
tensor = self.cast_and_place(tensor, dtype)
root_tensor = self.cast_and_place(root_tensor, dtype)
broadcasted_tensor = hvd.broadcast_(tensor, root_rank)
tensor, root_tensor, broadcasted_tensor = \
self.convert_cpu_fp16_to_fp32(tensor, root_tensor, broadcasted_tensor)
assert (tensor == broadcasted_tensor).min() == 1, \
'hvd.broadcast does not modify source tensor'
assert (broadcasted_tensor == root_tensor).min() == 1, \
'hvd.broadcast produces incorrect broadcasted tensor'
def test_horovod_broadcast_error(self):
"""Test that the broadcast returns an error if any dimension besides
the first is different among the tensors being broadcasted."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
tensor_size = [17] * 3
tensor_size[1] = 10 * (rank + 1)
tensor = torch.FloatTensor(*tensor_size).fill_(1).mul_(rank)
try:
hvd.broadcast(tensor, 0)
assert False, 'hvd.broadcast did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_broadcast_type_error(self):
"""Test that the broadcast returns an error if the types being broadcasted
differ among the processes"""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
tensor_size = [17] * 3
if rank % 2 == 0:
tensor = torch.IntTensor(*tensor_size)
else:
tensor = torch.FloatTensor(*tensor_size)
try:
hvd.broadcast(tensor, 0)
assert False, 'hvd.broadcast did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_broadcast_rank_error(self):
"""Test that the broadcast returns an error if different ranks
specify different root rank."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
tensor = torch.FloatTensor(*([17] * 3)).fill_(1)
try:
hvd.broadcast(tensor, rank)
assert False, 'hvd.broadcast did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_broadcast_duplicate_name_error(self):
"""Test that the broadcast raises an error if there are
two concurrent operations with the same name."""
hvd.init()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
dims = [17] * 3
tensor = torch.FloatTensor(*dims)
hvd.broadcast_async(tensor, root_rank=0, name='duplicate_name')
try:
for i in range(10):
hvd.broadcast_async(tensor, root_rank=0, name='duplicate_name')
assert False, 'hvd.broadcast_async did not throw error'
except (torch.FatalError, ValueError):
pass
def test_horovod_broadcast_grad(self):
"""Test the correctness of the broadcast gradient."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# Only Tensors of floating point dtype can require gradients
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
root_ranks = list(range(size))
for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
tensor = self.cast_and_place(tensor, dtype)
tensor.requires_grad_()
broadcasted_tensor = hvd.broadcast(tensor, root_rank)
broadcasted_tensor.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
grad_out = tensor.grad.data.cpu().numpy()
c = size if rank == root_rank else 0
expected = np.ones([17] * dim) * c
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_alltoall(self):
"""Test that the alltoall correctly distributes 1D, 2D, and 3D tensors."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
dtypes = self.filter_supported_types([torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
torch.IntTensor, torch.LongTensor, torch.FloatTensor,
torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
vals = []
for i in range(size):
vals += [i] * (rank + 1)
tensor = torch.Tensor(vals)
for _ in range(dim - 1):
tensor = tensor.unsqueeze(1)
tensor = torch.cat((tensor, tensor), dim=1)
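# This rank sends (rank + 1) entries to every peer, so splits is [rank + 1] * size; Horovod requires the splits tensor to be int32.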
splits = torch.tensor([rank + 1] * size, dtype=torch.int32)
tensor = self.cast_and_place(tensor, dtype)
collected = hvd.alltoall(tensor, splits)
tensor, collected = self.convert_cpu_fp16_to_fp32(tensor, collected)
assert collected.data.min() == rank, 'hvd.alltoall produces incorrect collected tensor'
assert collected.data.max() == rank, 'hvd.alltoall produces incorrect collected tensor'
assert collected.numel() == size * (size + 1) // 2 * 2**(dim - 1), 'hvd.alltoall collected wrong number of values'
def test_horovod_alltoall_equal_split(self):
"""Test that the alltoall correctly distributes 1D tensors with default splitting."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
dtypes = self.filter_supported_types([torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
torch.IntTensor, torch.LongTensor, torch.FloatTensor,
torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
vals = []
for i in range(size):
vals += [i] * (rank + 1)
tensor = torch.Tensor(vals)
for _ in range(dim - 1):
tensor = tensor.unsqueeze(1)
tensor = torch.cat((tensor, tensor), dim=1)
tensor = self.cast_and_place(tensor, dtype)
collected = hvd.alltoall(tensor)
tensor, collected = self.convert_cpu_fp16_to_fp32(tensor, collected)
assert collected.data.min() == rank, 'hvd.alltoall produces incorrect collected tensor'
assert collected.data.max() == rank, 'hvd.alltoall produces incorrect collected tensor'
assert collected.numel() == size * (size + 1) // 2 * 2**(dim - 1), 'hvd.alltoall collected wrong number of values'
def test_horovod_alltoall_type_error(self):
"""Test that the alltoall returns an error if the tensor types differ
across the processes."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
if rank % 2:
tensor = torch.empty(size, dtype=torch.int32)
else:
tensor = torch.empty(size, dtype=torch.float32)
try:
hvd.alltoall(tensor)
assert False, 'hvd.alltoall did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_alltoall_equal_split_length_error(self):
"""Test that the alltoall with default splitting returns an error if the tensor length is not a multiple
of the number of workers."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
tensor = torch.empty(size + 1)
try:
hvd.alltoall(tensor)
assert False, 'hvd.alltoall did not throw error'
except (torch.FatalError, ValueError):
pass
def test_horovod_alltoall_splits_error(self):
"""Test that the alltoall returns an error if the sum of the splits entries exceeds
the first dimension of the input tensor."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
tensor = torch.empty(size - 1)
splits = torch.ones(size, dtype=torch.int32)
try:
hvd.alltoall(tensor, splits)
assert False, 'hvd.alltoall did not throw error'
except (torch.FatalError, ValueError):
pass
def test_horovod_alltoall_splits_type_error(self):
"""Test that the alltoall returns an error if the splits tensor does not
contain 32-bit integers."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
tensor = torch.empty(size)
splits = torch.empty(size, dtype=torch.float32)
try:
hvd.alltoall(tensor, splits)
assert False, 'hvd.alltoall did not throw error'
except (torch.FatalError, ValueError):
pass
def test_horovod_alltoall_rank_error(self):
"""Test that the alltoall returns an error if any dimension besides
the first is different among the tensors being processed."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
tensor_size = [2 * size] * 3
tensor_size[1] = 10 * (rank + 1)
tensor = torch.ones(tensor_size)
try:
hvd.alltoall(tensor)
assert False, 'hvd.alltoall did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_alltoall_grad(self):
"""Test the correctness of the alltoall gradient."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
# Only Tensors of floating point dtype can require gradients
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
vals = []
for i in range(size):
vals += [i] * (rank + 1)
tensor = torch.Tensor(vals)
for _ in range(dim - 1):
tensor = tensor.unsqueeze(1)
tensor = torch.cat((tensor, tensor), dim=1)
tensor = self.cast_and_place(tensor, dtype)
tensor.requires_grad_()
splits = torch.tensor([rank + 1] * size, dtype=torch.int32)
collected = hvd.alltoall(tensor, splits)
collected.backward(self.cast_and_place(torch.ones(collected.shape), dtype))
grad_out = tensor.grad.data.cpu().numpy()
expected = np.ones(tensor.shape)
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_alltoall_equal_split_grad(self):
"""Test the correctness of the alltoall gradient with default splitting."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
# Only Tensors of floating point dtype can require gradients
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
vals = []
for i in range(size):
vals += [i] * (rank + 1)
tensor = torch.Tensor(vals)
for _ in range(dim - 1):
tensor = tensor.unsqueeze(1)
tensor = torch.cat((tensor, tensor), dim=1)
tensor = self.cast_and_place(tensor, dtype)
tensor.requires_grad_()
collected = hvd.alltoall(tensor)
collected.backward(self.cast_and_place(torch.ones(collected.shape), dtype))
grad_out = tensor.grad.data.cpu().numpy()
expected = np.ones(tensor.shape)
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_broadcast_state(self):
hvd.init()
N, D_in, H, D_out = 64, 100, 10, 10
x = torch.randn(N, D_in).requires_grad_()
y = torch.randn(N, D_out).requires_grad_()
def new_optimizer(cls, opt_params, model):
p = {
k: v for k, v in opt_params.items()
if k in inspect.getargspec(cls.__init__).args
}
return cls(model.parameters(), **p)
def create_model(opt_class, opt_params):
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out),
)
optimizer = new_optimizer(opt_class, opt_params, model)
optimizer = hvd.DistributedOptimizer(
optimizer, named_parameters=model.named_parameters())
return model, optimizer
def get_model_param_values(model):
params = sorted(model.state_dict().items())
return [(k, v.clone()) for k, v in params]
def get_optimizer_param_values(optimizer):
results = []
state_dict = optimizer.state_dict()
for group in state_dict['param_groups']:
for param_id in group['params']:
if param_id not in state_dict['state']:
continue
params = sorted(state_dict['state'][param_id].items())
for k, v in params:
results.append(
(k, v.clone() if torch.is_tensor(v) else v))
return results
# L-BFGS is currently unsupported, as are sparse tensors, which are
# required by SparseAdam optimizer
optimizers = [
(subclass.__name__, subclass)
for subclass in torch.optim.Optimizer.__subclasses__()
if subclass.__module__.startswith('torch.optim') and
subclass != torch.optim.LBFGS and
subclass != torch.optim.SparseAdam
]
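# Sort by class name so every rank iterates the optimizers in the same order.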
optimizers.sort(key=lambda tup: tup[0])
opt_params_list = [
dict(lr=0.2, momentum=0.9, weight_decay=0.1, centered=True),
dict(lr=0.2)
]
for (opt_name, opt_class), opt_params in itertools.product(optimizers, opt_params_list):
model, optimizer = create_model(opt_class, opt_params)
y_pred = model(x)
loss = F.mse_loss(y_pred, y, size_average=False)
optimizer.zero_grad()
loss.backward()
optimizer.step()
model_param_values = get_model_param_values(model)
for name, model_param_value in model_param_values:
hvd.broadcast_(model_param_value, root_rank=0)
opt_param_values_updated = []
opt_param_values = get_optimizer_param_values(optimizer)
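# Tensor-valued optimizer state can be broadcast in place; non-tensor state (plain Python values) must go through broadcast_object.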
for name, opt_param_value in opt_param_values:
is_tensor = torch.is_tensor(opt_param_value)
if is_tensor:
hvd.broadcast_(opt_param_value, root_rank=0)
else:
opt_param_value = hvd.broadcast_object(opt_param_value, name=name)
opt_param_values_updated.append((name, opt_param_value))
opt_param_values = opt_param_values_updated
with temppath() as fname:
if hvd.rank() == 0:
state = {
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
torch.save(state, fname)
model, optimizer = create_model(opt_class, opt_params)
if hvd.rank() == 0:
checkpoint = torch.load(fname)
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
model_param_value_after = get_model_param_values(model)
for before, after in zip(model_param_values,
model_param_value_after):
name, model_param_value = before
name_after, model_param_value_after = after
self.assertEqual(name, name_after)
self.assertEqual(type(model_param_value),
type(model_param_value_after))
self.assertTrue(
(model_param_value == model_param_value_after).all())
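# Rank 0 broadcasts how many state tensors it holds so every rank can verify that broadcast_optimizer_state reproduced the same number of entries.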
expected_tensors = hvd.broadcast_object(len(optimizer.state_dict()['state'].values()))
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
self.assertEqual(len(optimizer.state_dict()['state'].values()), expected_tensors)
opt_param_values_after = get_optimizer_param_values(optimizer)
for before, after in zip(opt_param_values, opt_param_values_after):
name, opt_param_value = before
name_after, opt_param_value_after = after
self.assertEqual(name, name_after)
self.assertEqual(type(opt_param_value),
type(opt_param_value_after))
if torch.is_tensor(opt_param_value):
self.assertTrue(
(opt_param_value == opt_param_value_after).all())
else:
self.assertEqual(opt_param_value, opt_param_value_after)
# TODO: investigate why this hangs on K80s
@unittest.skip
def test_broadcast_state_gpu(self):
# Only do this test if there are GPUs available.
if not torch.cuda.is_available():
self.skipTest("No GPUs available")
# Set default tensor type, ensuring optimizer tensor-wrapping is robust
# to this setting.
try:
torch.set_default_tensor_type(torch.cuda.FloatTensor)
self.test_broadcast_state()
finally:
torch.set_default_tensor_type(torch.FloatTensor)
def test_broadcast_state_options(self):
hvd.init()
N, D_in, H, D_out = 64, 100, 10, 10
x = torch.randn(N, D_in).requires_grad_()
y = torch.randn(N, D_out).requires_grad_()
params_0 = dict(lr=0.1, momentum=0.8, weight_decay=0.2, nesterov=True,
betas=(0.9, 0.999), etas=(0.8, 2.4), step_sizes=(1e-5, 100))
params_1 = dict(lr=0.2, momentum=0.9, weight_decay=0.1, nesterov=False,
betas=(0.8, 0.9), etas=(0.25, 1.75), step_sizes=(1e-7, 5))
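# Ranks deliberately start with different hyperparameters; broadcast_optimizer_state below should overwrite them with rank 0's params_0 values.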
def create_model(opt_class):
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out),
)
params = params_0 if hvd.rank() == 0 else params_1
p = {
k: v for k, v in params.items()
if k in inspect.getargspec(opt_class.__init__).args
}
opt = opt_class(model.parameters(), **p)
opt = hvd.DistributedOptimizer(opt, named_parameters=model.named_parameters())
return model, opt
# Include the subclass name so we can sort lexicographically; otherwise different
# ranks would end up with different optimizer orderings
optimizers = [
(subclass.__name__, subclass)
for subclass in torch.optim.Optimizer.__subclasses__()
if subclass.__module__.startswith('torch.optim') and
subclass != torch.optim.LBFGS and
subclass != torch.optim.SparseAdam
]
optimizers.sort(key=lambda tup: tup[0])
for _, opt_class in optimizers:
model, optimizer = create_model(opt_class)
y_pred = model(x)
loss = F.mse_loss(y_pred, y, size_average=False)
optimizer.zero_grad()
loss.backward()
optimizer.step()
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
p0 = {
k: v for k, v in params_0.items()
if k in inspect.getargspec(opt_class.__init__).args
}
for k, p in p0.items():
p_actual = optimizer.param_groups[0][k]
if not isinstance(p, Iterable):
p_actual = [p_actual]
p = [p]
for i in range(len(p)):
self.assertEqual(type(p_actual[i]), type(p[i]))
self.assertAlmostEqual(p_actual[i], p[i], delta=1e-5)
# Ensure that the parameter option types are compatible with ops
y_pred = model(x)
loss = F.mse_loss(y_pred, y, size_average=False)
optimizer.zero_grad()
loss.backward()
optimizer.step()
def test_broadcast_state_no_grad(self):
class ModelNoGrad(nn.Module):
def __init__(self, a, b):
super(ModelNoGrad, self).__init__()
self.a = nn.Parameter(a.int(), requires_grad=False)
self.b = nn.Parameter(b)
def forward(self, x):
return torch.index_select(self.b, 0, self.a.long()) * x
hvd.init()
a = torch.Tensor([1, 3])
b = torch.rand(4)
model = ModelNoGrad(a, b)
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, weight_decay=1e-6, momentum=0.9, nesterov=True)
optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
grad = optimizer.param_groups[0]['params'][1].grad
bgrad = hvd.broadcast(grad, root_rank=0)
assert optimizer.param_groups[0]['params'][0].grad is None
assert torch.all(torch.eq(grad, bgrad)).item()
def test_broadcast_object(self):
hvd.init()
expected_obj = {
'hello': 123,
0: [1, 2]
}
obj = expected_obj if hvd.rank() == 0 else {}
obj = hvd.broadcast_object(obj, root_rank=0)
self.assertDictEqual(obj, expected_obj)
def test_allgather_object(self):
hvd.init()
d = {'metric_val_1': hvd.rank()}
if hvd.rank() == 1:
d['metric_val_2'] = 42
results = hvd.allgather_object(d)
expected = [{'metric_val_1': i} for i in range(hvd.size())]
if hvd.size() > 1:
expected[1] = {'metric_val_1': 1, 'metric_val_2': 42}
self.assertEqual(len(results), hvd.size())
self.assertListEqual(results, expected)
def test_compression_fp16(self):
valid_dtypes = [torch.float32, torch.float64]
invalid_dtypes = [torch.uint8, torch.int8, torch.int16,
torch.int32, torch.int64]
tensor_size = [5] * 3
compression = hvd.Compression.fp16
for dtype in valid_dtypes:
tensor = torch.ones(tensor_size, dtype=dtype)
tensor_compressed, ctx = compression.compress(tensor)
self.assertEqual(tensor_compressed.dtype, torch.float16)
tensor_decompressed = compression.decompress(tensor_compressed, ctx)
self.assertEqual(tensor_decompressed.dtype, dtype)
expected = np.ones(tensor_size)
err = np.linalg.norm(expected - tensor_decompressed.data.numpy())
self.assertLess(err, 0.00000001)
for dtype in invalid_dtypes:
tensor = torch.ones(tensor_size, dtype=dtype)
tensor_compressed, ctx = compression.compress(tensor)
self.assertEqual(tensor_compressed.dtype, dtype)
tensor_decompressed = compression.decompress(tensor_compressed, ctx)
self.assertEqual(tensor_decompressed.dtype, dtype)
if dtype != torch.int8: # Cannot cast to NumPy with a CharTensor
expected = np.ones(tensor_size)
err = np.linalg.norm(expected - tensor_decompressed.data.numpy())
self.assertLess(err, 0.00000001)
def test_force_allreduce(self):
"""Test that allreduce is forced on all gradients during opt.step()."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
N, D_in, H, D_out = 64, 100, 10, 10
x = torch.randn(N, D_in).requires_grad_()
y = torch.randn(N, D_out).requires_grad_()
def new_optimizer(cls, opt_params, model):
p = {
k: v for k, v in opt_params.items()
if k in inspect.getargspec(cls.__init__).args
}
return cls(model.parameters(), **p)
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = torch.nn.Linear(D_in, H)
self.fc2 = torch.nn.Linear(H, D_out)
self.fc3 = torch.nn.Linear(D_out, D_out)
def forward(self, x_):
x_ = F.relu(self.fc1(x_))
x1_ = self.fc2(x_)
x2_ = self.fc3(F.relu(x1_))
return x1_, x2_
def create_model(opt_class, opt_params):
model = Net()
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
opt = new_optimizer(opt_class, opt_params, model)
opt = hvd.DistributedOptimizer(
opt, named_parameters=model.named_parameters())
return model, opt
# L-BFGS is currently unsupported, as are sparse tensors, which are
# required by SparseAdam optimizer
optimizers = [
(subclass.__name__, subclass)
for subclass in torch.optim.Optimizer.__subclasses__()
if subclass.__module__.startswith('torch.optim') and
subclass != torch.optim.LBFGS and
subclass != torch.optim.SparseAdam
]
optimizers.sort(key=lambda tup: tup[0])
opt_params_list = [
dict(lr=0.2, momentum=0.9, weight_decay=0.1, centered=True),
dict(lr=0.2)
]
for (opt_name, opt_class), opt_params in itertools.product(optimizers, opt_params_list):
model, optimizer = create_model(opt_class, opt_params)
y_pred1, y_pred2 = model(x)
if rank == 0:
loss = F.mse_loss(y_pred1, y, size_average=False)
else:
loss = F.mse_loss(y_pred2, y, size_average=False)
optimizer.zero_grad()
loss.backward()
optimizer.step()
def test_model_parallelism(self):
"""Test that tensors on different GPUs are supported."""
# Only do this test if there are GPUs available.
if not torch.cuda.is_available():
self.skipTest("No GPUs available")
hvd.init()
local_rank = hvd.local_rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# Skip the test if there are not enough GPUs.
if torch.cuda.device_count() < hvd.local_size() * 2:
self.skipTest("Not enough GPUs available")
first_device = local_rank * 2
second_device = local_rank * 2 + 1
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
# Place parts of model on different GPUs.
self.conv1 = torch.nn.Conv2d(1, 100, 1).cuda(first_device)
self.conv2 = torch.nn.Conv2d(100, 1, 1).cuda(second_device)
def forward(self, x):
x = x.cuda(first_device)
x = self.conv1(x)
x = x.cuda(second_device)
x = self.conv2(x)
return x
model = Net()
inp = torch.rand([1, 1, 1000, 1000])
opt = torch.optim.SGD(model.parameters(), lr=0.1)
opt = hvd.DistributedOptimizer(opt, named_parameters=model.named_parameters())
loss = model(inp).sum()
opt.zero_grad()
loss.backward()
opt.step()
def test_delta_optimizer(self):
"""Test that delta optimizer."""
hvd.init()
# TODO support non-MPI Adasum operation
        # Only do this test if MPI is enabled and GPUs are available.
        if not hvd.mpi_enabled() or not torch.cuda.is_available():
            self.skipTest("MPI or GPUs not available")
local_rank = hvd.local_rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = torch.nn.Conv2d(1, 100, 1).cuda(local_rank)
self.conv2 = torch.nn.Conv2d(100, 1, 1).cuda(local_rank)
def forward(self, x):
x = x.cuda(local_rank)
x = self.conv1(x)
x = x.cuda(local_rank)
x = self.conv2(x)
return x
model = Net()
inp = torch.rand([1, 1, 1000, 1000])
opt = torch.optim.SGD(model.parameters(), lr=0.1)
opt = hvd.DistributedOptimizer(opt, named_parameters=model.named_parameters(), op=hvd.Adasum)
loss = model(inp).sum()
opt.zero_grad()
loss.backward()
opt.step()
def test_duplicate_names(self):
"""Test that passing duplicate names to optimizer will fail."""
net1 = torch.nn.Conv2d(1, 1, 1)
net2 = torch.nn.Conv2d(1, 1, 1)
parameters = itertools.chain(net1.parameters(), net2.parameters())
opt = torch.optim.SGD(parameters, lr=0.1)
# This will have duplicate names, since both net1 and net2 have 'weight' and 'bias'
named_parameters = itertools.chain(net1.named_parameters(), net2.named_parameters())
try:
hvd.DistributedOptimizer(opt, named_parameters=named_parameters)
assert False, 'hvd.DistributedOptimizer did not throw error'
except ValueError:
pass
def test_dynamic_requires_grad(self):
"""Test that makes sure that gradients can be turned off/on dynamically."""
hvd.init()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
gen = torch.nn.Conv2d(1, 10, 1)
disc = torch.nn.Conv2d(10, 1, 1)
inp = torch.rand([1, 1, 100, 100])
gen_opt = torch.optim.SGD(gen.parameters(), lr=0.1)
gen_opt = hvd.DistributedOptimizer(gen_opt, named_parameters=gen.named_parameters())
disc_opt = torch.optim.SGD(disc.parameters(), lr=0.1)
disc_opt = hvd.DistributedOptimizer(disc_opt, named_parameters=disc.named_parameters())
def train_step(train_generator=False, train_discriminator=False):
for p in gen.parameters():
p.requires_grad_(train_generator)
for p in disc.parameters():
p.requires_grad_(train_discriminator)
gen_opt.zero_grad()
disc_opt.zero_grad()
loss = disc(gen(inp)).sum()
loss.backward()
for p in gen.parameters():
assert train_generator == p.grad.max().is_nonzero(), \
'Gradient for generator is zero but it should be trained or vice versa.'
for p in disc.parameters():
assert train_discriminator == p.grad.max().is_nonzero(), \
'Gradient for discriminator is zero but it should be trained or vice versa.'
if train_generator:
gen_opt.step()
if train_discriminator:
disc_opt.step()
for x in range(10):
# Step 1: train generator.
train_step(train_generator=True)
# Step 2: train discriminator.
train_step(train_discriminator=True)
def test_gradient_clipping(self):
"""Test gradient clipping example."""
hvd.init()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
x = torch.ones(1, 1).requires_grad_()
y = torch.ones(1, 1).requires_grad_()
model = torch.nn.Linear(1, 1)
model.weight = torch.nn.Parameter(torch.zeros(1, 1) + 0.5)
model.bias = torch.nn.Parameter(torch.zeros(1))
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
optimizer = hvd.DistributedOptimizer(
optimizer, named_parameters=model.named_parameters())
y_pred = model(x)
loss = F.mse_loss(y_pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.synchronize()
prior_grad = model.weight.grad.item()
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
clipped_grad = model.weight.grad.item()
assert abs(prior_grad) > abs(clipped_grad)
with optimizer.skip_synchronize():
optimizer.step()
def test_synchronize_step_warning(self):
"""
Test that .synchronize() followed by .step() without
optimizer.skip_synchronize() context will produce a warning.
"""
hvd.init()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
x = torch.zeros(1, 1).requires_grad_()
y = torch.ones(1, 1).requires_grad_()
model = torch.nn.Linear(1, 1)
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
optimizer = hvd.DistributedOptimizer(
optimizer, named_parameters=model.named_parameters())
y_pred = model(x)
loss = F.mse_loss(y_pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.synchronize()
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
with warnings.catch_warnings(record=True) as ws:
optimizer.step()
assert len(ws) == 1
assert 'optimizer.step() called without optimizer.skip_synchronize()' \
in str(ws[0].message)
def test_no_named_parameters(self):
"""Test that leaving the default named_parameters=None will not throw an error."""
hvd.init()
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = torch.nn.Conv2d(1, 100, 1)
self.conv2 = torch.nn.Conv2d(100, 1, 1)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
return x
model = Net()
inp = torch.rand([1, 1, 1000, 1000])
opt = torch.optim.SGD(model.parameters(), lr=0.1)
opt = hvd.DistributedOptimizer(opt)
loss = model(inp).sum()
opt.zero_grad()
loss.backward()
opt.step()
def test_missing_named_parameters(self):
"""Test that naming half of the model parameters will throw an error."""
hvd.init()
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = torch.nn.Conv2d(1, 100, 1)
self.conv2 = torch.nn.Conv2d(100, 1, 1)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
return x
model = Net()
opt = torch.optim.SGD(model.parameters(), lr=0.1)
try:
hvd.DistributedOptimizer(opt,
named_parameters=list(model.named_parameters())[0:1])
assert False, 'hvd.DistributedOptimizer did not throw error'
except ValueError:
pass
def test_horovod_join_allreduce(self):
"""Test Join op with allreduce."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
dtypes = [torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
integral_types = [torch.IntTensor, torch.LongTensor, torch.cuda.IntTensor, torch.cuda.LongTensor]
dims = [1, 2, 3]
first_join_ranks = [0, 1]
cachings = [False, True]
for dtype, dim, first_join_rank, caching in itertools.product(dtypes, dims, first_join_ranks, cachings):
torch.manual_seed(1234)
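            # Note (added): every rank seeds identically, so all ranks produce the same
            # tensors; the rank that joins first contributes nothing to the allreduce,
            # hence the expected average below is tensor * (size - 1) / size.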
def div(t, s):
if _1_5_api and dtype in integral_types:
return t.floor_divide(s)
return t / s
# Use two tensors to test fusion
tensor_a = torch.FloatTensor(*([5] * dim)).random_(-100, 100)
tensor_a = self.cast_and_place(tensor_a, dtype)
tensor_b = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor_b = self.cast_and_place(tensor_b, dtype)
if caching:
handle_a = hvd.allreduce_async(tensor_a, name="tensor_a", average=True)
handle_b = hvd.allreduce_async(tensor_b, name="tensor_b", average=True)
averaged_a = hvd.synchronize(handle_a)
averaged_b = hvd.synchronize(handle_b)
if rank == first_join_rank:
if dtype.is_cuda:
ret = hvd.join(hvd.local_rank())
else:
ret = hvd.join()
else:
handle_a = hvd.allreduce_async(tensor_a, name="tensor_a", average=True)
handle_b = hvd.allreduce_async(tensor_b, name="tensor_b", average=True)
averaged_a = hvd.synchronize(handle_a)
averaged_b = hvd.synchronize(handle_b)
if dtype.is_cuda:
ret = hvd.join(hvd.local_rank())
else:
ret = hvd.join()
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in integral_types:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert torch.allclose(averaged_a, div(tensor_a * (size - 1), size), threshold), \
'hvd.join with hvd.allreduce produces incorrect results'
assert torch.allclose(averaged_b, div(tensor_b * (size - 1), size), threshold), \
'hvd.join with hvd.allreduce produces incorrect results'
def test_horovod_join_allgather(self):
"""Test Join op with allgather."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
dims = [17] * 3
tensor = torch.FloatTensor(*dims)
if rank == 0:
if torch.cuda.is_available():
ret = hvd.join(hvd.local_rank())
else:
ret = hvd.join()
else:
try:
hvd.allgather(tensor)
assert False, 'hvd.allgather did not throw error'
except (torch.FatalError, RuntimeError):
pass
ret = hvd.join(hvd.local_rank())
def test_horovod_join_broadcast(self):
"""Test Join op with broadcast."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
dims = [17] * 3
tensor = torch.FloatTensor(*dims)
if rank == 0:
ret = hvd.join(hvd.local_rank())
else:
try:
broadcasted_tensor = hvd.broadcast(tensor, 1, name="test_horovod_join_broadcast")
assert False, 'hvd.broadcast did not throw error'
except (torch.FatalError, RuntimeError):
pass
if torch.cuda.is_available():
ret = hvd.join(hvd.local_rank())
else:
ret = hvd.join()
def test_horovod_sync_batch_norm(self):
"""Tests Horovod version of SyncBatchNorm."""
if not torch.cuda.is_available():
self.skipTest("No GPUs available")
hvd.init()
ts_list = [
torch.stack([
torch.tensor([
[r, r + 1],
[r * 2, r * 2 + 1],
[r * 3, r * 3 + 1],
[r * 4, r * 4 + 1]
])
for r in range(hvd.size())
]),
torch.stack([
torch.tensor([
[r + 1],
[r * 2 + 1],
[r * 3 + 1],
[r * 4 + 1]
])
for r in range(hvd.size())
]),
]
for ts in ts_list:
sync_bn = hvd.SyncBatchNorm(num_features=4)
sync_bn.cuda(hvd.local_rank())
bn = torch.nn.BatchNorm1d(num_features=4)
bn.cuda(hvd.local_rank())
ts = ts.cuda(hvd.local_rank()).float()
ts1 = ts.clone().requires_grad_()
ts2 = ts.clone().requires_grad_()
# Training
sync_bn_out = sync_bn(ts1[hvd.rank()].unsqueeze(0))
bn_out = bn(ts2)
assert torch.allclose(sync_bn_out, bn_out[hvd.rank()].unsqueeze(0), 1e-6)
assert torch.allclose(sync_bn.running_mean, bn.running_mean, 1e-6)
assert torch.allclose(sync_bn.running_var, bn.running_var, 1e-6)
# Gradients
sync_bn_out.sum().backward()
bn_out.mean(dim=0).sum().backward()
assert torch.allclose(hvd.allreduce(sync_bn.weight.grad, name='sync_bn.weight.grad'), bn.weight.grad, 1e-6)
assert torch.allclose(hvd.allreduce(sync_bn.bias.grad, name='sync_bn.bias.grad'), bn.bias.grad, 1e-6)
assert torch.allclose(hvd.allreduce(ts1.grad, name='ts1.grad'), ts2.grad, 1e-6)
@pytest.mark.skip(reason='https://github.com/horovod/horovod/issues/2496')
def test_timeline_api(self):
hvd.init()
def check_file(fname, check_cycle=True):
if hvd.rank() == 0:
with open(fname, 'r') as timeline_file:
timeline_text = timeline_file.read()
assert 'allreduce.test_allreduce' in timeline_text, timeline_text
assert 'start_time_since_epoch_in_micros' in timeline_text, timeline_text
assert 'NEGOTIATE_ALLREDUCE' in timeline_text, timeline_text
assert 'ALLREDUCE' in timeline_text, timeline_text
json_obj = json.loads(timeline_text)
assert json_obj is not None
if check_cycle:
assert 'CYCLE_START' in timeline_text, timeline_text
with temppath() as fname1:
hvd.start_timeline(fname1, mark_cycles=True)
            hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy()
            # Stopping the timeline immediately stops new events from being registered.
            # Sleep briefly before stopping so that mark_cycle events can be recorded in the timeline file.
time.sleep(0.2)
hvd.stop_timeline()
check_file(fname1)
# Test resuming with a different filename.
with temppath() as fname2:
hvd.start_timeline(fname2, mark_cycles=True)
time.sleep(0.2)
            hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy()
            # Stopping the timeline immediately stops new events from being registered.
            # Sleep briefly before stopping so that cycle events can be recorded in the timeline file.
time.sleep(0.2)
hvd.stop_timeline()
check_file(fname2)
# Test resuming with a different filename, but mark_cycles=False
with temppath() as fname3:
# Make sure that last stop timeline has been processed.
hvd.start_timeline(fname3, mark_cycles=False)
time.sleep(0.2)
            hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy()
            # Stopping the timeline immediately stops new events from being registered, so give
            # in-flight events a chance to be recorded in the timeline file before stopping.
hvd.stop_timeline()
check_file(fname3, check_cycle=False)
# Test resuming with a different filename, but mark_cycles=True
with temppath() as fname4:
# Make sure that last stop timeline has been processed.
hvd.start_timeline(fname4, mark_cycles=True)
time.sleep(0.2)
            hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy()
            # Stopping the timeline immediately stops new events from being registered.
            # Sleep briefly before stopping so that cycle events can be recorded in the timeline file.
time.sleep(0.2)
hvd.stop_timeline()
check_file(fname4, check_cycle=True)
with temppath() as fname5:
# Make sure that last stop timeline has been processed.
hvd.start_timeline(fname5, mark_cycles=False)
hvd.start_timeline(fname5, mark_cycles=False)
time.sleep(0.2)
hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy()
hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy()
time.sleep(0.2)
hvd.stop_timeline()
check_file(fname5, check_cycle=False)
hvd.shutdown()
if __name__ == "__main__":
unittest.main()
| [
"torch.nn.Linear",
"torch.cat",
"torch.ones",
"torch.nn.Parameter",
"torch.cuda.is_available",
"torch.cuda.FloatTensor",
"torch.load",
"torch.allclose",
"torch.IntTensor",
"torch.is_tensor",
"torch.FloatTensor",
"torch.manual_seed",
"torch.optim.Optimizer.__subclasses__",
"torch.tensor",
"torch.nn.functional.relu",
"torch.empty",
"torch.Tensor",
"torch.zeros",
"torch.set_default_tensor_type",
"torch.optim.SGD",
"torch.save",
"torch.cuda.device_count",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.rand",
"torch.eq",
"torch.nn.functional.mse_loss",
"torch.nn.BatchNorm1d",
"torch.randn"
] | 1.4.0 | DEKHTIARJonathan/horovod | 333ce607c5ed0c5a38defd234f818aeb27a5394b |
1.4 | from timeit import timeit
import numpy as np
import torch
from tianshou.data import Batch, ReplayBuffer, to_numpy
from tianshou.policy import BasePolicy
def compute_episodic_return_base(batch, gamma):
returns = np.zeros_like(batch.rew)
last = 0
for i in reversed(range(len(batch.rew))):
returns[i] = batch.rew[i]
if not batch.done[i]:
returns[i] += last * gamma
last = returns[i]
batch.returns = returns
return batch
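# Note (added): the loop above implements the bootstrap-free discounted-return recursion
#   returns[i] = rew[i] + gamma * returns[i + 1] * (1 - done[i]),
# evaluated backwards over the batch, and serves as a reference implementation for
# BasePolicy.compute_episodic_return in the tests and benchmark below.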
def test_episodic_returns(size=2560):
fn = BasePolicy.compute_episodic_return
buf = ReplayBuffer(20)
batch = Batch(
done=np.array([1, 0, 0, 1, 0, 1, 0, 1.]),
rew=np.array([0, 1, 2, 3, 4, 5, 6, 7.]),
info=Batch(
{
'TimeLimit.truncated':
np.array([False, False, False, False, False, True, False, False])
}
)
)
for b in batch:
b.obs = b.act = 1
buf.add(b)
returns, _ = fn(batch, buf, buf.sample_indices(0), gamma=.1, gae_lambda=1)
ans = np.array([0, 1.23, 2.3, 3, 4.5, 5, 6.7, 7])
assert np.allclose(returns, ans)
buf.reset()
batch = Batch(
done=np.array([0, 1, 0, 1, 0, 1, 0.]),
rew=np.array([7, 6, 1, 2, 3, 4, 5.]),
)
for b in batch:
b.obs = b.act = 1
buf.add(b)
returns, _ = fn(batch, buf, buf.sample_indices(0), gamma=.1, gae_lambda=1)
ans = np.array([7.6, 6, 1.2, 2, 3.4, 4, 5])
assert np.allclose(returns, ans)
buf.reset()
batch = Batch(
done=np.array([0, 1, 0, 1, 0, 0, 1.]),
rew=np.array([7, 6, 1, 2, 3, 4, 5.]),
)
for b in batch:
b.obs = b.act = 1
buf.add(b)
returns, _ = fn(batch, buf, buf.sample_indices(0), gamma=.1, gae_lambda=1)
ans = np.array([7.6, 6, 1.2, 2, 3.45, 4.5, 5])
assert np.allclose(returns, ans)
buf.reset()
batch = Batch(
done=np.array([0, 0, 0, 1., 0, 0, 0, 1, 0, 0, 0, 1]),
rew=np.array([101, 102, 103., 200, 104, 105, 106, 201, 107, 108, 109, 202]),
)
for b in batch:
b.obs = b.act = 1
buf.add(b)
v = np.array([2., 3., 4, -1, 5., 6., 7, -2, 8., 9., 10, -3])
returns, _ = fn(batch, buf, buf.sample_indices(0), v, gamma=0.99, gae_lambda=0.95)
ground_truth = np.array(
[
454.8344, 376.1143, 291.298, 200., 464.5610, 383.1085, 295.387, 201.,
474.2876, 390.1027, 299.476, 202.
]
)
assert np.allclose(returns, ground_truth)
buf.reset()
batch = Batch(
done=np.array([0, 0, 0, 1., 0, 0, 0, 1, 0, 0, 0, 1]),
rew=np.array([101, 102, 103., 200, 104, 105, 106, 201, 107, 108, 109, 202]),
info=Batch(
{
'TimeLimit.truncated':
np.array(
[
False, False, False, True, False, False, False, True, False,
False, False, False
]
)
}
)
)
for b in batch:
b.obs = b.act = 1
buf.add(b)
v = np.array([2., 3., 4, -1, 5., 6., 7, -2, 8., 9., 10, -3])
returns, _ = fn(batch, buf, buf.sample_indices(0), v, gamma=0.99, gae_lambda=0.95)
ground_truth = np.array(
[
454.0109, 375.2386, 290.3669, 199.01, 462.9138, 381.3571, 293.5248, 199.02,
474.2876, 390.1027, 299.476, 202.
]
)
assert np.allclose(returns, ground_truth)
if __name__ == '__main__':
buf = ReplayBuffer(size)
batch = Batch(
done=np.random.randint(100, size=size) == 0,
rew=np.random.random(size),
)
for b in batch:
b.obs = b.act = 1
buf.add(b)
indices = buf.sample_indices(0)
def vanilla():
return compute_episodic_return_base(batch, gamma=.1)
def optimized():
return fn(batch, buf, indices, gamma=.1, gae_lambda=1.0)
cnt = 3000
print('GAE vanilla', timeit(vanilla, setup=vanilla, number=cnt))
print('GAE optim ', timeit(optimized, setup=optimized, number=cnt))
def target_q_fn(buffer, indices):
    # return the negated next-step reward as a dummy target Q value
indices = buffer.next(indices)
return torch.tensor(-buffer.rew[indices], dtype=torch.float32)
def target_q_fn_multidim(buffer, indices):
return target_q_fn(buffer, indices).unsqueeze(1).repeat(1, 51)
def compute_nstep_return_base(nstep, gamma, buffer, indices):
returns = np.zeros_like(indices, dtype=float)
buf_len = len(buffer)
for i in range(len(indices)):
flag, rew = False, 0.
real_step_n = nstep
for n in range(nstep):
idx = (indices[i] + n) % buf_len
rew += buffer.rew[idx] * gamma**n
if buffer.done[idx]:
if not (
hasattr(buffer, 'info') and buffer.info['TimeLimit.truncated'][idx]
):
flag = True
real_step_n = n + 1
break
if not flag:
idx = (indices[i] + real_step_n - 1) % buf_len
rew += to_numpy(target_q_fn(buffer, idx)) * gamma**real_step_n
returns[i] = rew
return returns
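# Note (added): the reference implementation above computes the standard n-step target
#   G_t = sum_{k=0}^{m-1} gamma^k * r_{t+k} + gamma^m * Q_target(s_{t+m}),
# where the bootstrap term is dropped when a true terminal (as opposed to a
# TimeLimit truncation) is reached within the first n transitions.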
def test_nstep_returns(size=10000):
buf = ReplayBuffer(10)
for i in range(12):
buf.add(Batch(obs=0, act=0, rew=i + 1, done=i % 4 == 3))
batch, indices = buf.sample(0)
assert np.allclose(indices, [2, 3, 4, 5, 6, 7, 8, 9, 0, 1])
# rew: [11, 12, 3, 4, 5, 6, 7, 8, 9, 10]
# done: [ 0, 1, 0, 1, 0, 0, 0, 1, 0, 0]
# test nstep = 1
returns = to_numpy(
BasePolicy.compute_nstep_return(
batch, buf, indices, target_q_fn, gamma=.1, n_step=1
).pop('returns').reshape(-1)
)
assert np.allclose(returns, [2.6, 4, 4.4, 5.3, 6.2, 8, 8, 8.9, 9.8, 12])
r_ = compute_nstep_return_base(1, .1, buf, indices)
assert np.allclose(returns, r_), (r_, returns)
returns_multidim = to_numpy(
BasePolicy.compute_nstep_return(
batch, buf, indices, target_q_fn_multidim, gamma=.1, n_step=1
).pop('returns')
)
assert np.allclose(returns_multidim, returns[:, np.newaxis])
# test nstep = 2
returns = to_numpy(
BasePolicy.compute_nstep_return(
batch, buf, indices, target_q_fn, gamma=.1, n_step=2
).pop('returns').reshape(-1)
)
assert np.allclose(returns, [3.4, 4, 5.53, 6.62, 7.8, 8, 9.89, 10.98, 12.2, 12])
r_ = compute_nstep_return_base(2, .1, buf, indices)
assert np.allclose(returns, r_)
returns_multidim = to_numpy(
BasePolicy.compute_nstep_return(
batch, buf, indices, target_q_fn_multidim, gamma=.1, n_step=2
).pop('returns')
)
assert np.allclose(returns_multidim, returns[:, np.newaxis])
# test nstep = 10
returns = to_numpy(
BasePolicy.compute_nstep_return(
batch, buf, indices, target_q_fn, gamma=.1, n_step=10
).pop('returns').reshape(-1)
)
assert np.allclose(returns, [3.4, 4, 5.678, 6.78, 7.8, 8, 10.122, 11.22, 12.2, 12])
r_ = compute_nstep_return_base(10, .1, buf, indices)
assert np.allclose(returns, r_)
returns_multidim = to_numpy(
BasePolicy.compute_nstep_return(
batch, buf, indices, target_q_fn_multidim, gamma=.1, n_step=10
).pop('returns')
)
assert np.allclose(returns_multidim, returns[:, np.newaxis])
def test_nstep_returns_with_timelimit(size=10000):
buf = ReplayBuffer(10)
for i in range(12):
buf.add(
Batch(
obs=0,
act=0,
rew=i + 1,
done=i % 4 == 3,
info={"TimeLimit.truncated": i == 3}
)
)
batch, indices = buf.sample(0)
assert np.allclose(indices, [2, 3, 4, 5, 6, 7, 8, 9, 0, 1])
# rew: [11, 12, 3, 4, 5, 6, 7, 8, 9, 10]
# done: [ 0, 1, 0, 1, 0, 0, 0, 1, 0, 0]
# test nstep = 1
returns = to_numpy(
BasePolicy.compute_nstep_return(
batch, buf, indices, target_q_fn, gamma=.1, n_step=1
).pop('returns').reshape(-1)
)
assert np.allclose(returns, [2.6, 3.6, 4.4, 5.3, 6.2, 8, 8, 8.9, 9.8, 12])
r_ = compute_nstep_return_base(1, .1, buf, indices)
assert np.allclose(returns, r_), (r_, returns)
returns_multidim = to_numpy(
BasePolicy.compute_nstep_return(
batch, buf, indices, target_q_fn_multidim, gamma=.1, n_step=1
).pop('returns')
)
assert np.allclose(returns_multidim, returns[:, np.newaxis])
# test nstep = 2
returns = to_numpy(
BasePolicy.compute_nstep_return(
batch, buf, indices, target_q_fn, gamma=.1, n_step=2
).pop('returns').reshape(-1)
)
assert np.allclose(returns, [3.36, 3.6, 5.53, 6.62, 7.8, 8, 9.89, 10.98, 12.2, 12])
r_ = compute_nstep_return_base(2, .1, buf, indices)
assert np.allclose(returns, r_)
returns_multidim = to_numpy(
BasePolicy.compute_nstep_return(
batch, buf, indices, target_q_fn_multidim, gamma=.1, n_step=2
).pop('returns')
)
assert np.allclose(returns_multidim, returns[:, np.newaxis])
# test nstep = 10
returns = to_numpy(
BasePolicy.compute_nstep_return(
batch, buf, indices, target_q_fn, gamma=.1, n_step=10
).pop('returns').reshape(-1)
)
assert np.allclose(
returns, [3.36, 3.6, 5.678, 6.78, 7.8, 8, 10.122, 11.22, 12.2, 12]
)
r_ = compute_nstep_return_base(10, .1, buf, indices)
assert np.allclose(returns, r_)
returns_multidim = to_numpy(
BasePolicy.compute_nstep_return(
batch, buf, indices, target_q_fn_multidim, gamma=.1, n_step=10
).pop('returns')
)
assert np.allclose(returns_multidim, returns[:, np.newaxis])
if __name__ == '__main__':
buf = ReplayBuffer(size)
for i in range(int(size * 1.5)):
buf.add(
Batch(
obs=0,
act=0,
rew=i + 1,
done=np.random.randint(3) == 0,
info={"TimeLimit.truncated": i % 33 == 0}
)
)
batch, indices = buf.sample(256)
def vanilla():
return compute_nstep_return_base(3, .1, buf, indices)
def optimized():
return BasePolicy.compute_nstep_return(
batch, buf, indices, target_q_fn, gamma=.1, n_step=3
)
cnt = 3000
print('nstep vanilla', timeit(vanilla, setup=vanilla, number=cnt))
print('nstep optim ', timeit(optimized, setup=optimized, number=cnt))
if __name__ == '__main__':
test_nstep_returns()
test_nstep_returns_with_timelimit()
test_episodic_returns()
| [
"torch.tensor"
] | 1.4.0 | ClarenceYC/tianshou | 39f8391cfb4f219a267c1040e2d463be91c645b0 |
1.4 | from copy import deepcopy
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import torch
from torch.distributions import Independent, Normal
from tianshou.data import Batch, ReplayBuffer, to_torch_as
from tianshou.exploration import BaseNoise
from tianshou.policy import DDPGPolicy
class SACPolicy(DDPGPolicy):
"""Implementation of Soft Actor-Critic. arXiv:1812.05905.
:param torch.nn.Module actor: the actor network following the rules in
:class:`~tianshou.policy.BasePolicy`. (s -> logits)
:param torch.optim.Optimizer actor_optim: the optimizer for actor network.
:param torch.nn.Module critic1: the first critic network. (s, a -> Q(s, a))
:param torch.optim.Optimizer critic1_optim: the optimizer for the first
critic network.
:param torch.nn.Module critic2: the second critic network. (s, a -> Q(s, a))
:param torch.optim.Optimizer critic2_optim: the optimizer for the second
critic network.
:param float tau: param for soft update of the target network. Default to 0.005.
:param float gamma: discount factor, in [0, 1]. Default to 0.99.
:param (float, torch.Tensor, torch.optim.Optimizer) or float alpha: entropy
regularization coefficient. Default to 0.2.
If a tuple (target_entropy, log_alpha, alpha_optim) is provided, then
alpha is automatically tuned.
:param bool reward_normalization: normalize the reward to Normal(0, 1).
Default to False.
:param BaseNoise exploration_noise: add a noise to action for exploration.
Default to None. This is useful when solving hard-exploration problem.
:param bool deterministic_eval: whether to use deterministic action (mean
of Gaussian policy) instead of stochastic action sampled by the policy.
Default to True.
:param bool action_scaling: whether to map actions from range [-1, 1] to range
[action_spaces.low, action_spaces.high]. Default to True.
:param str action_bound_method: method to bound action to range [-1, 1], can be
either "clip" (for simply clipping the action) or empty string for no bounding.
Default to "clip".
:param Optional[gym.Space] action_space: env's action space, mandatory if you want
to use option "action_scaling" or "action_bound_method". Default to None.
.. seealso::
Please refer to :class:`~tianshou.policy.BasePolicy` for more detailed
explanation.
"""
def __init__(
self,
actor: torch.nn.Module,
actor_optim: torch.optim.Optimizer,
critic1: torch.nn.Module,
critic1_optim: torch.optim.Optimizer,
critic2: torch.nn.Module,
critic2_optim: torch.optim.Optimizer,
tau: float = 0.005,
gamma: float = 0.99,
alpha: Union[float, Tuple[float, torch.Tensor, torch.optim.Optimizer]] = 0.2,
reward_normalization: bool = False,
estimation_step: int = 1,
exploration_noise: Optional[BaseNoise] = None,
deterministic_eval: bool = True,
**kwargs: Any,
) -> None:
super().__init__(
None, None, None, None, tau, gamma, exploration_noise,
reward_normalization, estimation_step, **kwargs
)
self.actor, self.actor_optim = actor, actor_optim
self.critic1, self.critic1_old = critic1, deepcopy(critic1)
self.critic1_old.eval()
self.critic1_optim = critic1_optim
self.critic2, self.critic2_old = critic2, deepcopy(critic2)
self.critic2_old.eval()
self.critic2_optim = critic2_optim
self._is_auto_alpha = False
self._alpha: Union[float, torch.Tensor]
if isinstance(alpha, tuple):
self._is_auto_alpha = True
self._target_entropy, self._log_alpha, self._alpha_optim = alpha
assert alpha[1].shape == torch.Size([1]) and alpha[1].requires_grad
self._alpha = self._log_alpha.detach().exp()
else:
self._alpha = alpha
self._deterministic_eval = deterministic_eval
self.__eps = np.finfo(np.float32).eps.item()
def train(self, mode: bool = True) -> "SACPolicy":
self.training = mode
self.actor.train(mode)
self.critic1.train(mode)
self.critic2.train(mode)
return self
def sync_weight(self) -> None:
self.soft_update(self.critic1_old, self.critic1, self.tau)
self.soft_update(self.critic2_old, self.critic2, self.tau)
def forward( # type: ignore
self,
batch: Batch,
state: Optional[Union[dict, Batch, np.ndarray]] = None,
input: str = "obs",
**kwargs: Any,
) -> Batch:
obs = batch[input]
logits, hidden = self.actor(obs, state=state, info=batch.info)
assert isinstance(logits, tuple)
dist = Independent(Normal(*logits), 1)
if self._deterministic_eval and not self.training:
act = logits[0]
else:
act = dist.rsample()
log_prob = dist.log_prob(act).unsqueeze(-1)
        # Apply the correction for tanh squashing when computing the log-prob from the Gaussian.
        # See Eq. 21 in Appendix C of the original SAC paper (arXiv:1801.01290) for the derivation.
if self.action_scaling and self.action_space is not None:
action_scale = to_torch_as(
(self.action_space.high - self.action_space.low) / 2.0, act
)
else:
action_scale = 1.0 # type: ignore
squashed_action = torch.tanh(act)
log_prob = log_prob - torch.log(
action_scale * (1 - squashed_action.pow(2)) + self.__eps
).sum(-1, keepdim=True)
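        # Note (added): the two statements above implement, elementwise,
        #   log pi(a|s) = log N(u | mu, sigma) - sum_i log(action_scale_i * (1 - tanh(u_i)^2) + eps),
        # the change-of-variables correction for the tanh-squashed action.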
return Batch(
logits=logits,
act=squashed_action,
state=hidden,
dist=dist,
log_prob=log_prob
)
def _target_q(self, buffer: ReplayBuffer, indices: np.ndarray) -> torch.Tensor:
batch = buffer[indices] # batch.obs: s_{t+n}
obs_next_result = self(batch, input="obs_next")
act_ = obs_next_result.act
target_q = torch.min(
self.critic1_old(batch.obs_next, act_),
self.critic2_old(batch.obs_next, act_),
) - self._alpha * obs_next_result.log_prob
return target_q
def learn(self, batch: Batch, **kwargs: Any) -> Dict[str, float]:
# critic 1&2
td1, critic1_loss = self._mse_optimizer(
batch, self.critic1, self.critic1_optim
)
td2, critic2_loss = self._mse_optimizer(
batch, self.critic2, self.critic2_optim
)
batch.weight = (td1 + td2) / 2.0 # prio-buffer
# actor
obs_result = self(batch)
act = obs_result.act
current_q1a = self.critic1(batch.obs, act).flatten()
current_q2a = self.critic2(batch.obs, act).flatten()
actor_loss = (
self._alpha * obs_result.log_prob.flatten() -
torch.min(current_q1a, current_q2a)
).mean()
self.actor_optim.zero_grad()
actor_loss.backward()
self.actor_optim.step()
if self._is_auto_alpha:
log_prob = obs_result.log_prob.detach() + self._target_entropy
# please take a look at issue #258 if you'd like to change this line
alpha_loss = -(self._log_alpha * log_prob).mean()
self._alpha_optim.zero_grad()
alpha_loss.backward()
self._alpha_optim.step()
self._alpha = self._log_alpha.detach().exp()
self.sync_weight()
result = {
"loss/actor": actor_loss.item(),
"loss/critic1": critic1_loss.item(),
"loss/critic2": critic2_loss.item(),
}
if self._is_auto_alpha:
result["loss/alpha"] = alpha_loss.item()
result["alpha"] = self._alpha.item() # type: ignore
return result
| [
"torch.Size",
"torch.min",
"torch.distributions.Normal",
"torch.tanh"
] | 1.4.0 | ClarenceYC/tianshou | 39f8391cfb4f219a267c1040e2d463be91c645b0 |
1.9 | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# Implementation of Twin Delayed Deep Deterministic Policy Gradients (TD3)
# Paper: https://arxiv.org/abs/1802.09477
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, max_action):
super(Actor, self).__init__()
self.l1 = nn.Linear(state_dim, 256)
self.l2 = nn.Linear(256, 256)
self.l3 = nn.Linear(256, action_dim)
self.max_action = max_action
def forward(self, state):
a = F.relu(self.l1(state))
a = F.relu(self.l2(a))
return self.max_action * torch.tanh(self.l3(a))
class Critic(nn.Module):
def __init__(self, state_dim, action_dim, device, noise_percent=0.):
super(Critic, self).__init__()
# Q1 architecture
self.l1 = nn.Linear(state_dim + action_dim, 256)
self.l2 = nn.Linear(256, 256)
self.l3 = nn.Linear(256, 1)
# Q2 architecture
self.l4 = nn.Linear(state_dim + action_dim, 256)
self.l5 = nn.Linear(256, 256)
self.l6 = nn.Linear(256, 1)
        self.device = device
self.noise_percent = noise_percent
self.noise_flag = False
print('noise percent', self.noise_percent)
def add_noise(self, x):
if self.noise_flag:
noise = self.noise_percent * x * torch.randn(x.shape, device=self.device)
return x + noise
else:
return x
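    # Note (added): add_noise injects multiplicative Gaussian noise into the hidden
    # activations (x + noise_percent * x * N(0, 1)); it is enabled only during the
    # delayed actor update, since noise_flag is toggled inside GRAC.train below.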
def forward(self, state, action):
sa = torch.cat([state, action], len(action.shape)-1)
q1 = F.relu(self.l1(sa))
q1 = self.add_noise(q1)
q1 = F.relu(self.l2(q1))
q1 = self.add_noise(q1)
q1 = self.l3(q1)
q2 = F.relu(self.l4(sa))
q2 = self.add_noise(q2)
q2 = F.relu(self.l5(q2))
q2 = self.add_noise(q2)
q2 = self.l6(q2)
return q1, q2
def Q1(self, state, action):
sa = torch.cat([state, action], len(action.shape)-1)
q1 = F.relu(self.l1(sa))
q1 = self.add_noise(q1)
q1 = F.relu(self.l2(q1))
q1 = self.add_noise(q1)
q1 = self.l3(q1)
return q1
class GRAC(object):
def __init__(
self,
env,
state_dim,
action_dim,
max_action,
batch_size=256,
discount=0.99,
tau=0.005,
policy_noise=0.2,
noise_clip=0.5,
policy_freq=2,
n_repeat=1,
no_critic_cem=False,
device='cuda:0',
model_noise=0,
alpha_start=0,
alpha_end=0,
):
self.model_noise = model_noise
print('model noise', self.model_noise)
self.actor = Actor(state_dim, action_dim, max_action).to(device)
self.actor_target = copy.deepcopy(self.actor)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)
self.critic = Critic(state_dim, action_dim, device=device, noise_percent=model_noise).to(device)
self.critic_target = copy.deepcopy(self.critic)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)
self.max_action = max_action
self.discount = discount
self.tau = tau
self.policy_noise = policy_noise
self.noise_clip = noise_clip
self.policy_freq = policy_freq
self.total_it = 0
self.device = device
self.log_freq = 200
self.loss_rep = n_repeat
self.loss_rep = 1
self.policy_freq = 2
print('loss rep', self.loss_rep)
def select_action(self, state, writer=None, test=False):
state = torch.FloatTensor(state.reshape(1, -1)).to(self.device)
return self.actor(state).cpu().data.numpy().flatten()
def train(self, replay_buffer, batch_size=100, writer=None, reward_range=20.0):
self.total_it += 1
log_it = (self.total_it % self.log_freq == 0)
# Sample replay buffer
state, action, next_state, reward, not_done = replay_buffer.sample(batch_size)
self.critic.noise_flag = False
with torch.no_grad():
# Select action according to policy and add clipped noise
noise = (
torch.randn_like(action) * self.policy_noise
).clamp(-self.noise_clip, self.noise_clip)
next_action = (
self.actor_target(next_state) + noise
).clamp(-self.max_action, self.max_action)
# Compute the target Q value
target_Q1, target_Q2 = self.critic_target(next_state, next_action)
target_Q = torch.min(target_Q1, target_Q2)
target_Q_final = reward + not_done * self.discount * target_Q
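            # Note (added): this is the clipped double-Q (TD3-style) target
            #   y = r + gamma * (1 - done) * min(Q1'(s', a'), Q2'(s', a')),
            # with a' = clip(pi'(s') + clipped noise, -max_action, max_action).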
# Get current Q estimates
for _ in range(self.loss_rep):
current_Q1, current_Q2 = self.critic(state, action)
# Compute critic loss
critic_loss = F.mse_loss(current_Q1, target_Q_final) + F.mse_loss(current_Q2, target_Q_final)
# Optimize the critic
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
# Delayed policy updates
if self.total_it % self.policy_freq == 0:
self.critic.noise_flag = True
# Compute actor loss
actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
# Optimize the actor
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# Update the frozen target models
for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
if log_it:
with torch.no_grad():
writer.add_scalar('train_critic/critic_loss', critic_loss, self.total_it)
target_current_Q1_diff = target_Q1 - current_Q1
writer.add_scalar('q_diff/target_current_Q1_diff_max', target_current_Q1_diff.max(), self.total_it)
writer.add_scalar('q_diff/target_current_Q1_diff_min', target_current_Q1_diff.min(), self.total_it)
writer.add_scalar('q_diff/target_current_Q1_diff_mean', target_current_Q1_diff.mean(), self.total_it)
writer.add_scalar('q_diff/target_current_Q1_diff_abs_mean', target_current_Q1_diff.abs().mean(), self.total_it)
target_current_Q2_diff = target_Q2 - current_Q2
writer.add_scalar('q_diff/target_current_Q2_diff_max', target_current_Q2_diff.max(), self.total_it)
writer.add_scalar('q_diff/target_current_Q2_diff_min', target_current_Q2_diff.min(), self.total_it)
writer.add_scalar('q_diff/target_current_Q2_diff_mean', target_current_Q2_diff.mean(), self.total_it)
writer.add_scalar('q_diff/target_current_Q2_diff_abs_mean', target_current_Q2_diff.abs().mean(), self.total_it)
target_Q1_Q2_diff = target_Q1 - target_Q2
writer.add_scalar('q_diff/target_Q1_Q2_diff_max', target_Q1_Q2_diff.max(), self.total_it)
writer.add_scalar('q_diff/target_Q1_Q2_diff_min', target_Q1_Q2_diff.min(), self.total_it)
writer.add_scalar('q_diff/target_Q1_Q2_diff_mean', target_Q1_Q2_diff.mean(), self.total_it)
writer.add_scalar('q_diff/target_Q1_Q2_diff_abs_mean', target_Q1_Q2_diff.abs().mean(), self.total_it)
current_Q1_Q2_diff = current_Q1 - current_Q2
writer.add_scalar('q_diff/current_Q1_Q2_diff_max', current_Q1_Q2_diff.max(), self.total_it)
writer.add_scalar('q_diff/current_Q1_Q2_diff_min', current_Q1_Q2_diff.min(), self.total_it)
writer.add_scalar('q_diff/current_Q1_Q2_diff_mean', current_Q1_Q2_diff.mean(), self.total_it)
writer.add_scalar('q_diff/current_Q1_Q2_diff_abs_mean', current_Q1_Q2_diff.abs().mean(), self.total_it)
loss1_diff = target_Q_final - current_Q1
writer.add_scalar('losses/loss1_diff_max', loss1_diff.max(), self.total_it)
writer.add_scalar('losses/loss1_diff_min', loss1_diff.min(), self.total_it)
writer.add_scalar('losses/loss1_diff_mean', loss1_diff.mean(), self.total_it)
writer.add_scalar('losses/loss1_diff_abs_mean', loss1_diff.abs().mean(), self.total_it)
loss2_diff = target_Q_final - current_Q2
writer.add_scalar('losses/loss2_diff_max', loss2_diff.max(), self.total_it)
writer.add_scalar('losses/loss2_diff_min', loss2_diff.min(), self.total_it)
writer.add_scalar('losses/loss2_diff_mean', loss2_diff.mean(), self.total_it)
writer.add_scalar('losses/loss2_diff_abs_mean', loss2_diff.abs().mean(), self.total_it)
#target_Q
writer.add_scalar('train_critic/target_Q/mean', torch.mean(target_Q), self.total_it)
writer.add_scalar('train_critic/target_Q/max', target_Q.max(), self.total_it)
writer.add_scalar('train_critic/target_Q/min', target_Q.min(), self.total_it)
writer.add_scalar('train_critic/target_Q/std', torch.std(target_Q), self.total_it)
#target_Q1
writer.add_scalar('train_critic/target_Q1/mean', torch.mean(target_Q1), self.total_it)
writer.add_scalar('train_critic/target_Q1/max', target_Q1.max(), self.total_it)
writer.add_scalar('train_critic/target_Q1/min', target_Q1.min(), self.total_it)
writer.add_scalar('train_critic/target_Q1/std', torch.std(target_Q1), self.total_it)
#target_Q2
writer.add_scalar('train_critic/target_Q2/mean', torch.mean(target_Q2), self.total_it)
#current_Q1
writer.add_scalar('train_critic/current_Q1/mean', current_Q1.mean(), self.total_it)
writer.add_scalar('train_critic/current_Q1/std', torch.std(current_Q1), self.total_it)
writer.add_scalar('train_critic/current_Q1/max', current_Q1.max(), self.total_it)
writer.add_scalar('train_critic/current_Q1/min', current_Q1.min(), self.total_it)
# current_Q2
writer.add_scalar('train_critic/current_Q2/mean', current_Q2.mean(), self.total_it)
def save(self, filename):
torch.save(self.critic.state_dict(), filename + "_critic")
torch.save(self.critic_optimizer.state_dict(), filename + "_critic_optimizer")
torch.save(self.actor.state_dict(), filename + "_actor")
torch.save(self.actor_optimizer.state_dict(), filename + "_actor_optimizer")
def load(self, filename):
self.critic.load_state_dict(torch.load(filename + "_critic"))
self.critic_optimizer.load_state_dict(torch.load(filename + "_critic_optimizer"))
self.critic_target = copy.deepcopy(self.critic)
self.actor.load_state_dict(torch.load(filename + "_actor"))
self.actor_optimizer.load_state_dict(torch.load(filename + "_actor_optimizer"))
self.actor_target = copy.deepcopy(self.actor) | [
"torch.nn.Linear",
"torch.min",
"torch.no_grad",
"torch.std",
"torch.nn.functional.mse_loss",
"torch.randn_like",
"torch.load",
"torch.mean",
"torch.randn"
] | 1.9.0 | yifan-you-37/GRAC-1 | 22b2cde651ae4416475d9594b93ad1c430090144 |
1.5 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Sequence, Union
import torch
import torch.nn as nn
from monai.networks import to_norm_affine
from monai.utils import GridSampleMode, GridSamplePadMode, ensure_tuple, optional_import
_C, _ = optional_import("monai._C")
__all__ = ["AffineTransform", "grid_pull", "grid_push", "grid_count", "grid_grad"]
class _GridPull(torch.autograd.Function):
@staticmethod
def forward(ctx, input, grid, interpolation, bound, extrapolate):
opt = (bound, interpolation, extrapolate)
output = _C.grid_pull(input, grid, *opt)
if input.requires_grad or grid.requires_grad:
ctx.opt = opt
ctx.save_for_backward(input, grid)
return output
@staticmethod
def backward(ctx, grad):
if not (ctx.needs_input_grad[0] or ctx.needs_input_grad[1]):
return None, None, None, None, None
var = ctx.saved_tensors
opt = ctx.opt
grads = _C.grid_pull_backward(grad, *var, *opt)
if ctx.needs_input_grad[0]:
return grads[0], grads[1] if ctx.needs_input_grad[1] else None, None, None, None
if ctx.needs_input_grad[1]:
return None, grads[0], None, None, None
def grid_pull(input: torch.Tensor, grid: torch.Tensor, interpolation="linear", bound="zero", extrapolate: bool = True):
"""
Sample an image with respect to a deformation field.
`interpolation` can be an int, a string or an InterpolationType.
Possible values are::
- 0 or 'nearest' or InterpolationType.nearest
- 1 or 'linear' or InterpolationType.linear
- 2 or 'quadratic' or InterpolationType.quadratic
- 3 or 'cubic' or InterpolationType.cubic
- 4 or 'fourth' or InterpolationType.fourth
- 5 or 'fifth' or InterpolationType.fifth
- 6 or 'sixth' or InterpolationType.sixth
- 7 or 'seventh' or InterpolationType.seventh
A list of values can be provided, in the order [W, H, D],
to specify dimension-specific interpolation orders.
`bound` can be an int, a string or a BoundType.
Possible values are::
- 0 or 'replicate' or 'nearest' or BoundType.replicate
- 1 or 'dct1' or 'mirror' or BoundType.dct1
- 2 or 'dct2' or 'reflect' or BoundType.dct2
- 3 or 'dst1' or 'antimirror' or BoundType.dst1
- 4 or 'dst2' or 'antireflect' or BoundType.dst2
- 5 or 'dft' or 'wrap' or BoundType.dft
- 7 or 'zero' or BoundType.zero
A list of values can be provided, in the order [W, H, D],
to specify dimension-specific boundary conditions.
    `sliding` is a specific condition that only applies to flow fields
(with as many channels as dimensions). It cannot be dimension-specific.
Note that:
- `dft` corresponds to circular padding
- `dct2` corresponds to Neumann boundary conditions (symmetric)
- `dst2` corresponds to Dirichlet boundary conditions (antisymmetric)
See Also:
- https://en.wikipedia.org/wiki/Discrete_cosine_transform
- https://en.wikipedia.org/wiki/Discrete_sine_transform
- ``help(monai._C.BoundType)``
- ``help(monai._C.InterpolationType)``
Args:
input: Input image. `(B, C, Wi, Hi, Di)`.
grid: Deformation field. `(B, Wo, Ho, Do, 1|2|3)`.
interpolation (int or list[int] , optional): Interpolation order.
Defaults to `'linear'`.
bound (BoundType, or list[BoundType], optional): Boundary conditions.
Defaults to `'zero'`.
extrapolate: Extrapolate out-of-bound data.
Defaults to `True`.
Returns:
output (torch.Tensor): Deformed image `(B, C, Wo, Ho, Do)`.
"""
# Convert parameters
bound = [_C.BoundType.__members__[b] if isinstance(b, str) else _C.BoundType(b) for b in ensure_tuple(bound)]
interpolation = [
_C.InterpolationType.__members__[i] if isinstance(i, str) else _C.InterpolationType(i)
for i in ensure_tuple(interpolation)
]
return _GridPull.apply(input, grid, interpolation, bound, extrapolate)
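# Note (added): a minimal 2D usage sketch for grid_pull, assuming the compiled monai._C
# extension is available (the sampling grid here is assumed to hold voxel coordinates):
#   img = torch.rand(1, 1, 32, 32)          # (B, C, W, H)
#   grid = torch.rand(1, 32, 32, 2) * 31    # (B, Wo, Ho, 2)
#   warped = grid_pull(img, grid, interpolation="linear", bound="zero")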
class _GridPush(torch.autograd.Function):
@staticmethod
def forward(ctx, input, grid, shape, interpolation, bound, extrapolate):
opt = (bound, interpolation, extrapolate)
output = _C.grid_push(input, grid, shape, *opt)
if input.requires_grad or grid.requires_grad:
ctx.opt = opt
ctx.save_for_backward(input, grid)
return output
@staticmethod
def backward(ctx, grad):
if not (ctx.needs_input_grad[0] or ctx.needs_input_grad[1]):
return None, None, None, None, None, None
var = ctx.saved_tensors
opt = ctx.opt
grads = _C.grid_push_backward(grad, *var, *opt)
if ctx.needs_input_grad[0]:
return grads[0], grads[1] if ctx.needs_input_grad[1] else None, None, None, None, None
if ctx.needs_input_grad[1]:
return None, grads[0], None, None, None, None
def grid_push(
input: torch.Tensor, grid: torch.Tensor, shape=None, interpolation="linear", bound="zero", extrapolate: bool = True
):
"""
Splat an image with respect to a deformation field (pull adjoint).
`interpolation` can be an int, a string or an InterpolationType.
Possible values are::
- 0 or 'nearest' or InterpolationType.nearest
- 1 or 'linear' or InterpolationType.linear
- 2 or 'quadratic' or InterpolationType.quadratic
- 3 or 'cubic' or InterpolationType.cubic
- 4 or 'fourth' or InterpolationType.fourth
- 5 or 'fifth' or InterpolationType.fifth
- 6 or 'sixth' or InterpolationType.sixth
- 7 or 'seventh' or InterpolationType.seventh
A list of values can be provided, in the order `[W, H, D]`,
to specify dimension-specific interpolation orders.
`bound` can be an int, a string or a BoundType.
Possible values are::
- 0 or 'replicate' or 'nearest' or BoundType.replicate
- 1 or 'dct1' or 'mirror' or BoundType.dct1
- 2 or 'dct2' or 'reflect' or BoundType.dct2
- 3 or 'dst1' or 'antimirror' or BoundType.dst1
- 4 or 'dst2' or 'antireflect' or BoundType.dst2
- 5 or 'dft' or 'wrap' or BoundType.dft
- 7 or 'zero' or BoundType.zero
A list of values can be provided, in the order `[W, H, D]`,
to specify dimension-specific boundary conditions.
    `sliding` is a specific condition that only applies to flow fields
(with as many channels as dimensions). It cannot be dimension-specific.
Note that:
- `dft` corresponds to circular padding
- `dct2` corresponds to Neumann boundary conditions (symmetric)
- `dst2` corresponds to Dirichlet boundary conditions (antisymmetric)
See Also:
- https://en.wikipedia.org/wiki/Discrete_cosine_transform
- https://en.wikipedia.org/wiki/Discrete_sine_transform
- ``help(monai._C.BoundType)``
- ``help(monai._C.InterpolationType)``
Args:
input: Input image `(B, C, Wi, Hi, Di)`.
grid: Deformation field `(B, Wi, Hi, Di, 1|2|3)`.
shape: Shape of the source image.
interpolation (int or list[int] , optional): Interpolation order.
Defaults to `'linear'`.
bound (BoundType, or list[BoundType], optional): Boundary conditions.
Defaults to `'zero'`.
extrapolate: Extrapolate out-of-bound data.
Defaults to `True`.
Returns:
output (torch.Tensor): Splatted image `(B, C, Wo, Ho, Do)`.
"""
# Convert parameters
bound = [_C.BoundType.__members__[b] if isinstance(b, str) else _C.BoundType(b) for b in ensure_tuple(bound)]
interpolation = [
_C.InterpolationType.__members__[i] if isinstance(i, str) else _C.InterpolationType(i)
for i in ensure_tuple(interpolation)
]
if shape is None:
shape = tuple(input.shape[2:])
return _GridPush.apply(input, grid, shape, interpolation, bound, extrapolate)
class _GridCount(torch.autograd.Function):
@staticmethod
def forward(ctx, grid, shape, interpolation, bound, extrapolate):
opt = (bound, interpolation, extrapolate)
output = _C.grid_count(grid, shape, *opt)
if grid.requires_grad:
ctx.opt = opt
ctx.save_for_backward(grid)
return output
@staticmethod
def backward(ctx, grad):
if ctx.needs_input_grad[0]:
var = ctx.saved_tensors
opt = ctx.opt
return _C.grid_count_backward(grad, *var, *opt), None, None, None, None
return None, None, None, None, None
def grid_count(grid: torch.Tensor, shape=None, interpolation="linear", bound="zero", extrapolate: bool = True):
"""
Splatting weights with respect to a deformation field (pull adjoint).
This function is equivalent to applying grid_push to an image of ones.
`interpolation` can be an int, a string or an InterpolationType.
Possible values are::
- 0 or 'nearest' or InterpolationType.nearest
- 1 or 'linear' or InterpolationType.linear
- 2 or 'quadratic' or InterpolationType.quadratic
- 3 or 'cubic' or InterpolationType.cubic
- 4 or 'fourth' or InterpolationType.fourth
- 5 or 'fifth' or InterpolationType.fifth
- 6 or 'sixth' or InterpolationType.sixth
- 7 or 'seventh' or InterpolationType.seventh
A list of values can be provided, in the order [W, H, D],
to specify dimension-specific interpolation orders.
`bound` can be an int, a string or a BoundType.
Possible values are::
- 0 or 'replicate' or 'nearest' or BoundType.replicate
- 1 or 'dct1' or 'mirror' or BoundType.dct1
- 2 or 'dct2' or 'reflect' or BoundType.dct2
- 3 or 'dst1' or 'antimirror' or BoundType.dst1
- 4 or 'dst2' or 'antireflect' or BoundType.dst2
- 5 or 'dft' or 'wrap' or BoundType.dft
- 7 or 'zero' or BoundType.zero
A list of values can be provided, in the order [W, H, D],
to specify dimension-specific boundary conditions.
    `sliding` is a specific condition that only applies to flow fields
(with as many channels as dimensions). It cannot be dimension-specific.
Note that:
- `dft` corresponds to circular padding
- `dct2` corresponds to Neumann boundary conditions (symmetric)
- `dst2` corresponds to Dirichlet boundary conditions (antisymmetric)
See Also:
- https://en.wikipedia.org/wiki/Discrete_cosine_transform
- https://en.wikipedia.org/wiki/Discrete_sine_transform
- ``help(monai._C.BoundType)``
- ``help(monai._C.InterpolationType)``
Args:
grid: Deformation field `(B, Wi, Hi, Di, 2|3)`.
shape: shape of the source image.
interpolation (int or list[int] , optional): Interpolation order.
Defaults to `'linear'`.
bound (BoundType, or list[BoundType], optional): Boundary conditions.
Defaults to `'zero'`.
extrapolate (bool, optional): Extrapolate out-of-bound data.
Defaults to `True`.
Returns:
output (torch.Tensor): Splat weights `(B, 1, Wo, Ho, Do)`.
"""
# Convert parameters
bound = [_C.BoundType.__members__[b] if isinstance(b, str) else _C.BoundType(b) for b in ensure_tuple(bound)]
interpolation = [
_C.InterpolationType.__members__[i] if isinstance(i, str) else _C.InterpolationType(i)
for i in ensure_tuple(interpolation)
]
if shape is None:
shape = tuple(grid.shape[2:])
return _GridCount.apply(grid, shape, interpolation, bound, extrapolate)
class _GridGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, input, grid, interpolation, bound, extrapolate):
opt = (bound, interpolation, extrapolate)
output = _C.grid_grad(input, grid, *opt)
if input.requires_grad or grid.requires_grad:
ctx.opt = opt
ctx.save_for_backward(input, grid)
return output
@staticmethod
def backward(ctx, grad):
if not (ctx.needs_input_grad[0] or ctx.needs_input_grad[1]):
return None, None, None, None, None
var = ctx.saved_tensors
opt = ctx.opt
grads = _C.grid_grad_backward(grad, *var, *opt)
if ctx.needs_input_grad[0]:
return grads[0], grads[1] if ctx.needs_input_grad[1] else None, None, None, None
if ctx.needs_input_grad[1]:
return None, grads[0], None, None, None
def grid_grad(input: torch.Tensor, grid: torch.Tensor, interpolation="linear", bound="zero", extrapolate: bool = True):
"""
Sample an image with respect to a deformation field.
`interpolation` can be an int, a string or an InterpolationType.
Possible values are::
- 0 or 'nearest' or InterpolationType.nearest
- 1 or 'linear' or InterpolationType.linear
- 2 or 'quadratic' or InterpolationType.quadratic
- 3 or 'cubic' or InterpolationType.cubic
- 4 or 'fourth' or InterpolationType.fourth
- 5 or 'fifth' or InterpolationType.fifth
- 6 or 'sixth' or InterpolationType.sixth
- 7 or 'seventh' or InterpolationType.seventh
A list of values can be provided, in the order [W, H, D],
to specify dimension-specific interpolation orders.
`bound` can be an int, a string or a BoundType.
Possible values are::
- 0 or 'replicate' or 'nearest' or BoundType.replicate
- 1 or 'dct1' or 'mirror' or BoundType.dct1
- 2 or 'dct2' or 'reflect' or BoundType.dct2
- 3 or 'dst1' or 'antimirror' or BoundType.dst1
- 4 or 'dst2' or 'antireflect' or BoundType.dst2
- 5 or 'dft' or 'wrap' or BoundType.dft
- 7 or 'zero' or BoundType.zero
A list of values can be provided, in the order [W, H, D],
to specify dimension-specific boundary conditions.
    `sliding` is a specific condition that only applies to flow fields
(with as many channels as dimensions). It cannot be dimension-specific.
Note that:
- `dft` corresponds to circular padding
- `dct2` corresponds to Neumann boundary conditions (symmetric)
- `dst2` corresponds to Dirichlet boundary conditions (antisymmetric)
See Also:
- https://en.wikipedia.org/wiki/Discrete_cosine_transform
- https://en.wikipedia.org/wiki/Discrete_sine_transform
- ``help(monai._C.BoundType)``
- ``help(monai._C.InterpolationType)``
Args:
input: Input image. `(B, C, Wi, Hi, Di)`.
grid: Deformation field. `(B, Wo, Ho, Do, 2|3)`.
interpolation (int or list[int] , optional): Interpolation order.
Defaults to `'linear'`.
bound (BoundType, or list[BoundType], optional): Boundary conditions.
Defaults to `'zero'`.
extrapolate: Extrapolate out-of-bound data. Defaults to `True`.
Returns:
output (torch.Tensor): Sampled gradients (B, C, Wo, Ho, Do, 1|2|3).
"""
# Convert parameters
bound = [_C.BoundType.__members__[b] if isinstance(b, str) else _C.BoundType(b) for b in ensure_tuple(bound)]
interpolation = [
_C.InterpolationType.__members__[i] if isinstance(i, str) else _C.InterpolationType(i)
for i in ensure_tuple(interpolation)
]
return _GridGrad.apply(input, grid, interpolation, bound, extrapolate)
class AffineTransform(nn.Module):
def __init__(
self,
spatial_size: Optional[Union[Sequence[int], int]] = None,
normalized: bool = False,
mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.ZEROS,
align_corners: bool = False,
reverse_indexing: bool = True,
) -> None:
"""
Apply affine transformations with a batch of affine matrices.
When `normalized=False` and `reverse_indexing=True`,
it does the commonly used resampling in the 'pull' direction
following the ``scipy.ndimage.affine_transform`` convention.
        In this case `theta` is equivalent to the (ndim+1, ndim+1) input ``matrix`` of
        ``scipy.ndimage.affine_transform`` and operates on homogeneous coordinates.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.affine_transform.html
When `normalized=True` and `reverse_indexing=False`,
it applies `theta` to the normalized coordinates (coords. in the range of [-1, 1]) directly.
This is often used with `align_corners=False` to achieve resolution-agnostic resampling,
thus useful as a part of trainable modules such as the spatial transformer networks.
See also: https://pytorch.org/tutorials/intermediate/spatial_transformer_tutorial.html
Args:
spatial_size: output spatial shape, the full output shape will be
`[N, C, *spatial_size]` where N and C are inferred from the `src` input of `self.forward`.
normalized: indicating whether the provided affine matrix `theta` is defined
for the normalized coordinates. If `normalized=False`, `theta` will be converted
to operate on normalized coordinates as pytorch affine_grid works with the normalized
coordinates.
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"zeros"``.
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
align_corners: see also https://pytorch.org/docs/stable/nn.functional.html#grid-sample.
reverse_indexing: whether to reverse the spatial indexing of image and coordinates.
set to `False` if `theta` follows pytorch's default "D, H, W" convention.
set to `True` if `theta` follows `scipy.ndimage` default "i, j, k" convention.
"""
super().__init__()
self.spatial_size = ensure_tuple(spatial_size) if spatial_size is not None else None
self.normalized = normalized
self.mode: GridSampleMode = GridSampleMode(mode)
self.padding_mode: GridSamplePadMode = GridSamplePadMode(padding_mode)
self.align_corners = align_corners
self.reverse_indexing = reverse_indexing
def forward(
self, src: torch.Tensor, theta: torch.Tensor, spatial_size: Optional[Union[Sequence[int], int]] = None
) -> torch.Tensor:
"""
``theta`` must be an affine transformation matrix with shape
3x3 or Nx3x3 or Nx2x3 or 2x3 for spatial 2D transforms,
4x4 or Nx4x4 or Nx3x4 or 3x4 for spatial 3D transforms,
where `N` is the batch size. `theta` will be converted into float Tensor for the computation.
Args:
src (array_like): image in spatial 2D or 3D (N, C, spatial_dims),
where N is the batch dim, C is the number of channels.
theta (array_like): Nx3x3, Nx2x3, 3x3, 2x3 for spatial 2D inputs,
Nx4x4, Nx3x4, 3x4, 4x4 for spatial 3D inputs. When the batch dimension is omitted,
`theta` will be repeated N times, N is the batch dim of `src`.
spatial_size: output spatial shape, the full output shape will be
`[N, C, *spatial_size]` where N and C are inferred from the `src`.
Raises:
TypeError: When ``theta`` is not a ``torch.Tensor``.
ValueError: When ``theta`` is not one of [Nxdxd, dxd].
ValueError: When ``theta`` is not one of [Nx3x3, Nx4x4].
TypeError: When ``src`` is not a ``torch.Tensor``.
ValueError: When ``src`` spatially is not one of [2D, 3D].
ValueError: When affine and image batch dimension differ.
"""
# validate `theta`
if not isinstance(theta, torch.Tensor):
raise TypeError(f"theta must be torch.Tensor but is {type(theta).__name__}.")
if theta.dim() not in (2, 3):
raise ValueError(f"theta must be Nxdxd or dxd, got {theta.shape}.")
if theta.dim() == 2:
theta = theta[None] # adds a batch dim.
theta = theta.clone() # no in-place change of theta
theta_shape = tuple(theta.shape[1:])
if theta_shape in ((2, 3), (3, 4)): # needs padding to dxd
pad_affine = torch.tensor([0, 0, 1] if theta_shape[0] == 2 else [0, 0, 0, 1])
pad_affine = pad_affine.repeat(theta.shape[0], 1, 1).to(theta)
pad_affine.requires_grad = False
theta = torch.cat([theta, pad_affine], dim=1)
if tuple(theta.shape[1:]) not in ((3, 3), (4, 4)):
raise ValueError(f"theta must be Nx3x3 or Nx4x4, got {theta.shape}.")
# validate `src`
if not isinstance(src, torch.Tensor):
raise TypeError(f"src must be torch.Tensor but is {type(src).__name__}.")
sr = src.dim() - 2 # input spatial rank
if sr not in (2, 3):
raise ValueError(f"Unsupported src dimension: {sr}, available options are [2, 3].")
# set output shape
src_size = tuple(src.shape)
dst_size = src_size # default to the src shape
if self.spatial_size is not None:
dst_size = src_size[:2] + self.spatial_size
if spatial_size is not None:
dst_size = src_size[:2] + ensure_tuple(spatial_size)
# reverse and normalize theta if needed
if not self.normalized:
theta = to_norm_affine(
affine=theta, src_size=src_size[2:], dst_size=dst_size[2:], align_corners=self.align_corners
)
if self.reverse_indexing:
rev_idx = torch.as_tensor(range(sr - 1, -1, -1), device=src.device)
theta[:, :sr] = theta[:, rev_idx]
theta[:, :, :sr] = theta[:, :, rev_idx]
if (theta.shape[0] == 1) and src_size[0] > 1:
# adds a batch dim to `theta` in order to match `src`
theta = theta.repeat(src_size[0], 1, 1)
if theta.shape[0] != src_size[0]:
raise ValueError(
f"affine and image batch dimension must match, got affine={theta.shape[0]} image={src_size[0]}."
)
grid = nn.functional.affine_grid(theta=theta[:, :sr], size=list(dst_size), align_corners=self.align_corners)
dst = nn.functional.grid_sample(
input=src.contiguous(),
grid=grid,
mode=self.mode.value,
padding_mode=self.padding_mode.value,
align_corners=self.align_corners,
)
return dst
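# --- Editor's sketch (not part of the original source): a minimal usage example of
# AffineTransform. The batch/channel sizes, the 32x32 input, and the identity affine
# below are illustrative assumptions.
def _affine_transform_usage_sketch():
    xform = AffineTransform(spatial_size=(64, 64))  # resample outputs to 64x64
    image = torch.rand(2, 1, 32, 32)  # (N, C, H, W)
    theta = torch.eye(3).unsqueeze(0).repeat(2, 1, 1)  # one 3x3 homogeneous matrix per batch item
    out = xform(image, theta)  # identity affine, resampled to `spatial_size`
    return out.shape  # torch.Size([2, 1, 64, 64])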
| [
"torch.cat",
"torch.tensor"
] | 1.5 | KohYoungResearchAmerica/MONAI | eca3f19182b9fcee0be7123728a9826cd382d152 |
1.8 | #!/bin/env python
"""Train a Sketch-VAE."""
import argparse
from enum import Enum
import os
import wget
import time
import numpy as np
import torch as th
from torch.utils.data import DataLoader
import torchvision.datasets as dset
import torchvision.transforms as transforms
import ttools
import ttools.interfaces
from ttools.modules import networks
import rendering
import losses
import modules
import data
import pydiffvg
LOG = ttools.get_logger(__name__)
BASE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)
OUTPUT = os.path.join(BASE_DIR, "results")
class SketchVAE(th.nn.Module):
class ImageEncoder(th.nn.Module):
def __init__(self, image_size=64, width=64, zdim=128):
super(SketchVAE.ImageEncoder, self).__init__()
self.zdim = zdim
self.net = th.nn.Sequential(
th.nn.Conv2d(4, width, 5, padding=2),
th.nn.InstanceNorm2d(width),
th.nn.ReLU(inplace=True),
# 64x64
th.nn.Conv2d(width, width, 5, padding=2),
th.nn.InstanceNorm2d(width),
th.nn.ReLU( inplace=True),
# 64x64
th.nn.Conv2d(width, 2*width, 5, stride=1, padding=2),
th.nn.InstanceNorm2d(2*width),
th.nn.ReLU( inplace=True),
                # 64x64
th.nn.Conv2d(2*width, 2*width, 5, stride=2, padding=2),
th.nn.InstanceNorm2d(2*width),
th.nn.ReLU( inplace=True),
                # 32x32
th.nn.Conv2d(2*width, 2*width, 5, stride=2, padding=2),
th.nn.InstanceNorm2d(2*width),
th.nn.ReLU( inplace=True),
# 16x16
th.nn.Conv2d(2*width, 2*width, 5, stride=2, padding=2),
th.nn.InstanceNorm2d(2*width),
th.nn.ReLU( inplace=True),
# 8x8
th.nn.Conv2d(2*width, 2*width, 5, stride=2, padding=2),
th.nn.InstanceNorm2d(2*width),
th.nn.ReLU( inplace=True),
# 4x4
modules.Flatten(),
th.nn.Linear(4*4*2*width, 2*zdim)
)
def forward(self, images):
features = self.net(images)
# VAE params
mu = features[:, :self.zdim]
log_sigma = features[:, self.zdim:]
# Sample a latent vector
sigma = th.exp(log_sigma/2.0)
z0 = th.randn(self.zdim, device=mu.device)
z = mu + sigma*z0
# KL divergence needs mu/sigma
return z, mu, log_sigma
class ImageDecoder(th.nn.Module):
""""""
def __init__(self, zdim=128, image_size=64, width=64):
super(SketchVAE.ImageDecoder, self).__init__()
self.zdim = zdim
self.width = width
self.embedding = th.nn.Linear(zdim, 4*4*2*width)
self.net = th.nn.Sequential(
th.nn.ConvTranspose2d(2*width, 2*width, 4, padding=1, stride=2),
th.nn.InstanceNorm2d(2*width),
th.nn.ReLU( inplace=True),
# 8x8
th.nn.ConvTranspose2d(2*width, 2*width, 4, padding=1, stride=2),
th.nn.InstanceNorm2d(2*width),
th.nn.ReLU( inplace=True),
# 16x16
th.nn.ConvTranspose2d(2*width, 2*width, 4, padding=1, stride=2),
th.nn.InstanceNorm2d(2*width),
th.nn.ReLU( inplace=True),
                # 32x32
th.nn.Conv2d(2*width, 2*width, 5, padding=2, stride=1),
th.nn.InstanceNorm2d(2*width),
th.nn.ReLU( inplace=True),
                # 32x32
th.nn.ConvTranspose2d(2*width, 2*width, 4, padding=1, stride=2),
th.nn.InstanceNorm2d(2*width),
th.nn.ReLU( inplace=True),
                # 64x64
th.nn.Conv2d(2*width, width, 5, padding=2, stride=1),
th.nn.InstanceNorm2d(width),
th.nn.ReLU( inplace=True),
                # 64x64
th.nn.ConvTranspose2d(width, width, 5, padding=2, stride=1),
th.nn.InstanceNorm2d(width),
th.nn.ReLU( inplace=True),
# 64x64
th.nn.Conv2d(width, width, 5, padding=2, stride=1),
th.nn.InstanceNorm2d(width),
th.nn.ReLU( inplace=True),
# 64x64
th.nn.Conv2d(width, 4, 5, padding=2, stride=1),
)
def forward(self, z):
bs = z.shape[0]
im = self.embedding(z).view(bs, 2*self.width, 4, 4)
out = self.net(im)
return out
class SketchDecoder(th.nn.Module):
"""
The decoder outputs a sequence where each time step models (dx, dy,
opacity).
"""
def __init__(self, sequence_length, hidden_size=512, dropout=0.9,
zdim=128, num_layers=3):
super(SketchVAE.SketchDecoder, self).__init__()
self.sequence_length = sequence_length
self.hidden_size = hidden_size
self.num_layers = num_layers
self.zdim = zdim
# Maps the latent vector to an initial cell/hidden vector
self.hidden_cell_predictor = th.nn.Linear(zdim, 2*hidden_size*num_layers)
self.lstm = th.nn.LSTM(
zdim, hidden_size,
num_layers=self.num_layers, dropout=dropout,
batch_first=True)
self.dxdy_predictor = th.nn.Sequential(
th.nn.Linear(hidden_size, 2),
th.nn.Tanh(),
)
self.opacity_predictor = th.nn.Sequential(
th.nn.Linear(hidden_size, 1),
th.nn.Sigmoid(),
)
def forward(self, z, hidden_and_cell=None):
# Every step in the sequence takes the latent vector as input so we
# replicate it here
bs = z.shape[0]
steps = self.sequence_length - 1 # no need to predict the start of sequence
expanded_z = z.unsqueeze(1).repeat(1, steps, 1)
if hidden_and_cell is None:
# Initialize from latent vector
hidden_and_cell = self.hidden_cell_predictor(
th.tanh(z))
hidden = hidden_and_cell[:, :self.hidden_size*self.num_layers]
hidden = hidden.view(-1, self.num_layers, self.hidden_size)
hidden = hidden.permute(1, 0, 2).contiguous()
# hidden = hidden.unsqueeze(1).contiguous()
cell = hidden_and_cell[:, self.hidden_size*self.num_layers:]
cell = cell.view(-1, self.num_layers, self.hidden_size)
cell = cell.permute(1, 0, 2).contiguous()
# cell = cell.unsqueeze(1).contiguous()
hidden_and_cell = (hidden, cell)
outputs, hidden_and_cell = self.lstm(expanded_z, hidden_and_cell)
hidden, cell = hidden_and_cell
dxdy = self.dxdy_predictor(
outputs.reshape(bs*steps, self.hidden_size)).view(bs, steps, -1)
opacity = self.opacity_predictor(
outputs.reshape(bs*steps, self.hidden_size)).view(bs, steps, -1)
strokes = th.cat([dxdy, opacity], -1)
return strokes
def __init__(self, sequence_length, zdim=128, image_size=64):
super(SketchVAE, self).__init__()
self.im_encoder = SketchVAE.ImageEncoder(
zdim=zdim, image_size=image_size)
self.im_decoder = SketchVAE.ImageDecoder(
zdim=zdim, image_size=image_size)
self.sketch_decoder = SketchVAE.SketchDecoder(
sequence_length, zdim=zdim)
def forward(self, images):
# Encode the images as latent vectors
z, mu, log_sigma = self.im_encoder(images)
decoded_im = self.im_decoder(z)
decoded_sketch = self.sketch_decoder(z)
return {
"decoded_im": decoded_im,
"decoded_sketch": decoded_sketch,
"z": z,
"mu": mu,
"log_sigma": log_sigma,
}
class SketchVAECallback(ttools.callbacks.ImageDisplayCallback):
"""Simple callback that visualize images."""
def visualized_image(self, batch, step_data, is_val=False):
if is_val:
return None
# only display the first n drawings
n = 8
gt = step_data["gt_image"][:n].detach()
vae_im = step_data["vae_image"][:n].detach()
sketch_im = step_data["sketch_image"][:n].detach()
rendering = th.cat([gt, vae_im, sketch_im], 2)
rendering = th.clamp(rendering, 0, 1)
alpha = rendering[:, 3:4]
rendering = rendering[:, :3] * alpha
return rendering
def caption(self, batch, step_data, is_val=False):
if is_val:
return ""
else:
return "top: truth, middle: vae sample, output: rnn-output"
class Interface(ttools.ModelInterface):
def __init__(self, model, lr=1e-4, lr_decay=0.9999,
kl_weight=0.5, kl_min_weight=0.01, kl_decay=0.99995,
raster_resolution=64, absolute_coords=False,
device="cpu", grad_clip=1.0):
super(Interface, self).__init__()
self.grad_clip = grad_clip
self.raster_resolution = raster_resolution
self.absolute_coords = absolute_coords
self.model = model
self.device = device
self.model.to(self.device)
self.im_enc_opt = th.optim.Adam(
self.model.im_encoder.parameters(), lr=lr)
self.im_dec_opt = th.optim.Adam(
self.model.im_decoder.parameters(), lr=lr)
self.sketch_dec_opt = th.optim.Adam(
self.model.sketch_decoder.parameters(), lr=lr)
self.kl_weight = kl_weight
self.kl_min_weight = kl_min_weight
self.kl_decay = kl_decay
self.kl_loss = losses.KLDivergence()
self.schedulers = [
th.optim.lr_scheduler.ExponentialLR(self.im_enc_opt, lr_decay),
th.optim.lr_scheduler.ExponentialLR(self.im_dec_opt, lr_decay),
th.optim.lr_scheduler.ExponentialLR(self.sketch_dec_opt, lr_decay),
]
# include loss on alpha
self.im_loss = losses.MultiscaleMSELoss(channels=4).to(self.device)
def optimizers(self):
return [self.im_enc_opt, self.im_dec_opt, self.sketch_dec_opt]
def kl_scaling(self):
# Scale the KL divergence weight
try:
state = self.im_enc_opt.state_dict()["param_groups"][0]["params"][0]
optim_step = self.im_enc_opt.state_dict()["state"][state]["step"]
except KeyError:
optim_step = 0 # no step taken yet
kl_scaling = 1.0 - (1.0 -
self.kl_min_weight)*(self.kl_decay**optim_step)
return kl_scaling
def training_step(self, batch):
gt_strokes, gt_im = batch
gt_strokes = gt_strokes.to(self.device)
gt_im = gt_im.to(self.device)
out = self.model(gt_im)
kl_loss = self.kl_loss(
out["mu"], out["log_sigma"])
kl_weight = self.kl_weight * self.kl_scaling()
# add start of sequence
sos = gt_strokes[:, :1]
sketch = th.cat([sos, out["decoded_sketch"]], 1)
vae_im = out["decoded_im"]
# start = time.time()
sketch_im = rendering.opacityStroke2diffvg(
sketch, canvas_size=self.raster_resolution, debug=False,
force_cpu=True, relative=not self.absolute_coords)
# elapsed = (time.time() - start)*1000
# print("out rendering took %.2fms" % elapsed)
vae_im_loss = self.im_loss(vae_im, gt_im)
sketch_im_loss = self.im_loss(sketch_im, gt_im)
# vae_im_loss = th.nn.functional.mse_loss(vae_im, gt_im)
# sketch_im_loss = th.nn.functional.mse_loss(sketch_im, gt_im)
loss = vae_im_loss + kl_loss*kl_weight + sketch_im_loss
self.im_enc_opt.zero_grad()
self.im_dec_opt.zero_grad()
self.sketch_dec_opt.zero_grad()
loss.backward()
# clip gradients
enc_nrm = th.nn.utils.clip_grad_norm_(
self.model.im_encoder.parameters(), self.grad_clip)
dec_nrm = th.nn.utils.clip_grad_norm_(
self.model.im_decoder.parameters(), self.grad_clip)
sketch_dec_nrm = th.nn.utils.clip_grad_norm_(
self.model.sketch_decoder.parameters(), self.grad_clip)
if enc_nrm > self.grad_clip:
LOG.debug("Clipped encoder gradient (%.5f) to %.2f",
enc_nrm, self.grad_clip)
if dec_nrm > self.grad_clip:
LOG.debug("Clipped decoder gradient (%.5f) to %.2f",
dec_nrm, self.grad_clip)
if sketch_dec_nrm > self.grad_clip:
LOG.debug("Clipped sketch decoder gradient (%.5f) to %.2f",
sketch_dec_nrm, self.grad_clip)
self.im_enc_opt.step()
self.im_dec_opt.step()
self.sketch_dec_opt.step()
return {
"vae_image": vae_im,
"sketch_image": sketch_im,
"gt_image": gt_im,
"loss": loss.item(),
"vae_im_loss": vae_im_loss.item(),
"sketch_im_loss": sketch_im_loss.item(),
"kl_loss": kl_loss.item(),
"kl_weight": kl_weight,
"lr": self.im_enc_opt.param_groups[0]["lr"],
}
def init_validation(self):
return dict(sample=None)
def validation_step(self, batch, running_data):
# Switch to eval mode for dropout, batchnorm, etc
# self.model.eval()
# with th.no_grad():
# # sample = self.model.sample(
# # batch.to(self.device), temperature=self.sampling_temperature)
# # running_data["sample"] = sample
# self.model.train()
return running_data
def train(args):
th.manual_seed(0)
np.random.seed(0)
dataset = data.FixedLengthQuickDrawDataset(
args.dataset, max_seq_length=args.sequence_length,
canvas_size=args.raster_resolution)
dataloader = DataLoader(
dataset, batch_size=args.bs, num_workers=args.workers, shuffle=True)
# val_dataset = [s for idx, s in enumerate(dataset) if idx < 8]
# val_dataloader = DataLoader(
# val_dataset, batch_size=8, num_workers=4, shuffle=False)
val_dataloader = None
model_params = {
"zdim": args.zdim,
"sequence_length": args.sequence_length,
"image_size": args.raster_resolution,
# "encoder_dim": args.encoder_dim,
# "decoder_dim": args.decoder_dim,
}
model = SketchVAE(**model_params)
model.train()
LOG.info("Model parameters:\n%s", model_params)
device = "cpu"
if th.cuda.is_available():
device = "cuda"
LOG.info("Using CUDA")
interface = Interface(model, raster_resolution=args.raster_resolution,
lr=args.lr, lr_decay=args.lr_decay,
kl_decay=args.kl_decay, kl_weight=args.kl_weight,
absolute_coords=args.absolute_coordinates,
device=device)
env_name = "sketch_vae"
if args.custom_name is not None:
env_name += "_" + args.custom_name
if args.absolute_coordinates:
env_name += "_abs_coords"
chkpt = os.path.join(OUTPUT, env_name)
# Resume from checkpoint, if any
checkpointer = ttools.Checkpointer(
chkpt, model, meta=model_params,
optimizers=interface.optimizers(),
schedulers=interface.schedulers)
extras, meta = checkpointer.load_latest()
epoch = extras["epoch"] if extras and "epoch" in extras.keys() else 0
if meta is not None and meta != model_params:
LOG.info("Checkpoint's metaparams differ "
"from CLI, aborting: %s and %s", meta, model_params)
trainer = ttools.Trainer(interface)
# Add callbacks
losses = ["loss", "kl_loss", "vae_im_loss", "sketch_im_loss"]
training_debug = ["lr", "kl_weight"]
trainer.add_callback(ttools.callbacks.ProgressBarCallback(
keys=losses, val_keys=None))
trainer.add_callback(ttools.callbacks.VisdomLoggingCallback(
keys=losses, val_keys=None, env=env_name, port=args.port))
trainer.add_callback(ttools.callbacks.VisdomLoggingCallback(
keys=training_debug, smoothing=0, val_keys=None, env=env_name,
port=args.port))
trainer.add_callback(ttools.callbacks.CheckpointingCallback(
checkpointer, max_files=2, interval=600, max_epochs=10))
trainer.add_callback(
ttools.callbacks.LRSchedulerCallback(interface.schedulers))
trainer.add_callback(SketchVAECallback(
env=env_name, win="samples", port=args.port, frequency=args.freq))
# Start training
trainer.train(dataloader, starting_epoch=epoch,
val_dataloader=val_dataloader,
num_epochs=args.num_epochs)
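# --- Editor's sketch (not part of the original script): a forward pass on random data
# to illustrate the tensor shapes SketchVAE produces. The batch size and the 4-channel
# (RGBA-style) 64x64 input are assumptions matching the encoder's first convolution.
def _sketch_vae_shape_check():
    model = SketchVAE(sequence_length=50, zdim=128, image_size=64)
    images = th.rand(2, 4, 64, 64)  # (B, 4, H, W)
    out = model(images)
    assert out["decoded_im"].shape == (2, 4, 64, 64)  # reconstructed raster image
    assert out["decoded_sketch"].shape == (2, 49, 3)  # (B, sequence_length - 1, dx/dy/opacity)
    assert out["z"].shape == (2, 128)  # sampled latent vectors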
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", default="cat.npz")
parser.add_argument("--absolute_coordinates", action="store_true",
default=False)
parser.add_argument("--custom_name")
# Training params
parser.add_argument("--bs", type=int, default=1)
parser.add_argument("--workers", type=int, default=0)
parser.add_argument("--num_epochs", type=int, default=10000)
parser.add_argument("--lr", type=float, default=1e-4)
parser.add_argument("--lr_decay", type=float, default=0.9999)
parser.add_argument("--kl_weight", type=float, default=0.5)
parser.add_argument("--kl_decay", type=float, default=0.99995)
# Model configuration
parser.add_argument("--zdim", type=int, default=128)
parser.add_argument("--sequence_length", type=int, default=50)
parser.add_argument("--raster_resolution", type=int, default=64)
# parser.add_argument("--encoder_dim", type=int, default=256)
# parser.add_argument("--decoder_dim", type=int, default=512)
# Viz params
parser.add_argument("--freq", type=int, default=10)
parser.add_argument("--port", type=int, default=5000)
args = parser.parse_args()
pydiffvg.set_use_gpu(False)
train(args)
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.LSTM",
"torch.nn.Sigmoid",
"torch.nn.Tanh",
"torch.nn.ConvTranspose2d",
"torch.optim.lr_scheduler.ExponentialLR",
"torch.clamp",
"torch.manual_seed",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.nn.InstanceNorm2d",
"torch.tanh",
"torch.exp",
"torch.randn"
] | 1.8.1 | AntonBiryukovUofC/diffvg | e081098f52b82bfd0b7e91114d289d65ef969a60 |
1.3 | import torch as T
from torch.nn import Module
from mltoolkit.mlmo.layers import Ffnn
class MuSigmaFfnn(Module):
"""
A single hidden layer feed-forward nn that outputs mu and sigma of a
Gaussian distribution. The produced sigma is a vector with non-negative
values.
"""
def __init__(self, input_dim, output_dim, hidden_dim=None, **kwargs):
"""
        The network takes a continuous vector as input that at least contains
        the average embedding of aspects.
:param input_dim: self-explanatory.
:param hidden_dim: self-explanatory.
:type hidden_dim: int or list of ints.
:param output_dim: dimensionality of mus and sigmas.
"""
super(MuSigmaFfnn, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.hidden_dim = hidden_dim
self.ffnn = Ffnn(input_dim=input_dim, hidden_dim=hidden_dim,
output_dim=2 * output_dim, **kwargs)
def forward(self, inp):
"""
:param inp: [batch_size, input_dim]
:return: mu [batch_size, output_dim]
sigma [batch_size, output_dim]
"""
out = self.ffnn(inp)
mu = out[:, :self.output_dim]
log_sigma = out[:, self.output_dim:]
return mu, T.exp(log_sigma)
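# --- Editor's sketch (not part of the original module): how the returned mu/sigma are
# typically consumed via the reparameterization trick. The dimensions and the choice of
# hidden_dim are illustrative assumptions; Ffnn's behavior is taken from its docstring.
def _mu_sigma_usage_sketch():
    net = MuSigmaFfnn(input_dim=300, output_dim=64, hidden_dim=128)
    inp = T.randn(8, 300)  # [batch_size, input_dim]
    mu, sigma = net(inp)  # each [batch_size, output_dim]
    z = mu + sigma * T.randn_like(mu)  # differentiable Gaussian sample
    return z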
| [
"torch.exp"
] | 1.3.0 | stungkit/Copycat-abstractive-opinion-summarizer | 04fe5393a7bb6883516766b762f6a0c530e95375 |
1.8 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The LightningModule - an nn.Module with many additional features."""
import collections
import inspect
import logging
import numbers
import os
import tempfile
from contextlib import contextmanager
from pathlib import Path
from typing import Any, Callable, Dict, List, Mapping, Optional, overload, Sequence, Tuple, Union
import torch
from torch import ScriptModule, Tensor
from torch.nn import Module
from torch.optim.optimizer import Optimizer
from torchmetrics import Metric
from typing_extensions import Literal
import pytorch_lightning as pl
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.callbacks.progress import base as progress_base
from pytorch_lightning.core.hooks import CheckpointHooks, DataHooks, ModelHooks
from pytorch_lightning.core.mixins import DeviceDtypeModuleMixin, HyperparametersMixin
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.core.saving import ModelIO
from pytorch_lightning.loggers import Logger
from pytorch_lightning.trainer.connectors.data_connector import _DataHookSelector
from pytorch_lightning.trainer.connectors.logger_connector.fx_validator import _FxValidator
from pytorch_lightning.utilities import _IS_WINDOWS, _TORCH_GREATER_EQUAL_1_10, GradClipAlgorithmType
from pytorch_lightning.utilities.apply_func import apply_to_collection, convert_to_tensors
from pytorch_lightning.utilities.cloud_io import get_filesystem
from pytorch_lightning.utilities.distributed import distributed_available, sync_ddp
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.parsing import collect_init_args
from pytorch_lightning.utilities.rank_zero import rank_zero_debug, rank_zero_deprecation, rank_zero_warn
from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature
from pytorch_lightning.utilities.types import _METRIC_COLLECTION, EPOCH_OUTPUT, LRSchedulerTypeUnion, STEP_OUTPUT
from pytorch_lightning.utilities.warnings import WarningCache
warning_cache = WarningCache()
log = logging.getLogger(__name__)
class LightningModule(
DeviceDtypeModuleMixin,
HyperparametersMixin,
ModelIO,
ModelHooks,
DataHooks,
CheckpointHooks,
Module,
):
# Below is for property support of JIT
# since none of these are important when using JIT, we are going to ignore them.
__jit_unused_properties__ = (
[
"example_input_array",
"on_gpu",
"current_epoch",
"global_step",
"global_rank",
"local_rank",
"logger",
"loggers",
"automatic_optimization",
"truncated_bptt_steps",
"use_amp",
]
+ DeviceDtypeModuleMixin.__jit_unused_properties__
+ HyperparametersMixin.__jit_unused_properties__
)
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
# see (https://github.com/pytorch/pytorch/blob/3e6bb5233f9ca2c5aa55d9cda22a7ee85439aa6e/
# torch/nn/modules/module.py#L227)
torch._C._log_api_usage_once(f"lightning.module.{self.__class__.__name__}")
# pointer to the trainer object
self.trainer: Optional["pl.Trainer"] = None
self._use_amp: bool = False
# the precision used
self.precision: int = 32
# optionally can be set by user
self._example_input_array = None
self._current_fx_name: Optional[str] = None
self._automatic_optimization: bool = True
self._truncated_bptt_steps: int = 0
self._param_requires_grad_state = {}
self._metric_attributes: Optional[Dict[int, str]] = None
self._should_prevent_trainer_and_dataloaders_deepcopy: bool = False
# TODO: remove in 1.8
self._running_torchscript = False
self._register_sharded_tensor_state_dict_hooks_if_available()
@overload
def optimizers(self, use_pl_optimizer: Literal[True] = True) -> Union[LightningOptimizer, List[LightningOptimizer]]:
...
@overload
def optimizers(self, use_pl_optimizer: Literal[False]) -> Union[Optimizer, List[Optimizer]]:
...
@overload
def optimizers(
self, use_pl_optimizer: bool
) -> Union[Optimizer, LightningOptimizer, List[Optimizer], List[LightningOptimizer]]:
...
def optimizers(
self, use_pl_optimizer: bool = True
) -> Union[Optimizer, LightningOptimizer, List[Optimizer], List[LightningOptimizer]]:
"""Returns the optimizer(s) that are being used during training. Useful for manual optimization.
Args:
use_pl_optimizer: If ``True``, will wrap the optimizer(s) in a
:class:`~pytorch_lightning.core.optimizer.LightningOptimizer` for automatic handling of precision and
profiling.
Returns:
A single optimizer, or a list of optimizers in case multiple ones are present.
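        Example (a minimal manual-optimization sketch; ``compute_loss`` is a placeholder, not part of this API)::
            def __init__(self):
                super().__init__()
                self.automatic_optimization = False
            def training_step(self, batch, batch_idx):
                opt = self.optimizers()
                opt.zero_grad()
                loss = self.compute_loss(batch)  # placeholder loss computation
                self.manual_backward(loss)
                opt.step()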
"""
if use_pl_optimizer:
opts = list(self.trainer.strategy._lightning_optimizers.values())
else:
opts = self.trainer.optimizers
# single optimizer
if isinstance(opts, list) and len(opts) == 1 and isinstance(opts[0], (Optimizer, LightningOptimizer)):
return opts[0]
# multiple opts
return opts
def lr_schedulers(self) -> Optional[Union[LRSchedulerTypeUnion, List[LRSchedulerTypeUnion]]]:
"""Returns the learning rate scheduler(s) that are being used during training. Useful for manual
optimization.
Returns:
A single scheduler, or a list of schedulers in case multiple ones are present, or ``None`` if no
schedulers were returned in :meth:`configure_optimizers`.
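        Example (a sketch for manual optimization; assumes a single scheduler was returned from ``configure_optimizers``)::
            def training_epoch_end(self, outputs):
                sch = self.lr_schedulers()
                sch.step()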
"""
if not self.trainer.lr_scheduler_configs:
return None
# ignore other keys "interval", "frequency", etc.
lr_schedulers = [config.scheduler for config in self.trainer.lr_scheduler_configs]
# single scheduler
if len(lr_schedulers) == 1:
return lr_schedulers[0]
# multiple schedulers
return lr_schedulers
@property
def example_input_array(self) -> Any:
"""The example input array is a specification of what the module can consume in the :meth:`forward` method.
The return type is interpreted as follows:
- Single tensor: It is assumed the model takes a single argument, i.e.,
``model.forward(model.example_input_array)``
- Tuple: The input array should be interpreted as a sequence of positional arguments, i.e.,
``model.forward(*model.example_input_array)``
- Dict: The input array represents named keyword arguments, i.e.,
``model.forward(**model.example_input_array)``
"""
        """
        return self._example_input_array
@example_input_array.setter
def example_input_array(self, example: Any) -> None:
self._example_input_array = example
@property
def current_epoch(self) -> int:
"""The current epoch in the ``Trainer``, or 0 if not attached."""
return self.trainer.current_epoch if self.trainer else 0
@property
def global_step(self) -> int:
"""Total training batches seen across all epochs.
        If no Trainer is attached, this property is 0.
"""
return self.trainer.global_step if self.trainer else 0
@property
def global_rank(self) -> int:
"""The index of the current process across all nodes and devices."""
return self.trainer.global_rank if self.trainer else 0
@property
def local_rank(self) -> int:
"""The index of the current process within a single node."""
return self.trainer.local_rank if self.trainer else 0
@property
def on_gpu(self):
"""Returns ``True`` if this model is currently located on a GPU.
Useful to set flags around the LightningModule for different CPU vs GPU behavior.
"""
return self.device.type == "cuda"
@property
def automatic_optimization(self) -> bool:
"""If set to ``False`` you are responsible for calling ``.backward()``, ``.step()``, ``.zero_grad()``."""
return self._automatic_optimization
@automatic_optimization.setter
def automatic_optimization(self, automatic_optimization: bool) -> None:
self._automatic_optimization = automatic_optimization
@property
def truncated_bptt_steps(self) -> int:
"""Enables `Truncated Backpropagation Through Time` in the Trainer when set to a positive integer.
It represents
the number of times :meth:`training_step` gets called before backpropagation. If this is > 0, the
:meth:`training_step` receives an additional argument ``hiddens`` and is expected to return a hidden state.
"""
return self._truncated_bptt_steps
@truncated_bptt_steps.setter
def truncated_bptt_steps(self, truncated_bptt_steps: int) -> None:
self._truncated_bptt_steps = truncated_bptt_steps
@property
def logger(self) -> Optional[Logger]:
"""Reference to the logger object in the Trainer."""
return self.trainer.logger if self.trainer else None
@property
def loggers(self) -> List[Logger]:
"""Reference to the list of loggers in the Trainer."""
return self.trainer.loggers if self.trainer else []
def _apply_batch_transfer_handler(
self, batch: Any, device: Optional[torch.device] = None, dataloader_idx: int = 0
) -> Any:
device = device or self.device
datahook_selector = (
_DataHookSelector(self, None) if self.trainer is None else self.trainer._data_connector._datahook_selector
)
hook = datahook_selector.get_hook("on_before_batch_transfer")
batch = hook(batch, dataloader_idx)
hook = datahook_selector.get_hook("transfer_batch_to_device")
batch = hook(batch, device, dataloader_idx)
hook = datahook_selector.get_hook("on_after_batch_transfer")
batch = hook(batch, dataloader_idx)
return batch
def print(self, *args, **kwargs) -> None:
r"""
Prints only from process 0. Use this in any distributed mode to log only once.
Args:
*args: The thing to print. The same as for Python's built-in print function.
**kwargs: The same as for Python's built-in print function.
Example::
def forward(self, x):
self.print(x, 'in forward')
"""
if self.trainer.is_global_zero:
progress_bar = self.trainer.progress_bar_callback
if progress_bar is not None and progress_bar.is_enabled:
progress_bar.print(*args, **kwargs)
else:
print(*args, **kwargs)
def log(
self,
name: str,
value: _METRIC_COLLECTION,
prog_bar: bool = False,
logger: bool = True,
on_step: Optional[bool] = None,
on_epoch: Optional[bool] = None,
reduce_fx: Union[str, Callable] = "mean",
enable_graph: bool = False,
sync_dist: bool = False,
sync_dist_group: Optional[Any] = None,
add_dataloader_idx: bool = True,
batch_size: Optional[int] = None,
metric_attribute: Optional[str] = None,
rank_zero_only: bool = False,
) -> None:
"""Log a key, value pair.
Example::
self.log('train_loss', loss)
The default behavior per hook is documented here: :ref:`extensions/logging:Automatic Logging`.
Args:
name: key to log.
value: value to log. Can be a ``float``, ``Tensor``, ``Metric``, or a dictionary of the former.
prog_bar: if ``True`` logs to the progress bar.
logger: if ``True`` logs to the logger.
on_step: if ``True`` logs at this step. The default value is determined by the hook.
See :ref:`extensions/logging:Automatic Logging` for details.
on_epoch: if ``True`` logs epoch accumulated metrics. The default value is determined by the hook.
See :ref:`extensions/logging:Automatic Logging` for details.
reduce_fx: reduction function over step values for end of epoch. :meth:`torch.mean` by default.
enable_graph: if ``True``, will not auto detach the graph.
sync_dist: if ``True``, reduces the metric across devices. Use with care as this may lead to a significant
communication overhead.
sync_dist_group: the DDP group to sync across.
add_dataloader_idx: if ``True``, appends the index of the current dataloader to
the name (when using multiple dataloaders). If False, user needs to give unique names for
each dataloader to not mix the values.
batch_size: Current batch_size. This will be directly inferred from the loaded batch,
but for some data structures you might need to explicitly provide it.
metric_attribute: To restore the metric state, Lightning requires the reference of the
:class:`torchmetrics.Metric` in your model. This is found automatically if it is a model attribute.
rank_zero_only: Whether the value will be logged only on rank 0. This will prevent synchronization which
would produce a deadlock as not all processes would perform this log call.
"""
# check for invalid values
apply_to_collection(value, dict, self.__check_not_nested, name)
apply_to_collection(
value, object, self.__check_allowed, name, value, wrong_dtype=(numbers.Number, Metric, Tensor, dict)
)
if self.trainer is None:
# not an error to support testing the `*_step` methods without a `Trainer` reference
rank_zero_warn(
"You are trying to `self.log()` but the `self.trainer` reference is not registered on the model yet."
" This is most likely because the model hasn't been passed to the `Trainer`"
)
return
results = self.trainer._results
if results is None:
raise MisconfigurationException(
"You are trying to `self.log()` but the loop's result collection is not registered"
" yet. This is most likely because you are trying to log in a `predict` hook,"
" but it doesn't support logging"
)
if self._current_fx_name is None:
raise MisconfigurationException(
"You are trying to `self.log()` but it is not managed by the `Trainer` control flow"
)
on_step, on_epoch = _FxValidator.check_logging_and_get_default_levels(
self._current_fx_name, on_step=on_step, on_epoch=on_epoch
)
# make sure user doesn't introduce logic for multi-dataloaders
if "/dataloader_idx_" in name:
raise MisconfigurationException(
f"You called `self.log` with the key `{name}`"
" but it should not contain information about `dataloader_idx`"
)
value = apply_to_collection(value, numbers.Number, self.__to_tensor)
if self.trainer._logger_connector.should_reset_tensors(self._current_fx_name):
# if we started a new epoch (running its first batch) the hook name has changed
# reset any tensors for the new hook name
results.reset(metrics=False, fx=self._current_fx_name)
if metric_attribute is None and isinstance(value, Metric):
if self._metric_attributes is None:
# compute once
self._metric_attributes = {
id(module): name for name, module in self.named_modules() if isinstance(module, Metric)
}
if not self._metric_attributes:
raise MisconfigurationException(
"Could not find the `LightningModule` attribute for the `torchmetrics.Metric` logged."
" You can fix this by setting an attribute for the metric in your `LightningModule`."
)
# try to find the passed metric in the LightningModule
metric_attribute = self._metric_attributes.get(id(value), None)
if metric_attribute is None:
raise MisconfigurationException(
"Could not find the `LightningModule` attribute for the `torchmetrics.Metric` logged."
f" You can fix this by calling `self.log({name}, ..., metric_attribute=name)` where `name` is one"
f" of {list(self._metric_attributes.values())}"
)
if (
self.trainer.training
and is_param_in_hook_signature(self.training_step, "dataloader_iter", explicit=True)
and batch_size is None
):
raise MisconfigurationException(
"With `def training_step(self, dataloader_iter)`, `self.log(..., batch_size=...)` should be provided."
)
results.log(
self._current_fx_name,
name,
value,
prog_bar=prog_bar,
logger=logger,
on_step=on_step,
on_epoch=on_epoch,
reduce_fx=reduce_fx,
enable_graph=enable_graph,
add_dataloader_idx=add_dataloader_idx,
batch_size=batch_size,
sync_dist=sync_dist and distributed_available(),
sync_dist_fn=self.trainer.strategy.reduce or sync_ddp,
sync_dist_group=sync_dist_group,
metric_attribute=metric_attribute,
rank_zero_only=rank_zero_only,
)
self.trainer._logger_connector._current_fx = self._current_fx_name
def log_dict(
self,
dictionary: Mapping[str, _METRIC_COLLECTION],
prog_bar: bool = False,
logger: bool = True,
on_step: Optional[bool] = None,
on_epoch: Optional[bool] = None,
reduce_fx: Union[str, Callable] = "mean",
enable_graph: bool = False,
sync_dist: bool = False,
sync_dist_group: Optional[Any] = None,
add_dataloader_idx: bool = True,
batch_size: Optional[int] = None,
rank_zero_only: bool = False,
) -> None:
"""Log a dictionary of values at once.
Example::
values = {'loss': loss, 'acc': acc, ..., 'metric_n': metric_n}
self.log_dict(values)
Args:
dictionary: key value pairs.
The values can be a ``float``, ``Tensor``, ``Metric``, or a dictionary of the former.
prog_bar: if ``True`` logs to the progress base.
logger: if ``True`` logs to the logger.
on_step: if ``True`` logs at this step.
``None`` auto-logs for training_step but not validation/test_step.
The default value is determined by the hook.
See :ref:`extensions/logging:Automatic Logging` for details.
on_epoch: if ``True`` logs epoch accumulated metrics.
``None`` auto-logs for val/test step but not ``training_step``.
The default value is determined by the hook.
See :ref:`extensions/logging:Automatic Logging` for details.
reduce_fx: reduction function over step values for end of epoch. :meth:`torch.mean` by default.
enable_graph: if ``True``, will not auto-detach the graph
sync_dist: if ``True``, reduces the metric across GPUs/TPUs. Use with care as this may lead to a significant
communication overhead.
sync_dist_group: the ddp group to sync across.
add_dataloader_idx: if ``True``, appends the index of the current dataloader to
the name (when using multiple). If ``False``, user needs to give unique names for
each dataloader to not mix values.
            batch_size: Current batch size. This will be directly inferred from the loaded batch,
                but for some data structures you might need to explicitly provide it.
rank_zero_only: Whether the value will be logged only on rank 0. This will prevent synchronization which
would produce a deadlock as not all processes would perform this log call.
"""
for k, v in dictionary.items():
self.log(
name=k,
value=v,
prog_bar=prog_bar,
logger=logger,
on_step=on_step,
on_epoch=on_epoch,
reduce_fx=reduce_fx,
enable_graph=enable_graph,
sync_dist=sync_dist,
sync_dist_group=sync_dist_group,
add_dataloader_idx=add_dataloader_idx,
batch_size=batch_size,
rank_zero_only=rank_zero_only,
)
@staticmethod
def __check_not_nested(value: dict, name: str) -> dict:
# self-imposed restriction. for simplicity
if any(isinstance(v, dict) for v in value.values()):
raise ValueError(f"`self.log({name}, {value})` was called, but nested dictionaries cannot be logged")
return value
@staticmethod
def __check_allowed(v: Any, name: str, value: Any) -> None:
raise ValueError(f"`self.log({name}, {value})` was called, but `{type(v).__name__}` values cannot be logged")
def __to_tensor(self, value: numbers.Number) -> torch.Tensor:
return torch.tensor(value, device=self.device)
def log_grad_norm(self, grad_norm_dict: Dict[str, float]) -> None:
"""Override this method to change the default behaviour of ``log_grad_norm``.
If clipping gradients, the gradients will not have been clipped yet.
Args:
grad_norm_dict: Dictionary containing current grad norm metrics
Example::
# DEFAULT
def log_grad_norm(self, grad_norm_dict):
self.log_dict(grad_norm_dict, on_step=True, on_epoch=True, prog_bar=False, logger=True)
"""
self.log_dict(grad_norm_dict, on_step=True, on_epoch=True, prog_bar=False, logger=True)
def all_gather(
self, data: Union[torch.Tensor, Dict, List, Tuple], group: Optional[Any] = None, sync_grads: bool = False
):
r"""
Allows users to call ``self.all_gather()`` from the LightningModule, thus making the ``all_gather`` operation
accelerator agnostic. ``all_gather`` is a function provided by accelerators to gather a tensor from several
distributed processes.
Args:
data: int, float, tensor of shape (batch, ...), or a (possibly nested) collection thereof.
group: the process group to gather results from. Defaults to all processes (world)
sync_grads: flag that allows users to synchronize gradients for the all_gather operation
Return:
A tensor of shape (world_size, batch, ...), or if the input was a collection
the output will also be a collection with tensors of this shape.
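        Example (a sketch; ``loss`` stands in for any per-process tensor)::
            def validation_step(self, batch, batch_idx):
                loss = ...
                gathered = self.all_gather(loss)  # shape (world_size, ...) on every process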
"""
group = group if group is not None else torch.distributed.group.WORLD
all_gather = self.trainer.strategy.all_gather
data = convert_to_tensors(data, device=self.device)
return apply_to_collection(data, torch.Tensor, all_gather, group=group, sync_grads=sync_grads)
def forward(self, *args, **kwargs) -> Any:
r"""
Same as :meth:`torch.nn.Module.forward()`.
Args:
*args: Whatever you decide to pass into the forward method.
**kwargs: Keyword arguments are also possible.
Return:
Your model's output
"""
return super().forward(*args, **kwargs)
def training_step(self, *args, **kwargs) -> STEP_OUTPUT:
r"""
Here you compute and return the training loss and some additional metrics for e.g.
the progress bar or logger.
Args:
batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):
The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
batch_idx (``int``): Integer displaying index of this batch
optimizer_idx (``int``): When using multiple optimizers, this argument will also be present.
hiddens (``Any``): Passed in if
:paramref:`~pytorch_lightning.core.lightning.LightningModule.truncated_bptt_steps` > 0.
Return:
Any of.
- :class:`~torch.Tensor` - The loss tensor
- ``dict`` - A dictionary. Can include any keys, but must include the key ``'loss'``
- ``None`` - Training will skip to the next batch. This is only for automatic optimization.
This is not supported for multi-GPU, TPU, IPU, or DeepSpeed.
In this step you'd normally do the forward pass and calculate the loss for a batch.
You can also do fancier things like multiple forward passes or something model specific.
Example::
def training_step(self, batch, batch_idx):
x, y, z = batch
out = self.encoder(x)
loss = self.loss(out, x)
return loss
If you define multiple optimizers, this step will be called with an additional
``optimizer_idx`` parameter.
.. code-block:: python
# Multiple optimizers (e.g.: GANs)
def training_step(self, batch, batch_idx, optimizer_idx):
if optimizer_idx == 0:
# do training_step with encoder
...
if optimizer_idx == 1:
# do training_step with decoder
...
If you add truncated back propagation through time you will also get an additional
argument with the hidden states of the previous step.
.. code-block:: python
# Truncated back-propagation through time
def training_step(self, batch, batch_idx, hiddens):
# hiddens are the hidden states from the previous truncated backprop step
out, hiddens = self.lstm(data, hiddens)
loss = ...
return {"loss": loss, "hiddens": hiddens}
Note:
The loss value shown in the progress bar is smoothed (averaged) over the last values,
so it differs from the actual loss returned in train/validation step.
"""
rank_zero_warn("`training_step` must be implemented to be used with the Lightning Trainer")
def training_step_end(self, step_output: STEP_OUTPUT) -> STEP_OUTPUT:
"""Use this when training with dp or ddp2 because :meth:`training_step` will operate on only part of the
batch. However, this is still optional and only needed for things like softmax or NCE loss.
Note:
If you later switch to ddp or some other mode, this will still be called
so that you don't have to change your code
.. code-block:: python
# pseudocode
sub_batches = split_batches_for_dp(batch)
step_output = [training_step(sub_batch) for sub_batch in sub_batches]
training_step_end(step_output)
Args:
step_output: What you return in `training_step` for each batch part.
Return:
Anything
When using dp/ddp2 distributed backends, only a portion of the batch is inside the training_step:
.. code-block:: python
def training_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self(x)
# softmax uses only a portion of the batch in the denominator
loss = self.softmax(out)
loss = nce_loss(loss)
return loss
If you wish to do something with all the parts of the batch, then use this method to do it:
.. code-block:: python
def training_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self.encoder(x)
return {"pred": out}
def training_step_end(self, training_step_outputs):
gpu_0_pred = training_step_outputs[0]["pred"]
gpu_1_pred = training_step_outputs[1]["pred"]
gpu_n_pred = training_step_outputs[n]["pred"]
# this softmax now uses the full batch
loss = nce_loss([gpu_0_pred, gpu_1_pred, gpu_n_pred])
return loss
See Also:
See the :ref:`Multi GPU Training <gpu_intermediate>` guide for more details.
"""
def training_epoch_end(self, outputs: EPOCH_OUTPUT) -> None:
"""Called at the end of the training epoch with the outputs of all training steps. Use this in case you
need to do something with all the outputs returned by :meth:`training_step`.
.. code-block:: python
# the pseudocode for these calls
train_outs = []
for train_batch in train_data:
out = training_step(train_batch)
train_outs.append(out)
training_epoch_end(train_outs)
Args:
outputs: List of outputs you defined in :meth:`training_step`. If there are multiple optimizers or when
using ``truncated_bptt_steps > 0``, the lists have the dimensions
(n_batches, tbptt_steps, n_optimizers). Dimensions of length 1 are squeezed.
Return:
None
Note:
If this method is not overridden, this won't be called.
.. code-block:: python
def training_epoch_end(self, training_step_outputs):
# do something with all training_step outputs
for out in training_step_outputs:
...
"""
def validation_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:
r"""
Operates on a single batch of data from the validation set.
        In this step you might generate examples or calculate anything of interest like accuracy.
.. code-block:: python
# the pseudocode for these calls
val_outs = []
for val_batch in val_data:
out = validation_step(val_batch)
val_outs.append(out)
validation_epoch_end(val_outs)
Args:
batch: The output of your :class:`~torch.utils.data.DataLoader`.
batch_idx: The index of this batch.
dataloader_idx: The index of the dataloader that produced this batch.
(only if multiple val dataloaders used)
Return:
- Any object or value
- ``None`` - Validation will skip to the next batch
.. code-block:: python
# pseudocode of order
val_outs = []
for val_batch in val_data:
out = validation_step(val_batch)
if defined("validation_step_end"):
out = validation_step_end(out)
val_outs.append(out)
val_outs = validation_epoch_end(val_outs)
.. code-block:: python
# if you have one val dataloader:
def validation_step(self, batch, batch_idx):
...
# if you have multiple val dataloaders:
def validation_step(self, batch, batch_idx, dataloader_idx=0):
...
Examples::
# CASE 1: A single validation dataset
def validation_step(self, batch, batch_idx):
x, y = batch
# implement your own
out = self(x)
loss = self.loss(out, y)
# log 6 example images
# or generated text... or whatever
sample_imgs = x[:6]
grid = torchvision.utils.make_grid(sample_imgs)
self.logger.experiment.add_image('example_images', grid, 0)
# calculate acc
labels_hat = torch.argmax(out, dim=1)
val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
# log the outputs!
self.log_dict({'val_loss': loss, 'val_acc': val_acc})
If you pass in multiple val dataloaders, :meth:`validation_step` will have an additional argument. We recommend
setting the default value of 0 so that you can quickly switch between single and multiple dataloaders.
.. code-block:: python
# CASE 2: multiple validation dataloaders
def validation_step(self, batch, batch_idx, dataloader_idx=0):
# dataloader_idx tells you which dataset this is.
...
Note:
If you don't need to validate you don't need to implement this method.
Note:
When the :meth:`validation_step` is called, the model has been put in eval mode
and PyTorch gradients have been disabled. At the end of validation,
the model goes back to training mode and gradients are enabled.
"""
def validation_step_end(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:
"""Use this when validating with dp or ddp2 because :meth:`validation_step` will operate on only part of
the batch. However, this is still optional and only needed for things like softmax or NCE loss.
Note:
If you later switch to ddp or some other mode, this will still be called
so that you don't have to change your code.
.. code-block:: python
# pseudocode
sub_batches = split_batches_for_dp(batch)
step_output = [validation_step(sub_batch) for sub_batch in sub_batches]
validation_step_end(step_output)
Args:
step_output: What you return in :meth:`validation_step` for each batch part.
Return:
None or anything
.. code-block:: python
# WITHOUT validation_step_end
# if used in DP or DDP2, this batch is 1/num_gpus large
def validation_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self.encoder(x)
loss = self.softmax(out)
loss = nce_loss(loss)
self.log("val_loss", loss)
# --------------
# with validation_step_end to do softmax over the full batch
def validation_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self(x)
return out
def validation_step_end(self, val_step_outputs):
for out in val_step_outputs:
...
See Also:
See the :ref:`Multi GPU Training <gpu_intermediate>` guide for more details.
"""
def validation_epoch_end(self, outputs: Union[EPOCH_OUTPUT, List[EPOCH_OUTPUT]]) -> None:
"""Called at the end of the validation epoch with the outputs of all validation steps.
.. code-block:: python
# the pseudocode for these calls
val_outs = []
for val_batch in val_data:
out = validation_step(val_batch)
val_outs.append(out)
validation_epoch_end(val_outs)
Args:
outputs: List of outputs you defined in :meth:`validation_step`, or if there
are multiple dataloaders, a list containing a list of outputs for each dataloader.
Return:
None
Note:
If you didn't define a :meth:`validation_step`, this won't be called.
Examples:
With a single dataloader:
.. code-block:: python
def validation_epoch_end(self, val_step_outputs):
for out in val_step_outputs:
...
With multiple dataloaders, `outputs` will be a list of lists. The outer list contains
one entry per dataloader, while the inner list contains the individual outputs of
each validation step for that dataloader.
.. code-block:: python
def validation_epoch_end(self, outputs):
for dataloader_output_result in outputs:
dataloader_outs = dataloader_output_result.dataloader_i_outputs
self.log("final_metric", final_value)
"""
def test_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:
r"""
Operates on a single batch of data from the test set.
In this step you'd normally generate examples or calculate anything of interest
such as accuracy.
.. code-block:: python
# the pseudocode for these calls
test_outs = []
for test_batch in test_data:
out = test_step(test_batch)
test_outs.append(out)
test_epoch_end(test_outs)
Args:
batch: The output of your :class:`~torch.utils.data.DataLoader`.
batch_idx: The index of this batch.
            dataloader_idx: The index of the dataloader that produced this batch.
(only if multiple test dataloaders used).
Return:
Any of.
- Any object or value
- ``None`` - Testing will skip to the next batch
.. code-block:: python
# if you have one test dataloader:
def test_step(self, batch, batch_idx):
...
# if you have multiple test dataloaders:
def test_step(self, batch, batch_idx, dataloader_idx=0):
...
Examples::
# CASE 1: A single test dataset
def test_step(self, batch, batch_idx):
x, y = batch
# implement your own
out = self(x)
loss = self.loss(out, y)
# log 6 example images
# or generated text... or whatever
sample_imgs = x[:6]
grid = torchvision.utils.make_grid(sample_imgs)
self.logger.experiment.add_image('example_images', grid, 0)
# calculate acc
labels_hat = torch.argmax(out, dim=1)
test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
# log the outputs!
self.log_dict({'test_loss': loss, 'test_acc': test_acc})
If you pass in multiple test dataloaders, :meth:`test_step` will have an additional argument. We recommend
setting the default value of 0 so that you can quickly switch between single and multiple dataloaders.
.. code-block:: python
# CASE 2: multiple test dataloaders
def test_step(self, batch, batch_idx, dataloader_idx=0):
# dataloader_idx tells you which dataset this is.
...
Note:
If you don't need to test you don't need to implement this method.
Note:
When the :meth:`test_step` is called, the model has been put in eval mode and
PyTorch gradients have been disabled. At the end of the test epoch, the model goes back
to training mode and gradients are enabled.
"""
def test_step_end(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:
"""Use this when testing with DP or DDP2 because :meth:`test_step` will operate on only part of the batch.
However, this is still optional and only needed for things like softmax or NCE loss.
Note:
If you later switch to ddp or some other mode, this will still be called
so that you don't have to change your code.
.. code-block:: python
# pseudocode
sub_batches = split_batches_for_dp(batch)
step_output = [test_step(sub_batch) for sub_batch in sub_batches]
test_step_end(step_output)
Args:
step_output: What you return in :meth:`test_step` for each batch part.
Return:
None or anything
.. code-block:: python
# WITHOUT test_step_end
# if used in DP or DDP2, this batch is 1/num_gpus large
def test_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self(x)
loss = self.softmax(out)
self.log("test_loss", loss)
# --------------
# with test_step_end to do softmax over the full batch
def test_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self.encoder(x)
return out
def test_step_end(self, output_results):
# this out is now the full size of the batch
all_test_step_outs = output_results.out
loss = nce_loss(all_test_step_outs)
self.log("test_loss", loss)
See Also:
See the :ref:`Multi GPU Training <gpu_intermediate>` guide for more details.
"""
def test_epoch_end(self, outputs: Union[EPOCH_OUTPUT, List[EPOCH_OUTPUT]]) -> None:
"""Called at the end of a test epoch with the output of all test steps.
.. code-block:: python
# the pseudocode for these calls
test_outs = []
for test_batch in test_data:
out = test_step(test_batch)
test_outs.append(out)
test_epoch_end(test_outs)
Args:
outputs: List of outputs you defined in :meth:`test_step_end`, or if there
are multiple dataloaders, a list containing a list of outputs for each dataloader
Return:
None
Note:
If you didn't define a :meth:`test_step`, this won't be called.
Examples:
With a single dataloader:
.. code-block:: python
def test_epoch_end(self, outputs):
# do something with the outputs of all test batches
all_test_preds = test_step_outputs.predictions
some_result = calc_all_results(all_test_preds)
self.log(some_result)
With multiple dataloaders, `outputs` will be a list of lists. The outer list contains
one entry per dataloader, while the inner list contains the individual outputs of
each test step for that dataloader.
.. code-block:: python
def test_epoch_end(self, outputs):
final_value = 0
for dataloader_outputs in outputs:
for test_step_out in dataloader_outputs:
# do something
final_value += test_step_out
self.log("final_metric", final_value)
"""
def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:
"""Step function called during :meth:`~pytorch_lightning.trainer.trainer.Trainer.predict`. By default, it
calls :meth:`~pytorch_lightning.core.lightning.LightningModule.forward`. Override to add any processing
logic.
The :meth:`~pytorch_lightning.core.lightning.LightningModule.predict_step` is used
to scale inference on multi-devices.
To prevent an OOM error, it is possible to use :class:`~pytorch_lightning.callbacks.BasePredictionWriter`
callback to write the predictions to disk or database after each batch or on epoch end.
The :class:`~pytorch_lightning.callbacks.BasePredictionWriter` should be used while using a spawn
based accelerator. This happens for ``Trainer(strategy="ddp_spawn")``
or training on 8 TPU cores with ``Trainer(accelerator="tpu", devices=8)`` as predictions won't be returned.
Example ::
class MyModel(LightningModule):
                def predict_step(self, batch, batch_idx, dataloader_idx=0):
return self(batch)
dm = ...
model = MyModel()
trainer = Trainer(accelerator="gpu", devices=2)
predictions = trainer.predict(model, dm)
Args:
batch: Current batch.
batch_idx: Index of current batch.
dataloader_idx: Index of the current dataloader.
Return:
Predicted output
"""
return self(batch)
def configure_callbacks(self) -> Union[Sequence[Callback], Callback]:
"""Configure model-specific callbacks. When the model gets attached, e.g., when ``.fit()`` or ``.test()``
gets called, the list or a callback returned here will be merged with the list of callbacks passed to the
Trainer's ``callbacks`` argument. If a callback returned here has the same type as one or several callbacks
already present in the Trainer's callbacks list, it will take priority and replace them. In addition,
Lightning will make sure :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` callbacks
run last.
Return:
A callback or a list of callbacks which will extend the list of callbacks in the Trainer.
Example::
def configure_callbacks(self):
early_stop = EarlyStopping(monitor="val_acc", mode="max")
checkpoint = ModelCheckpoint(monitor="val_loss")
return [early_stop, checkpoint]
Note:
Certain callback methods like :meth:`~pytorch_lightning.callbacks.base.Callback.on_init_start`
will never be invoked on the new callbacks returned here.
"""
return []
def configure_optimizers(self):
r"""
Choose what optimizers and learning-rate schedulers to use in your optimization.
Normally you'd need one. But in the case of GANs or similar you might have multiple.
Return:
Any of these 6 options.
- **Single optimizer**.
- **List or Tuple** of optimizers.
- **Two lists** - The first list has multiple optimizers, and the second has multiple LR schedulers
(or multiple ``lr_scheduler_config``).
- **Dictionary**, with an ``"optimizer"`` key, and (optionally) a ``"lr_scheduler"``
key whose value is a single LR scheduler or ``lr_scheduler_config``.
- **Tuple of dictionaries** as described above, with an optional ``"frequency"`` key.
- **None** - Fit will run without any optimizer.
The ``lr_scheduler_config`` is a dictionary which contains the scheduler and its associated configuration.
The default configuration is shown below.
.. code-block:: python
lr_scheduler_config = {
# REQUIRED: The scheduler instance
"scheduler": lr_scheduler,
# The unit of the scheduler's step size, could also be 'step'.
# 'epoch' updates the scheduler on epoch end whereas 'step'
# updates it after an optimizer update.
"interval": "epoch",
# How many epochs/steps should pass between calls to
# `scheduler.step()`. 1 corresponds to updating the learning
# rate after every epoch/step.
"frequency": 1,
# Metric to monitor for schedulers like `ReduceLROnPlateau`
"monitor": "val_loss",
# If set to `True`, will enforce that the value specified in 'monitor'
# is available when the scheduler is updated, thus stopping
# training if not found. If set to `False`, it will only produce a warning
"strict": True,
# If using the `LearningRateMonitor` callback to monitor the
# learning rate progress, this keyword can be used to specify
# a custom logged name
"name": None,
}
When there are schedulers in which the ``.step()`` method is conditioned on a value, such as the
:class:`torch.optim.lr_scheduler.ReduceLROnPlateau` scheduler, Lightning requires that the
``lr_scheduler_config`` contains the keyword ``"monitor"`` set to the metric name that the scheduler
should be conditioned on.
.. testcode::
# The ReduceLROnPlateau scheduler requires a monitor
def configure_optimizers(self):
optimizer = Adam(...)
return {
"optimizer": optimizer,
"lr_scheduler": {
"scheduler": ReduceLROnPlateau(optimizer, ...),
"monitor": "metric_to_track",
"frequency": "indicates how often the metric is updated"
# If "monitor" references validation metrics, then "frequency" should be set to a
# multiple of "trainer.check_val_every_n_epoch".
},
}
# In the case of two optimizers, only one using the ReduceLROnPlateau scheduler
def configure_optimizers(self):
optimizer1 = Adam(...)
optimizer2 = SGD(...)
scheduler1 = ReduceLROnPlateau(optimizer1, ...)
scheduler2 = LambdaLR(optimizer2, ...)
return (
{
"optimizer": optimizer1,
"lr_scheduler": {
"scheduler": scheduler1,
"monitor": "metric_to_track",
},
},
{"optimizer": optimizer2, "lr_scheduler": scheduler2},
)
Metrics can be made available to monitor by simply logging them using
``self.log('metric_to_track', metric_val)`` in your :class:`~pytorch_lightning.core.lightning.LightningModule`.
Note:
The ``frequency`` value specified in a dict along with the ``optimizer`` key is an int corresponding
to the number of sequential batches optimized with the specific optimizer.
It should be given to none or to all of the optimizers.
There is a difference between passing multiple optimizers in a list,
and passing multiple optimizers in dictionaries with a frequency of 1:
- In the former case, all optimizers will operate on the given batch in each optimization step.
- In the latter, only one optimizer will operate on the given batch at every step.
This is different from the ``frequency`` value specified in the ``lr_scheduler_config`` mentioned above.
.. code-block:: python
def configure_optimizers(self):
optimizer_one = torch.optim.SGD(self.model.parameters(), lr=0.01)
optimizer_two = torch.optim.SGD(self.model.parameters(), lr=0.01)
return [
{"optimizer": optimizer_one, "frequency": 5},
{"optimizer": optimizer_two, "frequency": 10},
]
In this example, the first optimizer will be used for the first 5 steps,
the second optimizer for the next 10 steps and that cycle will continue.
If an LR scheduler is specified for an optimizer using the ``lr_scheduler`` key in the above dict,
the scheduler will only be updated when its optimizer is being used.
Examples::
# most cases. no learning rate scheduler
def configure_optimizers(self):
return Adam(self.parameters(), lr=1e-3)
# multiple optimizer case (e.g.: GAN)
def configure_optimizers(self):
gen_opt = Adam(self.model_gen.parameters(), lr=0.01)
dis_opt = Adam(self.model_dis.parameters(), lr=0.02)
return gen_opt, dis_opt
# example with learning rate schedulers
def configure_optimizers(self):
gen_opt = Adam(self.model_gen.parameters(), lr=0.01)
dis_opt = Adam(self.model_dis.parameters(), lr=0.02)
dis_sch = CosineAnnealing(dis_opt, T_max=10)
return [gen_opt, dis_opt], [dis_sch]
# example with step-based learning rate schedulers
# each optimizer has its own scheduler
def configure_optimizers(self):
gen_opt = Adam(self.model_gen.parameters(), lr=0.01)
dis_opt = Adam(self.model_dis.parameters(), lr=0.02)
gen_sch = {
'scheduler': ExponentialLR(gen_opt, 0.99),
'interval': 'step' # called after each training step
}
dis_sch = CosineAnnealing(dis_opt, T_max=10) # called every epoch
return [gen_opt, dis_opt], [gen_sch, dis_sch]
# example with optimizer frequencies
# see training procedure in `Improved Training of Wasserstein GANs`, Algorithm 1
# https://arxiv.org/abs/1704.00028
def configure_optimizers(self):
gen_opt = Adam(self.model_gen.parameters(), lr=0.01)
dis_opt = Adam(self.model_dis.parameters(), lr=0.02)
n_critic = 5
return (
{'optimizer': dis_opt, 'frequency': n_critic},
{'optimizer': gen_opt, 'frequency': 1}
)
Note:
Some things to know:
- Lightning calls ``.backward()`` and ``.step()`` on each optimizer and learning rate scheduler as needed.
- If you use 16-bit precision (``precision=16``), Lightning will automatically handle the optimizers.
- If you use multiple optimizers, :meth:`training_step` will have an additional ``optimizer_idx`` parameter.
- If you use :class:`torch.optim.LBFGS`, Lightning handles the closure function automatically for you.
- If you use multiple optimizers, gradients will be calculated only for the parameters of current optimizer
at each training step.
- If you need to control how often those optimizers step or override the default ``.step()`` schedule,
override the :meth:`optimizer_step` hook.
"""
rank_zero_warn("`configure_optimizers` must be implemented to be used with the Lightning Trainer")
def manual_backward(self, loss: Tensor, *args, **kwargs) -> None:
"""Call this directly from your :meth:`training_step` when doing optimizations manually. By using this,
Lightning can ensure that all the proper scaling gets applied when using mixed precision.
See :ref:`manual optimization<common/optimization:Manual optimization>` for more examples.
Example::
def training_step(...):
opt = self.optimizers()
loss = ...
opt.zero_grad()
# automatically applies scaling, etc...
self.manual_backward(loss)
opt.step()
Args:
loss: The tensor on which to compute gradients. Must have a graph attached.
*args: Additional positional arguments to be forwarded to :meth:`~torch.Tensor.backward`
**kwargs: Additional keyword arguments to be forwarded to :meth:`~torch.Tensor.backward`
"""
self._verify_is_manual_optimization("manual_backward")
self.trainer.strategy.backward(loss, None, None, *args, **kwargs)
def backward(
self, loss: Tensor, optimizer: Optional[Optimizer], optimizer_idx: Optional[int], *args, **kwargs
) -> None:
"""Called to perform backward on the loss returned in :meth:`training_step`. Override this hook with your
own implementation if you need to.
Args:
loss: The loss tensor returned by :meth:`training_step`. If gradient accumulation is used, the loss here
holds the normalized value (scaled by 1 / accumulation steps).
optimizer: Current optimizer being used. ``None`` if using manual optimization.
optimizer_idx: Index of the current optimizer being used. ``None`` if using manual optimization.
Example::
def backward(self, loss, optimizer, optimizer_idx):
loss.backward()
"""
loss.backward(*args, **kwargs)
def toggle_optimizer(self, optimizer: Union[Optimizer, LightningOptimizer], optimizer_idx: int) -> None:
"""Makes sure only the gradients of the current optimizer's parameters are calculated in the training step
to prevent dangling gradients in multiple-optimizer setup.
This is only called automatically when automatic optimization is enabled and multiple optimizers are used.
It works with :meth:`untoggle_optimizer` to make sure ``param_requires_grad_state`` is properly reset.
Args:
optimizer: The optimizer to toggle.
optimizer_idx: The index of the optimizer to toggle.
"""
# Iterate over all optimizer parameters to preserve their `requires_grad` information
# in case these are pre-defined during `configure_optimizers`
param_requires_grad_state = {}
for opt in self.trainer.optimizers:
for group in opt.param_groups:
for param in group["params"]:
# If a param already appears in param_requires_grad_state, continue
if param in param_requires_grad_state:
continue
param_requires_grad_state[param] = param.requires_grad
param.requires_grad = False
# Then iterate over the current optimizer's parameters and set its `requires_grad`
# properties accordingly
for group in optimizer.param_groups:
for param in group["params"]:
param.requires_grad = param_requires_grad_state[param]
self._param_requires_grad_state = param_requires_grad_state
def untoggle_optimizer(self, optimizer_idx: int) -> None:
"""Resets the state of required gradients that were toggled with :meth:`toggle_optimizer`.
This is only called automatically when automatic optimization is enabled and multiple optimizers are used.
Args:
optimizer_idx: The index of the optimizer to untoggle.
"""
for opt_idx, opt in enumerate(self.trainer.optimizers):
if optimizer_idx != opt_idx:
for group in opt.param_groups:
for param in group["params"]:
if param in self._param_requires_grad_state:
param.requires_grad = self._param_requires_grad_state[param]
# save memory
self._param_requires_grad_state = {}
def clip_gradients(
self,
optimizer: Optimizer,
gradient_clip_val: Optional[Union[int, float]] = None,
gradient_clip_algorithm: Optional[str] = None,
):
"""Handles gradient clipping internally.
Note:
Do not override this method. If you want to customize gradient clipping, consider
using :meth:`configure_gradient_clipping` method.
Args:
optimizer: Current optimizer being used.
gradient_clip_val: The value at which to clip gradients.
gradient_clip_algorithm: The gradient clipping algorithm to use. Pass ``gradient_clip_algorithm="value"``
to clip by value, and ``gradient_clip_algorithm="norm"`` to clip by norm.
"""
if gradient_clip_val is None:
gradient_clip_val = self.trainer.gradient_clip_val or 0.0
elif self.trainer.gradient_clip_val is not None and self.trainer.gradient_clip_val != gradient_clip_val:
raise MisconfigurationException(
f"You have set `Trainer(gradient_clip_val={self.trainer.gradient_clip_val!r})`"
f" and have passed `clip_gradients(gradient_clip_val={gradient_clip_val!r})`."
" Please use only one of them."
)
if gradient_clip_algorithm is None:
gradient_clip_algorithm = self.trainer.gradient_clip_algorithm or "norm"
else:
gradient_clip_algorithm = gradient_clip_algorithm.lower()
if (
self.trainer.gradient_clip_algorithm is not None
and self.trainer.gradient_clip_algorithm != gradient_clip_algorithm
):
raise MisconfigurationException(
f"You have set `Trainer(gradient_clip_algorithm={self.trainer.gradient_clip_algorithm.value!r})`"
f" and have passed `clip_gradients(gradient_clip_algorithm={gradient_clip_algorithm!r})"
" Please use only one of them."
)
if not isinstance(gradient_clip_val, (int, float)):
raise TypeError(f"`gradient_clip_val` should be an int or a float. Got {gradient_clip_val}.")
if not GradClipAlgorithmType.supported_type(gradient_clip_algorithm.lower()):
raise MisconfigurationException(
f"`gradient_clip_algorithm` {gradient_clip_algorithm} is invalid."
f" Allowed algorithms: {GradClipAlgorithmType.supported_types()}."
)
gradient_clip_algorithm = GradClipAlgorithmType(gradient_clip_algorithm)
self.trainer.precision_plugin.clip_gradients(optimizer, gradient_clip_val, gradient_clip_algorithm)
def configure_gradient_clipping(
self,
optimizer: Optimizer,
optimizer_idx: int,
gradient_clip_val: Optional[Union[int, float]] = None,
gradient_clip_algorithm: Optional[str] = None,
):
"""Perform gradient clipping for the optimizer parameters. Called before :meth:`optimizer_step`.
Args:
optimizer: Current optimizer being used.
optimizer_idx: Index of the current optimizer being used.
gradient_clip_val: The value at which to clip gradients. By default value passed in Trainer
will be available here.
gradient_clip_algorithm: The gradient clipping algorithm to use. By default value
passed in Trainer will be available here.
Example::
# Perform gradient clipping on gradients associated with discriminator (optimizer_idx=1) in GAN
def configure_gradient_clipping(self, optimizer, optimizer_idx, gradient_clip_val, gradient_clip_algorithm):
if optimizer_idx == 1:
# Lightning will handle the gradient clipping
self.clip_gradients(
optimizer,
gradient_clip_val=gradient_clip_val,
gradient_clip_algorithm=gradient_clip_algorithm
)
else:
# implement your own custom logic to clip gradients for generator (optimizer_idx=0)
"""
self.clip_gradients(
optimizer, gradient_clip_val=gradient_clip_val, gradient_clip_algorithm=gradient_clip_algorithm
)
def lr_scheduler_step(
self,
scheduler: LRSchedulerTypeUnion,
optimizer_idx: int,
metric: Optional[Any],
) -> None:
r"""
Override this method to adjust the default way the
:class:`~pytorch_lightning.trainer.trainer.Trainer` calls each scheduler.
By default, Lightning calls ``step()`` for each scheduler based on its
``interval``, as shown in the example below.
Args:
scheduler: Learning rate scheduler.
optimizer_idx: Index of the optimizer associated with this scheduler.
metric: Value of the monitor used for schedulers like ``ReduceLROnPlateau``.
Examples::
# DEFAULT
def lr_scheduler_step(self, scheduler, optimizer_idx, metric):
if metric is None:
scheduler.step()
else:
scheduler.step(metric)
# Alternative way to update schedulers if it requires an epoch value
def lr_scheduler_step(self, scheduler, optimizer_idx, metric):
scheduler.step(epoch=self.current_epoch)
"""
if metric is None:
scheduler.step()
else:
scheduler.step(metric)
def optimizer_step(
self,
epoch: int,
batch_idx: int,
optimizer: Union[Optimizer, LightningOptimizer],
optimizer_idx: int = 0,
optimizer_closure: Optional[Callable[[], Any]] = None,
on_tpu: bool = False,
using_native_amp: bool = False,
using_lbfgs: bool = False,
) -> None:
r"""
Override this method to adjust the default way the :class:`~pytorch_lightning.trainer.trainer.Trainer` calls
each optimizer.
By default, Lightning calls ``step()`` and ``zero_grad()`` as shown in the example once per optimizer.
This method (and ``zero_grad()``) won't be called during the accumulation phase when
``Trainer(accumulate_grad_batches != 1)``. Overriding this hook has no benefit with manual optimization.
Args:
epoch: Current epoch
batch_idx: Index of current batch
optimizer: A PyTorch optimizer
optimizer_idx: If you used multiple optimizers, this indexes into that list.
optimizer_closure: The optimizer closure. This closure must be executed as it includes the
calls to ``training_step()``, ``optimizer.zero_grad()``, and ``backward()``.
on_tpu: ``True`` if TPU backward is required
using_native_amp: ``True`` if using native amp
using_lbfgs: True if the matching optimizer is :class:`torch.optim.LBFGS`
Examples::
# DEFAULT
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
optimizer.step(closure=optimizer_closure)
# Alternating schedule for optimizer steps (i.e.: GANs)
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
# update generator opt every step
if optimizer_idx == 0:
optimizer.step(closure=optimizer_closure)
# update discriminator opt every 2 steps
if optimizer_idx == 1:
if (batch_idx + 1) % 2 == 0:
optimizer.step(closure=optimizer_closure)
else:
# call the closure by itself to run `training_step` + `backward` without an optimizer step
optimizer_closure()
# ...
# add as many optimizers as you want
Here's another example showing how to use this for more advanced things such as
learning rate warm-up:
.. code-block:: python
# learning rate warm-up
def optimizer_step(
self,
epoch,
batch_idx,
optimizer,
optimizer_idx,
optimizer_closure,
on_tpu,
using_native_amp,
using_lbfgs,
):
# update params
optimizer.step(closure=optimizer_closure)
# manually warm up lr without a scheduler
if self.trainer.global_step < 500:
lr_scale = min(1.0, float(self.trainer.global_step + 1) / 500.0)
for pg in optimizer.param_groups:
pg["lr"] = lr_scale * self.learning_rate
"""
optimizer.step(closure=optimizer_closure)
def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int):
"""Override this method to change the default behaviour of ``optimizer.zero_grad()``.
Args:
epoch: Current epoch
batch_idx: Index of current batch
optimizer: A PyTorch optimizer
optimizer_idx: If you used multiple optimizers this indexes into that list.
Examples::
# DEFAULT
def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx):
optimizer.zero_grad()
# Set gradients to `None` instead of zero to improve performance.
def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx):
optimizer.zero_grad(set_to_none=True)
See :meth:`torch.optim.Optimizer.zero_grad` for the explanation of the above example.
"""
optimizer.zero_grad()
def tbptt_split_batch(self, batch: Any, split_size: int) -> List[Any]:
r"""
When using truncated backpropagation through time, each batch must be split along the
time dimension. Lightning handles this by default, but for custom behavior override
this function.
Args:
batch: Current batch
split_size: The size of the split
Return:
List of batch splits. Each split will be passed to :meth:`training_step` to enable truncated
back propagation through time. The default implementation splits root level Tensors and
Sequences at dim=1 (i.e. time dim). It assumes that each time dim is the same length.
Examples::
def tbptt_split_batch(self, batch, split_size):
splits = []
for t in range(0, time_dims[0], split_size):
batch_split = []
for i, x in enumerate(batch):
if isinstance(x, torch.Tensor):
split_x = x[:, t:t + split_size]
elif isinstance(x, collections.Sequence):
split_x = [None] * len(x)
for batch_idx in range(len(x)):
split_x[batch_idx] = x[batch_idx][t:t + split_size]
batch_split.append(split_x)
splits.append(batch_split)
return splits
Note:
Called in the training loop after
:meth:`~pytorch_lightning.callbacks.base.Callback.on_train_batch_start`
if :paramref:`~pytorch_lightning.core.lightning.LightningModule.truncated_bptt_steps` > 0.
Each returned batch split is passed separately to :meth:`training_step`.
"""
time_dims = [len(x[0]) for x in batch if isinstance(x, (torch.Tensor, collections.Sequence))]
assert len(time_dims) >= 1, "Unable to determine batch time dimension"
assert all(x == time_dims[0] for x in time_dims), "Batch time dimension length is ambiguous"
splits = []
for t in range(0, time_dims[0], split_size):
batch_split = []
for i, x in enumerate(batch):
if isinstance(x, torch.Tensor):
split_x = x[:, t : t + split_size]
elif isinstance(x, collections.Sequence):
split_x = [None] * len(x)
for batch_idx in range(len(x)):
split_x[batch_idx] = x[batch_idx][t : t + split_size]
batch_split.append(split_x)
splits.append(batch_split)
return splits
def freeze(self) -> None:
r"""
Freeze all params for inference.
Example::
model = MyLightningModule(...)
model.freeze()
"""
for param in self.parameters():
param.requires_grad = False
self.eval()
def unfreeze(self) -> None:
"""Unfreeze all parameters for training.
.. code-block:: python
model = MyLightningModule(...)
model.unfreeze()
"""
for param in self.parameters():
param.requires_grad = True
self.train()
def get_progress_bar_dict(self) -> Dict[str, Union[int, str]]:
r"""
.. deprecated:: v1.5
This method was deprecated in v1.5 in favor of
`pytorch_lightning.callbacks.progress.base.get_metrics` and will be removed in v1.7.
Implement this to override the default items displayed in the progress bar.
By default it includes the average loss value, split index of BPTT (if used)
and the version of the experiment when using a logger.
.. code-block::
Epoch 1: 4%|▎ | 40/1095 [00:03<01:37, 10.84it/s, loss=4.501, v_num=10]
Here is an example how to override the defaults:
.. code-block:: python
def get_progress_bar_dict(self):
# don't show the version number
items = super().get_progress_bar_dict()
items.pop("v_num", None)
return items
Return:
Dictionary with the items to be displayed in the progress bar.
"""
return progress_base.get_standard_metrics(self.trainer, self)
def _verify_is_manual_optimization(self, fn_name):
if self.automatic_optimization:
raise MisconfigurationException(
f"to use {fn_name}, please disable automatic optimization:"
" set model property `automatic_optimization` as False"
)
@classmethod
def _auto_collect_arguments(cls, frame=None) -> Tuple[Dict, Dict]:
"""Collect all module arguments in the current constructor and all child constructors. The child
constructors are all the ``__init__`` methods that reach the current class through (chained)
``super().__init__()`` calls.
Args:
frame: instance frame
Returns:
self_arguments: arguments dictionary of the first instance
parents_arguments: arguments dictionary of the parent's instances
"""
if not frame:
frame = inspect.currentframe()
frame_args = collect_init_args(frame.f_back, [])
self_arguments = frame_args[-1]
# set hyper_parameters in child
self_arguments = self_arguments
parents_arguments = {}
# add all arguments from parents
for args in frame_args[:-1]:
parents_arguments.update(args)
return self_arguments, parents_arguments
@torch.no_grad()
def to_onnx(self, file_path: Union[str, Path], input_sample: Optional[Any] = None, **kwargs):
"""Saves the model in ONNX format.
Args:
file_path: The path of the file the onnx model should be saved to.
input_sample: An input for tracing. Default: None (Use self.example_input_array)
**kwargs: Will be passed to torch.onnx.export function.
Example:
>>> class SimpleModel(LightningModule):
... def __init__(self):
... super().__init__()
... self.l1 = torch.nn.Linear(in_features=64, out_features=4)
...
... def forward(self, x):
... return torch.relu(self.l1(x.view(x.size(0), -1)))
>>> with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as tmpfile:
... model = SimpleModel()
... input_sample = torch.randn((1, 64))
... model.to_onnx(tmpfile.name, input_sample, export_params=True)
... os.path.isfile(tmpfile.name)
True
"""
mode = self.training
if input_sample is None:
if self.example_input_array is None:
raise ValueError(
"Could not export to ONNX since neither `input_sample` nor"
" `model.example_input_array` attribute is set."
)
input_sample = self.example_input_array
input_sample = self._apply_batch_transfer_handler(input_sample)
if not _TORCH_GREATER_EQUAL_1_10 and "example_outputs" not in kwargs:
self.eval()
if isinstance(input_sample, Tuple):
kwargs["example_outputs"] = self(*input_sample)
else:
kwargs["example_outputs"] = self(input_sample)
torch.onnx.export(self, input_sample, file_path, **kwargs)
self.train(mode)
@torch.no_grad()
def to_torchscript(
self,
file_path: Optional[Union[str, Path]] = None,
method: Optional[str] = "script",
example_inputs: Optional[Any] = None,
**kwargs,
) -> Union[ScriptModule, Dict[str, ScriptModule]]:
"""By default compiles the whole model to a :class:`~torch.jit.ScriptModule`. If you want to use tracing,
please provide the argument ``method='trace'`` and make sure that either the `example_inputs` argument is
provided, or the model has :attr:`example_input_array` set. If you would like to customize the modules that
are scripted you should override this method. In case you want to return multiple modules, we recommend
using a dictionary.
Args:
file_path: Path where to save the torchscript. Default: None (no file saved).
method: Whether to use TorchScript's script or trace method. Default: 'script'
example_inputs: An input to be used to do tracing when method is set to 'trace'.
Default: None (uses :attr:`example_input_array`)
**kwargs: Additional arguments that will be passed to the :func:`torch.jit.script` or
:func:`torch.jit.trace` function.
Note:
- Requires the implementation of the
:meth:`~pytorch_lightning.core.lightning.LightningModule.forward` method.
- The exported script will be set to evaluation mode.
- It is recommended that you install the latest supported version of PyTorch
to use this feature without limitations. See also the :mod:`torch.jit`
documentation for supported features.
Example:
>>> class SimpleModel(LightningModule):
... def __init__(self):
... super().__init__()
... self.l1 = torch.nn.Linear(in_features=64, out_features=4)
...
... def forward(self, x):
... return torch.relu(self.l1(x.view(x.size(0), -1)))
...
>>> model = SimpleModel()
>>> model.to_torchscript(file_path="model.pt") # doctest: +SKIP
>>> os.path.isfile("model.pt") # doctest: +SKIP
>>> torch.jit.save(model.to_torchscript(file_path="model_trace.pt", method='trace', # doctest: +SKIP
... example_inputs=torch.randn(1, 64))) # doctest: +SKIP
>>> os.path.isfile("model_trace.pt") # doctest: +SKIP
True
Return:
This LightningModule as a torchscript, regardless of whether `file_path` is
defined or not.
"""
mode = self.training
self._running_torchscript = True
if method == "script":
torchscript_module = torch.jit.script(self.eval(), **kwargs)
elif method == "trace":
# if no example inputs are provided, try to see if model has example_input_array set
if example_inputs is None:
if self.example_input_array is None:
raise ValueError(
"Choosing method=`trace` requires either `example_inputs`"
" or `model.example_input_array` to be defined."
)
example_inputs = self.example_input_array
# automatically send example inputs to the right device and use trace
example_inputs = self._apply_batch_transfer_handler(example_inputs)
torchscript_module = torch.jit.trace(func=self.eval(), example_inputs=example_inputs, **kwargs)
else:
raise ValueError(f"The 'method' parameter only supports 'script' or 'trace', but value given was: {method}")
self.train(mode)
if file_path is not None:
fs = get_filesystem(file_path)
with fs.open(file_path, "wb") as f:
torch.jit.save(torchscript_module, f)
self._running_torchscript = False
return torchscript_module
@property
def use_amp(self) -> bool:
r"""
.. deprecated:: v1.6.
This property was deprecated in v1.6 and will be removed in v1.8.
"""
if not self._running_torchscript: # remove with the deprecation removal
rank_zero_deprecation(
"`LightningModule.use_amp` was deprecated in v1.6 and will be removed in v1.8."
" Please use `Trainer.amp_backend`.",
stacklevel=5,
)
return self._use_amp
@use_amp.setter
def use_amp(self, use_amp: bool) -> None:
r"""
.. deprecated:: v1.6.
This property was deprecated in v1.6 and will be removed in v1.8.
"""
if not self._running_torchscript: # remove with the deprecation removal
rank_zero_deprecation(
"`LightningModule.use_amp` was deprecated in v1.6 and will be removed in v1.8."
" Please use `Trainer.amp_backend`.",
stacklevel=5,
)
self._use_amp = use_amp
def add_to_queue(self, queue: pl.strategies.launchers.spawn._FakeQueue) -> None:
"""Appends the :attr:`trainer.callback_metrics` dictionary to the given queue. To avoid issues with memory
sharing, we cast the data to numpy.
Args:
queue: the instance of the queue to append the data.
.. deprecated:: v1.5
This method was deprecated in v1.5 and will be removed in v1.7.
"""
def get_from_queue(self, queue: pl.strategies.launchers.spawn._FakeQueue) -> None:
"""Retrieve the :attr:`trainer.callback_metrics` dictionary from the given queue. To preserve consistency,
we cast back the data to ``torch.Tensor``.
Args:
queue: the instance of the queue from where to get the data.
.. deprecated:: v1.5
This method was deprecated in v1.5 and will be removed in v1.7.
"""
@contextmanager
def _prevent_trainer_and_dataloaders_deepcopy(self) -> None:
self._should_prevent_trainer_and_dataloaders_deepcopy = True
yield
self._should_prevent_trainer_and_dataloaders_deepcopy = False
def __getstate__(self) -> Dict[str, Any]:
state = dict(self.__dict__)
if self._should_prevent_trainer_and_dataloaders_deepcopy:
state["trainer"] = None
state.pop("train_dataloader", None)
state.pop("val_dataloader", None)
state.pop("test_dataloader", None)
state.pop("predict_dataloader", None)
return state
def _register_sharded_tensor_state_dict_hooks_if_available(self) -> None:
"""Adds ShardedTensor state dict hooks if ShardedTensors are supported.
These hooks ensure that ShardedTensors are included when saving, and are loaded into the LightningModule correctly.
"""
if not _TORCH_GREATER_EQUAL_1_10 or _IS_WINDOWS or not torch.distributed.is_available():
rank_zero_debug("Could not register sharded tensor state dict hooks")
return
from torch.distributed._sharded_tensor import pre_load_state_dict_hook, state_dict_hook
self._register_state_dict_hook(state_dict_hook)
self._register_load_state_dict_pre_hook(pre_load_state_dict_hook, True)
| [
"torch.distributed.is_available",
"torch._C._log_api_usage_once",
"torch.no_grad",
"torch.jit.save",
"torch.tensor",
"torch.onnx.export"
] | 1.8 | adamreeve/pytorch-lightning | 908e05880d4271ad32876311320d4465a008a710 |
1.10 | # -*- coding: utf-8 -*-
from abc import ABC, abstractmethod
from sentence_transformers import SentenceTransformer, models
from torch import nn
import numpy as np
from wasabi import msg
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import OneHotEncoder
from transformers import logging
logging.set_verbosity_error()
def get_fitted_model_by_config_string(config_string, records):
if config_string == "identity":
return IdentityEmbedder(records)
elif config_string == "boc":
return BoCEmbedder(records)
elif config_string == "bow":
return BoWEmbedder(records)
elif config_string == "onehot":
return OneHotEmbedder(records)
else:
try:
return DocumentEmbedder(records, config_string)
except Exception:
msg.fail(
f"Embedding '{config_string}' is unknown. Please check https://onetask.readme.io/ for more information"
)
class Embedder(ABC):
def __init__(self, records):
self.fit(records)
@abstractmethod
def encode(self, document):
pass
@abstractmethod
def fit(self, records):
pass
class IdentityEmbedder(Embedder):
def __init__(self, records):
super().__init__(records)
def fit(self, records):
pass
def encode(self, document):
return np.array([document])
class BoCEmbedder(Embedder):
def __init__(self, records):
self.model = CountVectorizer(analyzer="char")
super().__init__(records)
def fit(self, records):
self.model.fit(records)
def encode(self, document):
return self.model.transform([document]).toarray()[0]
class BoWEmbedder(Embedder):
def __init__(self, records):
self.model = CountVectorizer(min_df=0.1)
super().__init__(records)
def fit(self, records):
self.model.fit(records)
def encode(self, document):
return self.model.transform([document]).toarray()[0]
class OneHotEmbedder(Embedder):
def __init__(self, records):
self.model = OneHotEncoder()
super().__init__(records)
def fit(self, records):
self.model.fit(records.reshape(-1, 1))
def encode(self, document):
return self.model.transform([[document]]).toarray()[0]
class DocumentEmbedder(Embedder):
def __init__(self, records, configuration_string: str = "distilbert-base-uncased"):
word_embedding_model = models.Transformer(configuration_string)
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension()
)
dense_model = models.Dense(
in_features=pooling_model.get_sentence_embedding_dimension(),
out_features=256,
activation_function=nn.Tanh(),
)
self.model = SentenceTransformer(
modules=[word_embedding_model, pooling_model, dense_model]
)
super().__init__(records)
def fit(self, records):
pass
def encode(self, document: str):
return self.model.encode(document)
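# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Assumes `records` is an iterable of raw text strings, matching how the
# sklearn-based embedders above call `fit`. The "bow" config string maps to
# BoWEmbedder via get_fitted_model_by_config_string.
if __name__ == "__main__":
    sample_records = ["first document", "second document", "third record"]
    embedder = get_fitted_model_by_config_string("bow", sample_records)
    vector = embedder.encode("first document")
    print(vector.shape)  # one bag-of-words count vector for the encoded document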
| [
"torch.nn.Tanh"
] | 1.10.0 | onetask-ai/onetask-python | ea810a3092a029d5b30f6af9e9a5f17567e0b901 |
1.8 | import torch
from typing import Dict, Tuple, Any
from torch.distributions.categorical import Categorical
from malib.algorithm.common.loss_func import LossFunc
from malib.backend.datapool.offline_dataset_server import Episode
from malib.utils.typing import TrainingMetric
def cal_entropy(logits):
max_value, _ = torch.max(logits, dim=-1, keepdim=True)
a0 = logits - max_value
ea0 = torch.exp(a0)
z0 = torch.sum(ea0, dim=-1, keepdim=True)
p0 = ea0 / z0
return torch.sum(p0 * (torch.log(z0) - a0), dim=-1)
class PPOLoss(LossFunc):
def __init__(self):
super().__init__()
self._params.update(
{
"actor_lr": 1e-2,
"critic_lr": 1e-2,
"cliprange": 0.2,
"entropy_coef": 1e-3,
"value_coef": 1e-2,
}
)
def setup_optimizers(self, *args, **kwargs):
if self.optimizers is None:
optim_cls = getattr(torch.optim, self._params.get("optimizer", "Adam"))
self.optimizers = []
self.optimizers.append(
optim_cls(self.policy.actor.parameters(), lr=self._params["actor_lr"])
)
self.optimizers.append(
optim_cls(self.policy.critic.parameters(), lr=self._params["critic_lr"])
)
else:
for p in self.optimizers:
p.param_groups = []
self.optimizers[0].add_param_group(
{"params": self.policy.actor.parameters()}
)
self.optimizers[1].add_param_group(
{"params": self.policy.critic.parameters()}
)
def step(self) -> Any:
"""Step optimizers and update target"""
# do loss backward and target update
_ = [item.backward() for item in self.loss]
self.push_gradients(
{
"actor": {
name: -self._params["actor_lr"] * param.grad.numpy()
for name, param in self.policy.actor.named_parameters()
},
"critic": {
name: -self._params["critic_lr"] * param.grad.numpy()
for name, param in self.policy.critic.named_parameters()
},
}
)
_ = [p.step() for p in self.optimizers]
def __call__(self, batch) -> Dict[str, Any]:
# empty loss
self.loss = []
# total loss = policy_gradient_loss - entropy * entropy_coefficient + value_coefficient * value_loss
rewards = torch.from_numpy(batch[Episode.REWARDS].copy())
actions = torch.from_numpy(batch[Episode.ACTIONS].copy())
cliprange = self._params["cliprange"]
ent_coef = self._params["entropy_coef"]
vf_coef = self._params["value_coef"]
old_probs = self.policy.target_actor(batch[Episode.CUR_OBS].copy())
old_neglogpac = -Categorical(probs=old_probs).log_prob(actions)
old_vpred = self.policy.target_value_function(
batch[Episode.CUR_OBS].copy()
).detach()
# torch.from_numpy(batch[Episode.STATE_VALUE].copy())
probs = self.policy.actor(batch[Episode.CUR_OBS].copy())
distri = Categorical(probs=probs)
neglogpac = -distri.log_prob(actions)
ratio = torch.exp(old_neglogpac.detach() - neglogpac)
entropy = torch.mean(cal_entropy(distri.logits))
adv = self.policy.compute_advantage(batch).detach()
pg_loss = -adv * ratio
pg_loss2 = -adv * torch.clip(ratio, 1.0 - cliprange, 1.0 + cliprange)
pg_loss = torch.mean(torch.maximum(pg_loss, pg_loss2))
approx_kl = 0.5 * torch.mean(torch.square(neglogpac - old_neglogpac))
clip_frac = torch.mean(torch.greater(torch.abs(ratio - 1.0), cliprange).float())
vpred = self.policy.value_function(batch[Episode.CUR_OBS].copy())
vpred_clipped = old_vpred + torch.clip(vpred - old_vpred, -cliprange, cliprange)
next_value = self.policy.target_value_function(batch[Episode.NEXT_OBS].copy())
td_value = (
rewards
+ self.policy.custom_config["gamma"]
* (1.0 - torch.from_numpy(batch[Episode.DONES].copy()).float())
* next_value
)
vf_loss1 = torch.square(vpred - td_value)
vf_loss2 = torch.square(vpred_clipped - td_value)
vf_loss = 0.5 * torch.mean(torch.maximum(vf_loss1, vf_loss2))
# total loss
loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef
loss_names = [
"policy_loss",
"value_loss",
"policy_entropy",
"approxkl",
"clipfrac",
]
self.loss.append(loss)
stats_list = [
pg_loss.detach().numpy(),
vf_loss.detach().numpy(),
entropy.detach().numpy(),
approx_kl.detach().numpy(),
clip_frac.detach().numpy(),
]
return {
TrainingMetric.LOSS: loss.detach().numpy(),
**dict(zip(loss_names, stats_list)),
}
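# --- Hedged sanity check (added for illustration; not part of the original file) ---
# cal_entropy computes the entropy of the softmax distribution defined by `logits`;
# for uniform logits over k classes it should return log(k).
if __name__ == "__main__":
    uniform_logits = torch.zeros(2, 4)
    print(cal_entropy(uniform_logits))  # ~tensor([1.3863, 1.3863]), i.e. log(4)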
| [
"torch.max",
"torch.square",
"torch.maximum",
"torch.clip",
"torch.distributions.categorical.Categorical",
"torch.abs",
"torch.log",
"torch.exp",
"torch.sum"
] | 1.8.1 | ReinholdM/play_football_with_human | 9ac2f0a8783aede56f4ac1f6074db7daa41b6b6c |
1.0 | """ Class for the Sequence to sequence model for ATIS."""
import os
import torch
import torch.nn.functional as F
from . import torch_utils
from . import utils_bert
from data_util.vocabulary import DEL_TOK, UNK_TOK
from .encoder import Encoder
from .embedder import Embedder
from .token_predictor import construct_token_predictor
import numpy as np
from data_util.atis_vocab import ATISVocabulary
def get_token_indices(token, index_to_token):
""" Maps from a gold token (string) to a list of indices.
Inputs:
token (string): String to look up.
index_to_token (list of tokens): Ordered list of tokens.
Returns:
list of int, representing the indices of the token in the probability
distribution.
"""
if token in index_to_token:
if len(set(index_to_token)) == len(index_to_token): # no duplicates
return [index_to_token.index(token)]
else:
indices = []
for index, other_token in enumerate(index_to_token):
if token == other_token:
indices.append(index)
assert len(indices) == len(set(indices))
return indices
else:
return [index_to_token.index(UNK_TOK)]
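# Illustrative examples for get_token_indices (assumed vocabulary, added for clarity):
#   get_token_indices("from", ["select", "from", "select"])   -> [1]
#   get_token_indices("select", ["select", "from", "select"]) -> [0, 2]  (duplicate entries kept)
#   an out-of-vocabulary token falls back to the index of UNK_TOK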
def flatten_utterances(utterances):
""" Gets a flat sequence from a sequence of utterances.
Inputs:
utterances (list of list of str): Utterances to concatenate.
Returns:
list of str, representing the flattened sequence with separating
delimiter tokens.
"""
sequence = []
for i, utterance in enumerate(utterances):
sequence.extend(utterance)
if i < len(utterances) - 1:
sequence.append(DEL_TOK)
return sequence
def encode_snippets_with_states(snippets, states):
""" Encodes snippets by using previous query states instead.
Inputs:
snippets (list of Snippet): Input snippets.
states (list of dy.Expression): Previous hidden states to use.
TODO: should this be dy.Expression or vector values?
"""
for snippet in snippets:
snippet.set_embedding(torch.cat([states[snippet.startpos],states[snippet.endpos]], dim=0))
return snippets
def load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params):
print(output_vocabulary.inorder_tokens)
print()
def read_glove_embedding(embedding_filename, embedding_size):
glove_embeddings = {}
with open(embedding_filename) as f:
cnt = 1
for line in f:
cnt += 1
if params.debug or not params.train:
if cnt == 1000:
print('Read 1000 word embeddings')
break
l_split = line.split()
word = " ".join(l_split[0:len(l_split) - embedding_size])
embedding = np.array([float(val) for val in l_split[-embedding_size:]])
glove_embeddings[word] = embedding
return glove_embeddings
print('Loading Glove Embedding from', params.embedding_filename)
glove_embedding_size = 300
glove_embeddings = read_glove_embedding(params.embedding_filename, glove_embedding_size)
print('Done')
input_embedding_size = glove_embedding_size
def create_word_embeddings(vocab):
vocabulary_embeddings = np.zeros((len(vocab), glove_embedding_size), dtype=np.float32)
vocabulary_tokens = vocab.inorder_tokens
glove_oov = 0
para_oov = 0
for token in vocabulary_tokens:
token_id = vocab.token_to_id(token)
if token in glove_embeddings:
vocabulary_embeddings[token_id][:glove_embedding_size] = glove_embeddings[token]
else:
glove_oov += 1
print('Glove OOV:', glove_oov, 'Para OOV', para_oov, 'Total', len(vocab))
return vocabulary_embeddings
input_vocabulary_embeddings = create_word_embeddings(input_vocabulary)
output_vocabulary_embeddings = create_word_embeddings(output_vocabulary)
output_vocabulary_schema_embeddings = None
if output_vocabulary_schema:
output_vocabulary_schema_embeddings = create_word_embeddings(output_vocabulary_schema)
return input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size
class ATISModel(torch.nn.Module):
""" Sequence-to-sequence model for predicting a SQL query given an utterance
and an interaction prefix.
"""
def __init__(
self,
params,
input_vocabulary,
output_vocabulary,
output_vocabulary_schema,
anonymizer):
super().__init__()
self.params = params
if params.use_bert:
self.model_bert, self.tokenizer, self.bert_config = utils_bert.get_bert(params)
if 'atis' not in params.data_directory:
if params.use_bert:
input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size = load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params)
# Create the output embeddings
self.output_embedder = Embedder(params.output_embedding_size,
name="output-embedding",
initializer=output_vocabulary_embeddings,
vocabulary=output_vocabulary,
anonymizer=anonymizer,
freeze=False)
self.column_name_token_embedder = None
else:
input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size = load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params)
params.input_embedding_size = input_embedding_size
self.params.input_embedding_size = input_embedding_size
# Create the input embeddings
self.input_embedder = Embedder(params.input_embedding_size,
name="input-embedding",
initializer=input_vocabulary_embeddings,
vocabulary=input_vocabulary,
anonymizer=anonymizer,
freeze=params.freeze)
# Create the output embeddings
self.output_embedder = Embedder(params.output_embedding_size,
name="output-embedding",
initializer=output_vocabulary_embeddings,
vocabulary=output_vocabulary,
anonymizer=anonymizer,
freeze=False)
self.column_name_token_embedder = Embedder(params.input_embedding_size,
name="schema-embedding",
initializer=output_vocabulary_schema_embeddings,
vocabulary=output_vocabulary_schema,
anonymizer=anonymizer,
freeze=params.freeze)
else:
# Create the input embeddings
self.input_embedder = Embedder(params.input_embedding_size,
name="input-embedding",
vocabulary=input_vocabulary,
anonymizer=anonymizer,
freeze=False)
# Create the output embeddings
self.output_embedder = Embedder(params.output_embedding_size,
name="output-embedding",
vocabulary=output_vocabulary,
anonymizer=anonymizer,
freeze=False)
self.column_name_token_embedder = None
# Create the encoder
encoder_input_size = params.input_embedding_size
encoder_output_size = params.encoder_state_size
if params.use_bert:
encoder_input_size = self.bert_config.hidden_size
if params.discourse_level_lstm:
encoder_input_size += params.encoder_state_size / 2
self.utterance_encoder = Encoder(params.encoder_num_layers, encoder_input_size, encoder_output_size)
# Positional embedder for utterances
attention_key_size = params.encoder_state_size
self.schema_attention_key_size = attention_key_size
if params.state_positional_embeddings:
attention_key_size += params.positional_embedding_size
self.positional_embedder = Embedder(
params.positional_embedding_size,
name="positional-embedding",
num_tokens=params.maximum_utterances)
self.utterance_attention_key_size = attention_key_size
# Create the discourse-level LSTM parameters
if params.discourse_level_lstm:
self.discourse_lstms = torch_utils.create_multilayer_lstm_params(1, params.encoder_state_size, params.encoder_state_size / 2, "LSTM-t")
self.initial_discourse_state = torch_utils.add_params(tuple([params.encoder_state_size / 2]), "V-turn-state-0")
# Snippet encoder
final_snippet_size = 0
if params.use_snippets and not params.previous_decoder_snippet_encoding:
snippet_encoding_size = int(params.encoder_state_size / 2)
final_snippet_size = params.encoder_state_size
if params.snippet_age_embedding:
snippet_encoding_size -= int(
params.snippet_age_embedding_size / 4)
self.snippet_age_embedder = Embedder(
params.snippet_age_embedding_size,
name="snippet-age-embedding",
num_tokens=params.max_snippet_age_embedding)
final_snippet_size = params.encoder_state_size + params.snippet_age_embedding_size / 2
self.snippet_encoder = Encoder(params.snippet_num_layers,
params.output_embedding_size,
snippet_encoding_size)
# Previous query Encoder
if params.use_previous_query:
self.query_encoder = Encoder(params.encoder_num_layers, params.output_embedding_size, params.encoder_state_size)
self.final_snippet_size = final_snippet_size
self.dropout = 0.
def _encode_snippets(self, previous_query, snippets, input_schema):
""" Computes a single vector representation for each snippet.
Inputs:
previous_query (list of str): Previous query in the interaction.
snippets (list of Snippet): Snippets extracted from the previous query.
Returns:
list of Snippets, where the embedding is set to a vector.
"""
startpoints = [snippet.startpos for snippet in snippets]
endpoints = [snippet.endpos for snippet in snippets]
assert len(startpoints) == 0 or min(startpoints) >= 0
if input_schema:
assert len(endpoints) == 0 or max(endpoints) <= len(previous_query)
else:
assert len(endpoints) == 0 or max(endpoints) < len(previous_query)
snippet_embedder = lambda query_token: self.get_query_token_embedding(query_token, input_schema)
if previous_query and snippets:
_, previous_outputs = self.snippet_encoder(
previous_query, snippet_embedder, dropout_amount=self.dropout)
assert len(previous_outputs) == len(previous_query)
for snippet in snippets:
if input_schema:
embedding = torch.cat([previous_outputs[snippet.startpos],previous_outputs[snippet.endpos-1]], dim=0)
else:
embedding = torch.cat([previous_outputs[snippet.startpos],previous_outputs[snippet.endpos]], dim=0)
if self.params.snippet_age_embedding:
embedding = torch.cat([embedding, self.snippet_age_embedder(min(snippet.age, self.params.max_snippet_age_embedding - 1))], dim=0)
snippet.set_embedding(embedding)
return snippets
def _initialize_discourse_states(self):
discourse_state = self.initial_discourse_state
discourse_lstm_states = []
for lstm in self.discourse_lstms:
hidden_size = lstm.weight_hh.size()[1]
if lstm.weight_hh.is_cuda:
h_0 = torch.cuda.FloatTensor(1,hidden_size).fill_(0)
c_0 = torch.cuda.FloatTensor(1,hidden_size).fill_(0)
else:
h_0 = torch.zeros(1,hidden_size)
c_0 = torch.zeros(1,hidden_size)
discourse_lstm_states.append((h_0, c_0))
return discourse_state, discourse_lstm_states
def _add_positional_embeddings(self, hidden_states, utterances, group=False):
grouped_states = []
start_index = 0
for utterance in utterances:
grouped_states.append(hidden_states[start_index:start_index + len(utterance)])
start_index += len(utterance)
assert len(hidden_states) == sum([len(seq) for seq in grouped_states]) == sum([len(utterance) for utterance in utterances])
new_states = []
flat_sequence = []
num_utterances_to_keep = min(self.params.maximum_utterances, len(utterances))
for i, (states, utterance) in enumerate(zip(
grouped_states[-num_utterances_to_keep:], utterances[-num_utterances_to_keep:])):
positional_sequence = []
index = num_utterances_to_keep - i - 1
for state in states:
positional_sequence.append(torch.cat([state, self.positional_embedder(index)], dim=0))
assert len(positional_sequence) == len(utterance), \
"Expected utterance and state sequence length to be the same, " \
+ "but they were " + str(len(utterance)) \
+ " and " + str(len(positional_sequence))
if group:
new_states.append(positional_sequence)
else:
new_states.extend(positional_sequence)
flat_sequence.extend(utterance)
return new_states, flat_sequence
def build_optim(self):
params_trainer = []
params_bert_trainer = []
for name, param in self.named_parameters():
if param.requires_grad:
if 'model_bert' in name:
params_bert_trainer.append(param)
else:
params_trainer.append(param)
self.trainer = torch.optim.Adam(params_trainer, lr=self.params.initial_learning_rate)
if self.params.fine_tune_bert:
self.bert_trainer = torch.optim.Adam(params_bert_trainer, lr=self.params.lr_bert)
def set_dropout(self, value):
""" Sets the dropout to a specified value.
Inputs:
value (float): Value to set dropout to.
"""
self.dropout = value
def set_learning_rate(self, value):
""" Sets the learning rate for the trainer.
Inputs:
value (float): The new learning rate.
"""
for param_group in self.trainer.param_groups:
param_group['lr'] = value
def save(self, filename):
""" Saves the model to the specified filename.
Inputs:
filename (str): The filename to save to.
"""
torch.save(self.state_dict(), filename)
def load(self, filename):
""" Loads saved parameters into the parameter collection.
Inputs:
filename (str): Name of file containing parameters.
"""
self.load_state_dict(torch.load(filename,map_location=torch.device('cpu')))
# self.load_state_dict(torch.load(filename))
print("Loaded model from file " + filename)
| [
"torch.zeros",
"torch.device",
"torch.cat",
"torch.optim.Adam",
"torch.cuda.FloatTensor"
] | 1.0.1 | ManishBachhu/editsql | c046dfbee72d3b54ebc7a2326b1eb7797b23d10e |
1.0 | from collections import OrderedDict
import torch
import torch.nn as nn
from torch.utils import model_zoo
from torchvision import models
from .fcn import convolutionalize
def vgg16(is_caffe=True):
"""
Load the VGG-16 net for use as a fully convolutional backbone.
- cast to fully convolutional by converting `Linear` modules
- name the same way as the original paper (for style and sanity)
- load original Caffe weights (if requested)
- decapitate last classifier layer
- switch to ceiling mode for pooling like in Caffe
Take
is_caffe: flag for whether to load Caffe weights (default) or not
"""
vgg16 = models.vgg16(pretrained=True)
# cast into fully convolutional form (as list of layers)
vgg16 = convolutionalize(list(vgg16.features) + list(vgg16.classifier),
(3, 224, 224))
# name layers like the original paper
names = ['conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'pool3',
'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'pool4',
'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'pool5',
'fc6', 'relu6', 'drop6', 'fc7', 'relu7', 'drop7', 'fc8']
vgg16 = nn.Sequential(OrderedDict(zip(names, vgg16)))
if is_caffe:
# substitute original Caffe weights for improved fine-tuning accuracy
# see https://github.com/jcjohnson/pytorch-vgg
caffe_params = model_zoo.load_url('https://s3-us-west-2.amazonaws.com/'
'jcjohns-models/vgg16-00b39a1b.pth')
for new_p, old_p in zip(vgg16.parameters(), caffe_params.values()):
new_p.data.copy_(old_p.view_as(new_p))
# surgery: decapitate final classifier
del vgg16.fc8
# surgery: keep fuller spatial dims by including incomplete pooling regions
for m in vgg16.modules():
if isinstance(m, nn.MaxPool2d):
m.ceil_mode = True
return vgg16
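# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Note: building the backbone downloads torchvision's ImageNet weights; is_caffe=False
# skips the extra Caffe-weight download used for closer reproduction of the original nets.
if __name__ == "__main__":
    backbone = vgg16(is_caffe=False)
    features = backbone(torch.randn(1, 3, 224, 224))
    print(features.shape)  # fc7 activations as a spatial map, e.g. torch.Size([1, 4096, 1, 1])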
| [
"torch.utils.model_zoo.load_url"
] | 1.0.0 | Global19/revolver | 74fc12afff8a8747224d9e7098fe97542f81cea0 |
1.7 | from typing import List, Union
import torch
class ConfigurationError(Exception):
pass
class FeedForward(torch.nn.Module):
"""
This `Module` is a feed-forward neural network, just a sequence of `Linear` layers with
activation functions in between.
# Parameters
input_dim : `int`, required
The dimensionality of the input. We assume the input has shape `(batch_size, input_dim)`.
num_layers : `int`, required
The number of `Linear` layers to apply to the input.
hidden_dims : `Union[int, List[int]]`, required
The output dimension of each of the `Linear` layers. If this is a single `int`, we use
it for all `Linear` layers. If it is a `List[int]`, `len(hidden_dims)` must be
`num_layers`.
activations : `Union[Activation, List[Activation]]`, required
The activation function to use after each `Linear` layer. If this is a single function,
we use it after all `Linear` layers. If it is a `List[Activation]`,
`len(activations)` must be `num_layers`. Each activation must be a torch.nn.Module instance.
dropout : `Union[float, List[float]]`, optional (default = `0.0`)
If given, we will apply this amount of dropout after each layer. Semantics of `float`
versus `List[float]` is the same as with other parameters.
# Examples
```python
FeedForward(124, 2, [64, 32], torch.nn.ReLU(), 0.2)
#> FeedForward(
#> (_activations): ModuleList(
#> (0): ReLU()
#> (1): ReLU()
#> )
#> (_linear_layers): ModuleList(
#> (0): Linear(in_features=124, out_features=64, bias=True)
#> (1): Linear(in_features=64, out_features=32, bias=True)
#> )
#> (_dropout): ModuleList(
#> (0): Dropout(p=0.2, inplace=False)
#> (1): Dropout(p=0.2, inplace=False)
#> )
#> )
```
"""
def __init__(
self,
input_dim,
num_layers,
hidden_dims,
activations,
dropout) -> None:
super().__init__()
if not isinstance(hidden_dims, list):
hidden_dims = [hidden_dims] * num_layers # type: ignore
if not isinstance(activations, list):
activations = [activations] * num_layers # type: ignore
if not isinstance(dropout, list):
dropout = [dropout] * num_layers # type: ignore
if len(hidden_dims) != num_layers:
raise ConfigurationError(
"len(hidden_dims) (%d) != num_layers (%d)" % (len(hidden_dims), num_layers)
)
if len(activations) != num_layers:
raise ConfigurationError(
"len(activations) (%d) != num_layers (%d)" % (len(activations), num_layers)
)
if len(dropout) != num_layers:
raise ConfigurationError(
"len(dropout) (%d) != num_layers (%d)" % (len(dropout), num_layers)
)
self._activations = torch.nn.ModuleList(activations)
input_dims = [input_dim] + hidden_dims[:-1]
linear_layers = []
for layer_input_dim, layer_output_dim in zip(input_dims, hidden_dims):
linear_layers.append(torch.nn.Linear(layer_input_dim, layer_output_dim))
self._linear_layers = torch.nn.ModuleList(linear_layers)
dropout_layers = [torch.nn.Dropout(p=value) for value in dropout]
self._dropout = torch.nn.ModuleList(dropout_layers)
self._output_dim = hidden_dims[-1]
self.input_dim = input_dim
def get_output_dim(self):
return self._output_dim
def get_input_dim(self):
return self.input_dim
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
output = inputs
for layer, activation, dropout in zip(
self._linear_layers, self._activations, self._dropout
):
output = dropout(activation(layer(output)))
return output
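# --- Hedged usage sketch (added for illustration; not part of the original file) ---
if __name__ == "__main__":
    ff = FeedForward(input_dim=124, num_layers=2, hidden_dims=[64, 32],
                     activations=torch.nn.ReLU(), dropout=0.2)
    out = ff(torch.randn(8, 124))
    print(out.shape, ff.get_output_dim())  # torch.Size([8, 32]) 32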
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.ModuleList"
] | 1.7.1 | dwadden/longchecker | 9efdc2f13130146dfea187c76037e69008ad62d5 |
1.8 | import math
import torch
import torch.nn as nn
from colossalai.global_variables import moe_env
from colossalai.context import ParallelMode, seed
from colossalai.utils import get_current_device
class MoeExperts(nn.Module):
def __init__(self, comm: str):
super().__init__()
assert comm in {"all_to_all", "all_gather"}, \
"This kind of communication has not been implemented yet.\n Please use Experts build function."
self.comm = comm
class Experts(MoeExperts):
"""A wrapper class to create experts. It will create E experts across the
moe model parallel group, where E is the number of experts. Every expert
is an instance of the class 'expert' given in the initialization parameters.
:param expert: The class of all experts
:param num_experts: The number of experts
:param expert_args: Args used to initialize experts
:type num_experts: int
"""
def __init__(self, expert, num_experts, **expert_args):
super().__init__("all_to_all")
assert num_experts % moe_env.model_parallel_size == 0, \
"The number of experts should be divied by moe model size"
num_local_experts = num_experts // moe_env.model_parallel_size
with seed(ParallelMode.MOE_MODEL):
self.experts = nn.ModuleList([expert(**expert_args) for _ in range(num_local_experts)])
for exp in self.experts:
for param in exp.parameters():
param.__setattr__('moe_param', True)
self.num_local_experts = num_local_experts
def forward(self, inputs):
expert_input = torch.chunk(inputs, self.num_local_experts, dim=1)
expert_output = []
for i in range(self.num_local_experts):
expert_output.append(self.experts[i](expert_input[i]))
output = torch.cat(expert_output, dim=1).contiguous()
return output
class FFNExperts(MoeExperts):
def __init__(self, num_experts: int, d_model: int, d_ff: int, activation=None, drop_rate: float = 0):
super().__init__("all_to_all")
assert num_experts % moe_env.model_parallel_size == 0, \
"The number of experts should be divied by moe model size"
num_local_experts = num_experts // moe_env.model_parallel_size
self.w1 = nn.Parameter(torch.empty(num_local_experts, d_model, d_ff, device=get_current_device()))
self.b1 = nn.Parameter(torch.empty(num_local_experts, 1, d_ff, device=get_current_device()))
self.w2 = nn.Parameter(torch.empty(num_local_experts, d_ff, d_model, device=get_current_device()))
self.b2 = nn.Parameter(torch.empty(num_local_experts, 1, d_model, device=get_current_device()))
s1 = math.sqrt(0.1 / d_model)
s2 = math.sqrt(0.1 / d_ff)
with seed(ParallelMode.MOE_MODEL):
nn.init.trunc_normal_(self.w1, std=s1)
nn.init.trunc_normal_(self.b1, std=s1)
nn.init.trunc_normal_(self.w2, std=s2)
nn.init.trunc_normal_(self.b2, std=s2)
self.act = nn.GELU() if activation is None else activation
self.drop = nn.Dropout(p=drop_rate)
for param in self.parameters():
param.__setattr__('moe_param', True)
def forward(self, inputs): # inputs [g, el, c, h]
el = inputs.size(1)
h = inputs.size(-1)
inputs = inputs.transpose(0, 1)
inshape = inputs.shape
inputs = inputs.reshape(el, -1, h)
out_ff = torch.baddbmm(self.b1, inputs, self.w1)
out_act = self.act(out_ff)
with seed(ParallelMode.TENSOR):
inter = self.drop(out_act)
out_model = torch.baddbmm(self.b2, inter, self.w2)
with seed(ParallelMode.TENSOR):
outputs = self.drop(out_model) # outputs [el, gc, h]
outputs = outputs.reshape(inshape)
outputs = outputs.transpose(0, 1).contiguous()
return outputs
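# Hedged shape sketch (not part of the original class). Assumes a launched
# colossalai context with moe_env.model_parallel_size == 1.
#
# ffn = FFNExperts(num_experts=2, d_model=8, d_ff=16)
# x = torch.randn(4, 2, 3, 8)   # [groups, local experts, capacity, d_model]
# y = ffn(x)                    # shape preserved: (4, 2, 3, 8)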
class TPExperts(MoeExperts):
def __init__(self, num_experts: int, d_model: int, d_ff: int, activation=None, drop_rate: float = 0):
super().__init__("all_gather")
assert d_ff % moe_env.model_parallel_size == 0, \
"d_ff should be divied by moe model size"
p_ff = d_ff // moe_env.model_parallel_size
self.w1 = nn.Parameter(torch.empty(num_experts, d_model, p_ff, device=get_current_device()))
self.b1 = nn.Parameter(torch.empty(num_experts, 1, p_ff, device=get_current_device()))
self.w2 = nn.Parameter(torch.empty(num_experts, p_ff, d_model, device=get_current_device()))
self.b2 = nn.Parameter(torch.empty(num_experts, 1, d_model, device=get_current_device()))
s1 = math.sqrt(0.1 / d_model)
s2 = math.sqrt(0.1 / d_ff)
with seed(ParallelMode.MOE_MODEL):
nn.init.trunc_normal_(self.w1, std=s1)
nn.init.trunc_normal_(self.b1, std=s1)
nn.init.trunc_normal_(self.w2, std=s2)
nn.init.trunc_normal_(self.b2, std=s2)
self.act = nn.GELU() if activation is None else activation
self.drop = nn.Dropout(p=drop_rate)
self.w1.__setattr__('moe_param', True)
self.w2.__setattr__('moe_param', True)
self.b1.__setattr__('moe_param', True)
def forward(self, inputs): # inputs [g, e, c, h]
e = inputs.size(1)
h = inputs.size(-1)
inputs = inputs.transpose(0, 1)
inshape = inputs.shape
inputs = inputs.reshape(e, -1, h)
out_ff = torch.baddbmm(self.b1, inputs, self.w1)
out_act = self.act(out_ff)
with seed(ParallelMode.TENSOR):
inter = self.drop(out_act)
out_model = torch.baddbmm(self.b2, inter, self.w2)
outputs = self.drop(out_model) # outputs [e, gc, h]
outputs = outputs.reshape(inshape)
outputs = outputs.transpose(0, 1).contiguous()
return outputs # outputs [g, e, c, h]
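# Hedged shape sketch (not part of the original class). Same interface as
# FFNExperts, but d_ff is sharded across the MoE model parallel group; with
# moe_env.model_parallel_size == 1, p_ff == d_ff.
#
# tp = TPExperts(num_experts=2, d_model=8, d_ff=16)
# y = tp(torch.randn(4, 2, 3, 8))   # shape preserved: (4, 2, 3, 8)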
| [
"torch.nn.Dropout",
"torch.cat",
"torch.nn.init.trunc_normal_",
"torch.baddbmm",
"torch.nn.GELU",
"torch.chunk"
] | 1.8 | mrriteshranjan/ColossalAI | 0d057a1bae67b915a385be7edab7da83413cb645 |
1.9 | import kornia.augmentation as K
import yaml
import torch
from .Utils import resample, clamp_with_grad
from torch import nn
CONFIG_DIRECTORY = 'Config'
AUGMENT_CONFIG = 'augment_config.yaml'
DEFAULT_AUGMENTS = [['Af', 'Pe', 'Ji', 'Er']]
with open(f"{CONFIG_DIRECTORY}/{AUGMENT_CONFIG}", "r") as f:
afg = yaml.load(f, Loader=yaml.FullLoader)
def get_arg_label():
label = ''
if not afg['no_augments'] and afg['augments'] is not None:
label += 'aug'
for aug in afg['augments']:
label += '.' + aug
if afg['sharpness']['use'] and afg['sharpness']['arg'] not in afg['augments']:
label += afg['sharpness']['arg']
if afg['jitter']['use'] and afg['jitter']['arg'] not in afg['augments']:
label += afg['jitter']['arg']
if afg['erasing']['use'] and afg['erasing']['arg'] not in afg['augments']:
label += afg['erasing']['arg']
if afg['gaussian_noise']['use'] and afg['gaussian_noise']['arg'] not in afg['augments']:
label += afg['gaussian_noise']['arg']
if afg['gaussian_blur']['use'] and afg['gaussian_blur']['arg'] not in afg['augments']:
label += afg['gaussian_blur']['arg']
label += '_'
return label
augments = afg['augments'] if afg['augments'] else []
if not augments and not afg['no_augments']:
augments = DEFAULT_AUGMENTS
def get_color_jitter():
jfg = afg['jitter']
return K.ColorJitter(
brightness=jfg['brightness'],
contrast=jfg['contrast'],
saturation=jfg['saturation'],
hue=jfg['hue'],
p=jfg['p'])
def get_sharpness():
return K.RandomSharpness(
sharpness=afg['sharpness']['sharpness'],
p=afg['sharpness']['p'])
def get_gaussian_noise():
return K.RandomGaussianNoise(
mean=afg['gaussian_noise']['mean'],
std=afg['gaussian_noise']['std'],
p=afg['gaussian_noise']['p'])
def get_motion_blur():
mblr = afg['motion_blur']
return K.RandomMotionBlur(
kernel_size=mblr['kernel_size'],
angle=mblr['angle'],
direction=mblr['direction'],
border_type=mblr['border_type'],
resample=mblr['resample'],
same_on_batch=mblr['same_on_batch'],
p=mblr['p'],
keepdim=mblr['keepdim']
)
def get_gaussian_blur():
gblr = afg['gaussian_blur']
return K.RandomGaussianBlur(
kernel_size=gblr['kernel_size'],
sigma=gblr['sigma'],
border_type=gblr['border_type'],
same_on_batch=gblr['same_on_batch'],
p=gblr['p']
)
def get_erasing():
efg = afg['erasing']
return K.RandomErasing(
scale=efg['scale'],
ratio=efg['ratio'],
same_on_batch=efg['same_on_batch'],
p=efg['p']
)
def get_affine(cut_method):
cm = cut_method
aff = afg['affine']
return K.RandomAffine(
degrees=aff['degrees'],
translate=(0.1, 0.1),
shear=aff['shear'],
p=aff['p'],
padding_mode='border' if cm == 'updatedpooling' else 'zeros',
keepdim=True)
def get_updated_pooling_augments():
augment_list = [
get_color_jitter(),
get_erasing(),
get_affine('updatedpooling'),
K.RandomPerspective(distortion_scale=0.7, p=0.7)
]
return augment_list
def get_augment_list(cut_method, cut_size):
augment_list = []
cm = cut_method
if afg['no_augments']:
if cm == 'updatedpooling':
augment_list.append(get_color_jitter())
augment_list.append(get_erasing())
augment_list.append(get_affine(cut_method))
augment_list.append(K.RandomPerspective(
distortion_scale=afg['perspective']['distortion_scale'],
p=afg['perspective']['p']))
else:
dummy = get_color_jitter()
dummy.p = 0.0
augment_list.append(dummy)
return augment_list
# Xib TODO: make this respect order again
if afg['jitter']['use'] or afg['jitter']['arg'] in augments[0] \
or cm == 'updatedpooling':
augment_list.append(get_color_jitter())
if (afg['sharpness']['use'] or afg['sharpness']['arg'] in augments[0]) \
and cm not in afg['sharpness']['incompatible']:
augment_list.append(get_sharpness())
if afg['gaussian_noise']['use']:
augment_list.append(get_gaussian_noise())
if afg['motion_blur']['use']:
augment_list.append(get_motion_blur())
if afg['gaussian_blur']['use']:
augment_list.append(get_gaussian_blur())
if (afg['erasing']['use'] or afg['erasing']['arg'] in augments[0]) \
or cm == 'updatedpooling':
augment_list.append(get_erasing())
if (afg['affine']['use'] or afg['affine']['arg'] in augments[0]) \
or cm == 'updatedpooling':
augment_list.append(get_affine(cut_method))
if (afg['perspective']['use'] or afg['perspective']['arg'] in augments[0]) \
or cm == 'updatedpooling':
augment_list.append(K.RandomPerspective(
distortion_scale=afg['perspective']['distortion_scale'],
p=afg['perspective']['p']))
if afg['crop']['use'] or afg['crop']['arg'] in augments[0]:
augment_list.append(K.RandomCrop(
size=(cut_size, cut_size),
pad_if_needed=afg['crop']['pad_if_needed'],
padding_mode=afg['crop']['padding_mode'],
p=afg['crop']['p']))
if afg['elastic_transform']['use'] or afg['elastic_transform']['arg'] in augments[0]:
augment_list.append(K.RandomElasticTransform(p=afg['elastic_transform']['p']))
if afg['rotation']['use'] or afg['rotation']['arg'] in augments[0]:
augment_list.append(K.RandomRotation(
degrees=afg['rotation']['degrees'],
p=afg['rotation']['p']))
if afg['resized_crop']['use'] or afg['resized_crop']['arg'] in augments[0]:
rc = afg['resized_crop']
augment_list.append(K.RandomResizedCrop(
size=(cut_size, cut_size),
scale=rc['scale'],
ratio=rc['ratio'],
cropping_mode=rc['cropping_mode'],
p=rc['p']))
if afg['thin_plate_spline']['use'] or afg['thin_plate_spline']['arg'] in augments[0]:
tps = afg['thin_plate_spline']
augment_list.append(K.RandomThinPlateSpline(
scale=tps['scale'], same_on_batch=tps['same_on_batch'], p=tps['p']))
return augment_list
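# Hedged usage sketch (not part of the original file). Assumes
# Config/augment_config.yaml is present (it is loaded at import time above).
#
# augs = nn.Sequential(*get_augment_list('nrupdated', cut_size=224))
# out = augs(torch.rand(4, 3, 224, 224))   # spatial size is preserved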
class MakeCutouts(nn.Module):
def __init__(self, cut_size, cutn, cut_pow=1.):
super().__init__()
self.cut_size = cut_size
self.cutn = cutn
self.cut_pow = cut_pow # not used with pooling
# Pick your own augments & their order
augment_list = get_augment_list('latest', cut_size)
self.augs = nn.Sequential(*augment_list)
self.noise_fac = afg['noise_fac']
# Uncomment if you like seeing the list ;)
# print_green(augment_list)
# Pooling
self.av_pool = nn.AdaptiveAvgPool2d((self.cut_size, self.cut_size))
self.max_pool = nn.AdaptiveMaxPool2d((self.cut_size, self.cut_size))
def forward(self, input):
cutouts = []
for _ in range(self.cutn):
# Use Pooling
cutout = (self.av_pool(input) + self.max_pool(input)) / 2
cutouts.append(cutout)
batch = self.augs(torch.cat(cutouts, dim=0))
if self.noise_fac:
facs = batch.new_empty([self.cutn, 1, 1, 1]).uniform_(0, self.noise_fac)
batch = batch + facs * torch.randn_like(batch)
return batch
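# Hedged usage sketch (not part of the original class); the cut count and
# image size below are illustrative.
#
# cutter = MakeCutouts(cut_size=224, cutn=8)
# crops = cutter(torch.rand(1, 3, 512, 640))   # -> (8, 3, 224, 224)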
# An updated version with Kornia augments and pooling (where my version started):
# xibnote: ai art machine calls this "cumin"
class MakeCutoutsPoolingUpdate(nn.Module):
def __init__(self, cut_size, cutn, cut_pow=1.):
super().__init__()
self.cut_size = cut_size
self.cutn = cutn
self.cut_pow = cut_pow # Not used with pooling
augment_list = get_updated_pooling_augments()
self.augs = nn.Sequential(*augment_list)
self.noise_fac = afg['noise_fac']
self.av_pool = nn.AdaptiveAvgPool2d((self.cut_size, self.cut_size))
self.max_pool = nn.AdaptiveMaxPool2d((self.cut_size, self.cut_size))
def forward(self, input):
sideY, sideX = input.shape[2:4]
max_size = min(sideX, sideY)
min_size = min(sideX, sideY, self.cut_size)
cutouts = []
for _ in range(self.cutn):
cutout = (self.av_pool(input) + self.max_pool(input)) / 2
cutouts.append(cutout)
batch = self.augs(torch.cat(cutouts, dim=0))
if self.noise_fac:
facs = batch.new_empty([self.cutn, 1, 1, 1]).uniform_(0, self.noise_fac)
batch = batch + facs * torch.randn_like(batch)
return batch
# A Nerdy updated version with selectable Kornia augments, but no pooling:
class MakeCutoutsNRUpdate(nn.Module):
def __init__(self, cut_size, cutn, cut_pow=1.):
super().__init__()
self.cut_size = cut_size
self.cutn = cutn
self.cut_pow = cut_pow
self.noise_fac = afg['noise_fac']
# Pick your own augments & their order
augment_list = get_augment_list('nrupdated', cut_size)
self.augs = nn.Sequential(*augment_list)
def forward(self, input):
sideY, sideX = input.shape[2:4]
max_size = min(sideX, sideY)
min_size = min(sideX, sideY, self.cut_size)
cutouts = []
for _ in range(self.cutn):
size = int(torch.rand([]) ** self.cut_pow * (max_size - min_size) + min_size)
offsetx = torch.randint(0, sideX - size + 1, ())
offsety = torch.randint(0, sideY - size + 1, ())
cutout = input[:, :, offsety:offsety + size, offsetx:offsetx + size]
cutouts.append(resample(cutout, (self.cut_size, self.cut_size)))
batch = self.augs(torch.cat(cutouts, dim=0))
if self.noise_fac:
facs = batch.new_empty([self.cutn, 1, 1, 1]).uniform_(0, self.noise_fac)
batch = batch + facs * torch.randn_like(batch)
return batch
# An updated version with Kornia augments, but no pooling:
class MakeCutoutsUpdate(nn.Module):
def __init__(self, cut_size, cutn, cut_pow=1.):
super().__init__()
self.cut_size = cut_size
self.cutn = cutn
self.cut_pow = cut_pow
self.augs = nn.Sequential(
K.RandomHorizontalFlip(p=0.5),
get_color_jitter(),
# K.RandomSolarize(0.01, 0.01, p=0.7),
K.RandomSharpness(0.3, p=0.4),
K.RandomAffine(degrees=30, translate=(0.1, 0.1), p=0.8, padding_mode='border'),
K.RandomPerspective(0.2, p=0.4), )
self.noise_fac = afg['noise_fac']
def forward(self, input):
sideY, sideX = input.shape[2:4]
max_size = min(sideX, sideY)
min_size = min(sideX, sideY, self.cut_size)
cutouts = []
for _ in range(self.cutn):
size = int(torch.rand([]) ** self.cut_pow * (max_size - min_size) + min_size)
offsetx = torch.randint(0, sideX - size + 1, ())
offsety = torch.randint(0, sideY - size + 1, ())
cutout = input[:, :, offsety:offsety + size, offsetx:offsetx + size]
cutouts.append(resample(cutout, (self.cut_size, self.cut_size)))
batch = self.augs(torch.cat(cutouts, dim=0))
if self.noise_fac:
facs = batch.new_empty([self.cutn, 1, 1, 1]).uniform_(0, self.noise_fac)
batch = batch + facs * torch.randn_like(batch)
return batch
# This is the original version (No pooling)
class MakeCutoutsOrig(nn.Module):
def __init__(self, cut_size, cutn, cut_pow=1.):
super().__init__()
self.cut_size = cut_size
self.cutn = cutn
self.cut_pow = cut_pow
def forward(self, input):
sideY, sideX = input.shape[2:4]
max_size = min(sideX, sideY)
min_size = min(sideX, sideY, self.cut_size)
cutouts = []
for _ in range(self.cutn):
size = int(torch.rand([]) ** self.cut_pow * (max_size - min_size) + min_size)
offsetx = torch.randint(0, sideX - size + 1, ())
offsety = torch.randint(0, sideY - size + 1, ())
cutout = input[:, :, offsety:offsety + size, offsetx:offsetx + size]
cutouts.append(resample(cutout, (self.cut_size, self.cut_size)))
return clamp_with_grad(torch.cat(cutouts, dim=0), 0, 1)
def get_cutout_function(cfg, cut_size):
if cfg['cut_method'] == 'latest':
make_cutouts = MakeCutouts(cut_size, cfg['cutn'], cut_pow=cfg['cut_pow'])
elif cfg['cut_method'] == 'original':
make_cutouts = MakeCutoutsOrig(cut_size, cfg['cutn'], cut_pow=cfg['cut_pow'])
elif cfg['cut_method'] == 'updated':
make_cutouts = MakeCutoutsUpdate(cut_size, cfg['cutn'], cut_pow=cfg['cut_pow'])
elif cfg['cut_method'] == 'nrupdated':
make_cutouts = MakeCutoutsNRUpdate(cut_size, cfg['cutn'], cut_pow=cfg['cut_pow'])
else:
make_cutouts = MakeCutoutsPoolingUpdate(cut_size, cfg['cutn'], cut_pow=cfg['cut_pow'])
return make_cutouts
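# Hedged usage sketch (not part of the original file); the cfg dict only
# needs the keys read above.
#
# cfg = {'cut_method': 'latest', 'cutn': 16, 'cut_pow': 1.0}
# make_cutouts = get_cutout_function(cfg, cut_size=224)
# cutouts = make_cutouts(torch.rand(1, 3, 400, 400))   # -> (16, 3, 224, 224)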
| [
"torch.nn.AdaptiveMaxPool2d",
"torch.cat",
"torch.rand",
"torch.nn.Sequential",
"torch.randint",
"torch.randn_like",
"torch.nn.AdaptiveAvgPool2d"
] | 1.9.0 | Xibanya/VQGAN-CLIP | 24510ef372df3131dedf289397818217e1fd3df0 |
0.3 | import torch, torchvision
import os
import argparse
def main():
parser = argparse.ArgumentParser(description='PIONEER Zeta')
parser.add_argument('--first', type=str, help='Path to the saved tensor file to analyze')
args=parser.parse_args()
f1=torch.load(os.path.expanduser(args.first))
#First is the file to be analyzed
# Basic statistics over the loaded tensor (renamed to avoid shadowing built-ins)
total = torch.sum(f1)
avg = torch.mean(f1)
min_val = torch.min(f1)
max_val = torch.max(f1)
print("Sum: ", total)
print("Avg: ", avg)
print("Min: ", min_val)
print("Max: ", max_val)
main()
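# Hedged usage sketch: run with the path to a saved tensor, e.g.
#   python zeta.py --first ~/tensors/sample.pt
# (the script filename "zeta.py" is an assumption, not taken from the source)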
| [
"torch.max",
"torch.min",
"torch.mean",
"torch.sum"
] | 0.3.1 | mpaloni/pioneer | c49efa2e071307b2534ca2abe7560f57683d2d9e |