version (stringclasses, 24 values) | code (string, 396–135k chars) | apis (sequence) | full_version (string, 1–6 chars) | repo_name (string, 6–64 chars) | hexsha (string, 40 chars)
---|---|---|---|---|---
1.7 | import math
import torch
import numpy as np
import torch.nn.functional as F
from PIL import Image
from tqdm import tqdm
from .floating_region import FloatingRegionScore
from .spatial_purity import SpatialPurity
def PixelSelection(cfg, feature_extractor, classifier, tgt_epoch_loader):
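# Active pixel selection: scores every pixel by prediction entropy weighted by the local
# spatial purity of the pseudo-labels, greedily picks the top-scoring pixels per image
# (masking out a small neighbourhood around each pick), and reveals their ground-truth
# labels in the saved active mask.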
feature_extractor.eval()
classifier.eval()
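# Per-round pixel budget: the total budget cfg.ACTIVE.PIXELS is split evenly across the
# selection rounds and rescaled from a 1280x640 crop to the 2048x1024 full resolution
# (assumption inferred from the constants below).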
active_pixels = math.ceil(cfg.ACTIVE.PIXELS / len(cfg.ACTIVE.SELECT_ITER) / (1280 * 640) * (2048 * 1024))
calculate_purity = SpatialPurity(in_channels=cfg.MODEL.NUM_CLASSES, size=2 * cfg.ACTIVE.RADIUS_K + 1).cuda()
mask_radius = cfg.ACTIVE.RADIUS_K
with torch.no_grad():
for tgt_data in tqdm(tgt_epoch_loader):
tgt_input, path2mask = tgt_data['img'], tgt_data['path_to_mask']
origin_mask, origin_label = tgt_data['origin_mask'], tgt_data['origin_label']
origin_size = tgt_data['size']
active_indicator = tgt_data['active']
selected_indicator = tgt_data['selected']
path2indicator = tgt_data['path_to_indicator']
tgt_input = tgt_input.cuda(non_blocking=True)
tgt_size = tgt_input.shape[-2:]
tgt_feat = feature_extractor(tgt_input)
tgt_out = classifier(tgt_feat, size=tgt_size)
for i in range(len(origin_mask)):
active_mask = origin_mask[i].cuda(non_blocking=True)
ground_truth = origin_label[i].cuda(non_blocking=True)
size = (origin_size[i][0], origin_size[i][1])
active = active_indicator[i]
selected = selected_indicator[i]
output = tgt_out[i:i + 1, :, :, :]
output = F.interpolate(output, size=size, mode='bilinear', align_corners=True)
output = output.squeeze(dim=0)
p = torch.softmax(output, dim=0)
entropy = torch.sum(-p * torch.log(p + 1e-6), dim=0)
pseudo_label = torch.argmax(p, dim=0)
one_hot = F.one_hot(pseudo_label, num_classes=cfg.MODEL.NUM_CLASSES).float()
one_hot = one_hot.permute((2, 0, 1)).unsqueeze(dim=0)
purity = calculate_purity(one_hot).squeeze(dim=0).squeeze(dim=0)
score = entropy * purity
score[active] = -float('inf')
for pixel in range(active_pixels):
values, indices_h = torch.max(score, dim=0)
_, indices_w = torch.max(values, dim=0)
w = indices_w.item()
h = indices_h[w].item()
start_w = w - mask_radius if w - mask_radius >= 0 else 0
start_h = h - mask_radius if h - mask_radius >= 0 else 0
end_w = w + mask_radius + 1
end_h = h + mask_radius + 1
# mask out
score[start_h:end_h, start_w:end_w] = -float('inf')
active[start_h:end_h, start_w:end_w] = True
selected[h, w] = True
# active sampling
active_mask[h, w] = ground_truth[h, w]
active_mask = Image.fromarray(np.array(active_mask.cpu().numpy(), dtype=np.uint8))
active_mask.save(path2mask[i])
indicator = {
'active': active,
'selected': selected
}
torch.save(indicator, path2indicator[i])
feature_extractor.train()
classifier.train()
def RegionSelection(cfg, feature_extractor, classifier, tgt_epoch_loader):
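# Region-based variant of the selection above: the same entropy/purity score is computed by
# FloatingRegionScore, and instead of single pixels whole (2K+1)x(2K+1) regions are annotated,
# with a larger radius masked out to avoid overlapping picks.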
feature_extractor.eval()
classifier.eval()
floating_region_score = FloatingRegionScore(in_channels=cfg.MODEL.NUM_CLASSES, size=2 * cfg.ACTIVE.RADIUS_K + 1).cuda()
per_region_pixels = (2 * cfg.ACTIVE.RADIUS_K + 1) ** 2
active_radius = cfg.ACTIVE.RADIUS_K
mask_radius = cfg.ACTIVE.RADIUS_K * 2
active_ratio = cfg.ACTIVE.RATIO / len(cfg.ACTIVE.SELECT_ITER)
with torch.no_grad():
for tgt_data in tqdm(tgt_epoch_loader):
tgt_input, path2mask = tgt_data['img'], tgt_data['path_to_mask']
origin_mask, origin_label = \
tgt_data['origin_mask'], tgt_data['origin_label']
origin_size = tgt_data['size']
active_indicator = tgt_data['active']
selected_indicator = tgt_data['selected']
path2indicator = tgt_data['path_to_indicator']
tgt_input = tgt_input.cuda(non_blocking=True)
tgt_size = tgt_input.shape[-2:]
tgt_feat = feature_extractor(tgt_input)
tgt_out = classifier(tgt_feat, size=tgt_size)
for i in range(len(origin_mask)):
active_mask = origin_mask[i].cuda(non_blocking=True)
ground_truth = origin_label[i].cuda(non_blocking=True)
size = (origin_size[i][0], origin_size[i][1])
num_pixel_cur = size[0] * size[1]
active = active_indicator[i]
selected = selected_indicator[i]
output = tgt_out[i:i + 1, :, :, :]
output = F.interpolate(output, size=size, mode='bilinear', align_corners=True)
score, purity, entropy = floating_region_score(output)
score[active] = -float('inf')
active_regions = math.ceil(num_pixel_cur * active_ratio / per_region_pixels)
for pixel in range(active_regions):
values, indices_h = torch.max(score, dim=0)
_, indices_w = torch.max(values, dim=0)
w = indices_w.item()
h = indices_h[w].item()
active_start_w = w - active_radius if w - active_radius >= 0 else 0
active_start_h = h - active_radius if h - active_radius >= 0 else 0
active_end_w = w + active_radius + 1
active_end_h = h + active_radius + 1
mask_start_w = w - mask_radius if w - mask_radius >= 0 else 0
mask_start_h = h - mask_radius if h - mask_radius >= 0 else 0
mask_end_w = w + mask_radius + 1
mask_end_h = h + mask_radius + 1
# mask out
score[mask_start_h:mask_end_h, mask_start_w:mask_end_w] = -float('inf')
active[mask_start_h:mask_end_h, mask_start_w:mask_end_w] = True
selected[active_start_h:active_end_h, active_start_w:active_end_w] = True
# active sampling
active_mask[active_start_h:active_end_h, active_start_w:active_end_w] = \
ground_truth[active_start_h:active_end_h, active_start_w:active_end_w]
active_mask = Image.fromarray(np.array(active_mask.cpu().numpy(), dtype=np.uint8))
active_mask.save(path2mask[i])
indicator = {
'active': active,
'selected': selected
}
torch.save(indicator, path2indicator[i])
feature_extractor.train()
classifier.train()
| [
"torch.nn.functional.one_hot",
"torch.max",
"torch.no_grad",
"torch.save",
"torch.nn.functional.interpolate",
"torch.softmax",
"torch.log",
"torch.argmax"
] | 1.7.1 | BIT-DA/RIPU | 125edf112c9ded1e7497aedb2a092331824df100 |
0.4 | import random
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.roi_crop.functions.roi_crop import RoICropFunction
from model.utils.config import cfg
from torch.autograd import Variable
def save_net(fname, net):
import h5py
h5f = h5py.File(fname, mode='w')
for k, v in net.state_dict().items():
h5f.create_dataset(k, data=v.cpu().numpy())
def load_net(fname, net):
import h5py
h5f = h5py.File(fname, mode='r')
for k, v in net.state_dict().items():
param = torch.from_numpy(np.asarray(h5f[k]))
v.copy_(param)
def weights_normal_init(model, dev=0.01):
if isinstance(model, list):
for m in model:
weights_normal_init(m, dev)
else:
for m in model.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, dev)
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0.0, dev)
def clip_gradient(model, clip_norm):
"""Computes a gradient clipping coefficient based on gradient norm."""
totalnorm = 0
for p in model.parameters():
if p.requires_grad:
modulenorm = p.grad.data.norm()
totalnorm += modulenorm ** 2
totalnorm = np.sqrt(totalnorm)
norm = clip_norm / max(totalnorm, clip_norm)
for p in model.parameters():
if p.requires_grad:
p.grad.mul_(norm)
def vis_detections(im, class_name, dets, thresh=0.8):
"""Visual debugging of detections."""
for i in range(np.minimum(10, dets.shape[0])):
bbox = tuple(int(np.round(x)) for x in dets[i, :4])
score = dets[i, -1]
if score > thresh:
cv2.rectangle(im, bbox[0:2], bbox[2:4], (0, 204, 0), 2)
cv2.putText(im, '%s: %.3f' % (class_name, score),
(bbox[0], bbox[1] + 15), cv2.FONT_HERSHEY_PLAIN,
1.0, (0, 0, 255), thickness=1)
return im
def adjust_learning_rate(optimizer, decay=0.1):
"""Sets the learning rate to the initial LR decayed by 0.5 every 20
epochs"""
for param_group in optimizer.param_groups:
param_group['lr'] = decay * param_group['lr']
def save_checkpoint(state, filename):
torch.save(state, filename)
def _smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights,
bbox_outside_weights, sigma=1.0, dim=[1]):
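# Smooth L1 (Huber) loss on the inside-weighted box deltas, computed elementwise:
#   0.5 * (sigma * x)^2        if |x| < 1 / sigma^2
#   |x| - 0.5 / sigma^2        otherwise
# then scaled by the outside weights, summed over `dim` and averaged.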
sigma_2 = sigma ** 2
box_diff = bbox_pred - bbox_targets
in_box_diff = bbox_inside_weights * box_diff
abs_in_box_diff = torch.abs(in_box_diff)
smoothL1_sign = (abs_in_box_diff < 1. / sigma_2).detach().float()
in_loss_box = torch.pow(in_box_diff, 2) * (sigma_2 / 2.) * smoothL1_sign \
+ (abs_in_box_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign)
out_loss_box = bbox_outside_weights * in_loss_box
loss_box = out_loss_box
for i in sorted(dim, reverse=True):
loss_box = loss_box.sum(i)
loss_box = loss_box.mean()
return loss_box
def _crop_pool_layer(bottom, rois, max_pool=True):
# code modified from
# https://github.com/ruotianluo/pytorch-faster-rcnn
# implement it using stn
# box to affine
# input (x1,y1,x2,y2)
"""
[ x2-x1 x1 + x2 - W + 1 ]
[ ----- 0 --------------- ]
[ W - 1 W - 1 ]
[ ]
[ y2-y1 y1 + y2 - H + 1 ]
[ 0 ----- --------------- ]
[ H - 1 H - 1 ]
"""
rois = rois.detach()
batch_size = bottom.size(0)
D = bottom.size(1)
H = bottom.size(2)
W = bottom.size(3)
roi_per_batch = rois.size(0) // batch_size  # integer division so expand() below receives an int
x1 = rois[:, 1::4] / 16.0
y1 = rois[:, 2::4] / 16.0
x2 = rois[:, 3::4] / 16.0
y2 = rois[:, 4::4] / 16.0
height = bottom.size(2)
width = bottom.size(3)
# affine theta
zero = Variable(rois.data.new(rois.size(0), 1).zero_())
theta = torch.cat([ \
(x2 - x1) / (width - 1),
zero,
(x1 + x2 - width + 1) / (width - 1),
zero,
(y2 - y1) / (height - 1),
(y1 + y2 - height + 1) / (height - 1)], 1).view(-1, 2, 3)
if max_pool:
pre_pool_size = cfg.POOLING_SIZE * 2
grid = F.affine_grid(theta, torch.Size(
(rois.size(0), 1, pre_pool_size, pre_pool_size)))
bottom = bottom.view(1, batch_size, D, H, W).contiguous().expand(
roi_per_batch, batch_size, D, H, W) \
.contiguous().view(-1, D, H, W)
crops = F.grid_sample(bottom, grid)
crops = F.max_pool2d(crops, 2, 2)
else:
grid = F.affine_grid(theta, torch.Size(
(rois.size(0), 1, cfg.POOLING_SIZE, cfg.POOLING_SIZE)))
bottom = bottom.view(1, batch_size, D, H, W).contiguous().expand(
roi_per_batch, batch_size, D, H, W) \
.contiguous().view(-1, D, H, W)
crops = F.grid_sample(bottom, grid)
return crops, grid
def _affine_grid_gen(rois, input_size, grid_size):
rois = rois.detach()
x1 = rois[:, 1::4] / 16.0
y1 = rois[:, 2::4] / 16.0
x2 = rois[:, 3::4] / 16.0
y2 = rois[:, 4::4] / 16.0
height = input_size[0]
width = input_size[1]
zero = Variable(rois.data.new(rois.size(0), 1).zero_())
theta = torch.cat([ \
(x2 - x1) / (width - 1),
zero,
(x1 + x2 - width + 1) / (width - 1),
zero,
(y2 - y1) / (height - 1),
(y1 + y2 - height + 1) / (height - 1)], 1).view(-1, 2, 3)
grid = F.affine_grid(theta,
torch.Size((rois.size(0), 1, grid_size, grid_size)))
return grid
def _affine_theta(rois, input_size):
rois = rois.detach()
x1 = rois[:, 1::4] / 16.0
y1 = rois[:, 2::4] / 16.0
x2 = rois[:, 3::4] / 16.0
y2 = rois[:, 4::4] / 16.0
height = input_size[0]
width = input_size[1]
zero = Variable(rois.data.new(rois.size(0), 1).zero_())
# theta = torch.cat([\
# (x2 - x1) / (width - 1),
# zero,
# (x1 + x2 - width + 1) / (width - 1),
# zero,
# (y2 - y1) / (height - 1),
# (y1 + y2 - height + 1) / (height - 1)], 1).view(-1, 2, 3)
theta = torch.cat([ \
(y2 - y1) / (height - 1),
zero,
(y1 + y2 - height + 1) / (height - 1),
zero,
(x2 - x1) / (width - 1),
(x1 + x2 - width + 1) / (width - 1)], 1).view(-1, 2, 3)
return theta
| [
"torch.cat",
"torch.save",
"torch.abs",
"torch.nn.functional.grid_sample",
"torch.nn.functional.max_pool2d",
"torch.pow"
] | 0.4.1 | sadjadasghari/3d-vehicle-tracking | f8433f72a51dd1a7190570e63e9fda4a924a81f0 |
0.4 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
# --------------------------------------------------------
# Reorganized and modified by Jianwei Yang and Jiasen Lu
# --------------------------------------------------------
import torch
def bbox_transform(ex_rois, gt_rois):
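# Standard R-CNN box regression targets of the ground-truth boxes relative to the ex (anchor/proposal) boxes:
#   dx = (gt_cx - ex_cx) / ex_w,  dy = (gt_cy - ex_cy) / ex_h,
#   dw = log(gt_w / ex_w),        dh = log(gt_h / ex_h)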
ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights
gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths
gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights
targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = torch.log(gt_widths / ex_widths)
targets_dh = torch.log(gt_heights / ex_heights)
targets = torch.stack(
(targets_dx, targets_dy, targets_dw, targets_dh), 1)
return targets
def bbox_transform_batch(ex_rois, gt_rois):
if ex_rois.dim() == 2:
ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights
gt_widths = gt_rois[:, :, 2] - gt_rois[:, :, 0] + 1.0
gt_heights = gt_rois[:, :, 3] - gt_rois[:, :, 1] + 1.0
gt_ctr_x = gt_rois[:, :, 0] + 0.5 * gt_widths
gt_ctr_y = gt_rois[:, :, 1] + 0.5 * gt_heights
targets_dx = (gt_ctr_x - ex_ctr_x.view(1, -1).expand_as(
gt_ctr_x)) / ex_widths
targets_dy = (gt_ctr_y - ex_ctr_y.view(1, -1).expand_as(
gt_ctr_y)) / ex_heights
targets_dw = torch.log(
gt_widths / ex_widths.view(1, -1).expand_as(gt_widths))
targets_dh = torch.log(
gt_heights / ex_heights.view(1, -1).expand_as(gt_heights))
elif ex_rois.dim() == 3:
ex_widths = ex_rois[:, :, 2] - ex_rois[:, :, 0] + 1.0
ex_heights = ex_rois[:, :, 3] - ex_rois[:, :, 1] + 1.0
ex_ctr_x = ex_rois[:, :, 0] + 0.5 * ex_widths
ex_ctr_y = ex_rois[:, :, 1] + 0.5 * ex_heights
gt_widths = gt_rois[:, :, 2] - gt_rois[:, :, 0] + 1.0
gt_heights = gt_rois[:, :, 3] - gt_rois[:, :, 1] + 1.0
gt_ctr_x = gt_rois[:, :, 0] + 0.5 * gt_widths
gt_ctr_y = gt_rois[:, :, 1] + 0.5 * gt_heights
targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = torch.log(gt_widths / ex_widths)
targets_dh = torch.log(gt_heights / ex_heights)
else:
raise ValueError('ex_roi input dimension is not correct.')
targets = torch.stack(
(targets_dx, targets_dy, targets_dw, targets_dh), 2)
return targets
def bbox_transform_inv(boxes, deltas, batch_size):
widths = boxes[:, :, 2] - boxes[:, :, 0] + 1.0
heights = boxes[:, :, 3] - boxes[:, :, 1] + 1.0
ctr_x = boxes[:, :, 0] + 0.5 * widths
ctr_y = boxes[:, :, 1] + 0.5 * heights
dx = deltas[:, :, 0::4]
dy = deltas[:, :, 1::4]
dw = deltas[:, :, 2::4]
dh = deltas[:, :, 3::4]
pred_ctr_x = dx * widths.unsqueeze(2) + ctr_x.unsqueeze(2)
pred_ctr_y = dy * heights.unsqueeze(2) + ctr_y.unsqueeze(2)
pred_w = torch.exp(dw) * widths.unsqueeze(2)
pred_h = torch.exp(dh) * heights.unsqueeze(2)
pred_boxes = deltas.clone()
# x1
pred_boxes[:, :, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, :, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2
pred_boxes[:, :, 2::4] = pred_ctr_x + 0.5 * pred_w
# y2
pred_boxes[:, :, 3::4] = pred_ctr_y + 0.5 * pred_h
return pred_boxes
def center_transform_inv(boxes, deltas, batch_size):
widths = boxes[:, :, 2] - boxes[:, :, 0] + 1.0
heights = boxes[:, :, 3] - boxes[:, :, 1] + 1.0
ctr_x = boxes[:, :, 0] + 0.5 * widths
ctr_y = boxes[:, :, 1] + 0.5 * heights
dx = deltas[:, :, 0::2]
dy = deltas[:, :, 1::2]
pred_ctr_x = dx * widths.unsqueeze(2) + ctr_x.unsqueeze(2)
pred_ctr_y = dy * heights.unsqueeze(2) + ctr_y.unsqueeze(2)
pred_center = deltas.clone()
# x1
pred_center[:, :, 0::2] = pred_ctr_x
# y1
pred_center[:, :, 1::2] = pred_ctr_y
return pred_center
def clip_boxes_batch(boxes, im_shape, batch_size):
"""
Clip boxes to image boundaries.
"""
# num_rois = boxes.size(1)
boxes[boxes < 0] = 0
# batch_x = (im_shape[:,0]-1).view(batch_size, 1).expand(batch_size,
# num_rois)
# batch_y = (im_shape[:,1]-1).view(batch_size, 1).expand(batch_size,
# num_rois)
batch_x = im_shape[:, 1] - 1
batch_y = im_shape[:, 0] - 1
boxes[:, :, 0][boxes[:, :, 0] > batch_x] = batch_x
boxes[:, :, 1][boxes[:, :, 1] > batch_y] = batch_y
boxes[:, :, 2][boxes[:, :, 2] > batch_x] = batch_x
boxes[:, :, 3][boxes[:, :, 3] > batch_y] = batch_y
return boxes
def clip_boxes(boxes, im_shape, batch_size):
for i in range(batch_size):
boxes[i, :, 0::4].clamp_(0, im_shape[i, 1] - 1)
boxes[i, :, 1::4].clamp_(0, im_shape[i, 0] - 1)
boxes[i, :, 2::4].clamp_(0, im_shape[i, 1] - 1)
boxes[i, :, 3::4].clamp_(0, im_shape[i, 0] - 1)
return boxes
def bbox_overlaps(anchors, gt_boxes):
"""
anchors: (N, 4) ndarray of float
gt_boxes: (K, 4) ndarray of float
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
"""
N = anchors.size(0)
K = gt_boxes.size(0)
gt_boxes_area = ((gt_boxes[:, 2] - gt_boxes[:, 0] + 1) *
(gt_boxes[:, 3] - gt_boxes[:, 1] + 1)).view(1, K)
anchors_area = ((anchors[:, 2] - anchors[:, 0] + 1) *
(anchors[:, 3] - anchors[:, 1] + 1)).view(N, 1)
boxes = anchors.view(N, 1, 4).expand(N, K, 4)
query_boxes = gt_boxes.view(1, K, 4).expand(N, K, 4)
iw = (torch.min(boxes[:, :, 2], query_boxes[:, :, 2]) -
torch.max(boxes[:, :, 0], query_boxes[:, :, 0]) + 1)
iw[iw < 0] = 0
ih = (torch.min(boxes[:, :, 3], query_boxes[:, :, 3]) -
torch.max(boxes[:, :, 1], query_boxes[:, :, 1]) + 1)
ih[ih < 0] = 0
ua = anchors_area + gt_boxes_area - (iw * ih)
overlaps = iw * ih / ua
return overlaps
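# Illustrative sanity check (not part of the original file): identical boxes give IoU 1,
# disjoint boxes give IoU 0.
#   anchors = torch.tensor([[0., 0., 9., 9.]])
#   gt = torch.tensor([[0., 0., 9., 9.], [20., 20., 29., 29.]])
#   bbox_overlaps(anchors, gt)  # -> tensor([[1., 0.]])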
def bbox_overlaps_batch(anchors, gt_boxes):
"""
anchors: (N, 4) ndarray of float
gt_boxes: (b, K, 5) ndarray of float
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
"""
batch_size = gt_boxes.size(0)
if anchors.dim() == 2:
N = anchors.size(0)
K = gt_boxes.size(1)
anchors = anchors.view(1, N, 4).expand(batch_size, N, 4).contiguous()
gt_boxes = gt_boxes[:, :, :4].contiguous()
gt_boxes_x = (gt_boxes[:, :, 2] - gt_boxes[:, :, 0] + 1)
gt_boxes_y = (gt_boxes[:, :, 3] - gt_boxes[:, :, 1] + 1)
gt_boxes_area = (gt_boxes_x * gt_boxes_y).view(batch_size, 1, K)
anchors_boxes_x = (anchors[:, :, 2] - anchors[:, :, 0] + 1)
anchors_boxes_y = (anchors[:, :, 3] - anchors[:, :, 1] + 1)
anchors_area = (anchors_boxes_x * anchors_boxes_y).view(batch_size, N,
1)
gt_area_zero = (gt_boxes_x == 1) & (gt_boxes_y == 1)
anchors_area_zero = (anchors_boxes_x == 1) & (anchors_boxes_y == 1)
boxes = anchors.view(batch_size, N, 1, 4).expand(batch_size, N, K, 4)
query_boxes = gt_boxes.view(batch_size, 1, K, 4).expand(batch_size, N,
K, 4)
iw = (torch.min(boxes[:, :, :, 2], query_boxes[:, :, :, 2]) -
torch.max(boxes[:, :, :, 0], query_boxes[:, :, :, 0]) + 1)
iw[iw < 0] = 0
ih = (torch.min(boxes[:, :, :, 3], query_boxes[:, :, :, 3]) -
torch.max(boxes[:, :, :, 1], query_boxes[:, :, :, 1]) + 1)
ih[ih < 0] = 0
ua = anchors_area + gt_boxes_area - (iw * ih)
overlaps = iw * ih / ua
# mask the overlap here.
overlaps.masked_fill_(
gt_area_zero.view(batch_size, 1, K).expand(batch_size, N, K), 0)
overlaps.masked_fill_(
anchors_area_zero.view(batch_size, N, 1).expand(batch_size, N, K),
-1)
elif anchors.dim() == 3:
N = anchors.size(1)
K = gt_boxes.size(1)
if anchors.size(2) == 4:
anchors = anchors[:, :, :4].contiguous()
else:
anchors = anchors[:, :, 1:5].contiguous()
gt_boxes = gt_boxes[:, :, :4].contiguous()
gt_boxes_x = (gt_boxes[:, :, 2] - gt_boxes[:, :, 0] + 1)
gt_boxes_y = (gt_boxes[:, :, 3] - gt_boxes[:, :, 1] + 1)
gt_boxes_area = (gt_boxes_x * gt_boxes_y).view(batch_size, 1, K)
anchors_boxes_x = (anchors[:, :, 2] - anchors[:, :, 0] + 1)
anchors_boxes_y = (anchors[:, :, 3] - anchors[:, :, 1] + 1)
anchors_area = (anchors_boxes_x * anchors_boxes_y).view(batch_size, N,
1)
gt_area_zero = (gt_boxes_x == 1) & (gt_boxes_y == 1)
anchors_area_zero = (anchors_boxes_x == 1) & (anchors_boxes_y == 1)
boxes = anchors.view(batch_size, N, 1, 4).expand(batch_size, N, K, 4)
query_boxes = gt_boxes.view(batch_size, 1, K, 4).expand(batch_size, N,
K, 4)
iw = (torch.min(boxes[:, :, :, 2], query_boxes[:, :, :, 2]) -
torch.max(boxes[:, :, :, 0], query_boxes[:, :, :, 0]) + 1)
iw[iw < 0] = 0
ih = (torch.min(boxes[:, :, :, 3], query_boxes[:, :, :, 3]) -
torch.max(boxes[:, :, :, 1], query_boxes[:, :, :, 1]) + 1)
ih[ih < 0] = 0
ua = anchors_area + gt_boxes_area - (iw * ih)
overlaps = iw * ih / ua
# mask the overlap here.
overlaps.masked_fill_(
gt_area_zero.view(batch_size, 1, K).expand(batch_size, N, K), 0)
overlaps.masked_fill_(
anchors_area_zero.view(batch_size, N, 1).expand(batch_size, N, K),
-1)
else:
raise ValueError('anchors input dimension is not correct.')
return overlaps
| [
"torch.stack",
"torch.min",
"torch.max",
"torch.log",
"torch.exp"
] | 0.4.1 | sadjadasghari/3d-vehicle-tracking | f8433f72a51dd1a7190570e63e9fda4a924a81f0 |
0.4 | """
A stacked LSTM with LSTM layers which alternate between going forwards over
the sequence and going backwards.
"""
from typing import Optional, Tuple, Union, List
import torch
from torch.nn.utils.rnn import PackedSequence
from allennlp.modules.augmented_lstm import AugmentedLstm
from allennlp.common.checks import ConfigurationError
TensorPair = Tuple[torch.Tensor, torch.Tensor]
class StackedAlternatingLstm(torch.nn.Module):
"""
A stacked LSTM with LSTM layers which alternate between going forwards over
the sequence and going backwards. This implementation is based on the
description in [Deep Semantic Role Labelling - What works and what's next]
(https://homes.cs.washington.edu/~luheng/files/acl2017_hllz.pdf).
# Parameters
input_size : `int`, required
The dimension of the inputs to the LSTM.
hidden_size : `int`, required
The dimension of the outputs of the LSTM.
num_layers : `int`, required
The number of stacked LSTMs to use.
recurrent_dropout_probability : `float`, optional (default = 0.0)
The dropout probability to be used in a dropout scheme as stated in
[A Theoretically Grounded Application of Dropout in Recurrent Neural Networks]
(https://arxiv.org/abs/1512.05287).
use_input_projection_bias : `bool`, optional (default = True)
Whether or not to use a bias on the input projection layer. This is mainly here
for backwards compatibility reasons and will be removed (and set to False)
in future releases.
# Returns
output_accumulator : PackedSequence
The outputs of the interleaved LSTMs per timestep. A tensor of shape
(batch_size, max_timesteps, hidden_size) where for a given batch
element, all outputs past the sequence length for that batch are
zero tensors.
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int,
recurrent_dropout_probability: float = 0.0,
use_highway: bool = True,
use_input_projection_bias: bool = True,
) -> None:
super().__init__()
# Required to be wrapped with a `PytorchSeq2SeqWrapper`.
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
layers = []
lstm_input_size = input_size
for layer_index in range(num_layers):
go_forward = layer_index % 2 == 0
layer = AugmentedLstm(
lstm_input_size,
hidden_size,
go_forward,
recurrent_dropout_probability=recurrent_dropout_probability,
use_highway=use_highway,
use_input_projection_bias=use_input_projection_bias,
)
lstm_input_size = hidden_size
self.add_module("layer_{}".format(layer_index), layer)
layers.append(layer)
self.lstm_layers = layers
def forward(
self, inputs: PackedSequence, initial_state: Optional[TensorPair] = None
) -> Tuple[Union[torch.Tensor, PackedSequence], TensorPair]:
"""
# Parameters
inputs : `PackedSequence`, required.
A batch first `PackedSequence` to run the stacked LSTM over.
initial_state : Tuple[torch.Tensor, torch.Tensor], optional, (default = None)
A tuple (state, memory) representing the initial hidden state and memory
of the LSTM. Each tensor has shape (1, batch_size, output_dimension).
# Returns
output_sequence : PackedSequence
The encoded sequence of shape (batch_size, sequence_length, hidden_size)
final_states: Tuple[torch.Tensor, torch.Tensor]
The per-layer final (state, memory) states of the LSTM, each with shape
(num_layers, batch_size, hidden_size).
"""
if not initial_state:
hidden_states: List[Optional[TensorPair]] = [None] * len(self.lstm_layers)
elif initial_state[0].size()[0] != len(self.lstm_layers):
raise ConfigurationError(
"Initial states were passed to forward() but the number of "
"initial states does not match the number of layers."
)
else:
hidden_states = list(
zip(initial_state[0].split(1, 0), initial_state[1].split(1, 0))
)
output_sequence = inputs
final_states = []
for i, state in enumerate(hidden_states):
layer = getattr(self, "layer_{}".format(i))
# The state is duplicated to mirror the Pytorch API for LSTMs.
output_sequence, final_state = layer(output_sequence, state)
final_states.append(final_state)
final_hidden_state, final_cell_state = tuple(
torch.cat(state_list, 0) for state_list in zip(*final_states)
)
return output_sequence, (final_hidden_state, final_cell_state)
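# Illustrative usage sketch (assumes allennlp's AugmentedLstm is available; batch-first packed input):
#   import torch
#   from torch.nn.utils.rnn import pack_padded_sequence
#   lstm = StackedAlternatingLstm(input_size=8, hidden_size=16, num_layers=4)
#   packed = pack_padded_sequence(torch.randn(2, 5, 8), [5, 3], batch_first=True)
#   output, (h, c) = lstm(packed)  # h, c: (num_layers, batch_size, hidden_size)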
| [
"torch.cat"
] | 0.4.1 | rahular/joint-coref-srl | cd85fb4e11af1a1ea400ed657d0a4511c1d6c6be |
2.0 | """
Copyright (c) 2019-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest
import torch
from copy import deepcopy
from pytest import approx
from torch import nn
from nncf.api.compression import CompressionStage
from nncf.config import NNCFConfig
from nncf.torch.module_operations import UpdateWeight
from nncf.torch.sparsity.rb.algo import RBSparsityController
from nncf.torch.sparsity.rb.layers import RBSparsifyingWeight
from nncf.torch.sparsity.rb.loss import SparseLoss, SparseLossForPerLayerSparsity
from nncf.common.sparsity.schedulers import PolynomialSparsityScheduler
from tests.torch.helpers import MockModel, BasicConvTestModel, TwoConvTestModel, \
create_compressed_model_and_algo_for_test, check_correct_nncf_modules_replacement, get_empty_config
def get_basic_sparsity_config(input_sample_size=None,
sparsity_init=0.02, sparsity_target=0.5, sparsity_target_epoch=2,
sparsity_freeze_epoch=3):
if input_sample_size is None:
input_sample_size = [1, 1, 4, 4]
config = NNCFConfig()
config.update({
"model": "basic_sparse_conv",
"input_info":
{
"sample_size": input_sample_size,
},
"compression":
{
"algorithm": "rb_sparsity",
"sparsity_init": sparsity_init,
"params":
{
"schedule": "polynomial",
"sparsity_target": sparsity_target,
"sparsity_target_epoch": sparsity_target_epoch,
"sparsity_freeze_epoch": sparsity_freeze_epoch
},
}
})
return config
def test_can_load_sparse_algo__with_defaults():
model = BasicConvTestModel()
config = get_basic_sparsity_config()
sparse_model, compression_ctrl = create_compressed_model_and_algo_for_test(deepcopy(model), config)
assert isinstance(compression_ctrl, RBSparsityController)
_, sparse_model_conv = check_correct_nncf_modules_replacement(model, sparse_model)
for sparse_module in sparse_model_conv.values():
store = []
for op in sparse_module.pre_ops.values():
if isinstance(op, UpdateWeight) and isinstance(op.operand, RBSparsifyingWeight):
assert torch.allclose(op.operand.binary_mask, torch.ones_like(sparse_module.weight))
assert not op.operand.frozen
assert op.__class__.__name__ not in store
store.append(op.__class__.__name__)
def test_can_set_sparse_layers_to_loss():
model = BasicConvTestModel()
config = get_basic_sparsity_config()
_, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)
loss = compression_ctrl.loss
assert isinstance(loss, SparseLoss)
# pylint: disable=protected-access
for layer in loss._sparse_layers:
assert isinstance(layer, RBSparsifyingWeight)
def test_sparse_algo_does_not_replace_not_conv_layer():
class TwoLayersTestModel(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 1, 1)
self.bn = nn.BatchNorm2d(1)
def forward(self, x):
return self.bn(self.conv(x))
model = TwoLayersTestModel()
config = get_basic_sparsity_config()
_, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)
assert isinstance(compression_ctrl, RBSparsityController)
for m in compression_ctrl.sparsified_module_info:
assert isinstance(m.operand, RBSparsifyingWeight)
def test_can_create_sparse_loss_and_scheduler():
model = BasicConvTestModel()
config = get_basic_sparsity_config()
_, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)
scheduler = compression_ctrl.scheduler
scheduler.epoch_step()
loss = compression_ctrl.loss
assert isinstance(loss, SparseLoss)
assert not loss.disabled
assert loss.target_sparsity_rate == approx(0.02)
assert loss.p == approx(0.05)
assert isinstance(scheduler, PolynomialSparsityScheduler)
assert scheduler.current_sparsity_level == approx(0.02)
assert scheduler.target_level == approx(0.5)
assert scheduler.target_epoch == 2
assert scheduler.freeze_epoch == 3
def test_sparse_algo_can_calc_sparsity_rate__for_basic_model():
model = BasicConvTestModel()
config = get_basic_sparsity_config()
_, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)
nncf_stats = compression_ctrl.statistics()
sparse_model_stats = nncf_stats.rb_sparsity.model_statistics
assert sparse_model_stats.sparsity_level == (
1 - (model.nz_weights_num + model.nz_bias_num) / (model.weights_num + model.bias_num)
)
assert sparse_model_stats.sparsity_level_for_layers == 1 - model.nz_weights_num / model.weights_num
assert len(compression_ctrl.sparsified_module_info) == 1
def test_sparse_algo_can_collect_sparse_layers():
model = TwoConvTestModel()
config = get_basic_sparsity_config()
_, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)
assert len(compression_ctrl.sparsified_module_info) == 2
def test_sparse_algo_can_calc_sparsity_rate__for_2_conv_model():
model = TwoConvTestModel()
config = get_basic_sparsity_config()
_, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)
nncf_stats = compression_ctrl.statistics()
sparse_model_stats = nncf_stats.rb_sparsity.model_statistics
assert pytest.approx(sparse_model_stats.sparsity_level) == (
1 - (model.nz_weights_num + model.nz_bias_num) / (model.weights_num + model.bias_num)
)
assert sparse_model_stats.sparsity_level_for_layers == 1 - model.nz_weights_num / model.weights_num
def test_scheduler_can_do_epoch_step__with_rb_algo():
config = NNCFConfig()
config['input_info'] = [{"sample_size": [1, 1, 32, 32]}]
config['compression'] = {
'algorithm': 'rb_sparsity',
'sparsity_init': 0.2,
"params": {
'schedule': 'polynomial',
'power': 1,
'sparsity_target_epoch': 2,
'sparsity_target': 0.6,
'sparsity_freeze_epoch': 3
}
}
_, compression_ctrl = create_compressed_model_and_algo_for_test(BasicConvTestModel(), config)
scheduler = compression_ctrl.scheduler
loss = compression_ctrl.loss
assert pytest.approx(loss.target_sparsity_rate) == 0.2
assert not loss.disabled
for module_info in compression_ctrl.sparsified_module_info:
assert not module_info.operand.frozen
scheduler.epoch_step()
assert pytest.approx(loss.target_sparsity_rate, abs=1e-3) == 0.2
assert pytest.approx(loss().item(), abs=1e-3) == 16
assert not loss.disabled
scheduler.epoch_step()
assert pytest.approx(loss.target_sparsity_rate, abs=1e-3) == 0.4
assert pytest.approx(loss().item(), abs=1e-3) == 64
assert not loss.disabled
scheduler.epoch_step()
assert pytest.approx(loss.target_sparsity_rate, abs=1e-3) == 0.6
assert pytest.approx(loss().item(), abs=1e-3) == 144
assert not loss.disabled
scheduler.epoch_step()
assert loss.disabled
assert loss.target_sparsity_rate == 0.6
assert loss() == 0
for module_info in compression_ctrl.sparsified_module_info:
assert module_info.operand.frozen
def test_create_rb_algo_with_per_layer_loss():
config = get_empty_config()
config['compression'] = {'algorithm': 'rb_sparsity', "params": {"sparsity_level_setting_mode": 'local'}}
_, compression_ctrl = create_compressed_model_and_algo_for_test(MockModel(), config)
# pylint: disable=protected-access
assert isinstance(compression_ctrl._loss, SparseLossForPerLayerSparsity)
def test_rb_sparsity__can_set_sparsity_level_for_module():
config = get_empty_config()
config['compression'] = {'algorithm': 'rb_sparsity', "params": {"sparsity_level_setting_mode": 'local'}}
_, compression_ctrl = create_compressed_model_and_algo_for_test(BasicConvTestModel(), config)
# pylint: disable=protected-access
assert list(compression_ctrl._loss.per_layer_target.values())[0] == 1
compression_ctrl.set_sparsity_level(0.7, compression_ctrl.sparsified_module_info[0])
assert list(compression_ctrl._loss.per_layer_target.values())[0] == pytest.approx(0.3)
def test_create_rb_algo_with_local_sparsity_mode():
config = get_empty_config()
config['compression'] = {'algorithm': 'rb_sparsity', "params": {"sparsity_level_setting_mode": 'local'}}
_, compression_ctrl = create_compressed_model_and_algo_for_test(MockModel(), config)
assert compression_ctrl.compression_stage() == CompressionStage.FULLY_COMPRESSED
def test_can_set_compression_rate_for_rb_sparsity_algo():
config = get_basic_sparsity_config()
_, compression_ctrl = create_compressed_model_and_algo_for_test(BasicConvTestModel(), config)
compression_ctrl.compression_rate = 0.65
assert pytest.approx(compression_ctrl.compression_rate, 1e-2) == 0.65
| [
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.ones_like"
] | 2.0 | MaximProshin/nncf | 2290d2f4cebcf6749e419dc76850e7bd8b7d8da1 |
2.0 | """
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils import model_zoo
__all__ = ['Inception3', 'inception_v3']
model_urls = {
# Inception v3 ported from TensorFlow
'inception_v3_google': 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth',
}
def inception_v3(pretrained=False, **kwargs):
r"""Inception v3 model architecture from
`"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.
.. note::
**Important**: In contrast to the other models the inception_v3 expects tensors with a size of
N x 3 x 299 x 299, so ensure your images are sized accordingly.
Args:
pretrained (bool): If True, returns a model pretrained on ImageNet
"""
if pretrained:
if 'transform_input' not in kwargs:
kwargs['transform_input'] = True
model = Inception3(**kwargs)
model.load_state_dict(model_zoo.load_url(model_urls['inception_v3_google']))
return model
return Inception3(**kwargs)
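# Illustrative usage sketch (assumes randomly initialized weights; in eval mode the aux logits are not returned):
#   model = inception_v3(pretrained=False)
#   model.eval()
#   with torch.no_grad():
#       logits = model(torch.randn(1, 3, 299, 299))  # -> shape (1, 1000)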
class Inception3(nn.Module):
def __init__(self, num_classes=1000, aux_logits=True, transform_input=False):
super().__init__()
self.aux_logits = aux_logits
self.transform_input = transform_input
self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
self.Mixed_5b = InceptionA(192, pool_features=32)
self.Mixed_5c = InceptionA(256, pool_features=64)
self.Mixed_5d = InceptionA(288, pool_features=64)
self.Mixed_6a = InceptionB(288)
self.Mixed_6b = InceptionC(768, channels_7x7=128)
self.Mixed_6c = InceptionC(768, channels_7x7=160)
self.Mixed_6d = InceptionC(768, channels_7x7=160)
self.Mixed_6e = InceptionC(768, channels_7x7=192)
if aux_logits:
self.AuxLogits = InceptionAux(768, num_classes)
self.Mixed_7a = InceptionD(768)
self.Mixed_7b = InceptionE(1280)
self.Mixed_7c = InceptionE(2048)
self.fc = nn.Linear(2048, num_classes)
for m in self.modules():
if isinstance(m, (nn.Conv2d, nn.Linear)):
from scipy import stats
stddev = m.stddev if hasattr(m, 'stddev') else 0.1
X = stats.truncnorm(-2, 2, scale=stddev)
values = torch.Tensor(X.rvs(m.weight.numel()))
values = values.view(m.weight.size())
m.weight.data.copy_(values)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
if self.transform_input:
x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
# N x 3 x 299 x 299
x = self.Conv2d_1a_3x3(x)
# N x 32 x 149 x 149
x = self.Conv2d_2a_3x3(x)
# N x 32 x 147 x 147
x = self.Conv2d_2b_3x3(x)
# N x 64 x 147 x 147
x = F.max_pool2d(x, kernel_size=3, stride=2)
# N x 64 x 73 x 73
x = self.Conv2d_3b_1x1(x)
# N x 80 x 73 x 73
x = self.Conv2d_4a_3x3(x)
# N x 192 x 71 x 71
x = F.max_pool2d(x, kernel_size=3, stride=2)
# N x 192 x 35 x 35
x = self.Mixed_5b(x)
# N x 256 x 35 x 35
x = self.Mixed_5c(x)
# N x 288 x 35 x 35
x = self.Mixed_5d(x)
# N x 288 x 35 x 35
x = self.Mixed_6a(x)
# N x 768 x 17 x 17
x = self.Mixed_6b(x)
# N x 768 x 17 x 17
x = self.Mixed_6c(x)
# N x 768 x 17 x 17
x = self.Mixed_6d(x)
# N x 768 x 17 x 17
x = self.Mixed_6e(x)
# N x 768 x 17 x 17
if self.training and self.aux_logits:
aux = self.AuxLogits(x)
# N x 768 x 17 x 17
x = self.Mixed_7a(x)
# N x 1280 x 8 x 8
x = self.Mixed_7b(x)
# N x 2048 x 8 x 8
x = self.Mixed_7c(x)
# N x 2048 x 8 x 8
# Adaptive average pooling
x = F.adaptive_avg_pool2d(x, (1, 1))
# N x 2048 x 1 x 1
x = F.dropout(x, training=self.training)
# N x 2048 x 1 x 1
x = x.view(x.size(0), -1)
# N x 2048
x = self.fc(x)
# N x 1000 (num_classes)
if self.training and self.aux_logits:
return x, aux
return x
class InceptionA(nn.Module):
def __init__(self, in_channels, pool_features):
super().__init__()
self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1)
self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1)
self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)
self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)
self.branch_pool = BasicConv2d(in_channels, pool_features, kernel_size=1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class InceptionB(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.branch3x3 = BasicConv2d(in_channels, 384, kernel_size=3, stride=2)
self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=2)
def forward(self, x):
branch3x3 = self.branch3x3(x)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
outputs = [branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class InceptionC(nn.Module):
def __init__(self, in_channels, channels_7x7):
super().__init__()
self.branch1x1 = BasicConv2d(in_channels, 192, kernel_size=1)
c7 = channels_7x7
self.branch7x7_1 = BasicConv2d(in_channels, c7, kernel_size=1)
self.branch7x7_2 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7_3 = BasicConv2d(c7, 192, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_1 = BasicConv2d(in_channels, c7, kernel_size=1)
self.branch7x7dbl_2 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_3 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7dbl_4 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_5 = BasicConv2d(c7, 192, kernel_size=(1, 7), padding=(0, 3))
self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
return torch.cat(outputs, 1)
class InceptionD(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.branch3x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
self.branch3x3_2 = BasicConv2d(192, 320, kernel_size=3, stride=2)
self.branch7x7x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
self.branch7x7x3_2 = BasicConv2d(192, 192, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7x3_3 = BasicConv2d(192, 192, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7x3_4 = BasicConv2d(192, 192, kernel_size=3, stride=2)
def forward(self, x):
branch3x3 = self.branch3x3_1(x)
branch3x3 = self.branch3x3_2(branch3x3)
branch7x7x3 = self.branch7x7x3_1(x)
branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
branch7x7x3 = self.branch7x7x3_4(branch7x7x3)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
outputs = [branch3x3, branch7x7x3, branch_pool]
return torch.cat(outputs, 1)
class InceptionE(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.branch1x1 = BasicConv2d(in_channels, 320, kernel_size=1)
self.branch3x3_1 = BasicConv2d(in_channels, 384, kernel_size=1)
self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))
self.branch3x3dbl_1 = BasicConv2d(in_channels, 448, kernel_size=1)
self.branch3x3dbl_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)
self.branch3x3dbl_3a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
self.branch3x3dbl_3b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))
self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [
self.branch3x3_2a(branch3x3),
self.branch3x3_2b(branch3x3),
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class InceptionAux(nn.Module):
def __init__(self, in_channels, num_classes):
super().__init__()
self.conv0 = BasicConv2d(in_channels, 128, kernel_size=1)
self.conv1 = BasicConv2d(128, 768, kernel_size=5)
self.conv1.stddev = 0.01
self.fc = nn.Linear(768, num_classes)
self.fc.stddev = 0.001
def forward(self, x):
# N x 768 x 17 x 17
x = F.avg_pool2d(x, kernel_size=5, stride=3)
# N x 768 x 5 x 5
x = self.conv0(x)
# N x 128 x 5 x 5
x = self.conv1(x)
# N x 768 x 1 x 1
# Adaptive average pooling
x = F.adaptive_avg_pool2d(x, (1, 1))
# N x 768 x 1 x 1
x = x.view(x.size(0), -1)
# N x 768
x = self.fc(x)
# N x 1000
return x
class BasicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return F.relu(x, inplace=True)
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.functional.avg_pool2d",
"torch.nn.init.constant_",
"torch.nn.functional.dropout",
"torch.nn.BatchNorm2d",
"torch.utils.model_zoo.load_url",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.unsqueeze",
"torch.nn.Conv2d",
"torch.nn.functional.relu",
"torch.nn.functional.max_pool2d"
] | 2.0 | MaximProshin/nncf | 2290d2f4cebcf6749e419dc76850e7bd8b7d8da1 |
2.0 | """
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import queue
from typing import List, Callable, Optional, Tuple, Dict
import numpy as np
import torch
from torch import nn
from copy import deepcopy, copy
from functools import partial
from torch import optim
from nncf.config.config import NNCFConfig
from nncf.torch.utils import get_filters_num
class EvolutionOptimizer:
"""
Class for optimizing ranking coefficients for the model with an evolution algorithm (agent).
The evolution algorithm works as follows:
1. For the first population_size steps it generates and returns random actions (generated with some prior
information). For every action it gets a reward (some measure of whether this action is good or not). During these
generations all action-reward pairs are saved to the population.
2. During the remaining (num_generations - population_size) generations it predicts an action by the following scheme:
- Choose num_samples random actions from the population
- Choose the best of the sampled actions and mutate it
- Return the resulting action
During these generations the action-reward pairs are saved by overwriting the oldest entries in the population.
After all generations, the best action (the one with the best reward value) is returned.
"""
def __init__(self, initial_filter_norms: Dict, hparams: Dict, random_seed: int):
"""
:param initial_filter_norms: Initial filter norms needed to get the std and var of filter norms in each layer.
:param hparams: hyperparams of the Optimizer; can contain population_size, num_generations, num_samples,
mutate_percent, sigma_scale
:param random_seed: random seed that should be set during action generation for reproducibility
"""
self.random_seed = random_seed
# Optimizer hyper-params
self.population_size = hparams.get('population_size', 64)
self.num_generations = hparams.get('num_generations', 400)
self.num_samples = hparams.get('num_samples', 16)
self.mutate_percent = hparams.get('mutate_percent', 0.1)
self.scale_sigma = hparams.get('sigma_scale', 1)
self.max_reward = -np.inf
self.mean_rewards = []
self.indexes_queue = queue.Queue(self.population_size)
self.oldest_index = None
self.population_rewards = np.zeros(self.population_size)
self.population = [None for i in range(self.population_size)]
self.best_action = None
self.last_action = None
self.num_layers = len(initial_filter_norms)
self.layer_keys = np.array(list(initial_filter_norms.keys()))
self.initial_norms_stats = {}
for key in self.layer_keys:
layer_norms = initial_filter_norms[key].cpu().detach().numpy()
self.initial_norms_stats[key] = {'mean': np.mean(layer_norms), 'std': np.std(layer_norms)}
self.cur_state = None
self.cur_reward = None
self.cur_episode = None
self.cur_info = None
def get_best_action(self):
return self.best_action
def _save_episode_info(self, reward: float) -> None:
"""
Saving episode information: action-reward pairs and updating best_action/reward variables if needed.
:param reward: reward for the current episode
"""
# Update best action and reward if needed
if reward > self.max_reward:
self.best_action = self.last_action
self.max_reward = reward
if self.cur_episode < self.population_size:
self.indexes_queue.put(self.cur_episode)
self.population[self.cur_episode] = self.last_action
self.population_rewards[self.cur_episode] = reward
else:
self.indexes_queue.put(self.oldest_index)
self.population[self.oldest_index] = self.last_action
self.population_rewards[self.oldest_index] = reward
def _predict_action(self) -> Dict:
"""
Predict action for the current episode. Works as described above.
:return: new generated action
"""
np.random.seed(self.random_seed)
episode_num = self.cur_episode
action = {}
if episode_num < self.population_size - 1:
# During first population_size generations, generates random actions
for key in self.layer_keys:
scale = np.exp(np.random.normal(0, self.scale_sigma))
shift = np.random.normal(0, self.initial_norms_stats[key]['std'])
action[key] = (scale, shift)
elif episode_num == self.population_size - 1:
# Adding identity action to population
for key in self.layer_keys:
action[key] = (1, 0)
else:
step_size = 1 - (float(episode_num) / (self.num_generations * 1.25)) # Rename this
self.mean_rewards.append(np.mean(self.population_rewards))
# 1. Sampling num_samples actions from population and choosing the best one
sampled_idxs = np.random.choice(len(self.population_rewards), self.num_samples)
sampled_rewards = self.population_rewards[sampled_idxs]
best_action = self.population[sampled_idxs[np.argmax(sampled_rewards)]]
# 2. Mutate best action
mutate_num = int(self.mutate_percent * self.num_layers)
mutate_idxs = np.random.choice(self.layer_keys, mutate_num)
for key in self.layer_keys:
scale, shift = 1, 0
if key in mutate_idxs:
scale = np.exp(np.random.normal(0, self.scale_sigma * step_size))
shift = np.random.normal(0, self.initial_norms_stats[key]['std'])
action[key] = (scale * best_action[key][0], shift + best_action[key][1])
self.oldest_index = self.indexes_queue.get()
return action
def ask(self, episode_num: int) -> Dict:
"""
Predict and return an action for the given episode number, using the episode information
last passed via `tell` (state, reward and info).
:return: predicted action
"""
self.cur_episode = episode_num
action = self._predict_action()
self.last_action = action
return action
def tell(self, state: torch.Tensor, reward: float, end_of_episode: bool, episode_num: int, info: List) -> None:
"""
Receive info about the episode step and save it at the end of every episode
"""
# Saving state, reward and info from the current step
self.cur_state = state
self.cur_reward = reward
self.cur_episode = episode_num
self.cur_info = info
if end_of_episode:
self._save_episode_info(reward)
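# Illustrative ask/tell loop sketch (assumes an env with the LeGREvolutionEnv interface defined below):
#   agent = EvolutionOptimizer(initial_filter_norms, hparams={}, random_seed=42)
#   for episode in range(agent.num_generations):
#       state, info = env.reset()
#       action = agent.ask(episode)
#       state, reward, done, info = env.step(action)
#       agent.tell(state, reward, done, episode, info)
#   best_coeffs = agent.get_best_action()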
class LeGREvolutionEnv:
"""
Environment class for optimizing the accuracy of the pruned model with different ranking coefficients.
During 'step' the environment applies the received action, calculates the current reward and useful info and returns them.
During 'reset' it resets the Pruner and the environment params changed during the iteration.
"""
def __init__(self, filter_pruner: 'LeGRPruner', model: nn.Module, train_loader: torch.utils.data.DataLoader,
val_loader: torch.utils.data.DataLoader, train_fn: Callable,
train_optimizer: Optional[torch.optim.Optimizer], val_fn: Callable, config: NNCFConfig,
train_steps: int, pruning_max: float):
"""
:param filter_pruner: LeGRPruner, should have an interface for pruning model and resetting pruner.
:param model: target model for which ranking coefficients are trained
:param train_loader: data loader for training the model
:param val_loader: data loader for validating the model
:param train_fn: callable for training the model
:param train_optimizer: optional, optimizer for training the model
:param val_fn: callable for validation of the model, returns acc, loss
:param config: NNCF config for model compression
:param train_steps: number of training steps to evaluate action (ranking coefficients set)
:param pruning_max: pruning level for the model
"""
self.loss_as_reward = True
self.prune_target = pruning_max
self.steps = train_steps
# Train/test params
self.train_loader, self.val_loader = train_loader, val_loader
self.train_fn = train_fn
self.train_optimizer = train_optimizer
if self.train_optimizer is None:
# Default optimizer when the user did not provide a custom optimizer
self.train_optimizer = partial(optim.SGD, lr=1e-2, momentum=0.9, weight_decay=5e-4, nesterov=True)
self.validate_fn = val_fn
self.config = config
self.filter_pruner = filter_pruner
self.model = model
def reset(self) -> Tuple[torch.Tensor, List]:
"""
Resetting pruner params (all changes in the model made by training) and environment params changed during
the step.
:return: tuple of state and info: full flops in the model and the number of flops remaining in the model
"""
self.filter_pruner.reset()
self.model.eval()
self.full_flops = self.filter_pruner.get_full_flops_number_in_model()
self.rest = self.full_flops
self.last_act = None
return torch.zeros(1), [self.full_flops, self.rest]
def _train_steps(self, steps: int) -> None:
"""
Training model with train_fn for received steps number.
:param steps: number of model training steps
"""
optimizer = self.train_optimizer(self.model.parameters())
self.train_fn(self.train_loader, self.model, optimizer, self.filter_pruner, steps)
def _get_reward(self) -> Tuple[float, float, float]:
"""
Validate the model with validate_fn and return the result in the format (acc, loss)
"""
return self.validate_fn(self.model, self.val_loader)
def step(self, action: Dict) -> Tuple[torch.Tensor, float, bool, List]:
"""
1. Getting action (ranking coefficients)
2. Making step with this action - prune model with ranking coefficients
3. Getting a reward for this action: train the model for some steps and validate it
4. Returning new state (for current settings state is default and not used), reward,
whether the episode is over or not (for current settings an episode is over after every step) and additional
info (full flops in model and flops left in the model)
:param action: ranking coefficients
"""
self.last_act = action
new_state = torch.zeros(1)
reduced = self.filter_pruner.prune(self.prune_target, action)
self._train_steps(self.steps)
acc, loss = self._get_reward()
if self.loss_as_reward:
reward = -loss
else:
reward = acc
done = 1
info = [self.full_flops, reduced]
return new_state, reward, done, info
class LeGRPruner:
"""
Wrapper for the pruning controller with a simplified interface, allowing pruning the model with received ranking
coefficients and resetting all changes in the model made by the environment.
"""
def __init__(self, filter_pruner_ctrl: 'FilterPruningController', model: nn.Module):
self.filter_pruner = filter_pruner_ctrl
self.scheduler = copy(self.filter_pruner.scheduler)
self.model = model
self.model_params_copy = None
self._save_model_weights()
self.init_filter_norms = {node.node_name: self.filter_pruner.filter_importance(node.module.weight)
for node in self.filter_pruner.pruned_module_groups_info.get_all_nodes()}
def loss(self) -> float:
"""
:return: loss for pruning algorithm
"""
return self.filter_pruner.loss()
def _save_model_weights(self) -> None:
"""
Saving copy of all model parameters
"""
self.model_params_copy = deepcopy(self.model.state_dict())
def _restore_model_weights(self):
"""
Restoring saved original model parameters to discard any changes in model weights.
"""
self.model.load_state_dict(self.model_params_copy)
def _reset_masks(self) -> None:
"""
Resetting masks for all pruned nodes
"""
for minfo in self.filter_pruner.pruned_module_groups_info.get_all_nodes():
new_mask = torch.ones(get_filters_num(minfo.module)).to(
minfo.module.weight.device)
self.filter_pruner.set_mask(minfo, new_mask)
def reset(self) -> None:
"""
Resetting all changes made in the model (and model masks during environment step) by restoring the original
model weights, resetting masks.
"""
self._restore_model_weights()
self._reset_masks()
self.scheduler = copy(self.filter_pruner.scheduler)
def get_full_flops_number_in_model(self) -> float:
return self.filter_pruner.full_flops
def prune(self, flops_pruning_target: float, ranking_coeffs: Dict) -> None:
"""
Prune target model to flops pruning target with ranking_coeffs.
:param flops_pruning_target: pruning target for the model pruning
:param ranking_coeffs: ranking coefficients, that will be used for layers ranking during pruning
"""
self.filter_pruner.ranking_coeffs = ranking_coeffs
self.filter_pruner.set_pruning_level(flops_pruning_target)
| [
"torch.zeros"
] | 2.0 | MaximProshin/nncf | 2290d2f4cebcf6749e419dc76850e7bd8b7d8da1 |
1.0 | import os
import logging
from dotenv import load_dotenv
load_dotenv(verbose=True)
logger = logging.getLogger(__name__)
# The Root Directory of the project
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
POSENET_PATH = os.path.join(ROOT_DIR, 'data','raw','posenet.pth')
POSTURENET_PATH = os.path.join(ROOT_DIR, 'data','raw','posturenet.pth')
import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
| [
"torch.cuda.is_available"
] | 1.0.1 | Sushil-Thapa/rectif.ai | b308f613402097dca9734806a8c27ba3eef6a358 |
1.5 | import os
print(os.getcwd())
# os.path.dirname(os.path.abspath("__file__"))
path = '/Volumes/Extreme SSD/MLWork/DocAI/PICK-pytorch'
os.chdir(path)
# os.chdir('../')
# path = '/Users/neerajyadav/Documents/pycv/PICK-pytorch/'
"""Convert files of a selected directory in jpg format"""
import converter
# !pip install easyocr
import easyocr
#download the model
reader = easyocr.Reader(['en'], gpu = True)
# show an image
import PIL
from PIL import ImageDraw
from PIL import Image
import cv2
import pandas as pd
from pandas import DataFrame
import json
import glob
# import xlrd
import csv
import argparse
import torch
from tqdm import tqdm
from pathlib import Path
from torch.utils.data.dataloader import DataLoader
from allennlp.data.dataset_readers.dataset_utils.span_utils import bio_tags_to_spans
from parse_config import ConfigParser
import model.pick as pick_arch_module
from data_utils.pick_dataset import PICKDataset
from data_utils.pick_dataset import BatchCollateFn
from utils.util import iob_index_to_str, text_index_to_str
import shutil
### convert image into transcript file
"""Select jpg files and convert into transcript files"""
filenames = glob.glob("../TestImage/*.jpg")
filenamesj = glob.glob("../TestImage/*.jpeg")
filenames = filenames + filenamesj
filenames.sort()
def draw_boxes(image, bounds, color='green', width=1):
draw = ImageDraw.Draw(image)
for bound in bounds:
p0, p1, p2, p3 = bound[0]
        draw.line([*p0, *p1, *p2, *p3, *p0], fill=color, width=width)
# if bound[1] == "ToTAL" or bound[1] =="TOTAL" or bound[1]=="TOTAL" or bound[1] =="Total Payable;" or bound[1] =="Total Payable:" or bound[1] =="Total Payable:" or bound[1]=='Total' or bound[1]=='TOTAL' or bound[1]=="Totz' Ingi, 0f GST" or bound[1]=="Total Sales (Inclusive of GST)" or bound[1]=="Net Total (MYR)":
# draw.line([*p0, *p1, *p2, *p3, *p0], fill=color, width=width)
# print(bound[0])
# image.save("temp.jpg")
return image
# draw_boxes(im, bounds)
def concatenate_list_data(items):
    result = ''
    for element in items:
        result = result + str(element)
    return result
for s in filenames:
# s = "Invoice0.jpg"
filen = s.split(".")[0]
print(filen)
im = PIL.Image.open(s).convert('RGB')
# Doing OCR. Get bounding boxes.
bounds = reader.readtext(s)
im = PIL.Image.open(s).convert('RGB')
df = pd.DataFrame()
CoordinatesValue = []
for i in bounds:
Coordinates =[]
CoordinatesValue=[]
temp_df = pd.DataFrame()
Coordinates.append(concatenate_list_data(i[0]).replace("][",",").replace("[","").replace("]","").replace(" ",""))
# print(i[1])
CoordinatesValue.append(i[1])
temp_df = DataFrame(zip(Coordinates,CoordinatesValue),columns = ['Coordinates', 'Value'])
# print(temp_df)
df = df.append(temp_df)
# print(item[0])
combine_lambda = lambda x: '{},{}'.format(x.Coordinates, x.Value)
df['Result'] = df.apply(combine_lambda, axis = 1)
dfnew= df['Result']
dfnew = dfnew[0].str.split(',', expand=True)
dfnew.insert(0,'name_of_column','')
dfnew['name_of_column'] = 1
# dfnew.to_csv(str(filen)+".tsv", sep = ',',index=False ,header=False )
dfnew.to_csv(str(filen)+".tsv",sep = ',',index=False,header=False, quotechar='',escapechar='\\',quoting=csv.QUOTE_NONE, )
### copy file from source folder to destination folder ###
for f in filenames:
shutil.copy(f, 'test_img/')
filetsv = glob.glob("/Volumes/Extreme SSD/MLWork/DocAI/TestImage/*.tsv")
for f in filetsv:
shutil.copy(f, 'test_boxes_and_transcripts/')
### inference code #####
gpu = -1  # set to a CUDA device index to run inference on GPU
device = torch.device(f'cuda:{gpu}' if gpu != -1 else 'cpu')
savedCheckpoint = 'saved/models/PICK_Default/test_999/model_best.pth'
checkpoint = torch.load(savedCheckpoint, map_location=device)
config = checkpoint['config']
state_dict = checkpoint['state_dict']
monitor_best = checkpoint['monitor_best']
print('Loading checkpoint: {} \nwith saved mEF {:.4f} ...'.format(savedCheckpoint, monitor_best))
# prepare model for testing
pick_model = config.init_obj('model_arch', pick_arch_module)
pick_model = pick_model.to(device)
pick_model.load_state_dict(state_dict)
pick_model.eval()
## PICK reads the OCR transcript files and images from the folders below
out_img_path = "test_img/"
out_box_path = "test_boxes_and_transcripts/"
# setup dataset and data_loader instances
batch_size_val=1
test_dataset = PICKDataset(boxes_and_transcripts_folder=out_box_path,
images_folder=out_img_path,
resized_image_size=(480, 960),
ignore_error=False,
training=False)
test_data_loader = DataLoader(test_dataset, batch_size=batch_size_val, shuffle=False,
num_workers=0, collate_fn=BatchCollateFn(training=False)) ## have changed the number of workers to zero
# setup output path
output_folder = 'output'
output_path = Path(output_folder)
output_path.mkdir(parents=True, exist_ok=True)
with torch.no_grad():
for step_idx, input_data_item in enumerate(test_data_loader):
for key, input_value in input_data_item.items():
if input_value is not None and isinstance(input_value, torch.Tensor):
input_data_item[key] = input_value.to(device)
# For easier debug.
image_names = input_data_item["filenames"]
# print('image names')
# print(image_names)
output = pick_model(**input_data_item)
# print(output)
logits = output['logits'] # (B, N*T, out_dim)
# print(logits)
new_mask = output['new_mask']
# print(new_mask)
image_indexs = input_data_item['image_indexs'] # (B,)
text_segments = input_data_item['text_segments'] # (B, num_boxes, T)
mask = input_data_item['mask']
# List[(List[int], torch.Tensor)]
best_paths = pick_model.decoder.crf_layer.viterbi_tags(logits, mask=new_mask, logits_batch_first=True)
# print('best_paths')
# print(best_paths)
predicted_tags = []
for path, score in best_paths:
# print(path,score)
predicted_tags.append(path)
# convert iob index to iob string
decoded_tags_list = iob_index_to_str(predicted_tags)
# union text as a sequence and convert index to string
decoded_texts_list = text_index_to_str(text_segments, mask)
# print(decoded_texts_list)
for decoded_tags, decoded_texts, image_index in zip(decoded_tags_list, decoded_texts_list, image_indexs):
# List[ Tuple[str, Tuple[int, int]] ]
spans = bio_tags_to_spans(decoded_tags, [])
spans = sorted(spans, key=lambda x: x[1][0])
entities = [] # exists one to many case
# print(spans)
for entity_name, range_tuple in spans:
entity = dict(entity_name=entity_name,
text=''.join(decoded_texts[range_tuple[0]:range_tuple[1] + 1]))
entities.append(entity)
result_file = output_path.joinpath(Path(test_dataset.files_list[image_index]).stem + '.txt')
# print(entities)
with result_file.open(mode='w') as f:
for item in entities:
f.write('{}\t{}\n'.format(item['entity_name'], item['text']))
print(item['entity_name'],item['text'])
# dir = 'path/to/dir'
try:
dir = out_img_path
for f in os.listdir(dir):
os.remove(os.path.join(dir, f))
except:
pass
try:
dir = out_box_path
for f in os.listdir(dir):
os.remove(os.path.join(dir, f))
except:
pass
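# Each file written to output/ now holds one "<entity_name>\t<text>" line per extracted
# entity (matching the format string used above), and the input folders have been cleared.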
| [
"torch.device",
"torch.no_grad",
"torch.load"
] | 1.5.1 | NeerajAI/PICK-pytorch | 61deb7c1e11df30c8f03726c061a2866234ac770 |
1.5 | import warnings
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import load_state_dict_from_url
__all__ = ['GoogLeNet', 'googlenet']
model_urls = {
# GoogLeNet ported from TensorFlow
'googlenet': 'https://download.pytorch.org/models/googlenet-1378be20.pth',
}
_GoogLeNetOutputs = namedtuple('GoogLeNetOutputs', ['logits', 'aux_logits2', 'aux_logits1'])
def googlenet(pretrained=False, progress=True, **kwargs):
r"""GoogLeNet (Inception v1) model architecture from
`"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
aux_logits (bool): If True, adds two auxiliary branches that can improve training.
Default: *False* when pretrained is True otherwise *True*
transform_input (bool): If True, preprocesses the input according to the method with which it
was trained on ImageNet. Default: *False*
"""
if pretrained:
if 'transform_input' not in kwargs:
kwargs['transform_input'] = True
if 'aux_logits' not in kwargs:
kwargs['aux_logits'] = False
if kwargs['aux_logits']:
warnings.warn('auxiliary heads in the pretrained googlenet model are NOT pretrained, '
'so make sure to train them')
original_aux_logits = kwargs['aux_logits']
kwargs['aux_logits'] = True
kwargs['init_weights'] = False
model = GoogLeNet(**kwargs)
state_dict = load_state_dict_from_url(model_urls['googlenet'],
progress=progress)
model.load_state_dict(state_dict)
if not original_aux_logits:
model.aux_logits = False
del model.aux1, model.aux2
return model
return GoogLeNet(**kwargs)
class GoogLeNet(nn.Module):
def __init__(self, num_classes=1000, aux_logits=True, transform_input=False, init_weights=True):
super(GoogLeNet, self).__init__()
self.aux_logits = aux_logits
self.transform_input = transform_input
self.conv1 = BasicConv2d(3, 64, kernel_size=7, stride=2, padding=3)
self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.conv2 = BasicConv2d(64, 64, kernel_size=1)
self.conv3 = BasicConv2d(64, 192, kernel_size=3, padding=1)
self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.inception3a = Inception(192, 64, 96, 128, 16, 32, 32)
self.inception3b = Inception(256, 128, 128, 192, 32, 96, 64)
self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.inception4a = Inception(480, 192, 96, 208, 16, 48, 64)
self.inception4b = Inception(512, 160, 112, 224, 24, 64, 64)
self.inception4c = Inception(512, 128, 128, 256, 24, 64, 64)
self.inception4d = Inception(512, 112, 144, 288, 32, 64, 64)
self.inception4e = Inception(528, 256, 160, 320, 32, 128, 128)
self.maxpool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.inception5a = Inception(832, 256, 160, 320, 32, 128, 128)
self.inception5b = Inception(832, 384, 192, 384, 48, 128, 128)
if aux_logits:
self.aux1 = InceptionAux(512, num_classes)
self.aux2 = InceptionAux(528, num_classes)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.dropout = nn.Dropout(0.2)
self.fc = nn.Linear(1024, num_classes)
if init_weights:
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
import scipy.stats as stats
X = stats.truncnorm(-2, 2, scale=0.01)
values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)
values = values.view(m.weight.size())
with torch.no_grad():
m.weight.copy_(values)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
if self.transform_input:
x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
# N x 3 x 224 x 224
x = self.conv1(x)
# N x 64 x 112 x 112
x = self.maxpool1(x)
# N x 64 x 56 x 56
x = self.conv2(x)
# N x 64 x 56 x 56
x = self.conv3(x)
# N x 192 x 56 x 56
x = self.maxpool2(x)
# N x 192 x 28 x 28
x = self.inception3a(x)
# N x 256 x 28 x 28
x = self.inception3b(x)
# N x 480 x 28 x 28
x = self.maxpool3(x)
# N x 480 x 14 x 14
x = self.inception4a(x)
# N x 512 x 14 x 14
if self.training and self.aux_logits:
aux1 = self.aux1(x)
x = self.inception4b(x)
# N x 512 x 14 x 14
x = self.inception4c(x)
# N x 512 x 14 x 14
x = self.inception4d(x)
# N x 528 x 14 x 14
if self.training and self.aux_logits:
aux2 = self.aux2(x)
x = self.inception4e(x)
# N x 832 x 14 x 14
x = self.maxpool4(x)
# N x 832 x 7 x 7
x = self.inception5a(x)
# N x 832 x 7 x 7
x = self.inception5b(x)
# N x 1024 x 7 x 7
x = self.avgpool(x)
# N x 1024 x 1 x 1
x = torch.flatten(x, 1)
# N x 1024
x = self.dropout(x)
x = self.fc(x)
# N x 1000 (num_classes)
if self.training and self.aux_logits:
return _GoogLeNetOutputs(x, aux2, aux1)
return x
class Inception(nn.Module):
def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
super(Inception, self).__init__()
self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1)
self.branch2 = nn.Sequential(
BasicConv2d(in_channels, ch3x3red, kernel_size=1),
BasicConv2d(ch3x3red, ch3x3, kernel_size=3, padding=1)
)
self.branch3 = nn.Sequential(
BasicConv2d(in_channels, ch5x5red, kernel_size=1),
BasicConv2d(ch5x5red, ch5x5, kernel_size=3, padding=1)
)
self.branch4 = nn.Sequential(
nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=True),
BasicConv2d(in_channels, pool_proj, kernel_size=1)
)
def forward(self, x):
branch1 = self.branch1(x)
branch2 = self.branch2(x)
branch3 = self.branch3(x)
branch4 = self.branch4(x)
outputs = [branch1, branch2, branch3, branch4]
return torch.cat(outputs, 1)
class InceptionAux(nn.Module):
def __init__(self, in_channels, num_classes):
super(InceptionAux, self).__init__()
self.conv = BasicConv2d(in_channels, 128, kernel_size=1)
self.fc1 = nn.Linear(2048, 1024)
self.fc2 = nn.Linear(1024, num_classes)
def forward(self, x):
# aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14
x = F.adaptive_avg_pool2d(x, (4, 4))
# aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4
x = self.conv(x)
# N x 128 x 4 x 4
x = torch.flatten(x, 1)
# N x 2048
x = F.relu(self.fc1(x), inplace=True)
# N x 1024
x = F.dropout(x, 0.7, training=self.training)
# N x 1024
x = self.fc2(x)
# N x 1000 (num_classes)
return x
class BasicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return F.relu(x, inplace=True)
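# Illustrative usage (not part of the original torchvision file): build the model without
# pretrained weights and run a forward pass on a dummy batch.
if __name__ == "__main__":
    net = GoogLeNet(num_classes=1000, aux_logits=False)
    net.eval()
    with torch.no_grad():
        out = net(torch.randn(1, 3, 224, 224))
    print(out.shape)  # torch.Size([1, 1000])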
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.cat",
"torch.flatten",
"torch.nn.MaxPool2d",
"torch.nn.init.constant_",
"torch.nn.functional.dropout",
"torch.nn.BatchNorm2d",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.no_grad",
"torch.nn.functional.relu",
"torch.unsqueeze",
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d"
] | 1.5.1 | GreenCUBIC/GasBotty | 158f5991201c80bf4cbbbb9deabc9954ff19bbb1 |
1.7 | import sys
import argparse
import torch
from ModelLoader import load_model
def main():
parser = argparse.ArgumentParser(prog='gentool')
parser.add_argument("--training", action='store_true', help="Whether or not to start the model in training mode.")
parser.add_argument("--model", type=str, help="The model to loader.")
parser.add_argument("--iterations", type=int, default=10000, help="Number of iterations to train for.")
parser.add_argument("--itr_offset", type=int, default=0, help="Iteration count offset.")
parser.add_argument("--no_cuda", action='store_true', help="Disables loading to GPU.")
opt = parser.parse_args()
cuda = not opt.no_cuda
if opt.model is None:
print('Model not defined!')
sys.exit(1)
if cuda:
torch.set_default_tensor_type(torch.cuda.FloatTensor)
model = load_model(opt.model, cuda)
if opt.training:
model.train()
else:
model.eval()
if opt.training:
model.fit(opt.iterations, offset=opt.itr_offset)
if __name__ == '__main__':
main()
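# Example invocation (illustrative; assumes this file is saved as gentool.py and that
# "MyModel" is a model name known to ModelLoader.load_model):
#   python gentool.py --model MyModel --training --iterations 5000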
| [
"torch.set_default_tensor_type"
] | 1.7.1 | TheDudeFromCI/generative-toolkit | 4a0aed629b72e6eea807dadc460afa90dd330f7f |
1.4 | import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.utils import model_zoo
from torchvision import models
from utils.spp_layer import spatial_pyramid_pool
class CIS_VGGBN(nn.Module):
def __init__(self, backbone='vgg16_bn', pretrained=True, freeze_backbone=False):
super(CIS_VGGBN, self).__init__()
self.output_num = [4,2,1]
vgg = models.vgg16_bn(pretrained)
features = list(vgg.features.children())
self.dec1 = nn.Sequential(*features[:7]) # 160
# self.dec2 = nn.Sequential(*features[5:10]) # 80
# self.dec3 = nn.Sequential(*features[10:17]) # 40
# self.dec4 = nn.Sequential(*features[17:24]) # 20
# self.dec5 = nn.Sequential(*features[24:]) # 10
self.cis1 = nn.Sequential(
nn.Conv2d(128, 64, 3, padding=1,stride=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True)
)
self.cis2 = nn.Sequential(
nn.Linear(1344, 512),
nn.ReLU(),
nn.Linear(512, 256),
nn.ReLU(),
nn.Linear(256, 128),
nn.ReLU(),
nn.Linear(128, 2)
)
        self.dp = nn.Softmax(dim=1)  # class probabilities over the 2 output logits
def forward(self, x, y,bt):
x_f1 = self.dec1(x)
y_f1 = self.dec1(y)
enc = torch.cat((x_f1, y_f1), 1)
clc1 = self.cis1(enc)
spp = spatial_pyramid_pool(clc1,bt,[clc1.size(2),clc1.size(3)], self.output_num)
clc2 = self.cis2(spp)
dp = self.dp(clc2)
return x_f1, y_f1, dp
# def initialize(self):
# self.load_state_dict(torch.load('../res/resnet50-19c8e357.pth'), strict=False)
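# Illustrative usage (not part of the original file; spatial_pyramid_pool is the helper
# imported above from utils.spp_layer): run the change classifier on a random image pair.
# pretrained=False avoids downloading the VGG-16-BN weights.
if __name__ == "__main__":
    net = CIS_VGGBN(pretrained=False)
    a = torch.randn(2, 3, 160, 160)
    b = torch.randn(2, 3, 160, 160)
    x_feat, y_feat, prob = net(a, b, bt=2)
    print(x_feat.shape, y_feat.shape, prob.shape)  # 64-channel features per image and a (2, 2) distribution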
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.Softmax",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d"
] | 1.4.0 | rfww/EfficientChangeDetection | 42d466c56ed262980c27fd6cde6ffe65314e638f |
1.4 | import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import os
from torch.utils import model_zoo
from torchvision import models
class SegNetEnc(nn.Module):
def __init__(self, in_channels, out_channels, scale, num_layers):
super().__init__()
layers = [
nn.Upsample(scale_factor=scale, mode='bilinear'),
nn.Conv2d(in_channels, in_channels // 2, 3, padding=1),
nn.BatchNorm2d(in_channels // 2),
nn.ReLU(inplace=True),
]
layers += [
nn.Conv2d(in_channels // 2, in_channels // 2, 3, padding=1),
nn.BatchNorm2d(in_channels // 2),
nn.ReLU(inplace=True),
] * num_layers
layers += [
nn.Conv2d(in_channels // 2, out_channels, 3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
]
self.encode = nn.Sequential(*layers)
def forward(self, x):
return self.encode(x)
class ARPPNET(nn.Module):
def __init__(self, num_classes):
super().__init__()
my_model = models.vgg16(pretrained=True)
input_1_new = nn.Conv2d(6, 64, (3, 3), 1, 1)
my_model.features[0] = input_1_new
decoders = list(my_model.features.children())
self.dec1 = nn.Sequential(*decoders[:5])
self.dec2 = nn.Sequential(*decoders[5:10])
self.dec3 = nn.Sequential(*decoders[10:17])
self.dec4 = nn.Sequential(*decoders[17:24])
# self.dec5 = nn.Sequential(*decoders[24:])
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.requires_grad = True
#block1
self.AR_dec1_1 = nn.Conv2d(64, 16, 3, padding=1, dilation=1)
self.AR_dec1_3 = nn.Conv2d(64, 16, 3, padding=3, dilation=3)
self.AR_dec1_5 = nn.Conv2d(64, 16, 3, padding=5, dilation=5)
self.AR_dec1_7 = nn.Conv2d(64, 16, 3, padding=7, dilation=7)
self.AR_dec1_conv = nn.Sequential(
nn.Conv2d(16, 8, 3, padding=1),
nn.Conv2d(8, 8, 3, padding=1),
)
#block2
self.AR_dec2_1 = nn.Conv2d(128, 32, 3, padding=1, dilation=1)
self.AR_dec2_3 = nn.Conv2d(128, 32, 3, padding=3, dilation=3)
self.AR_dec2_5 = nn.Conv2d(128, 32, 3, padding=5, dilation=5)
self.AR_dec2_7 = nn.Conv2d(128, 32, 3, padding=7, dilation=7)
self.AR_dec2_conv = nn.Sequential(
nn.Conv2d(32, 16, 3, padding=1),
nn.Conv2d(16, 16, 3, padding=1),
)
#block3
self.AR_dec3_1 = nn.Conv2d(256, 64, 3, padding=1, dilation=1)
self.AR_dec3_3 = nn.Conv2d(256, 64, 3, padding=3, dilation=3)
self.AR_dec3_5 = nn.Conv2d(256, 64, 3, padding=5, dilation=5)
self.AR_dec3_7 = nn.Conv2d(256, 64, 3, padding=7, dilation=7)
self.AR_dec3_conv = nn.Sequential(
nn.Conv2d(64, 32, 3, padding=1),
nn.Conv2d(32, 32, 3, padding=1),
)
#block4
self.AR_dec4_1 = nn.Conv2d(512, 128, 3, padding=1, dilation=1)
self.AR_dec4_3 = nn.Conv2d(512, 128, 3, padding=3, dilation=3)
self.AR_dec4_5 = nn.Conv2d(512, 128, 3, padding=5, dilation=5)
self.AR_dec4_7 = nn.Conv2d(512, 128, 3, padding=7, dilation=7)
self.AR_dec4_conv = nn.Sequential(
nn.Conv2d(128, 128, 3, padding=1),
nn.Conv2d(128, 128, 3, padding=1),
)
#deconv
self.enc4 = SegNetEnc(512, 128, 2, 0)
self.enc3 = SegNetEnc(256, 64, 2, 0)
self.enc2 = SegNetEnc(128, 32, 2, 0)
self.enc1 = SegNetEnc(64, 64, 2, 0)
self.final = nn.Conv2d(64, 2, 3, padding=1)
def forward(self, x, y):
'''
        Note: the input spatial size should be a multiple of 32.
'''
################################fusion of im1######################################
# all node of layer 1
concat_xy = torch.cat([x,y],1)
dec1 = self.dec1(concat_xy)
dec2 = self.dec2(dec1)
dec3 = self.dec3(dec2)
dec4 = self.dec4(dec3)
#dec5 = self.dec5(dec4)
AR_dec1_1 = self.AR_dec1_1(dec1)
AR_dec1_1 = self.AR_dec1_conv(AR_dec1_1)
AR_dec1_3 = self.AR_dec1_3(dec1)
AR_dec1_3 = self.AR_dec1_conv(AR_dec1_3)
AR_dec1_5 = self.AR_dec1_5(dec1)
AR_dec1_5 = self.AR_dec1_conv(AR_dec1_5)
AR_dec1_7 = self.AR_dec1_7(dec1)
AR_dec1_7 = self.AR_dec1_conv(AR_dec1_7)
AR_dec1_cat = torch.cat([AR_dec1_1, AR_dec1_3, AR_dec1_5, AR_dec1_7], 1)
AR_dec2_1 = self.AR_dec2_1(dec2)
AR_dec2_1 = self.AR_dec2_conv(AR_dec2_1)
AR_dec2_3 = self.AR_dec2_3(dec2)
AR_dec2_3 = self.AR_dec2_conv(AR_dec2_3)
AR_dec2_5 = self.AR_dec2_5(dec2)
AR_dec2_5 = self.AR_dec2_conv(AR_dec2_5)
AR_dec2_7 = self.AR_dec2_7(dec2)
AR_dec2_7 = self.AR_dec2_conv(AR_dec2_7)
AR_dec2_cat = torch.cat([AR_dec2_1, AR_dec2_3, AR_dec2_5, AR_dec2_7], 1)
AR_dec3_1 = self.AR_dec3_1(dec3)
AR_dec3_1 = self.AR_dec3_conv(AR_dec3_1)
AR_dec3_3 = self.AR_dec3_3(dec3)
AR_dec3_3 = self.AR_dec3_conv(AR_dec3_3)
AR_dec3_5 = self.AR_dec3_5(dec3)
AR_dec3_5 = self.AR_dec3_conv(AR_dec3_5)
AR_dec3_7 = self.AR_dec3_7(dec3)
AR_dec3_7 = self.AR_dec3_conv(AR_dec3_7)
AR_dec3_cat = torch.cat([AR_dec3_1, AR_dec3_3, AR_dec3_5, AR_dec3_7], 1)
AR_dec4_1 = self.AR_dec4_1(dec4)
AR_dec4_1 = self.AR_dec4_conv(AR_dec4_1)
AR_dec4_3 = self.AR_dec4_3(dec4)
AR_dec4_3 = self.AR_dec4_conv(AR_dec4_3)
AR_dec4_5 = self.AR_dec4_5(dec4)
AR_dec4_5 = self.AR_dec4_conv(AR_dec4_5)
AR_dec4_7 = self.AR_dec4_7(dec4)
AR_dec4_7 = self.AR_dec4_conv(AR_dec4_7)
AR_dec4_cat = torch.cat([AR_dec4_1, AR_dec4_3, AR_dec4_5, AR_dec4_7], 1)
enc4 = self.enc4(AR_dec4_cat)
enc3 = self.enc3(torch.cat([AR_dec3_cat, F.upsample_bilinear(enc4, AR_dec3_cat.size()[2:])], 1))
enc2 = self.enc2(torch.cat([AR_dec2_cat, F.upsample_bilinear(enc3, AR_dec2_cat.size()[2:])], 1))
enc1 = self.enc1(torch.cat([AR_dec1_cat, F.upsample_bilinear(enc2, AR_dec1_cat.size()[2:])], 1))
final = F.upsample_bilinear(self.final(enc1), x.size()[2:])
return final
#model = models.vgg16(pretrained=False)
#print(model)
#model = TFPCD_middle_fusion(2)
#print(model)
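# Illustrative usage (not part of the original file): the network takes a pair of RGB frames
# and predicts a 2-class change map at the input resolution. Note that the constructor loads
# pretrained VGG-16 weights, so the first run downloads them.
if __name__ == "__main__":
    net = ARPPNET(num_classes=2)
    t0 = torch.randn(1, 3, 224, 224)
    t1 = torch.randn(1, 3, 224, 224)
    out = net(t0, t1)
    print(out.shape)  # torch.Size([1, 2, 224, 224])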
| [
"torch.cat",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.Upsample",
"torch.nn.ReLU",
"torch.nn.Conv2d"
] | 1.4.0 | rfww/EfficientChangeDetection | 42d466c56ed262980c27fd6cde6ffe65314e638f |
1.0 | # coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
import os
import random
import tempfile
import unittest
from importlib import import_module
from typing import List, Tuple
from transformers import is_tf_available
from transformers.testing_utils import _tf_gpu_memory_limit, is_pt_tf_cross_test, require_tf, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TFSharedEmbeddings,
tf_top_k_top_p_filtering,
)
if _tf_gpu_memory_limit is not None:
gpus = tf.config.list_physical_devices("GPU")
for gpu in gpus:
# Restrict TensorFlow to only allocate x GB of memory on the GPUs
try:
tf.config.set_logical_device_configuration(
gpu, [tf.config.LogicalDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)]
)
logical_gpus = tf.config.list_logical_devices("GPU")
print("Logical GPUs", logical_gpus)
except RuntimeError as e:
# Virtual devices must be set before GPUs have been initialized
print(e)
def _config_zero_init(config):
configs_no_init = copy.deepcopy(config)
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key:
setattr(configs_no_init, key, 0.0)
return configs_no_init
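# Illustrative effect (not part of the original file): a config with, say, initializer_range=0.02
# comes back with initializer_range=0.0, so tests that rely on zero-initialized weights are deterministic.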
@require_tf
class TFModelTesterMixin:
model_tester = None
all_model_classes = ()
all_generative_model_classes = ()
test_resize_embeddings = True
is_encoder_decoder = False
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
inputs_dict = copy.deepcopy(inputs_dict)
if model_class in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
inputs_dict = {
k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
if isinstance(v, tf.Tensor) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING.values():
inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.values():
inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING.values():
inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in [
*TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.values(),
*TF_MODEL_FOR_CAUSAL_LM_MAPPING.values(),
*TF_MODEL_FOR_MASKED_LM_MAPPING.values(),
*TF_MODEL_FOR_PRETRAINING_MAPPING.values(),
*TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.values(),
]:
inputs_dict["labels"] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32
)
return inputs_dict
def test_initialization(self):
pass
def test_save_load(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=False)
model = model_class.from_pretrained(tmpdirname)
after_outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assert_outputs_same(after_outputs, outputs)
def test_graph_mode(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
@tf.function
def run_in_graph_mode():
return model(inputs)
outputs = run_in_graph_mode()
self.assertIsNotNone(outputs)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
if model.config.is_encoder_decoder:
expected_arg_names = [
"input_ids",
"attention_mask",
"decoder_input_ids",
"decoder_attention_mask",
]
expected_arg_names.extend(
["head_mask", "decoder_head_mask", "encoder_outputs"]
if "head_mask" and "decoder_head_mask" in arg_names
else ["encoder_outputs"]
)
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
else:
expected_arg_names = ["input_ids"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_saved_model_creation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = False
config.output_attentions = False
if hasattr(config, "use_cache"):
config.use_cache = False
model_class = self.all_model_classes[0]
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
model(class_inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=True)
saved_model_dir = os.path.join(tmpdirname, "saved_model")
self.assertTrue(os.path.exists(saved_model_dir))
@slow
def test_saved_model_creation_extended(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
if hasattr(config, "use_cache"):
config.use_cache = True
for model_class in self.all_model_classes:
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
model(class_inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=True)
saved_model_dir = os.path.join(tmpdirname, "saved_model")
self.assertTrue(os.path.exists(saved_model_dir))
@slow
def test_saved_model_with_hidden_states_output(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
for model_class in self.all_model_classes:
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            # Since PR #8777 was merged, a saved model is always executed in graph mode.
            # In graph mode the booleans are always the ones from the config, so we update
            # the use_cache property (if it exists) to keep the booleans consistent with the inputs.
if "use_cache" in class_inputs_dict:
config.use_cache = class_inputs_dict.pop("use_cache")
model = model_class(config)
num_out = len(model(class_inputs_dict))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
saved_model_dir = os.path.join(tmpdirname, "saved_model")
model = tf.keras.models.load_model(saved_model_dir)
outputs = model(class_inputs_dict)
if self.is_encoder_decoder:
output = outputs["encoder_hidden_states"] if isinstance(outputs, dict) else outputs[-1]
else:
output = outputs["hidden_states"] if isinstance(outputs, dict) else outputs[-1]
hidden_states = [t.numpy() for t in output]
self.assertEqual(len(outputs), num_out)
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
@slow
def test_saved_model_with_attentions_output(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_attentions = True
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
for model_class in self.all_model_classes:
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            # Since PR #8777 was merged, a saved model is always executed in graph mode.
            # In graph mode the booleans are always the ones from the config, so we update
            # the use_cache property (if it exists) to keep the booleans consistent with the inputs.
if "use_cache" in class_inputs_dict:
config.use_cache = class_inputs_dict.pop("use_cache")
model = model_class(config)
num_out = len(model(class_inputs_dict))
with tempfile.TemporaryDirectory() as tmpdirname:
saved_model_dir = os.path.join(tmpdirname, "saved_model")
model.save_pretrained(saved_model_dir)
model = tf.keras.models.load_model(saved_model_dir)
outputs = model(class_inputs_dict)
if self.is_encoder_decoder:
output = outputs["encoder_attentions"] if isinstance(outputs, dict) else outputs[-1]
else:
output = outputs["attentions"] if isinstance(outputs, dict) else outputs[-1]
attentions = [t.numpy() for t in output]
self.assertEqual(len(outputs), num_out)
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_keras_save_load(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
tf_main_layer_classes = set(
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__),)
for module_member_name in dir(module)
if module_member_name.endswith("MainLayer")
for module_member in (getattr(module, module_member_name),)
if isinstance(module_member, type)
and tf.keras.layers.Layer in module_member.__bases__
and getattr(module_member, "_keras_serializable", False)
)
for main_layer_class in tf_main_layer_classes:
# T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
if "T5" in main_layer_class.__name__:
                # Take the same values as in TFT5ModelTester for this shared layer
shared = TFSharedEmbeddings(99, 32, name="shared")
config.use_cache = inputs_dict.pop("use_cache", None)
main_layer = main_layer_class(config, embed_tokens=shared)
else:
main_layer = main_layer_class(config)
symbolic_inputs = {
name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
}
model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
outputs = model(inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
filepath = os.path.join(tmpdirname, "keras_model.h5")
model.save(filepath)
if "T5" in main_layer_class.__name__:
model = tf.keras.models.load_model(
filepath,
custom_objects={
main_layer_class.__name__: main_layer_class,
"TFSharedEmbeddings": TFSharedEmbeddings,
},
)
else:
model = tf.keras.models.load_model(
filepath, custom_objects={main_layer_class.__name__: main_layer_class}
)
assert isinstance(model, tf.keras.Model)
after_outputs = model(inputs_dict)
self.assert_outputs_same(after_outputs, outputs)
def assert_outputs_same(self, after_outputs, outputs):
# Make sure we don't have nans
if isinstance(after_outputs, tf.Tensor):
out_1 = after_outputs.numpy()
elif isinstance(after_outputs, dict):
out_1 = after_outputs[list(after_outputs.keys())[0]].numpy()
else:
out_1 = after_outputs[0].numpy()
out_2 = outputs[0].numpy()
self.assertEqual(out_1.shape, out_2.shape)
out_1 = out_1[~np.isnan(out_1)]
out_2 = out_2[~np.isnan(out_2)]
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
@is_pt_tf_cross_test
def test_pt_tf_model_equivalence(self):
import torch
import transformers
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
pt_model_class_name = model_class.__name__[2:] # Skip the "TF" at the beginning
pt_model_class = getattr(transformers, pt_model_class_name)
config.output_hidden_states = True
tf_model = model_class(config)
pt_model = pt_model_class(config)
# Check we can load pt model in tf and vice-versa with model => model functions
tf_model = transformers.load_pytorch_model_in_tf2_model(
tf_model, pt_model, tf_inputs=self._prepare_for_class(inputs_dict, model_class)
)
pt_model = transformers.load_tf2_model_in_pytorch_model(pt_model, tf_model)
            # Check predictions on first output (logits/hidden-states) are close enough, given low-level computational differences
pt_model.eval()
pt_inputs_dict = {}
for name, key in self._prepare_for_class(inputs_dict, model_class).items():
if type(key) == bool:
pt_inputs_dict[name] = key
else:
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long)
# need to rename encoder-decoder "inputs" for PyTorch
if "inputs" in pt_inputs_dict and self.is_encoder_decoder:
pt_inputs_dict["input_ids"] = pt_inputs_dict.pop("inputs")
with torch.no_grad():
pto = pt_model(**pt_inputs_dict)
tfo = tf_model(self._prepare_for_class(inputs_dict, model_class), training=False)
tf_hidden_states = tfo[0].numpy()
pt_hidden_states = pto[0].numpy()
tf_nans = np.copy(np.isnan(tf_hidden_states))
pt_nans = np.copy(np.isnan(pt_hidden_states))
pt_hidden_states[tf_nans] = 0
tf_hidden_states[tf_nans] = 0
pt_hidden_states[pt_nans] = 0
tf_hidden_states[pt_nans] = 0
max_diff = np.amax(np.abs(tf_hidden_states - pt_hidden_states))
self.assertLessEqual(max_diff, 4e-2)
# Check we can load pt model in tf and vice-versa with checkpoint => model functions
with tempfile.TemporaryDirectory() as tmpdirname:
pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin")
torch.save(pt_model.state_dict(), pt_checkpoint_path)
tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(tf_model, pt_checkpoint_path)
tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5")
tf_model.save_weights(tf_checkpoint_path)
pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path)
            # Check predictions on first output (logits/hidden-states) are close enough, given low-level computational differences
pt_model.eval()
pt_inputs_dict = {}
for name, key in self._prepare_for_class(inputs_dict, model_class).items():
if type(key) == bool:
key = np.array(key, dtype=bool)
pt_inputs_dict[name] = torch.from_numpy(key).to(torch.long)
else:
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long)
# need to rename encoder-decoder "inputs" for PyTorch
if "inputs" in pt_inputs_dict and self.is_encoder_decoder:
pt_inputs_dict["input_ids"] = pt_inputs_dict.pop("inputs")
with torch.no_grad():
pto = pt_model(**pt_inputs_dict)
tfo = tf_model(self._prepare_for_class(inputs_dict, model_class))
tfo = tfo[0].numpy()
pto = pto[0].numpy()
tf_nans = np.copy(np.isnan(tfo))
pt_nans = np.copy(np.isnan(pto))
pto[tf_nans] = 0
tfo[tf_nans] = 0
pto[pt_nans] = 0
tfo[pt_nans] = 0
max_diff = np.amax(np.abs(tfo - pto))
self.assertLessEqual(max_diff, 4e-2)
def test_train_pipeline_custom_model(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
tf_main_layer_classes = set(
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__),)
for module_member_name in dir(module)
if module_member_name.endswith("MainLayer")
for module_member in (getattr(module, module_member_name),)
if isinstance(module_member, type)
and tf.keras.layers.Layer in module_member.__bases__
and getattr(module_member, "_keras_serializable", False)
)
for main_layer_class in tf_main_layer_classes:
# T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
if "T5" in main_layer_class.__name__:
                # Take the same values as in TFT5ModelTester for this shared layer
shared = TFSharedEmbeddings(self.model_tester.vocab_size, self.model_tester.hidden_size, name="shared")
config.use_cache = False
main_layer = main_layer_class(config, embed_tokens=shared)
del inputs_dict["use_cache"]
else:
main_layer = main_layer_class(config)
symbolic_inputs = {
name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
}
if hasattr(self.model_tester, "num_labels"):
num_labels = self.model_tester.num_labels
else:
num_labels = 2
X = tf.data.Dataset.from_tensor_slices(
(inputs_dict, np.ones((self.model_tester.batch_size, self.model_tester.seq_length, num_labels, 1)))
).batch(1)
hidden_states = main_layer(symbolic_inputs)[0]
outputs = tf.keras.layers.Dense(num_labels, activation="softmax", name="outputs")(hidden_states)
model = tf.keras.models.Model(inputs=symbolic_inputs, outputs=[outputs])
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["binary_accuracy"])
model.fit(X, epochs=1)
with tempfile.TemporaryDirectory() as tmpdirname:
filepath = os.path.join(tmpdirname, "keras_model.h5")
model.save(filepath)
if "T5" in main_layer_class.__name__:
model = tf.keras.models.load_model(
filepath,
custom_objects={
main_layer_class.__name__: main_layer_class,
"TFSharedEmbeddings": TFSharedEmbeddings,
},
)
else:
model = tf.keras.models.load_model(
filepath, custom_objects={main_layer_class.__name__: main_layer_class}
)
assert isinstance(model, tf.keras.Model)
model(inputs_dict)
def test_compile_tf_model(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
max_input = getattr(self.model_tester, "max_position_embeddings", 512)
optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy")
for model_class in self.all_model_classes:
if self.is_encoder_decoder:
input_ids = {
"decoder_input_ids": tf.keras.Input(
batch_shape=(2, max_input),
name="decoder_input_ids",
dtype="int32",
),
"input_ids": tf.keras.Input(batch_shape=(2, max_input), name="input_ids", dtype="int32"),
}
elif model_class in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
input_ids = tf.keras.Input(batch_shape=(4, 2, max_input), name="input_ids", dtype="int32")
else:
input_ids = tf.keras.Input(batch_shape=(2, max_input), name="input_ids", dtype="int32")
# Prepare our model
model = model_class(config)
model(self._prepare_for_class(inputs_dict, model_class)) # Model must be called before saving.
# Let's load it from the disk to be sure we can use pretrained weights
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=False)
model = model_class.from_pretrained(tmpdirname)
outputs_dict = model(input_ids)
hidden_states = outputs_dict[0]
# Add a dense layer on top to test integration with other keras modules
outputs = tf.keras.layers.Dense(2, activation="softmax", name="outputs")(hidden_states)
# Compile extended model
extended_model = tf.keras.Model(inputs=[input_ids], outputs=[outputs])
extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
def test_keyword_and_dict_args(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs_dict = model(inputs)
inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
input_ids = inputs_keywords.pop("input_ids", None)
outputs_keywords = model(input_ids, **inputs_keywords)
output_dict = outputs_dict[0].numpy()
output_keywords = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
def check_decoder_attentions_output(outputs):
out_len = len(outputs)
self.assertEqual(out_len % 2, 0)
decoder_attentions = outputs.decoder_attentions
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
def check_encoder_attentions_output(outputs):
attentions = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["use_cache"] = False
config.output_hidden_states = False
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
out_len = len(outputs)
self.assertEqual(config.output_hidden_states, False)
check_encoder_attentions_output(outputs)
if self.is_encoder_decoder:
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(config.output_hidden_states, False)
check_decoder_attentions_output(outputs)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(config.output_hidden_states, False)
check_encoder_attentions_output(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
config.output_hidden_states = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
self.assertEqual(model.config.output_hidden_states, True)
check_encoder_attentions_output(outputs)
def test_hidden_states_output(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_hidden_states_output(config, inputs_dict, model_class):
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
if model.config.is_encoder_decoder:
encoder_hidden_states = outputs.encoder_hidden_states
decoder_hidden_states = outputs.decoder_hidden_states
self.assertEqual(config.output_attentions, False)
self.assertEqual(len(encoder_hidden_states), expected_num_layers)
self.assertListEqual(
list(encoder_hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
self.assertEqual(len(decoder_hidden_states), expected_num_layers)
self.assertListEqual(
list(decoder_hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
else:
hidden_states = outputs.hidden_states
self.assertEqual(config.output_attentions, False)
self.assertEqual(len(hidden_states), expected_num_layers)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(config, inputs_dict, model_class)
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(config, inputs_dict, model_class)
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
list_lm_models = (
list(TF_MODEL_FOR_CAUSAL_LM_MAPPING.values())
+ list(TF_MODEL_FOR_MASKED_LM_MAPPING.values())
+ list(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.values())
)
for model_class in self.all_model_classes:
model = model_class(config)
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
if model_class in list_lm_models:
x = model.get_output_embeddings()
assert isinstance(x, tf.keras.layers.Layer)
name = model.get_bias()
assert isinstance(name, dict)
for k, v in name.items():
assert isinstance(v, tf.Variable)
else:
x = model.get_output_embeddings()
assert x is None
name = model.get_bias()
assert name is None
def test_determinism(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
first, second = (
model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
)
out_1 = first.numpy()
out_2 = second.numpy()
out_1 = out_1[~np.isnan(out_1)]
out_2 = out_2[~np.isnan(out_2)]
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(tuple_object, dict_object)),
msg=f"Tuple and dict output are not equal. Difference: {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}",
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(
model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True}
)
def _get_embeds(self, wte, input_ids):
        # In our TF models the input-embeddings layer can take slightly different call
        # signatures, so we try a few of them and, as a last resort, fall back to a
        # synthetic dummy tensor of ones with the right embedding size:
try:
x = wte(input_ids, mode="embedding")
except Exception:
try:
x = wte([input_ids], mode="embedding")
except Exception:
try:
x = wte([input_ids, None, None, None], mode="embedding")
except Exception:
if hasattr(self.model_tester, "embedding_size"):
x = tf.ones(
input_ids.shape + [self.model_tester.embedding_size],
dtype=tf.dtypes.float32,
)
else:
x = tf.ones(
input_ids.shape + [self.model_tester.hidden_size],
dtype=tf.dtypes.float32,
)
return x
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
wte = model.get_input_embeddings()
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = self._get_embeds(wte, input_ids)
else:
inputs["inputs_embeds"] = self._get_embeds(wte, encoder_input_ids)
inputs["decoder_inputs_embeds"] = self._get_embeds(wte, decoder_input_ids)
model(inputs)
def test_numpy_arrays_inputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def prepare_numpy_arrays(inputs_dict):
inputs_np_dict = {}
for k, v in inputs_dict.items():
if tf.is_tensor(v):
inputs_np_dict[k] = v.numpy()
else:
inputs_np_dict[k] = np.array(k)
return inputs_np_dict
for model_class in self.all_model_classes:
model = model_class(config)
inputs = self._prepare_for_class(inputs_dict, model_class)
inputs_np = prepare_numpy_arrays(inputs)
model(inputs_np)
def test_resize_token_embeddings(self):
if not self.test_resize_embeddings:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(model, embedding_layer):
if hasattr(embedding_layer, "word_embeddings"):
return embedding_layer.word_embeddings
elif hasattr(embedding_layer, "weight"):
return embedding_layer.weight
elif hasattr(embedding_layer, "decoder"):
return embedding_layer.decoder
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model(model.dummy_inputs)
if hasattr(embedding_layer, "word_embeddings"):
return embedding_layer.word_embeddings
elif hasattr(embedding_layer, "weight"):
return embedding_layer.weight
elif hasattr(embedding_layer, "decoder"):
return embedding_layer.decoder
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10, None]:
# build the embeddings
model = model_class(config=config)
old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
old_bias = model.get_bias()
old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
# reshape the embeddings
model.resize_token_embeddings(size)
new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
new_bias = model.get_bias()
new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
# check that the resized embeddings size matches the desired size.
assert_size = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0], assert_size)
# check that weights remain the same after resizing
models_equal = True
for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
if old_bias is not None and new_bias is not None:
for old_weight, new_weight in zip(old_bias.values(), new_bias.values()):
self.assertEqual(new_weight.shape[0], assert_size)
models_equal = True
for p1, p2 in zip(old_weight.value(), new_weight.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0], assert_size)
self.assertEqual(new_output_embeddings.shape[1], old_output_embeddings.shape[1])
models_equal = True
for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
def test_lm_head_model_random_no_beam_search_generate(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = inputs_dict["input_ids"]
# iterate over all generative models
for model_class in self.all_generative_model_classes:
model = model_class(config)
if config.bos_token_id is None:
                # if bos token id is not defined, the model needs input_ids
with self.assertRaises(AssertionError):
model.generate(do_sample=True, max_length=5)
# num_return_sequences = 1
self._check_generated_ids(model.generate(input_ids, do_sample=True))
else:
# num_return_sequences = 1
self._check_generated_ids(model.generate(do_sample=True, max_length=5))
with self.assertRaises(AssertionError):
                # generating multiple sequences with greedy (non-sampling) decoding and no beam
                # search is not allowed, as it would always generate the same sequences
model.generate(input_ids, do_sample=False, num_return_sequences=2)
# num_return_sequences > 1, sample
self._check_generated_ids(model.generate(input_ids, do_sample=True, num_return_sequences=2))
# check bad words tokens language generation
# create list of 1-seq bad token and list of 2-seq of bad tokens
bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]
output_tokens = model.generate(
input_ids, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2
)
# only count generated tokens
generated_ids = output_tokens[:, input_ids.shape[-1] :]
self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))
def test_lm_head_model_random_beam_search_generate(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = inputs_dict["input_ids"]
for model_class in self.all_generative_model_classes:
model = model_class(config)
if config.bos_token_id is None:
                # if bos token id is not defined, the model needs input_ids; num_return_sequences = 1
self._check_generated_ids(model.generate(input_ids, do_sample=True, num_beams=2))
else:
# num_return_sequences = 1
self._check_generated_ids(model.generate(do_sample=True, max_length=5, num_beams=2))
with self.assertRaises(AssertionError):
# generating more return sequences than beams is not possible
model.generate(input_ids, do_sample=False, num_return_sequences=3, num_beams=2)
# num_return_sequences > 1, sample
self._check_generated_ids(
model.generate(
input_ids,
do_sample=True,
num_beams=2,
num_return_sequences=2,
)
)
# num_return_sequences > 1, greedy
self._check_generated_ids(model.generate(input_ids, do_sample=False, num_beams=2, num_return_sequences=2))
# check bad words tokens language generation
# create list of 1-seq bad token and list of 2-seq of bad tokens
bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]
output_tokens = model.generate(
input_ids, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2
)
# only count generated tokens
generated_ids = output_tokens[:, input_ids.shape[-1] :]
self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))
def test_loss_computation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
if getattr(model, "compute_loss", None):
# The number of elements in the loss should be the same as the number of elements in the label
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
added_label = prepared_for_class[
sorted(list(prepared_for_class.keys() - inputs_dict.keys()), reverse=True)[0]
]
loss_size = tf.size(added_label)
if model.__class__ in TF_MODEL_FOR_CAUSAL_LM_MAPPING.values():
# if the loss is a causal LM loss, the labels are shifted, so one label per batch
# is cut
loss_size = loss_size - self.model_tester.batch_size
# Test that the model correctly computes the loss with kwargs
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
input_ids = prepared_for_class.pop("input_ids")
loss = model(input_ids, **prepared_for_class)[0]
self.assertEqual(loss.shape, [loss_size])
# Test that the model correctly computes the loss with a dict
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
loss = model(prepared_for_class)[0]
self.assertEqual(loss.shape, [loss_size])
# Test that the model correctly computes the loss with a tuple
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
# Get keys that were added with the _prepare_for_class function
label_keys = prepared_for_class.keys() - inputs_dict.keys()
signature = inspect.signature(model.call).parameters
signature_names = list(signature.keys())
# Create a dictionary holding the location of the tensors in the tuple
tuple_index_mapping = {0: "input_ids"}
for label_key in label_keys:
label_key_index = signature_names.index(label_key)
tuple_index_mapping[label_key_index] = label_key
sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
# Initialize a list with their default values, update the values and convert to a tuple
list_input = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default)
for index, value in sorted_tuple_index_mapping:
list_input[index] = prepared_for_class[value]
tuple_input = tuple(list_input)
# Send to model
loss = model(tuple_input[:-1])[0]
self.assertEqual(loss.shape, [loss_size])
def _generate_random_bad_tokens(self, num_bad_tokens, model):
# special tokens cannot be bad tokens
special_tokens = []
if model.config.bos_token_id is not None:
special_tokens.append(model.config.bos_token_id)
if model.config.pad_token_id is not None:
special_tokens.append(model.config.pad_token_id)
if model.config.eos_token_id is not None:
special_tokens.append(model.config.eos_token_id)
# create random bad tokens that are not special tokens
bad_tokens = []
while len(bad_tokens) < num_bad_tokens:
token = tf.squeeze(ids_tensor((1, 1), self.model_tester.vocab_size), 0).numpy()[0]
if token not in special_tokens:
bad_tokens.append(token)
return bad_tokens
def _check_generated_ids(self, output_ids):
for token_id in output_ids[0].numpy().tolist():
self.assertGreaterEqual(token_id, 0)
self.assertLess(token_id, self.model_tester.vocab_size)
def _check_match_tokens(self, generated_ids, bad_words_ids):
# for all bad word tokens
for bad_word_ids in bad_words_ids:
# for all slices in batch
for generated_ids_slice in generated_ids:
# for all word idx
for i in range(len(bad_word_ids), len(generated_ids_slice)):
# if tokens match
if generated_ids_slice[i - len(bad_word_ids) : i] == bad_word_ids:
return True
return False
def ids_tensor(shape, vocab_size, rng=None, name=None, dtype=None):
"""Creates a random int32 tensor of the shape within the vocab size."""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.randint(0, vocab_size - 1))
output = tf.constant(values, shape=shape, dtype=dtype if dtype is not None else tf.int32)
return output
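# e.g. ids_tensor((2, 5), vocab_size=10) returns a tf.int32 tensor of shape (2, 5) with values in [0, 10)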
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
# tests whether the top_k_top_p_filtering function behaves as expected
def test_top_k_top_p_filtering(self):
logits = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
], # cumulative prob of 5 highest values <= 0.6
],
dtype=tf.float32,
)
non_inf_expected_idx = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
dtype=tf.int32,
) # expected non filtered idx as noted above
non_inf_expected_output = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
dtype=tf.float32,
) # expected non filtered values as noted above
output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
non_inf_output = output[output != -float("inf")]
non_inf_idx = tf.cast(
tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
dtype=tf.int32,
)
tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
| [
"torch.no_grad",
"torch.from_numpy"
] | 1.0 | wilcoln/transformers | 6331d4fe59e85840bb5693837e791f4caedcd53b |
1.4 | import kornia
import kornia.testing as utils # test utils
from test.common import device
import torch
from torch.autograd import gradcheck
from torch.testing import assert_allclose
import pytest
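# The tests below exercise kornia's color conversions (RGB<->RGBA, BGR<->RGBA, RGB<->BGR)
# on single images and batched inputs, including gradcheck and (currently skipped) JIT variants.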
class TestRgbToRgba:
def test_smoke(self, device):
data = torch.rand(3, 4, 4).to(device)
assert kornia.rgb_to_rgba(data, 0.).shape == (4, 4, 4)
def test_back_and_forth_rgb(self, device):
a_val: float = 1.
x_rgb = torch.rand(3, 4, 4).to(device)
x_rgba = kornia.rgb_to_rgba(x_rgb, a_val)
x_rgb_new = kornia.rgba_to_rgb(x_rgba)
assert_allclose(x_rgb, x_rgb_new)
def test_back_and_forth_bgr(self, device):
a_val: float = 1.
x_bgr = torch.rand(3, 4, 4).to(device)
x_rgba = kornia.bgr_to_rgba(x_bgr, a_val)
x_bgr_new = kornia.rgba_to_bgr(x_rgba)
assert_allclose(x_bgr, x_bgr_new)
def test_bgr(self, device):
a_val: float = 1.
x_rgb = torch.rand(3, 4, 4).to(device)
x_bgr = kornia.rgb_to_bgr(x_rgb)
x_rgba = kornia.rgb_to_rgba(x_rgb, a_val)
x_rgba_new = kornia.bgr_to_rgba(x_bgr, a_val)
assert_allclose(x_rgba, x_rgba_new)
def test_single(self, device):
data = torch.tensor([[[1., 1.],
[1., 1.]],
[[2., 2.],
[2., 2.]],
[[3., 3.],
[3., 3.]]]) # 3x2x2
data = data.to(device)
aval: float = 0.4
expected = torch.tensor([[[1.0, 1.0],
[1.0, 1.0]],
[[2.0, 2.0],
[2.0, 2.0]],
[[3.0, 3.0],
[3.0, 3.0]],
[[0.4, 0.4],
[0.4, 0.4]]]) # 4x2x2
expected = expected.to(device)
assert_allclose(kornia.rgb_to_rgba(data, aval), expected)
def test_batch(self, device):
data = torch.tensor([[[[1., 1.],
[1., 1.]],
[[2., 2.],
[2., 2.]],
[[3., 3.],
[3., 3.]]],
[[[1., 1.],
[1., 1.]],
[[2., 2.],
[2., 2.]],
[[3., 3.],
[3., 3.]]]]) # 2x3x2x2
data = data.to(device)
aval: float = 45.
expected = torch.tensor([[[[1.0, 1.0],
[1.0, 1.0]],
[[2.0, 2.0],
[2.0, 2.0]],
[[3.0, 3.0],
[3.0, 3.0]],
[[45., 45.],
[45., 45.]]],
[[[1.0, 1.0],
[1.0, 1.0]],
[[2.0, 2.0],
[2.0, 2.0]],
[[3.0, 3.0],
[3.0, 3.0]],
[[45., 45.],
[45., 45.]]]])
expected = expected.to(device)
assert_allclose(kornia.rgb_to_rgba(data, aval), expected)
def test_gradcheck(self, device):
data = torch.rand(1, 3, 2, 2).to(device)
data = utils.tensor_to_gradcheck_var(data) # to var
assert gradcheck(kornia.color.RgbToRgba(1.), (data,), raise_exception=True)
class TestBgrToRgb:
def test_back_and_forth(self, device):
data_bgr = torch.rand(1, 3, 3, 2).to(device)
data_rgb = kornia.bgr_to_rgb(data_bgr)
data_bgr_new = kornia.rgb_to_bgr(data_rgb)
assert_allclose(data_bgr, data_bgr_new)
def test_bgr_to_rgb(self, device):
data = torch.tensor([[[1., 1.], [1., 1.]],
[[2., 2.], [2., 2.]],
[[3., 3.], [3., 3.]]]) # 3x2x2
expected = torch.tensor([[[3., 3.], [3., 3.]],
[[2., 2.], [2., 2.]],
[[1., 1.], [1., 1.]]]) # 3x2x2
# move data to the device
data = data.to(device)
expected = expected.to(device)
f = kornia.color.BgrToRgb()
assert_allclose(f(data), expected)
def test_batch_bgr_to_rgb(self, device):
data = torch.tensor([[[[1., 1.], [1., 1.]],
[[2., 2.], [2., 2.]],
[[3., 3.], [3., 3.]]],
[[[1., 1.], [1., 1.]],
[[2., 2.], [2., 2.]],
[[3., 3.], [3., 3.]]]]) # 2x3x2x2
expected = torch.tensor([[[[3., 3.], [3., 3.]],
[[2., 2.], [2., 2.]],
[[1., 1.], [1., 1.]]],
[[[3., 3.], [3., 3.]],
[[2., 2.], [2., 2.]],
[[1., 1.], [1., 1.]]]]) # 2x3x2x2
# move data to the device
data = data.to(device)
expected = expected.to(device)
f = kornia.color.BgrToRgb()
out = f(data)
assert_allclose(out, expected)
def test_gradcheck(self, device):
data = torch.tensor([[[1., 1.], [1., 1.]],
[[2., 2.], [2., 2.]],
[[3., 3.], [3., 3.]]]) # 3x2x2
data = data.to(device)
data = utils.tensor_to_gradcheck_var(data) # to var
assert gradcheck(kornia.color.BgrToRgb(), (data,), raise_exception=True)
@pytest.mark.skip(reason="turn off all jit for a while")
def test_jit(self, device):
@torch.jit.script
def op_script(data: torch.Tensor) -> torch.Tensor:
return kornia.bgr_to_rgb(data)
data = torch.Tensor([[[1., 1.], [1., 1.]],
[[2., 2.], [2., 2.]],
[[3., 3.], [3., 3.]]]) # 3x2x2
actual = op_script(data)
expected = kornia.bgr_to_rgb(data)
assert_allclose(actual, expected)
class TestRgbToBgr:
def test_back_and_forth(self, device):
data_rgb = torch.rand(1, 3, 3, 2).to(device)
data_bgr = kornia.rgb_to_bgr(data_rgb)
data_rgb_new = kornia.bgr_to_rgb(data_bgr)
assert_allclose(data_rgb, data_rgb_new)
def test_rgb_to_bgr(self, device):
# prepare input data
data = torch.tensor([[[1., 1.],
[1., 1.]],
[[2., 2.],
[2., 2.]],
[[3., 3.],
[3., 3.]]]) # 3x2x2
expected = torch.tensor([[[3., 3.],
[3., 3.]],
[[2., 2.],
[2., 2.]],
[[1., 1.],
[1., 1.]]]) # 3x2x2
# move data to the device
data = data.to(device)
expected = expected.to(device)
f = kornia.color.RgbToBgr()
assert_allclose(f(data), expected)
def test_gradcheck(self, device):
# prepare input data
data = torch.tensor([[[1., 1.],
[1., 1.]],
[[2., 2.],
[2., 2.]],
[[3., 3.],
[3., 3.]]]) # 3x2x2
data = data.to(device)
data = utils.tensor_to_gradcheck_var(data) # to var
assert gradcheck(kornia.color.RgbToBgr(), (data,),
raise_exception=True)
@pytest.mark.skip(reason="turn off all jit for a while")
def test_jit(self):
@torch.jit.script
def op_script(data: torch.Tensor) -> torch.Tensor:
return kornia.rgb_to_bgr(data)
data = torch.Tensor([[[1., 1.], [1., 1.]],
[[2., 2.], [2., 2.]],
[[3., 3.], [3., 3.]]]) # 3x2x2
actual = op_script(data)
expected = kornia.rgb_to_bgr(data)
assert_allclose(actual, expected)
def test_batch_rgb_to_bgr(self, device):
# prepare input data
data = torch.tensor([[[[1., 1.],
[1., 1.]],
[[2., 2.],
[2., 2.]],
[[3., 3.],
[3., 3.]]],
[[[1., 1.],
[1., 1.]],
[[2., 2.],
[2., 2.]],
[[3., 3.],
[3., 3.]]]]) # 2x3x2x2
expected = torch.tensor([[[[3., 3.],
[3., 3.]],
[[2., 2.],
[2., 2.]],
[[1., 1.],
[1., 1.]]],
[[[3., 3.],
[3., 3.]],
[[2., 2.],
[2., 2.]],
[[1., 1.],
[1., 1.]]]]) # 2x3x2x2
# move data to the device
data = data.to(device)
expected = expected.to(device)
f = kornia.color.RgbToBgr()
out = f(data)
assert_allclose(out, expected)
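# Sanity sketch (not part of the suite): rgb_to_bgr / bgr_to_rgb simply reverse the channel
# dimension, so for a 3xHxW (or Bx3xHxW) tensor x, kornia.rgb_to_bgr(x) should match
# torch.flip(x, dims=[-3]).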
| [
"torch.rand",
"torch.Tensor",
"torch.tensor",
"torch.testing.assert_allclose"
] | 1.4.0 | connorlee77/kornia | af5b1f76bedf2a7fc0e0da2386b1be3032b6534f |
1.6 | from typing import Type
import torch
import torch.nn
import torch.distributions.distribution
import n3ml.population
import n3ml.learning
class Synapse(torch.nn.Module):
def __init__(self,
source: n3ml.population.Population,
target: n3ml.population.Population,
w: torch.Tensor,
w_min: float = 0.0,
w_max: float = 1.0,
alpha: float = None,
learning_rule: Type[n3ml.learning.LearningRule] = None,
initializer: torch.distributions.distribution.Distribution = None) -> None:
super().__init__()
self.source = source
self.target = target
self.register_buffer('w', w)
self.w_min = w_min
self.w_max = w_max
self.alpha = alpha
if learning_rule is None:
self.learning_rule = learning_rule
else:
self.learning_rule = learning_rule(self)
self.initializer = initializer
def init(self) -> None:
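# sample initial weights in-place from the provided initializer distribution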
self.w[:] = self.initializer.sample(sample_shape=self.w.size())
def normalize(self) -> None:
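# if alpha is set, rescale each row of w so the absolute weight sum per target neuron equals alpha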
if self.alpha is not None:
w_abs_sum = self.w.abs().sum(dim=1).unsqueeze(dim=1)
w_abs_sum[w_abs_sum == 0.0] = 1.0
self.w *= self.alpha / w_abs_sum
def update(self) -> None:
if self.learning_rule is not None:
self.learning_rule.run()
def run(self) -> None:
raise NotImplementedError
class LinearSynapse(Synapse):
def __init__(self,
source: n3ml.population.Population,
target: n3ml.population.Population,
w: torch.Tensor = None,
w_min: float = 0.0,
w_max: float = 1.0,
alpha: float = None,
learning_rule: n3ml.learning.LearningRule = None,
initializer: torch.distributions.distribution.Distribution = None) -> None:
if w is None:
w = torch.zeros(size=(target.neurons, source.neurons))
super().__init__(source, target, w, w_min, w_max, alpha, learning_rule, initializer)
def run(self) -> torch.Tensor:
"""
Non-batched processing.
self.w.size: [self.target.neurons, self.source.neurons]
self.source.s.size: [self.source.neurons]
"""
return torch.matmul(self.w, self.source.s)
class ConvSynapse(Synapse):
pass
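# Minimal usage sketch (assumes n3ml.population.Population exposes a `neurons` count and a
# spike vector `s`, which the code above already relies on; the populations themselves are
# hypothetical placeholders):
#   syn = LinearSynapse(source=pre_pop, target=post_pop,
#                       initializer=torch.distributions.uniform.Uniform(0.0, 0.3))
#   syn.init()        # sample initial weights from the initializer
#   psp = syn.run()   # post-synaptic input: w @ source.s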
| [
"torch.zeros",
"torch.matmul"
] | 1.6.0 | chatterboy/n3ml | 28b4e25a277e55e734e6054e8239237a5ff7d1f1 |
1.5 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import datetime
import json
import random
import time
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import DataLoader, DistributedSampler
import datasets
import util.misc as utils
from datasets import build_dataset, get_coco_api_from_dataset
from engine import evaluate, train_one_epoch
from models import build_model
def get_args_parser():
parser = argparse.ArgumentParser('Set transformer detector', add_help=False)
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--lr_backbone', default=1e-5, type=float)
parser.add_argument('--batch_size', default=2, type=int)
parser.add_argument('--weight_decay', default=1e-4, type=float)
parser.add_argument('--epochs', default=300, type=int)
parser.add_argument('--lr_drop', default=200, type=int)
parser.add_argument('--clip_max_norm', default=0.1, type=float,
help='gradient clipping max norm')
# Model parameters
parser.add_argument('--frozen_weights', type=str, default=None,
help="Path to the pretrained model. If set, only the mask head will be trained")
# * Backbone
parser.add_argument('--backbone', default='resnet50', type=str,
help="Name of the convolutional backbone to use")
parser.add_argument('--dilation', action='store_true',
help="If true, we replace stride with dilation in the last convolutional block (DC5)")
parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),
help="Type of positional embedding to use on top of the image features")
# * Transformer
parser.add_argument('--enc_layers', default=6, type=int,
help="Number of encoding layers in the transformer")
parser.add_argument('--dec_layers', default=6, type=int,
help="Number of decoding layers in the transformer")
parser.add_argument('--dim_feedforward', default=2048, type=int,
help="Intermediate size of the feedforward layers in the transformer blocks")
parser.add_argument('--hidden_dim', default=256, type=int,
help="Size of the embeddings (dimension of the transformer)")
parser.add_argument('--dropout', default=0.1, type=float,
help="Dropout applied in the transformer")
parser.add_argument('--nheads', default=8, type=int,
help="Number of attention heads inside the transformer's attentions")
parser.add_argument('--num_queries', default=100, type=int,
help="Number of query slots")
parser.add_argument('--pre_norm', action='store_true')
# * Segmentation
parser.add_argument('--masks', action='store_true',
help="Train segmentation head if the flag is provided")
# Loss
parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',
help="Disables auxiliary decoding losses (loss at each layer)")
# * Matcher
parser.add_argument('--set_cost_class', default=1, type=float,
help="Class coefficient in the matching cost")
parser.add_argument('--set_cost_bbox', default=5, type=float,
help="L1 box coefficient in the matching cost")
parser.add_argument('--set_cost_giou', default=2, type=float,
help="giou box coefficient in the matching cost")
# * Loss coefficients
parser.add_argument('--mask_loss_coef', default=1, type=float)
parser.add_argument('--dice_loss_coef', default=1, type=float)
parser.add_argument('--bbox_loss_coef', default=5, type=float)
parser.add_argument('--giou_loss_coef', default=2, type=float)
parser.add_argument('--eos_coef', default=0.1, type=float,
help="Relative classification weight of the no-object class")
# dataset parameters
parser.add_argument('--dataset_file', default='coco')
parser.add_argument('--coco_path', type=str)
parser.add_argument('--coco_panoptic_path', type=str)
parser.add_argument('--remove_difficult', action='store_true')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true')
parser.add_argument('--num_workers', default=2, type=int)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
return parser
def main(args):
utils.init_distributed_mode(args)
print("git:\n {}\n".format(utils.get_sha()))
if args.frozen_weights is not None:
assert args.masks, "Frozen training is meant for segmentation only"
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
# dataset_train = build_dataset(image_set='train', args=args)
dataset_val = build_dataset(image_set='val', args=args)
# if args.distributed:
# sampler_train = DistributedSampler(dataset_train)
# sampler_val = DistributedSampler(dataset_val, shuffle=False)
# else:
# sampler_train = torch.utils.data.RandomSampler(dataset_train)
# sampler_val = torch.utils.data.SequentialSampler(dataset_val)
# batch_sampler_train = torch.utils.data.BatchSampler(
# sampler_train, args.batch_size, drop_last=True)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
# data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train,
# collate_fn=utils.collate_fn, num_workers=args.num_workers)
data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val,
drop_last=False, collate_fn=utils.collate_fn_leimao, num_workers=args.num_workers)
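# iterate once over the validation loader just to inspect batch shapes and targets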
for inputs, labels in data_loader_val:
print("---------------------")
print(inputs.shape)
print(labels)
# for input_m in inputs.tensors:
# print(input_m.shape)
if __name__ == '__main__':
parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
| [
"torch.device",
"torch.utils.data.SequentialSampler",
"torch.manual_seed",
"torch.utils.data.DataLoader"
] | 1.5.0 | leimao/detr | cd88c4ea01257831ac677b6268e1aef7cd37eca4 |
1.10 | # Copyright 2020 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
# Adapted by Florian Lux 2021
import numpy as np
import pyworld
import torch
import torch.nn.functional as F
from scipy.interpolate import interp1d
from Utility.utils import pad_list
class Dio(torch.nn.Module):
"""
F0 estimation with the dio + stonemask algorithm.
This is an F0 extractor based on the dio + stonemask algorithm
introduced in https://doi.org/10.1587/transinf.2015EDP7457
"""
def __init__(self, fs=16000, n_fft=1024, hop_length=256, f0min=40, f0max=400, use_token_averaged_f0=True,
use_continuous_f0=False, use_log_f0=False, reduction_factor=1):
super().__init__()
self.fs = fs
self.n_fft = n_fft
self.hop_length = hop_length
self.frame_period = 1000 * hop_length / fs
self.f0min = f0min
self.f0max = f0max
self.use_token_averaged_f0 = use_token_averaged_f0
self.use_continuous_f0 = use_continuous_f0
self.use_log_f0 = use_log_f0
if use_token_averaged_f0:
assert reduction_factor >= 1
self.reduction_factor = reduction_factor
def output_size(self):
return 1
def get_parameters(self):
return dict(fs=self.fs, n_fft=self.n_fft, hop_length=self.hop_length, f0min=self.f0min, f0max=self.f0max,
use_token_averaged_f0=self.use_token_averaged_f0, use_continuous_f0=self.use_continuous_f0, use_log_f0=self.use_log_f0,
reduction_factor=self.reduction_factor)
def forward(self, input_waves, input_waves_lengths=None, feats_lengths=None, durations=None,
durations_lengths=None, norm_by_average=True, text=None):
# If not provided, we assume that the inputs have the same length
if input_waves_lengths is None:
input_waves_lengths = (input_waves.new_ones(input_waves.shape[0], dtype=torch.long) * input_waves.shape[1])
# F0 extraction
pitch = [self._calculate_f0(x[:xl]) for x, xl in zip(input_waves, input_waves_lengths)]
# (Optional): Adjust length to match with the mel-spectrogram
if feats_lengths is not None:
pitch = [self._adjust_num_frames(p, fl).view(-1) for p, fl in zip(pitch, feats_lengths)]
# (Optional): Average by duration to calculate token-wise f0
if self.use_token_averaged_f0:
pitch = [self._average_by_duration(p, d, text).view(-1) for p, d in zip(pitch, durations)]
pitch_lengths = durations_lengths
else:
pitch_lengths = input_waves.new_tensor([len(p) for p in pitch], dtype=torch.long)
# Padding
pitch = pad_list(pitch, 0.0)
# Return with the shape (B, T, 1)
if norm_by_average:
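# normalize by the mean of the voiced (non-zero) F0 values of the first sequence in the batch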
average = pitch[0][pitch[0] != 0.0].mean()
pitch = pitch / average
return pitch.unsqueeze(-1), pitch_lengths
def _calculate_f0(self, input):
x = input.cpu().numpy().astype(np.double)
f0, timeaxis = pyworld.dio(x, self.fs, f0_floor=self.f0min, f0_ceil=self.f0max, frame_period=self.frame_period)
f0 = pyworld.stonemask(x, f0, timeaxis, self.fs)
if self.use_continuous_f0:
f0 = self._convert_to_continuous_f0(f0)
if self.use_log_f0:
nonzero_idxs = np.where(f0 != 0)[0]
f0[nonzero_idxs] = np.log(f0[nonzero_idxs])
return input.new_tensor(f0.reshape(-1), dtype=torch.float)
@staticmethod
def _adjust_num_frames(x, num_frames):
if num_frames > len(x):
x = F.pad(x, (0, num_frames - len(x)))
elif num_frames < len(x):
x = x[:num_frames]
return x
@staticmethod
def _convert_to_continuous_f0(f0: np.array):
if (f0 == 0).all():
return f0
# padding start and end of f0 sequence
start_f0 = f0[f0 != 0][0]
end_f0 = f0[f0 != 0][-1]
start_idx = np.where(f0 == start_f0)[0][0]
end_idx = np.where(f0 == end_f0)[0][-1]
f0[:start_idx] = start_f0
f0[end_idx:] = end_f0
# get non-zero frame index
nonzero_idxs = np.where(f0 != 0)[0]
# perform linear interpolation
interp_fn = interp1d(nonzero_idxs, f0[nonzero_idxs])
f0 = interp_fn(np.arange(0, f0.shape[0]))
return f0
def _average_by_duration(self, x, d, text=None):
assert 0 <= len(x) - d.sum() < self.reduction_factor
d_cumsum = F.pad(d.cumsum(dim=0), (1, 0))
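# average the voiced (> 0) pitch values inside each token's frame span; spans with no voiced frames get 0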
x_avg = [
x[start:end].masked_select(x[start:end].gt(0.0)).mean(dim=0) if len(x[start:end].masked_select(x[start:end].gt(0.0))) != 0 else x.new_tensor(0.0)
for start, end in zip(d_cumsum[:-1], d_cumsum[1:])]
# find tokens that are not phones and set pitch to 0
if text is not None:
for i, vector in enumerate(text):
if vector[13] == 0:
# idx 13 corresponds to 'phoneme' feature
x_avg[i] = torch.tensor(0.0)
return torch.stack(x_avg)
| [
"torch.stack",
"torch.tensor"
] | 1.10.1 | Adamantcat/IMS-Toucan | 1ae02026a2a3233aaacc9d3a63d391918a2581e8 |
1.0 | import csv
import os
import os.path
import shutil
import tempfile
import unittest
from unittest import mock
import torch
import pandas as pd
from jiant import evaluate
import jiant.tasks.tasks as tasks
from jiant.models import MultiTaskModel
from jiant.__main__ import evaluate_and_write
from jiant.allennlp_mods.numeric_field import NumericField
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data import Instance, Token, vocabulary
from allennlp.data.fields import LabelField, ListField, MetadataField, TextField
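# Mocked forward pass: returns fixed logits / labels / preds per task so evaluate_and_write
# can be exercised without a real model.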
def model_forward(task, batch, predict=True):
if task.name == "sts-b":
logits = torch.Tensor([0.6, 0.4])
labels = torch.Tensor([0.875, 0.6])
out = {"logits": logits, "labels": labels, "n_exs": 2, "preds": [1.0, 0.8]}
elif task.name == "wic":
logits = torch.Tensor([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5], [0.5, 0.5]])
labels = torch.LongTensor([0, 1, 1, 0])
out = {"logits": logits, "labels": labels, "n_exs": 4, "preds": [0, 1, 1, 1]}
else:
raise ValueError("Unexpected task found")
task.update_metrics(out, batch)
return out
class TestWritePreds(unittest.TestCase):
def sentence_to_text_field(self, sent, indexers):
""" Helper function to map a sequence of tokens into a sequence of
AllenNLP Tokens, then wrap in a TextField with the given indexers """
return TextField(list(map(Token, sent)), token_indexers=indexers)
def setUp(self):
"""
Since we're testing write_preds, we need to mock model predictions and the parts
of the model, arguments, and trainer needed to write predictions.
Unlike in the update_metrics tests, the actual contents of the examples in val_data
are not that important, as long as they adhere to the API expected for examples
of that task.
"""
self.temp_dir = tempfile.mkdtemp()
self.path = os.path.join(self.temp_dir, "temp_dataset.tsv")
self.stsb = tasks.STSBTask(self.temp_dir, 100, "sts-b", tokenizer_name="MosesTokenizer")
self.wic = tasks.WiCTask(self.temp_dir, 100, "wic", tokenizer_name="MosesTokenizer")
stsb_val_preds = pd.DataFrame(
data=[
{
"idx": 0,
"labels": 1.00,
"preds": 1.00,
"sent1_str": "A man with a hard hat is dancing.",
"sent2_str": "A man wearing a hard hat is dancing",
},
{
"idx": 1,
"labels": 0.950,
"preds": 0.34,
"sent1_str": "A young child is riding a horse.",
"sent2_str": "A child is riding a horse.",
},
]
)
wic_val_preds = pd.DataFrame(
data=[
{
"idx": 0,
"sent1": "Room and board. ",
"sent2": "He nailed boards across the windows.",
"labels": 0,
"preds": 0,
},
{
"idx": 1,
"sent1": "Hook a fish",
"sent2": "He hooked a snake accidentally.",
"labels": 1,
"preds": 1,
},
]
)
indexers = {"bert_cased": SingleIdTokenIndexer("bert-xe-cased")}
self.wic.set_instance_iterable(
"val",
[
Instance(
{
"sent1_str": MetadataField("Room and board."),
"sent2_str": MetadataField("He nailed boards"),
"idx": LabelField(0, skip_indexing=True),
"idx2": NumericField(2),
"idx1": NumericField(3),
"inputs": self.sentence_to_text_field(
[
"[CLS]",
"Room",
"and",
"Board",
".",
"[SEP]",
"He",
"nailed",
"boards",
"[SEP]",
],
indexers,
),
"labels": LabelField(0, skip_indexing=1),
}
),
Instance(
{
"sent1_str": MetadataField("C ##ir ##culate a rumor ."),
"sent2_str": MetadataField("This letter is being circulated"),
"idx": LabelField(1, skip_indexing=True),
"idx2": NumericField(2),
"idx1": NumericField(3),
"inputs": self.sentence_to_text_field(
[
"[CLS]",
"C",
"##ir",
"##culate",
"a",
"rumor",
"[SEP]",
"This",
"##let",
"##ter",
"is",
"being",
"c",
"##ir",
"##culated",
"[SEP]",
],
indexers,
),
"labels": LabelField(0, skip_indexing=1),
}
),
Instance(
{
"sent1_str": MetadataField("Hook a fish'"),
"sent2_str": MetadataField("He hooked a snake accidentally"),
"idx": LabelField(2, skip_indexing=True),
"idx2": NumericField(2),
"idx1": NumericField(3),
"inputs": self.sentence_to_text_field(
[
"[CLS]",
"Hook",
"a",
"fish",
"[SEP]",
"He",
"hooked",
"a",
"snake",
"accidentally",
"[SEP]",
],
indexers,
),
"labels": LabelField(1, skip_indexing=1),
}
),
Instance(
{
"sent1_str": MetadataField("For recreation he wrote poetry."),
"sent2_str": MetadataField("Drug abuse is often regarded as recreation ."),
"idx": LabelField(3, skip_indexing=True),
"idx2": NumericField(2),
"idx1": NumericField(3),
"inputs": self.sentence_to_text_field(
[
"[CLS]",
"For",
"re",
"##creation",
"he",
"wrote",
"poetry",
"[SEP]",
"Drug",
"abuse",
"is",
"often",
"re",
"##garded",
"as",
"re",
"##creation",
"[SEP]",
],
indexers,
),
"labels": LabelField(1, skip_indexing=1),
}
),
],
)
self.val_preds = {"sts-b": stsb_val_preds, "wic": wic_val_preds}
self.vocab = vocabulary.Vocabulary.from_instances(self.wic.get_instance_iterable("val"))
self.vocab.add_token_to_namespace("True", "wic_tags")
for data in self.wic.get_instance_iterable("val"):
data.index_fields(self.vocab)
self.glue_tasks = [self.stsb, self.wic]
self.args = mock.Mock()
self.args.batch_size = 4
self.args.cuda = -1
self.args.run_dir = self.temp_dir
self.args.exp_dir = ""
def test_write_preds_does_run(self):
evaluate.write_preds(
self.glue_tasks, self.val_preds, self.temp_dir, "test", strict_glue_format=True
)
assert os.path.exists(self.temp_dir + "/STS-B.tsv") and os.path.exists(
self.temp_dir + "/WiC.jsonl"
)
def test_write_preds_glue(self):
evaluate.write_preds(
self.glue_tasks, self.val_preds, self.temp_dir, "test", strict_glue_format=True
)
stsb_predictions = pd.read_csv(self.temp_dir + "/STS-B.tsv", sep="\t")
assert "index" in stsb_predictions.columns and "prediction" in stsb_predictions.columns
assert stsb_predictions.iloc[0]["prediction"] == 5.00
assert stsb_predictions.iloc[1]["prediction"] == 1.7
def test_write_preds_superglue(self):
"""
Ensure that SuperGLUE test-split predictions are written in the correct file
format.
"""
evaluate.write_preds(
[self.wic], self.val_preds, self.temp_dir, "test", strict_glue_format=True
)
wic_predictions = pd.read_json(self.temp_dir + "/WiC.jsonl", lines=True)
assert "idx" in wic_predictions.columns and "label" in wic_predictions.columns
assert wic_predictions.iloc[0]["label"] == "false"
assert wic_predictions.iloc[1]["label"] == "true"
@mock.patch("jiant.models.MultiTaskModel.forward", side_effect=model_forward)
def test_evaluate_and_write_does_run(self, model_forward_function):
"""
Testing that evaluate_and_write runs without breaking.
"""
with mock.patch("jiant.models.MultiTaskModel") as MockModel:
MockModel.return_value.eval.return_value = None
MockModel.return_value.forward = model_forward
MockModel.use_bert = 1
model = MockModel()
evaluate_and_write(self.args, model, [self.wic], splits_to_write="val", cuda_device=-1)
def tear_down(self):
shutil.rmtree(self.temp_dir)
| [
"torch.LongTensor",
"torch.Tensor"
] | 1.0 | YianZhang/jiant-v1-legacy-online-code | b6b1066de7cdbe1b95ca1ae3de6989d07b2e9629 |
1.0 | import unittest
import torch
from jiant.metrics.nli_metrics import NLITwoClassAccuracy
class TestNLIMetric(unittest.TestCase):
def test_two_class_acc_w_two_class_data_and_model(self):
nli_scorer = NLITwoClassAccuracy()
# Note: predictions are of shape num_batches x batch_size x num_classes
predictions = torch.Tensor([[[0, 1], [0, 1]], [[1, 0], [1, 0]]])
true_labels = torch.Tensor([[1, 1], [0, 0]])
nli_scorer(predictions, true_labels)
acc = nli_scorer.get_metric(reset=True)
assert acc == 1.0
predictions = torch.Tensor([[[1, 0], [1, 0]], [[1, 0], [0, 1]]])
true_labels = torch.Tensor([[1, 1], [0, 0]])
nli_scorer(predictions, true_labels)
acc = nli_scorer.get_metric(reset=True)
assert acc == 1.0 / 4.0
def test_two_class_acc_w_two_class_data(self):
nli_scorer = NLITwoClassAccuracy()
# Note: predictions are of shape num_batches x batch_size x num_classes
predictions = torch.Tensor([[[0, 1, 0], [0, 1, 0]], [[0, 0, 1], [1, 0, 0]]])
true_labels = torch.Tensor([[1, 1], [0, 0]])
nli_scorer(predictions, true_labels)
acc = nli_scorer.get_metric(reset=True)
assert acc == 1.0
predictions = torch.Tensor([[[1, 0, 0], [1, 0, 0]], [[0, 0, 1], [0, 1, 0]]])
true_labels = torch.Tensor([[1, 1], [0, 0]])
nli_scorer(predictions, true_labels)
acc = nli_scorer.get_metric(reset=True)
assert acc == 1.0 / 4.0
def test_two_class_acc_w_two_class_model(self):
nli_scorer = NLITwoClassAccuracy()
# Note: predictions are of shape num_batches x batch_size x num_classes
predictions = torch.Tensor([[[0, 1], [0, 1]], [[1, 0], [1, 0]]])
true_labels = torch.Tensor([[1, 1], [2, 0]])
nli_scorer(predictions, true_labels)
acc = nli_scorer.get_metric(reset=True)
assert acc == 1.0
predictions = torch.Tensor([[[1, 0], [1, 0]], [[1, 0], [0, 1]]])
true_labels = torch.Tensor([[1, 1], [2, 0]])
nli_scorer(predictions, true_labels)
acc = nli_scorer.get_metric(reset=True)
assert acc == 1.0 / 4.0
| [
"torch.Tensor"
] | 1.0 | YianZhang/jiant-v1-legacy-online-code | b6b1066de7cdbe1b95ca1ae3de6989d07b2e9629 |
1.8 | import torch
from ...utils import box_coder_utils, box_utils
from .point_head_template import PointHeadTemplate
class PointIntraPartOffsetHead(PointHeadTemplate):
"""
Point-based head for predicting the intra-object part locations.
Reference Paper: https://arxiv.org/abs/1907.03670
From Points to Parts: 3D Object Detection from Point Cloud with Part-aware and Part-aggregation Network
"""
def __init__(self, num_class, input_channels, model_cfg, predict_boxes_when_training=False, **kwargs):
super().__init__(model_cfg=model_cfg, num_class=num_class)
self.predict_boxes_when_training = predict_boxes_when_training
self.cls_layers = self.make_fc_layers(
fc_cfg=self.model_cfg.CLS_FC,
input_channels=input_channels,
output_channels=num_class
)
self.part_reg_layers = self.make_fc_layers(
fc_cfg=self.model_cfg.PART_FC,
input_channels=input_channels,
output_channels=3
)
target_cfg = self.model_cfg.TARGET_CONFIG
if target_cfg.get('BOX_CODER', None) is not None:
self.box_coder = getattr(box_coder_utils, target_cfg.BOX_CODER)(
**target_cfg.BOX_CODER_CONFIG
)
self.box_layers = self.make_fc_layers(
fc_cfg=self.model_cfg.REG_FC,
input_channels=input_channels,
output_channels=self.box_coder.code_size
)
else:
self.box_layers = None
def assign_targets(self, input_dict):
"""
Args:
input_dict:
point_features: (N1 + N2 + N3 + ..., C)
batch_size:
point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
gt_boxes (optional): (B, M, 8)
Returns:
point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
point_part_labels: (N1 + N2 + N3 + ..., 3)
"""
point_coords = input_dict['point_coords']
gt_boxes = input_dict['gt_boxes']
assert gt_boxes.shape.__len__() == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
assert point_coords.shape.__len__() in [2], 'points.shape=%s' % str(point_coords.shape)
batch_size = gt_boxes.shape[0]
extend_gt_boxes = box_utils.enlarge_box3d(
gt_boxes.view(-1, gt_boxes.shape[-1]), extra_width=self.model_cfg.TARGET_CONFIG.GT_EXTRA_WIDTH
).view(batch_size, -1, gt_boxes.shape[-1])
targets_dict = self.assign_stack_targets(
points=point_coords, gt_boxes=gt_boxes, extend_gt_boxes=extend_gt_boxes,
set_ignore_flag=True, use_ball_constraint=False,
ret_part_labels=True, ret_box_labels=(self.box_layers is not None)
)
return targets_dict
def get_loss(self, tb_dict=None):
tb_dict = {} if tb_dict is None else tb_dict
point_loss_cls, tb_dict = self.get_cls_layer_loss(tb_dict)
point_loss_part, tb_dict = self.get_part_layer_loss(tb_dict)
point_loss = point_loss_cls + point_loss_part
if self.box_layers is not None:
point_loss_box, tb_dict = self.get_box_layer_loss(tb_dict)
point_loss += point_loss_box
return point_loss, tb_dict
def forward(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
point_features: (N1 + N2 + N3 + ..., C) or (B, N, C)
point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
point_labels (optional): (N1 + N2 + N3 + ...)
gt_boxes (optional): (B, M, 8)
Returns:
batch_dict:
point_cls_scores: (N1 + N2 + N3 + ..., 1)
point_part_offset: (N1 + N2 + N3 + ..., 3)
"""
point_features = batch_dict['point_features']
point_cls_preds = self.cls_layers(point_features) # (total_points, num_class)
point_part_preds = self.part_reg_layers(point_features)
ret_dict = {
'point_cls_preds': point_cls_preds,
'point_part_preds': point_part_preds,
}
if self.box_layers is not None:
point_box_preds = self.box_layers(point_features)
ret_dict['point_box_preds'] = point_box_preds
point_cls_scores = torch.sigmoid(point_cls_preds)
point_part_offset = torch.sigmoid(point_part_preds)
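# part offsets are squashed to [0, 1]; they encode the predicted intra-object part location along each axis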
batch_dict['point_cls_scores'], _ = point_cls_scores.max(dim=-1)
batch_dict['point_part_offset'] = point_part_offset
if self.training:
targets_dict = self.assign_targets(batch_dict)
ret_dict['point_cls_labels'] = targets_dict['point_cls_labels']
ret_dict['point_part_labels'] = targets_dict.get('point_part_labels')
ret_dict['point_box_labels'] = targets_dict.get('point_box_labels')
if self.box_layers is not None and (not self.training or self.predict_boxes_when_training):
point_cls_preds, point_box_preds = self.generate_predicted_boxes(
points=batch_dict['point_coords'][:, 1:4],
point_cls_preds=point_cls_preds, point_box_preds=ret_dict['point_box_preds']
)
batch_dict['batch_cls_preds'] = point_cls_preds
batch_dict['batch_box_preds'] = point_box_preds
batch_dict['batch_index'] = batch_dict['point_coords'][:, 0]
batch_dict['cls_preds_normalized'] = False
self.forward_ret_dict = ret_dict
return batch_dict
| [
"torch.sigmoid"
] | 1.8 | Gltina/OpenPCDet | e32dc7f8f903a3f0e1c93effc68d74dbe16766e2 |
1.8 | import os
import torch
import torch.nn as nn
from ...ops.iou3d_nms import iou3d_nms_utils
from .. import backbones_2d, backbones_3d, dense_heads, roi_heads
from ..backbones_2d import map_to_bev
from ..backbones_3d import pfe, vfe
from ..model_utils import model_nms_utils
class Detector3DTemplate(nn.Module):
def __init__(self, model_cfg, num_class, dataset):
super().__init__()
self.model_cfg = model_cfg
self.num_class = num_class
self.dataset = dataset
self.class_names = dataset.class_names
self.register_buffer('global_step', torch.LongTensor(1).zero_())
self.module_topology = [
'vfe', 'backbone_3d', 'map_to_bev_module', 'pfe',
'backbone_2d', 'dense_head', 'point_head', 'roi_head'
]
@property
def mode(self):
return 'TRAIN' if self.training else 'TEST'
def update_global_step(self):
self.global_step += 1
def build_networks(self):
model_info_dict = {
'module_list': [],
'num_rawpoint_features': self.dataset.point_feature_encoder.num_point_features,
'num_point_features': self.dataset.point_feature_encoder.num_point_features,
'grid_size': self.dataset.grid_size,
'point_cloud_range': self.dataset.point_cloud_range,
'voxel_size': self.dataset.voxel_size
}
for module_name in self.module_topology:
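# build each sub-module by name via its build_* method; each module consumes and updates model_info_dict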
module, model_info_dict = getattr(self, 'build_%s' % module_name)(
model_info_dict=model_info_dict
)
self.add_module(module_name, module)
return model_info_dict['module_list']
def build_vfe(self, model_info_dict):
if self.model_cfg.get('VFE', None) is None:
return None, model_info_dict
vfe_module = vfe.__all__[self.model_cfg.VFE.NAME](
model_cfg=self.model_cfg.VFE,
num_point_features=model_info_dict['num_rawpoint_features'],
point_cloud_range=model_info_dict['point_cloud_range'],
voxel_size=model_info_dict['voxel_size']
)
model_info_dict['num_point_features'] = vfe_module.get_output_feature_dim()
model_info_dict['module_list'].append(vfe_module)
return vfe_module, model_info_dict
def build_backbone_3d(self, model_info_dict):
if self.model_cfg.get('BACKBONE_3D', None) is None:
return None, model_info_dict
backbone_3d_module = backbones_3d.__all__[self.model_cfg.BACKBONE_3D.NAME](
model_cfg=self.model_cfg.BACKBONE_3D,
input_channels=model_info_dict['num_point_features'],
grid_size=model_info_dict['grid_size'],
voxel_size=model_info_dict['voxel_size'],
point_cloud_range=model_info_dict['point_cloud_range']
)
model_info_dict['module_list'].append(backbone_3d_module)
model_info_dict['num_point_features'] = backbone_3d_module.num_point_features
return backbone_3d_module, model_info_dict
def build_map_to_bev_module(self, model_info_dict):
if self.model_cfg.get('MAP_TO_BEV', None) is None:
return None, model_info_dict
map_to_bev_module = map_to_bev.__all__[self.model_cfg.MAP_TO_BEV.NAME](
model_cfg=self.model_cfg.MAP_TO_BEV,
grid_size=model_info_dict['grid_size']
)
model_info_dict['module_list'].append(map_to_bev_module)
model_info_dict['num_bev_features'] = map_to_bev_module.num_bev_features
return map_to_bev_module, model_info_dict
def build_backbone_2d(self, model_info_dict):
if self.model_cfg.get('BACKBONE_2D', None) is None:
return None, model_info_dict
backbone_2d_module = backbones_2d.__all__[self.model_cfg.BACKBONE_2D.NAME](
model_cfg=self.model_cfg.BACKBONE_2D,
input_channels=model_info_dict['num_bev_features']
)
model_info_dict['module_list'].append(backbone_2d_module)
model_info_dict['num_bev_features'] = backbone_2d_module.num_bev_features
return backbone_2d_module, model_info_dict
def build_pfe(self, model_info_dict):
if self.model_cfg.get('PFE', None) is None:
return None, model_info_dict
pfe_module = pfe.__all__[self.model_cfg.PFE.NAME](
model_cfg=self.model_cfg.PFE,
voxel_size=model_info_dict['voxel_size'],
point_cloud_range=model_info_dict['point_cloud_range'],
num_bev_features=model_info_dict['num_bev_features'],
num_rawpoint_features=model_info_dict['num_rawpoint_features']
)
model_info_dict['module_list'].append(pfe_module)
model_info_dict['num_point_features'] = pfe_module.num_point_features
model_info_dict['num_point_features_before_fusion'] = pfe_module.num_point_features_before_fusion
return pfe_module, model_info_dict
def build_dense_head(self, model_info_dict):
if self.model_cfg.get('DENSE_HEAD', None) is None:
return None, model_info_dict
dense_head_module = dense_heads.__all__[self.model_cfg.DENSE_HEAD.NAME](
model_cfg=self.model_cfg.DENSE_HEAD,
input_channels=model_info_dict['num_bev_features'],
num_class=self.num_class if not self.model_cfg.DENSE_HEAD.CLASS_AGNOSTIC else 1,
class_names=self.class_names,
grid_size=model_info_dict['grid_size'],
point_cloud_range=model_info_dict['point_cloud_range'],
predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False)
)
model_info_dict['module_list'].append(dense_head_module)
return dense_head_module, model_info_dict
def build_point_head(self, model_info_dict):
if self.model_cfg.get('POINT_HEAD', None) is None:
return None, model_info_dict
if self.model_cfg.POINT_HEAD.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
num_point_features = model_info_dict['num_point_features_before_fusion']
else:
num_point_features = model_info_dict['num_point_features']
point_head_module = dense_heads.__all__[self.model_cfg.POINT_HEAD.NAME](
model_cfg=self.model_cfg.POINT_HEAD,
input_channels=num_point_features,
num_class=self.num_class if not self.model_cfg.POINT_HEAD.CLASS_AGNOSTIC else 1,
predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False)
)
model_info_dict['module_list'].append(point_head_module)
return point_head_module, model_info_dict
def build_roi_head(self, model_info_dict):
if self.model_cfg.get('ROI_HEAD', None) is None:
return None, model_info_dict
point_head_module = roi_heads.__all__[self.model_cfg.ROI_HEAD.NAME](
model_cfg=self.model_cfg.ROI_HEAD,
input_channels=model_info_dict['num_point_features'],
num_class=self.num_class if not self.model_cfg.ROI_HEAD.CLASS_AGNOSTIC else 1,
)
model_info_dict['module_list'].append(point_head_module)
return point_head_module, model_info_dict
def forward(self, **kwargs):
raise NotImplementedError
def post_processing(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
or [(B, num_boxes, num_class1), (B, num_boxes, num_class2) ...]
multihead_label_mapping: [(num_class1), (num_class2), ...]
batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
cls_preds_normalized: indicate whether batch_cls_preds is normalized
batch_index: optional (N1+N2+...)
has_class_labels: True/False
roi_labels: (B, num_rois) 1 .. num_classes
batch_pred_labels: (B, num_boxes, 1)
Returns:
"""
post_process_cfg = self.model_cfg.POST_PROCESSING
batch_size = batch_dict['batch_size']
recall_dict = {}
pred_dicts = []
for index in range(batch_size):
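# predictions may come flattened across the batch with an explicit batch_index, or as a dense (B, num_boxes, ...) tensor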
if batch_dict.get('batch_index', None) is not None:
assert batch_dict['batch_box_preds'].shape.__len__() == 2
batch_mask = (batch_dict['batch_index'] == index)
else:
assert batch_dict['batch_box_preds'].shape.__len__() == 3
batch_mask = index
box_preds = batch_dict['batch_box_preds'][batch_mask]
src_box_preds = box_preds
if not isinstance(batch_dict['batch_cls_preds'], list):
cls_preds = batch_dict['batch_cls_preds'][batch_mask]
src_cls_preds = cls_preds
assert cls_preds.shape[1] in [1, self.num_class]
if not batch_dict['cls_preds_normalized']:
cls_preds = torch.sigmoid(cls_preds)
else:
cls_preds = [x[batch_mask] for x in batch_dict['batch_cls_preds']]
src_cls_preds = cls_preds
if not batch_dict['cls_preds_normalized']:
cls_preds = [torch.sigmoid(x) for x in cls_preds]
if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
if not isinstance(cls_preds, list):
cls_preds = [cls_preds]
multihead_label_mapping = [torch.arange(1, self.num_class, device=cls_preds[0].device)]
else:
multihead_label_mapping = batch_dict['multihead_label_mapping']
cur_start_idx = 0
pred_scores, pred_labels, pred_boxes = [], [], []
for cur_cls_preds, cur_label_mapping in zip(cls_preds, multihead_label_mapping):
assert cur_cls_preds.shape[1] == len(cur_label_mapping)
cur_box_preds = box_preds[cur_start_idx: cur_start_idx + cur_cls_preds.shape[0]]
cur_pred_scores, cur_pred_labels, cur_pred_boxes = model_nms_utils.multi_classes_nms(
cls_scores=cur_cls_preds, box_preds=cur_box_preds,
nms_config=post_process_cfg.NMS_CONFIG,
score_thresh=post_process_cfg.SCORE_THRESH
)
cur_pred_labels = cur_label_mapping[cur_pred_labels]
pred_scores.append(cur_pred_scores)
pred_labels.append(cur_pred_labels)
pred_boxes.append(cur_pred_boxes)
cur_start_idx += cur_cls_preds.shape[0]
final_scores = torch.cat(pred_scores, dim=0)
final_labels = torch.cat(pred_labels, dim=0)
final_boxes = torch.cat(pred_boxes, dim=0)
else:
cls_preds, label_preds = torch.max(cls_preds, dim=-1)
if batch_dict.get('has_class_labels', False):
label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels'
label_preds = batch_dict[label_key][index]
else:
label_preds = label_preds + 1
selected, selected_scores = model_nms_utils.class_agnostic_nms(
box_scores=cls_preds, box_preds=box_preds,
nms_config=post_process_cfg.NMS_CONFIG,
score_thresh=post_process_cfg.SCORE_THRESH
)
if post_process_cfg.OUTPUT_RAW_SCORE:
max_cls_preds, _ = torch.max(src_cls_preds, dim=-1)
selected_scores = max_cls_preds[selected]
final_scores = selected_scores
final_labels = label_preds[selected]
final_boxes = box_preds[selected]
recall_dict = self.generate_recall_record(
box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
thresh_list=post_process_cfg.RECALL_THRESH_LIST
)
record_dict = {
'pred_boxes': final_boxes,
'pred_scores': final_scores,
'pred_labels': final_labels
}
pred_dicts.append(record_dict)
return pred_dicts, recall_dict
@staticmethod
def generate_recall_record(box_preds, recall_dict, batch_index, data_dict=None, thresh_list=None):
if 'gt_boxes' not in data_dict:
return recall_dict
rois = data_dict['rois'][batch_index] if 'rois' in data_dict else None
gt_boxes = data_dict['gt_boxes'][batch_index]
if recall_dict.__len__() == 0:
recall_dict = {'gt': 0}
for cur_thresh in thresh_list:
recall_dict['roi_%s' % (str(cur_thresh))] = 0
recall_dict['rcnn_%s' % (str(cur_thresh))] = 0
cur_gt = gt_boxes
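# strip zero-padded ground-truth boxes appended at the end of each batch entry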
k = cur_gt.__len__() - 1
while k > 0 and cur_gt[k].sum() == 0:
k -= 1
cur_gt = cur_gt[:k + 1]
if cur_gt.shape[0] > 0:
if box_preds.shape[0] > 0:
iou3d_rcnn = iou3d_nms_utils.boxes_iou3d_gpu(box_preds[:, 0:7], cur_gt[:, 0:7])
else:
iou3d_rcnn = torch.zeros((0, cur_gt.shape[0]))
if rois is not None:
iou3d_roi = iou3d_nms_utils.boxes_iou3d_gpu(rois[:, 0:7], cur_gt[:, 0:7])
for cur_thresh in thresh_list:
if iou3d_rcnn.shape[0] == 0:
recall_dict['rcnn_%s' % str(cur_thresh)] += 0
else:
rcnn_recalled = (iou3d_rcnn.max(dim=0)[0] > cur_thresh).sum().item()
recall_dict['rcnn_%s' % str(cur_thresh)] += rcnn_recalled
if rois is not None:
roi_recalled = (iou3d_roi.max(dim=0)[0] > cur_thresh).sum().item()
recall_dict['roi_%s' % str(cur_thresh)] += roi_recalled
recall_dict['gt'] += cur_gt.shape[0]
else:
gt_iou = box_preds.new_zeros(box_preds.shape[0])
return recall_dict
def load_params_from_file(self, filename, logger, to_cpu=False):
if not os.path.isfile(filename):
raise FileNotFoundError
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
checkpoint = torch.load(filename, map_location=loc_type)
model_state_disk = checkpoint['model_state']
if 'version' in checkpoint:
logger.info('==> Checkpoint trained from version: %s' % checkpoint['version'])
update_model_state = {}
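# only keep checkpoint weights whose name and shape match the current model (allows partial loading)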
for key, val in model_state_disk.items():
if key in self.state_dict() and self.state_dict()[key].shape == model_state_disk[key].shape:
update_model_state[key] = val
# logger.info('Update weight %s: %s' % (key, str(val.shape)))
state_dict = self.state_dict()
state_dict.update(update_model_state)
self.load_state_dict(state_dict)
for key in state_dict:
if key not in update_model_state:
logger.info('Not updated weight %s: %s' % (key, str(state_dict[key].shape)))
logger.info('==> Done (loaded %d/%d)' % (len(update_model_state), len(self.state_dict())))
def load_params_with_optimizer(self, filename, to_cpu=False, optimizer=None, logger=None):
if not os.path.isfile(filename):
raise FileNotFoundError
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
checkpoint = torch.load(filename, map_location=loc_type)
epoch = checkpoint.get('epoch', -1)
it = checkpoint.get('it', 0.0)
self.load_state_dict(checkpoint['model_state'])
if optimizer is not None:
if 'optimizer_state' in checkpoint and checkpoint['optimizer_state'] is not None:
logger.info('==> Loading optimizer parameters from checkpoint %s to %s'
% (filename, 'CPU' if to_cpu else 'GPU'))
optimizer.load_state_dict(checkpoint['optimizer_state'])
else:
assert filename[-4] == '.', filename
src_file, ext = filename[:-4], filename[-3:]
optimizer_filename = '%s_optim.%s' % (src_file, ext)
if os.path.exists(optimizer_filename):
optimizer_ckpt = torch.load(optimizer_filename, map_location=loc_type)
optimizer.load_state_dict(optimizer_ckpt['optimizer_state'])
if 'version' in checkpoint:
print('==> Checkpoint trained from version: %s' % checkpoint['version'])
logger.info('==> Done')
return it, epoch
| [
"torch.zeros",
"torch.device",
"torch.cat",
"torch.sigmoid",
"torch.arange",
"torch.max",
"torch.LongTensor",
"torch.load"
] | 1.8 | Gltina/OpenPCDet | e32dc7f8f903a3f0e1c93effc68d74dbe16766e2 |
1.3 | """ Training augmented model """
import os
import torch
import torch.nn as nn
import numpy as np
from tensorboardX import SummaryWriter
from config import AugmentConfig
import utils
from models.augment_cnn import AugmentCNN
import copy
config = AugmentConfig()
device = torch.device("cuda")
# tensorboard
writer = SummaryWriter(log_dir=os.path.join(config.path, "tb"))
writer.add_text('config', config.as_markdown(), 0)
logger = utils.get_logger(os.path.join(config.path, "{}.log".format(config.name)))
config.print_params(logger.info)
class Architect():
""" Compute gradients of alphas """
def __init__(self, net, w_momentum, w_weight_decay):
"""
Args:
net
w_momentum: weights momentum
"""
self.net = net
self.v_net = copy.deepcopy(net)
self.w_momentum = w_momentum
self.w_weight_decay = w_weight_decay
def virtual_step(self, trn_X, trn_y, xi, w_optim, model, Likelihood, batch_size, step):
"""
Compute unrolled weight w' (virtual step)
Step process:
1) forward
2) calc loss
3) compute gradient (by backprop)
4) update gradient
Args:
xi: learning rate for virtual gradient step (same as weights lr)
w_optim: weights optimizer
"""
# forward & calc loss
dataIndex = len(trn_y)+step*batch_size
ignore_crit = nn.CrossEntropyLoss(reduction='none').cuda()
# forward
logits,_ = self.net(trn_X)
# sigmoid loss
loss = torch.dot(torch.sigmoid(Likelihood[step*batch_size:dataIndex]), ignore_crit(logits, trn_y))/(torch.sigmoid(Likelihood[step*batch_size:dataIndex]).sum())
loss.backward()
dtloss_ll = Likelihood.grad
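# dtloss_ll: gradient of the weighted training loss w.r.t. the per-example Likelihood parameters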
dtloss_w = []
# do virtual step (update gradient)
# below operations do not need gradient tracking
with torch.no_grad():
# dict keys are the parameter objects themselves (pointers), so the original
# network's weights have to be iterated over as well
for w, vw in zip(self.net.weights(), self.v_net.weights()):
m = w_optim.state[w].get('momentum_buffer', 0.) * self.w_momentum
if w.grad is not None:
vw.copy_(w - xi * (m + w.grad ))
dtloss_w.append(m + w.grad )
elif w.grad is None:
dtloss_w.append(w.grad )
return dtloss_w, dtloss_ll
# 1399:[48, 3, 3, 3], 1:25000
def unrolled_backward(self, trn_X, trn_y, val_X, val_y, xi, w_optim, model, likelihood, Likelihood_optim, batch_size, step):
""" Compute unrolled loss and backward its gradients
Args:
xi: learning rate for virtual gradient step (same as net lr)
w_optim: weights optimizer - for virtual step
"""
# do virtual step (calc w`)
dtloss_w, dtloss_ll = self.virtual_step(trn_X, trn_y, xi, w_optim, model, likelihood, batch_size, step)
logits, aux_logits = self.v_net(val_X)
# calc unrolled loss
ignore_crit = nn.CrossEntropyLoss(reduction='none').to(device)
dataIndex = len(trn_y)+step*batch_size
loss = torch.dot(torch.sigmoid(likelihood[step*batch_size:dataIndex]), ignore_crit(logits, trn_y))
loss = loss/(torch.sigmoid(likelihood[step*batch_size:dataIndex]).sum()) # L_val(w`)
# compute gradient
loss.backward()
dvloss_tloss = 0
for v, dt in zip(self.v_net.weights(), dtloss_w):
if v.grad is not None:
grad_valw_d_trainw = torch.div(v.grad, dt)
grad_valw_d_trainw[torch.isinf(grad_valw_d_trainw)] = 0
grad_valw_d_trainw[torch.isnan(grad_valw_d_trainw)] = 0
grad_val_train = torch.sum(grad_valw_d_trainw)
# print(grad_val_train)
dvloss_tloss += grad_val_train
dlikelihood = dvloss_tloss* dtloss_ll
vprec1, vprec5 = utils.accuracy(logits, val_y, topk=(1, 5))
Likelihood_optim.zero_grad()
likelihood.grad = dlikelihood
print(dvloss_tloss)
print(dtloss_ll)
print('likelihood gradient is:', likelihood.grad)
Likelihood_optim.step()
return likelihood, Likelihood_optim, loss, vprec1, vprec5
def main():
logger.info("Logger is set - training start")
# set default gpu device id
torch.cuda.set_device(config.gpus[0])
# set seed
np.random.seed(config.seed)
torch.manual_seed(config.seed)
torch.cuda.manual_seed_all(config.seed)
torch.backends.cudnn.benchmark = True
# get data with meta info
input_size, input_channels, n_classes, train_val_data, test_data = utils.get_data(
config.dataset, config.data_path, config.cutout_length, validation=True)
criterion = nn.CrossEntropyLoss().to(device)
use_aux = config.aux_weight > 0.
model = AugmentCNN(input_size, input_channels, config.init_channels, n_classes, config.layers,
use_aux, config.genotype).to(device) #single GPU
# model = nn.DataParallel(model, device_ids=config.gpus).to(device)
# model size
mb_params = utils.param_size(model)
logger.info("Model size = {:.3f} MB".format(mb_params))
# weights optimizer with SGD
optimizer = torch.optim.SGD(model.parameters(), config.lr, momentum=config.momentum,
weight_decay=config.weight_decay)
n_train = len(train_val_data)
split = n_train // 2
indices = list(range(n_train))
# each train data is endowed with a weight
Likelihood = torch.nn.Parameter(torch.ones(len(indices[:split])).cuda(),requires_grad=True)
Likelihood_optim = torch.optim.SGD({Likelihood}, config.lr)
# data split
train_data = torch.utils.data.Subset(train_val_data, indices[:split])
valid_data = torch.utils.data.Subset(train_val_data, indices[split:])
train_loader = torch.utils.data.DataLoader(train_data,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.workers,
pin_memory=False)
valid_loader = torch.utils.data.DataLoader(valid_data,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.workers,
pin_memory=False)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, config.epochs)
architect = Architect(model, 0.9, 3e-4)
best_top1 = 0.
# training loop
for epoch in range(config.epochs):
lr_scheduler.step()
lr = lr_scheduler.get_lr()[0]
drop_prob = config.drop_path_prob * epoch / config.epochs
model.drop_path_prob(drop_prob)
# training
train(train_loader, valid_loader, model, architect, optimizer, criterion, lr, epoch, Likelihood, Likelihood_optim, config.batch_size)
# validation
cur_step = (epoch+1) * len(train_loader)
top1 = validate(valid_loader, model, criterion, epoch, cur_step)
# save
if best_top1 < top1:
best_top1 = top1
is_best = True
else:
is_best = False
utils.save_checkpoint(model, config.path, is_best)
print("")
logger.info("Final best Prec@1 = {:.4%}".format(best_top1))
def train(train_loader, valid_loader, model, architect, optimizer, criterion, lr, epoch, Likelihood, Likelihood_optim, batch_size):
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
losses = utils.AverageMeter()
standard_losses = utils.AverageMeter()
valid_losses = utils.AverageMeter()
cur_step = epoch*len(train_loader)
cur_lr = optimizer.param_groups[0]['lr']
logger.info("Epoch {} LR {}".format(epoch, cur_lr))
writer.add_scalar('train/lr', cur_lr, cur_step)
model.train()
for step, ((trn_X, trn_y), (val_X, val_y)) in enumerate(zip(train_loader, valid_loader)):
trn_X, trn_y = trn_X.to(device, non_blocking=True), trn_y.to(device, non_blocking=True)
val_X, val_y = val_X.to(device, non_blocking=True), val_y.to(device, non_blocking=True)
N = trn_X.size(0)
M = val_X.size(0)
# phase 2. Likelihood step (Likelihood)
Likelihood_optim.zero_grad()
Likelihood, Likelihood_optim, valid_loss, vprec1, vprec5= architect.unrolled_backward(trn_X, trn_y, val_X, val_y, lr, optimizer, model, Likelihood, Likelihood_optim, batch_size, step)
# phase 1. network weight step (w)
optimizer.zero_grad()
logits, aux_logits = model(trn_X)
ignore_crit = nn.CrossEntropyLoss(reduction='none').to(device)
dataIndex = len(trn_y)+step*batch_size
loss = torch.dot(torch.sigmoid(Likelihood[step*batch_size:dataIndex]), ignore_crit(logits, trn_y))
loss = loss/(torch.sigmoid(Likelihood[step*batch_size:dataIndex]).sum())
'''
if config.aux_weight > 0.:
loss += config.aux_weight * criterion(aux_logits, y)
'''
loss.backward()
# gradient clipping
nn.utils.clip_grad_norm_(model.parameters(), config.grad_clip)
# update network weight on train data
optimizer.step()
#compare normal loss without weighted
standard_loss = criterion(logits, trn_y)
prec1, prec5 = utils.accuracy(logits, trn_y, topk=(1, 5))
losses.update(loss.item(), N)
standard_losses.update(standard_loss.item(), N)
valid_losses.update(valid_loss.item(), M)
top1.update(prec1.item(), N)
top5.update(prec5.item(), N)
if step % config.print_freq == 0 or step == len(train_loader)-1:
logger.info(
"Train: [{:3d}/{}] Step {:03d}/{:03d} Loss {losses.avg:.3f} standard Loss {slosses.avg:.3f} Valid Loss {vlosses.avg:.3f}"
" Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format(
epoch+1, config.epochs, step, len(train_loader)-1, losses=losses, slosses=standard_losses, vlosses=valid_losses,
top1=top1, top5=top5))
writer.add_scalar('train/loss', loss.item(), cur_step)
writer.add_scalar('train/top1', prec1.item(), cur_step)
writer.add_scalar('train/top5', prec5.item(), cur_step)
writer.add_scalar('val/loss', valid_loss.item(), cur_step)
            writer.add_scalar('val/top1', vprec1.item(), cur_step)
            writer.add_scalar('val/top5', vprec5.item(), cur_step)
cur_step += 1
logger.info("Train: [{:3d}/{}] Final Prec@1 {:.4%}".format(epoch+1, config.epochs, top1.avg))
def validate(valid_loader, model, criterion, epoch, cur_step):
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
losses = utils.AverageMeter()
model.eval()
with torch.no_grad():
for step,(X, y) in enumerate(valid_loader):
X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True)
N = X.size(0)
logits, _ = model(X)
loss = criterion(logits, y)
prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5))
losses.update(loss.item(), N)
top1.update(prec1.item(), N)
top5.update(prec5.item(), N)
if step % config.print_freq == 0 or step == len(valid_loader)-1:
logger.info(
"Test: [{:3d}/{}] Step {:03d}/{:03d} Loss {losses.avg:.3f} "
"Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format(
epoch+1, config.epochs, step, len(valid_loader)-1, losses=losses,
top1=top1, top5=top5))
writer.add_scalar('test/loss', losses.avg, cur_step)
writer.add_scalar('test/top1', top1.avg, cur_step)
writer.add_scalar('test/top5', top5.avg, cur_step)
logger.info("Test: [{:3d}/{}] Final Prec@1 {:.4%}".format(epoch+1, config.epochs, top1.avg))
return top1.avg
if __name__ == "__main__":
main()
| [
"torch.device",
"torch.sigmoid",
"torch.cuda.manual_seed_all",
"torch.isnan",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.no_grad",
"torch.optim.SGD",
"torch.manual_seed",
"torch.cuda.set_device",
"torch.utils.data.DataLoader",
"torch.isinf",
"torch.utils.data.Subset",
"torch.div",
"torch.nn.CrossEntropyLoss",
"torch.sum"
] | 1.3 | jkooy/darts_ignoring | 7ae7c769cffe81441af9e1a0e0b92552245ae1d1 |
1.8 | # CLI interface to decode task
import argparse
import sys
from argparse import ArgumentDefaultsHelpFormatter as ArgFormatter
import torch
from pathlib import Path
from rtg import TranslationExperiment as Experiment, log, yaml
from rtg.module.decoder import Decoder, ReloadEvent
from rtg.utils import IO
def parse_args():
parser = argparse.ArgumentParser(prog="rtg.decode", description="Decode using NMT model",
formatter_class=ArgFormatter)
parser.add_argument("work_dir", help="Working directory", type=str)
parser.add_argument("model_path", type=str, nargs='*',
help="Path to model's checkpoint. "
"If not specified, a best model (based on the score on validation set)"
" from the experiment directory will be used."
" If multiple paths are specified, then an ensembling is performed by"
" averaging the param weights")
parser.add_argument("-if", '--input', default=sys.stdin,
type=argparse.FileType('r', encoding='utf-8', errors='ignore'),
help='Input file path. default is STDIN')
parser.add_argument("-of", '--output', default=sys.stdout,
type=argparse.FileType('w', encoding='utf-8', errors='ignore'),
help='Output File path. default is STDOUT')
parser.add_argument("-bs", '--beam-size', type=int, default=5,
                        help='Beam size. beam_size=1 is greedy. '
                             'In theory, a higher beam gives a better approximation but is more expensive; '
                             'in practice, a higher beam does not always improve results.')
parser.add_argument("-bc", '--batch-size', type=int, default=1,
                        help='Approximate number of source tokens in a batch. '
                             'The decoder tries to fit at least one sentence, so even if you set 0 or 1 '
                             'there will be at least one sentence per batch. '
                             'One sentence per batch seems better on CPU, but a larger number is better on GPUs.')
parser.add_argument("-lp", '--lp-alpha', type=float, default=0.6,
                        help='Length penalty alpha; set <= 0.0 to disable. '
                             'Ideally in the range [0.0, 1.0]; you may experiment with values > 1.0, '
                             'but values below 0.0 are not allowed.')
parser.add_argument("-ml", '--max-len', type=int, default=60,
help='Maximum output sequence length. '
'Example: if max_len=10 and if source_len is 50, '
'then decoder goes up to 50+10 time steps in search of EOS token.')
parser.add_argument("-msl", '--max-src-len', type=int,
help='max source len; longer seqs will be truncated')
parser.add_argument("-nh", '--num-hyp', type=int, default=1,
                        help='Number of hypotheses to output. This should be smaller than beam_size')
parser.add_argument("--prepared", dest="prepared", action='store_true', default=None,
help='Each token is a valid integer which is an index to embedding,'
' so skip indexifying again')
parser.add_argument("-bp", '--binmt-path', type=str, default=None,
choices=['E1D1', 'E2D2', 'E1D2E2D1', 'E2D2E1D2', 'E1D2', 'E2D1'],
help='Sub module path inside BiNMT. applicable only when model is BiNMT')
parser.add_argument("-it", '--interactive', action='store_true',
help='Open interactive shell with decoder')
parser.add_argument("-sc", '--skip-check', action='store_true',
                        help='Skip checking whether the experiment dir is prepared and trained')
parser.add_argument("-en", '--ensemble', type=int, default=1,
help='Ensemble best --ensemble models by averaging them')
parser.add_argument("-cb", '--sys-comb', type=Path,
help='System combine models at the softmax layer using the weights'
' specified in this file. When this argument is supplied, model_path '
'argument is ignored.')
args = vars(parser.parse_args())
return args
def validate_args(args, exp: Experiment):
if not args.pop('skip_check'): # if --skip-check is not requested
assert exp.has_prepared(), \
f'Experiment dir {exp.work_dir} is not ready to train. Please run "prep" sub task'
assert exp.has_trained(), \
f'Experiment dir {exp.work_dir} is not ready to decode.' \
f' Please run "train" sub task or --skip-check to ignore this'
weights_file = exp.work_dir / 'combo-weights.yml'
if not args.get('sys_comb') and weights_file.exists():
log.warning("Found default combo weights, switching to combo mode")
args['sys_comb'] = weights_file
if args.get("sys_comb"):
with IO.reader(args['sys_comb']) as fh:
weights = yaml.load(fh)['weights']
args['model_path'], args['weights'] = zip(*weights.items())
for model in args['model_path']:
assert Path(model).exists(), model
assert abs(sum(args['weights']) - 1) < 1e-3, \
f'Weights from --sys-comb file should sum to 1.0, given={args["weights"]}'
def main():
# No grads required
torch.set_grad_enabled(False)
args = parse_args()
gen_args = {}
exp = Experiment(args.pop('work_dir'), read_only=True)
validate_args(args, exp)
if exp.model_type == 'binmt':
        if not args.get('binmt_path'):
            raise Exception('--binmt-path argument is needed for BiNMT model.')
gen_args['path'] = args.pop('binmt_path')
weights = args.get('weights')
if weights:
decoder = Decoder.combo_new(exp, model_paths=args.pop('model_path'),
weights=weights)
else:
decoder = Decoder.new(exp, gen_args=gen_args, model_paths=args.pop('model_path', None),
ensemble=args.pop('ensemble', 1))
if args.pop('interactive'):
if weights:
log.warning("Interactive shell not reloadable for combo mode. FIXME: TODO:")
if args['input'] != sys.stdin or args['output'] != sys.stdout:
log.warning('--input and --output args are not applicable in --interactive mode')
args.pop('input')
args.pop('output')
while True:
try:
                # a hacky way to unload and reload the model when the user tries to switch models
decoder.decode_interactive(**args)
break # exit loop if there is no request for reload
except ReloadEvent as re:
decoder = Decoder.new(exp, gen_args=gen_args, model_paths=re.model_paths)
args = re.state
# go back to loop and redo interactive shell
else:
return decoder.decode_file(args.pop('input'), args.pop('output'), **args)
if __name__ == '__main__':
main()
| [
"torch.set_grad_enabled"
] | 1.8.0 | XuezheMax/rtg | a4bfc81dc1874c6f43765eb588d1026a2296aa2f |
0.19 | import os
import os.path
import sys
sys.path.append('../..')
from utils.preprocessSMD import load_SMD
from transformers import (AdamW,WEIGHTS_NAME, CONFIG_NAME)
from utils.hugging_face import load_model,get_parser,top_filtering, SPECIAL_TOKENS, add_special_tokens_, average_distributed_scalar, make_logdir, build_input_from_segments,add_token_bAbI
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint
from ignite.metrics import Accuracy, Loss, MetricsLambda, RunningAverage
from ignite.contrib.handlers import ProgressBar, PiecewiseLinear
from ignite.contrib.handlers.tensorboard_logger import TensorboardLogger, OutputHandler, OptimizerParamsHandler
import math
from pprint import pformat
import random
from utils.eval_metrics import moses_multi_bleu, compute_prf, compute_prf_SMD
import numpy as np
from tqdm import tqdm
import warnings
import json
import jsonlines
from collections import defaultdict
def sample_sequence(history, graph,tokenizer, model, args, current_output=None):
special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
padding = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[-1])
if current_output is None:
current_output = []
if(args.flatten_KB):
history += graph['edges']
for i in range(args.max_length):
instance = build_input_from_segments(args,history,current_output,graph,tokenizer, with_eos=False)
input_ids = torch.tensor(instance["input_ids"], device=args.device).unsqueeze(0)
token_type_ids = torch.tensor(instance["token_type_ids"], device=args.device).unsqueeze(0)
nodes_ids = None
if (args.graph or args.edge_list) and len(instance["input_graph_ids"])>0:
max_c = max(len(col) for col in instance["input_graph_ids"])
temp = []
for clmn in instance["input_graph_ids"]:
temp.append(clmn + [padding] * (max_c - len(clmn)))
nodes_ids = torch.tensor([temp], device=args.device)
att_mask = None
logits = model(input_ids, token_type_ids=token_type_ids, nodes=nodes_ids, attention_mask=att_mask)
if isinstance(logits, tuple): # for gpt2 and maybe others
logits = logits[0]
logits = logits[0, -1, :] / args.temperature
logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
probs = F.softmax(logits, dim=-1)
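        # When args.no_sample is set, decoding is greedy (argmax token); otherwise one
        # token is sampled from the top-k / nucleus-filtered distribution.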
prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
if i < args.min_length and prev.item() in special_tokens_ids:
while prev.item() in special_tokens_ids:
if probs.max().item() == 1:
warnings.warn("Warning: model generating special token with probability 1.")
break # avoid infinitely looping over special token
prev = torch.multinomial(probs, num_samples=1)
if prev.item() in special_tokens_ids:
break
current_output.append(prev.item())
return current_output
if __name__ == "__main__":
args = get_parser()
random.seed(args.seed)
torch.random.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
# Get model and tokenizer
model, tokenizer = load_model(args,load=True)
print("Load Data")
test, _ = load_SMD(args, tokenizer, test_flag=True)
j_output = defaultdict(list)
for i, conv in tqdm(enumerate(test),total=len(test)):
for sample in conv['dialogue']:
out_ids = sample_sequence(sample['history'],sample["graph"] if args.dataset == "DIALKG" else conv,tokenizer, model, args)
out_text = tokenizer.decode(out_ids, skip_special_tokens=True)
j_output[conv["id"]].append({"spk":sample['spk'],"text":out_text})
with open(args.model_checkpoint+'/result.json', 'w') as fp:
json.dump(j_output, fp, indent=4)
| [
"torch.cuda.manual_seed",
"torch.random.manual_seed",
"torch.multinomial",
"torch.tensor",
"torch.nn.functional.softmax",
"torch.topk"
] | 0.19.5 | HLTCHKUST/ke-dialogue | cb73237889860adedcfd381b28813feb267cef81 |
1.10 | import datetime
import os
import pprint
import time
import threading
import torch as th
from types import SimpleNamespace as SN
from utils.logging import Logger
from utils.timehelper import time_left, time_str
from os.path import dirname, abspath
from learners import REGISTRY as le_REGISTRY
from runners import REGISTRY as r_REGISTRY
from controllers import REGISTRY as mac_REGISTRY
from components.episode_buffer import ReplayBuffer
from components.transforms import OneHot
def run(_run, _config, _log):
# check args sanity
_config = args_sanity_check(_config, _log)
args = SN(**_config)
args.device = "cuda" if args.use_cuda else "cpu"
# setup loggers
logger = Logger(_log)
_log.info("Experiment Parameters:")
experiment_params = pprint.pformat(_config,
indent=4,
width=1)
_log.info("\n\n" + experiment_params + "\n")
# configure tensorboard logger
unique_token = "{}__{}".format(args.name, datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
args.unique_token = unique_token
if args.use_tensorboard:
tb_logs_direc = os.path.join(dirname(dirname(abspath(__file__))), "results", "tb_logs")
tb_exp_direc = os.path.join(tb_logs_direc, "{}").format(unique_token)
logger.setup_tb(tb_exp_direc)
# sacred is on by default
logger.setup_sacred(_run)
# Run and train
run_sequential(args=args, logger=logger)
# Clean up after finishing
print("Exiting Main")
print("Stopping all threads")
for t in threading.enumerate():
if t.name != "MainThread":
print("Thread {} is alive! Is daemon: {}".format(t.name, t.daemon))
t.join(timeout=1)
print("Thread joined")
print("Exiting script")
# Making sure framework really exits
os._exit(os.EX_OK)
def evaluate_sequential(args, runner):
for _ in range(args.test_nepisode):
runner.run(test_mode=True)
if args.save_replay:
runner.save_replay()
runner.close_env()
def run_sequential(args, logger):
# Init runner so we can get env info
runner = r_REGISTRY[args.runner](args=args, logger=logger)
# Set up schemes and groups here
env_info = runner.get_env_info()
args.n_agents = env_info["n_agents"]
args.n_actions = env_info["n_actions"]
args.state_shape = env_info["state_shape"]
# Default/Base scheme
scheme = {
"state": {"vshape": env_info["state_shape"]},
"obs": {"vshape": env_info["obs_shape"], "group": "agents"},
"actions": {"vshape": (1,), "group": "agents", "dtype": th.long},
"adj_matrix": {"vshape":(args.n_agents,), "group": "agents", "dtype":th.int},
"avail_actions": {"vshape": (env_info["n_actions"],), "group": "agents", "dtype": th.int},
"reward": {"vshape": (1,)},
"terminated": {"vshape": (1,), "dtype": th.uint8},
}
groups = {
"agents": args.n_agents
}
preprocess = {
"actions": ("actions_onehot", [OneHot(out_dim=args.n_actions)])
}
buffer = ReplayBuffer(scheme, groups, args.buffer_size, env_info["episode_limit"] + 1,
preprocess=preprocess,
device="cpu" if args.buffer_cpu_only else args.device)
# Setup multiagent controller here
mac = mac_REGISTRY[args.mac](buffer.scheme, groups, args)
# Give runner the scheme
runner.setup(scheme=scheme, groups=groups, preprocess=preprocess, mac=mac)
# Learner
learner = le_REGISTRY[args.learner](mac, buffer.scheme, logger, args)
if args.use_cuda:
learner.cuda()
if args.checkpoint_path != "":
timesteps = []
timestep_to_load = 0
if not os.path.isdir(args.checkpoint_path):
logger.console_logger.info("Checkpoint directiory {} doesn't exist".format(args.checkpoint_path))
return
# Go through all files in args.checkpoint_path
for name in os.listdir(args.checkpoint_path):
full_name = os.path.join(args.checkpoint_path, name)
# Check if they are dirs the names of which are numbers
if os.path.isdir(full_name) and name.isdigit():
timesteps.append(int(name))
if args.load_step == 0:
# choose the max timestep
timestep_to_load = max(timesteps)
else:
# choose the timestep closest to load_step
timestep_to_load = min(timesteps, key=lambda x: abs(x - args.load_step))
model_path = os.path.join(args.checkpoint_path, str(timestep_to_load))
logger.console_logger.info("Loading model from {}".format(model_path))
learner.load_models(model_path)
runner.t_env = timestep_to_load
if args.evaluate or args.save_replay:
evaluate_sequential(args, runner)
return
# start training
episode = 0
last_test_T = -args.test_interval - 1
last_log_T = 0
model_save_time = 0
start_time = time.time()
last_time = start_time
logger.console_logger.info("Beginning training for {} timesteps".format(args.t_max))
while runner.t_env <= args.t_max:
# Run for a whole episode at a time
episode_batch = runner.run(test_mode=False)
buffer.insert_episode_batch(episode_batch)
if buffer.can_sample(args.batch_size):
episode_sample = buffer.sample(args.batch_size)
# Truncate batch to only filled timesteps
max_ep_t = episode_sample.max_t_filled()
episode_sample = episode_sample[:, :max_ep_t]
if episode_sample.device != args.device:
episode_sample.to(args.device)
learner.train(episode_sample, runner.t_env, episode)
# Execute test runs once in a while
n_test_runs = max(1, args.test_nepisode // runner.batch_size)
if (runner.t_env - last_test_T) / args.test_interval >= 1.0:
logger.console_logger.info("t_env: {} / {}".format(runner.t_env, args.t_max))
logger.console_logger.info("Estimated time left: {}. Time passed: {}".format(
time_left(last_time, last_test_T, runner.t_env, args.t_max), time_str(time.time() - start_time)))
last_time = time.time()
last_test_T = runner.t_env
for _ in range(n_test_runs):
runner.run(test_mode=True)
if args.save_model and (runner.t_env - model_save_time >= args.save_model_interval or model_save_time == 0):
model_save_time = runner.t_env
save_path = os.path.join(args.local_results_path, "models", args.unique_token, str(runner.t_env))
#"results/models/{}".format(unique_token)
os.makedirs(save_path, exist_ok=True)
logger.console_logger.info("Saving models to {}".format(save_path))
# learner should handle saving/loading -- delegate actor save/load to mac,
# use appropriate filenames to do critics, optimizer states
learner.save_models(save_path)
episode += args.batch_size_run
if (runner.t_env - last_log_T) >= args.log_interval:
logger.log_stat("episode", episode, runner.t_env)
logger.print_recent_stats()
last_log_T = runner.t_env
runner.close_env()
logger.console_logger.info("Finished Training")
def args_sanity_check(config, _log):
# set CUDA flags
# config["use_cuda"] = True # Use cuda whenever possible!
if config["use_cuda"] and not th.cuda.is_available():
config["use_cuda"] = False
_log.warning("CUDA flag use_cuda was switched OFF automatically because no CUDA devices are available!")
if config["test_nepisode"] < config["batch_size_run"]:
config["test_nepisode"] = config["batch_size_run"]
else:
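        # Round down to a multiple of batch_size_run, e.g. (hypothetical numbers)
        # test_nepisode=30 with batch_size_run=8 becomes 24.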
config["test_nepisode"] = (config["test_nepisode"]//config["batch_size_run"]) * config["batch_size_run"]
return config
| [
"torch.cuda.is_available"
] | 1.10.0 | hex-plex/GNN-MARL | ebe964a4eb749fd8d2780af18aead85e342d2988 |
1.7 | import torch.nn.functional as F
import torch
import json
# Setting the seed for Torch
import yaml
from fltk.nets import Cifar10CNN, FashionMNISTCNN, Cifar100ResNet, FashionMNISTResNet, Cifar10ResNet, Cifar100VGG
SEED = 1
torch.manual_seed(SEED)
class Arguments:
def __init__(self, logger):
self.logger = logger
self.batch_size = 10
self.test_batch_size = 1000
self.epochs = 1
self.lr = 0.001
self.momentum = 0.9
self.cuda = False
self.shuffle = False
self.log_interval = 10
self.kwargs = {}
self.contribution_measurement_round = 1
self.contribution_measurement_metric = 'Influence'
self.scheduler_step_size = 50
self.scheduler_gamma = 0.5
self.min_lr = 1e-10
self.round_worker_selection_strategy = None
self.round_worker_selection_strategy_kwargs = None
self.save_model = False
self.save_temp_model = False
self.save_epoch_interval = 1
self.save_model_path = "models"
self.epoch_save_start_suffix = "start"
self.epoch_save_end_suffix = "end"
        self.poison_effort = 'half'
self.num_workers = 50
# self.num_poisoned_workers = 10
self.rank = 0
self.world_size = 0
self.data_sampler = None
self.distributed = False
self.available_nets = {
"Cifar100ResNet" : Cifar100ResNet,
"Cifar100VGG" : Cifar100VGG,
"Cifar10CNN" : Cifar10CNN,
"Cifar10ResNet" : Cifar10ResNet,
"FashionMNISTCNN" : FashionMNISTCNN,
"FashionMNISTResNet" : FashionMNISTResNet
}
self.net = None
self.set_net_by_name('Cifar10CNN')
# self.net = FashionMNISTCNN
# self.net = Cifar100ResNet
# self.net = FashionMNISTResNet
# self.net = Cifar10ResNet
# self.net = Cifar10ResNet
self.dataset_name = 'cifar10'
self.train_data_loader_pickle_path = {
'cifar10': 'data_loaders/cifar10/train_data_loader.pickle',
'fashion-mnist': 'data_loaders/fashion-mnist/train_data_loader.pickle',
'cifar100': 'data_loaders/cifar100/train_data_loader.pickle',
}
self.test_data_loader_pickle_path = {
'cifar10': 'data_loaders/cifar10/test_data_loader.pickle',
'fashion-mnist': 'data_loaders/fashion-mnist/test_data_loader.pickle',
'cifar100': 'data_loaders/cifar100/test_data_loader.pickle',
}
# self.train_data_loader_pickle_path = "data_loaders/cifar10/train_data_loader.pickle"
# self.test_data_loader_pickle_path = "data_loaders/cifar10/test_data_loader.pickle"
# self.train_data_loader_pickle_path = "data_loaders/fashion-mnist/train_data_loader.pickle"
# self.test_data_loader_pickle_path = "data_loaders/fashion-mnist/test_data_loader.pickle"
# self.train_data_loader_pickle_path = "data_loaders/cifar100/train_data_loader.pickle"
# self.test_data_loader_pickle_path = "data_loaders/cifar100/test_data_loader.pickle"
self.loss_function = torch.nn.CrossEntropyLoss
self.default_model_folder_path = "default_models"
self.data_path = "data"
def get_distributed(self):
return self.distributed
def get_rank(self):
return self.rank
def get_world_size(self):
return self.world_size
def set_sampler(self, sampler):
self.data_sampler = sampler
def get_sampler(self):
return self.data_sampler
def get_round_worker_selection_strategy(self):
return self.round_worker_selection_strategy
def get_round_worker_selection_strategy_kwargs(self):
return self.round_worker_selection_strategy_kwargs
def set_round_worker_selection_strategy_kwargs(self, kwargs):
self.round_worker_selection_strategy_kwargs = kwargs
def set_client_selection_strategy(self, strategy):
self.round_worker_selection_strategy = strategy
def get_data_path(self):
return self.data_path
def get_epoch_save_start_suffix(self):
return self.epoch_save_start_suffix
def get_epoch_save_end_suffix(self):
return self.epoch_save_end_suffix
def get_dataloader_list(self):
return list(self.train_data_loader_pickle_path.keys())
def get_nets_list(self):
return list(self.available_nets.keys())
def set_train_data_loader_pickle_path(self, path, name='cifar10'):
self.train_data_loader_pickle_path[name] = path
def get_train_data_loader_pickle_path(self):
return self.train_data_loader_pickle_path[self.dataset_name]
def set_test_data_loader_pickle_path(self, path, name='cifar10'):
self.test_data_loader_pickle_path[name] = path
def get_test_data_loader_pickle_path(self):
return self.test_data_loader_pickle_path[self.dataset_name]
def set_net_by_name(self, name: str):
self.net = self.available_nets[name]
# net_dict = {
# 'cifar10-cnn': Cifar10CNN,
# 'fashion-mnist-cnn': FashionMNISTCNN,
# 'cifar100-resnet': Cifar100ResNet,
# 'fashion-mnist-resnet': FashionMNISTResNet,
# 'cifar10-resnet': Cifar10ResNet,
# 'cifar100-vgg': Cifar100VGG,
# }
# self.net = net_dict[name]
def get_cuda(self):
return self.cuda
def get_scheduler_step_size(self):
return self.scheduler_step_size
def get_scheduler_gamma(self):
return self.scheduler_gamma
def get_min_lr(self):
return self.min_lr
def get_default_model_folder_path(self):
return self.default_model_folder_path
def get_num_epochs(self):
return self.epochs
def set_num_poisoned_workers(self, num_poisoned_workers):
self.num_poisoned_workers = num_poisoned_workers
def set_num_workers(self, num_workers):
self.num_workers = num_workers
def set_model_save_path(self, save_model_path):
self.save_model_path = save_model_path
def get_logger(self):
return self.logger
def get_loss_function(self):
return self.loss_function
def get_net(self):
return self.net
def get_num_workers(self):
return self.num_workers
def get_num_poisoned_workers(self):
return self.num_poisoned_workers
    def get_poison_effort(self):
        return self.poison_effort
def get_learning_rate(self):
return self.lr
def get_momentum(self):
return self.momentum
def get_shuffle(self):
return self.shuffle
def get_batch_size(self):
return self.batch_size
def get_test_batch_size(self):
return self.test_batch_size
def get_log_interval(self):
return self.log_interval
def get_save_model_folder_path(self):
return self.save_model_path
def get_learning_rate_from_epoch(self, epoch_idx):
lr = self.lr * (self.scheduler_gamma ** int(epoch_idx / self.scheduler_step_size))
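        # With the defaults set above (lr=0.001, gamma=0.5, step_size=50) this yields
        # 0.001 for epochs 0-49, 0.0005 for epochs 50-99, 0.00025 for epochs 100-149,
        # and so on, floored at min_lr.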
if lr < self.min_lr:
self.logger.warning("Updating LR would place it below min LR. Skipping LR update.")
return self.min_lr
self.logger.debug("LR: {}".format(lr))
return lr
def get_contribution_measurement_round(self):
return self.contribution_measurement_round
def get_contribution_measurement_metric(self):
return self.contribution_measurement_metric
def should_save_model(self, epoch_idx):
"""
        Return True if the model should be saved at this epoch, False otherwise.
:param epoch_idx: current training epoch index
:type epoch_idx: int
"""
if not self.save_model:
return False
        if epoch_idx == 1 or epoch_idx % self.save_epoch_interval == 0:
            return True
        return False
def log(self):
"""
Log this arguments object to the logger.
"""
self.logger.debug("Arguments: {}", str(self))
def __str__(self):
return "\nBatch Size: {}\n".format(self.batch_size) + \
"Test Batch Size: {}\n".format(self.test_batch_size) + \
"Epochs: {}\n".format(self.epochs) + \
"Learning Rate: {}\n".format(self.lr) + \
"Momentum: {}\n".format(self.momentum) + \
"CUDA Enabled: {}\n".format(self.cuda) + \
"Shuffle Enabled: {}\n".format(self.shuffle) + \
"Log Interval: {}\n".format(self.log_interval) + \
"Scheduler Step Size: {}\n".format(self.scheduler_step_size) + \
"Scheduler Gamma: {}\n".format(self.scheduler_gamma) + \
"Scheduler Minimum Learning Rate: {}\n".format(self.min_lr) + \
"Client Selection Strategy: {}\n".format(self.round_worker_selection_strategy) + \
"Client Selection Strategy Arguments: {}\n".format(json.dumps(self.round_worker_selection_strategy_kwargs, indent=4, sort_keys=True)) + \
"Model Saving Enabled: {}\n".format(self.save_model) + \
"Model Saving Interval: {}\n".format(self.save_epoch_interval) + \
"Model Saving Path (Relative): {}\n".format(self.save_model_path) + \
"Epoch Save Start Prefix: {}\n".format(self.epoch_save_start_suffix) + \
"Epoch Save End Suffix: {}\n".format(self.epoch_save_end_suffix) + \
"Number of Clients: {}\n".format(self.num_workers) + \
"Number of Poisoned Clients: {}\n".format(self.num_poisoned_workers) + \
"NN: {}\n".format(self.net) + \
"Train Data Loader Path: {}\n".format(self.train_data_loader_pickle_path) + \
"Test Data Loader Path: {}\n".format(self.test_data_loader_pickle_path) + \
"Loss Function: {}\n".format(self.loss_function) + \
"Default Model Folder Path: {}\n".format(self.default_model_folder_path) + \
"Data Path: {}\n".format(self.data_path) + \
"Dataset Name: {}\n".format(self.dataset_name) | [
"torch.manual_seed"
] | 1.7.1 | tudelft-eemcs-dml/fltk-testbed-gr-5 | 72afa24a37cd1f8f5f49665c83ccbd730d76ad21 |
1.0 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
import argparse
import csv
import logging
import os
import random
import sys
from io import open
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from pytorch_transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME
from pytorch_transformers.modeling_bert import BertForMultipleChoice, BertConfig
from pytorch_transformers.optimization import AdamW, WarmupLinearSchedule
from pytorch_transformers.tokenization_bert import BertTokenizer
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
class SwagExample(object):
"""A single training/test example for the SWAG dataset."""
def __init__(self,
swag_id,
context_sentence,
start_ending,
ending_0,
ending_1,
ending_2,
ending_3,
label = None):
self.swag_id = swag_id
self.context_sentence = context_sentence
self.start_ending = start_ending
self.endings = [
ending_0,
ending_1,
ending_2,
ending_3,
]
self.label = label
def __str__(self):
return self.__repr__()
def __repr__(self):
l = [
"swag_id: {}".format(self.swag_id),
"context_sentence: {}".format(self.context_sentence),
"start_ending: {}".format(self.start_ending),
"ending_0: {}".format(self.endings[0]),
"ending_1: {}".format(self.endings[1]),
"ending_2: {}".format(self.endings[2]),
"ending_3: {}".format(self.endings[3]),
]
if self.label is not None:
l.append("label: {}".format(self.label))
return ", ".join(l)
class InputFeatures(object):
def __init__(self,
example_id,
choices_features,
label
):
self.example_id = example_id
self.choices_features = [
{
'input_ids': input_ids,
'input_mask': input_mask,
'segment_ids': segment_ids
}
for _, input_ids, input_mask, segment_ids in choices_features
]
self.label = label
def read_swag_examples(input_file, is_training):
with open(input_file, 'r', encoding='utf-8') as f:
reader = csv.reader(f)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
if is_training and lines[0][-1] != 'label':
raise ValueError(
"For training, the input file must contain a label column."
)
examples = [
SwagExample(
swag_id = line[2],
context_sentence = line[4],
start_ending = line[5], # in the swag dataset, the
# common beginning of each
# choice is stored in "sent2".
ending_0 = line[7],
ending_1 = line[8],
ending_2 = line[9],
ending_3 = line[10],
label = int(line[11]) if is_training else None
) for line in lines[1:] # we skip the line with the column names
]
return examples
def convert_examples_to_features(examples, tokenizer, max_seq_length,
is_training):
"""Loads a data file into a list of `InputBatch`s."""
# Swag is a multiple choice task. To perform this task using Bert,
# we will use the formatting proposed in "Improving Language
# Understanding by Generative Pre-Training" and suggested by
# @jacobdevlin-google in this issue
# https://github.com/google-research/bert/issues/38.
#
# Each choice will correspond to a sample on which we run the
# inference. For a given Swag example, we will create the 4
# following inputs:
# - [CLS] context [SEP] choice_1 [SEP]
# - [CLS] context [SEP] choice_2 [SEP]
# - [CLS] context [SEP] choice_3 [SEP]
# - [CLS] context [SEP] choice_4 [SEP]
# The model will output a single value for each input. To get the
# final decision of the model, we will run a softmax over these 4
# outputs.
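    # For illustration (hypothetical tokens), a single choice is encoded roughly as
    #   tokens:      [CLS] context ... [SEP] start_ending + ending ... [SEP]
    #   segment_ids:   0      0           0             1        1        1
    # and the four such sequences of an example are scored in one forward pass.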
features = []
for example_index, example in enumerate(examples):
context_tokens = tokenizer.tokenize(example.context_sentence)
start_ending_tokens = tokenizer.tokenize(example.start_ending)
choices_features = []
for ending_index, ending in enumerate(example.endings):
# We create a copy of the context tokens in order to be
# able to shrink it according to ending_tokens
context_tokens_choice = context_tokens[:]
ending_tokens = start_ending_tokens + tokenizer.tokenize(ending)
# Modifies `context_tokens_choice` and `ending_tokens` in
# place so that the total length is less than the
# specified length. Account for [CLS], [SEP], [SEP] with
# "- 3"
_truncate_seq_pair(context_tokens_choice, ending_tokens, max_seq_length - 3)
tokens = ["[CLS]"] + context_tokens_choice + ["[SEP]"] + ending_tokens + ["[SEP]"]
segment_ids = [0] * (len(context_tokens_choice) + 2) + [1] * (len(ending_tokens) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
choices_features.append((tokens, input_ids, input_mask, segment_ids))
label = example.label
if example_index < 5:
logger.info("*** Example ***")
logger.info("swag_id: {}".format(example.swag_id))
for choice_idx, (tokens, input_ids, input_mask, segment_ids) in enumerate(choices_features):
logger.info("choice: {}".format(choice_idx))
logger.info("tokens: {}".format(' '.join(tokens)))
logger.info("input_ids: {}".format(' '.join(map(str, input_ids))))
logger.info("input_mask: {}".format(' '.join(map(str, input_mask))))
logger.info("segment_ids: {}".format(' '.join(map(str, segment_ids))))
if is_training:
logger.info("label: {}".format(label))
features.append(
InputFeatures(
example_id = example.swag_id,
choices_features = choices_features,
label = label
)
)
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
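    # Example (hypothetical lengths): with max_length=8, sequences of lengths 6 and 5
    # are popped alternately (longest first) until they reach lengths 4 and 4.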
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
def select_field(features, field):
return [
[
choice[field]
for choice in feature.choices_features
]
for feature in features
]
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .csv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
"bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints will be written.")
## Other parameters
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=8,
type=int,
help="Total batch size for eval.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
args = parser.parse_args()
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
# Prepare model
model = BertForMultipleChoice.from_pretrained(args.bert_model,
cache_dir=os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank)),
num_choices=4)
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
if args.do_train:
# Prepare data loader
train_examples = read_swag_examples(os.path.join(args.data_dir, 'train.csv'), is_training = True)
train_features = convert_examples_to_features(
train_examples, tokenizer, args.max_seq_length, True)
all_input_ids = torch.tensor(select_field(train_features, 'input_ids'), dtype=torch.long)
all_input_mask = torch.tensor(select_field(train_features, 'input_mask'), dtype=torch.long)
all_segment_ids = torch.tensor(select_field(train_features, 'segment_ids'), dtype=torch.long)
all_label = torch.tensor([f.label for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
num_train_optimization_steps = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
if args.local_rank != -1:
num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
# Prepare optimizer
param_optimizer = list(model.named_parameters())
# hack to remove pooler, which is not used
# thus it produce None grad that break apex
param_optimizer = [n for n in param_optimizer]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
else:
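            # Note: BertAdam is not imported above; it comes from the legacy
            # pytorch_pretrained_bert package and handles the warmup schedule internally.
            # With pytorch_transformers, AdamW plus WarmupLinearSchedule (imported above)
            # would be the corresponding replacement.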
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
global_step = 0
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
                loss = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask, labels=label_ids)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.fp16 and args.loss_scale != 1.0:
# rescale loss for fp16 training
# see https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html
loss = loss * args.loss_scale
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
# modify learning rate with special warm up BERT uses
# if args.fp16 is False, BertAdam is used that handles this automatically
lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step, args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
if args.do_train:
# Save a trained model, configuration and tokenizer
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(args.output_dir)
# Load a trained model and vocabulary that you have fine-tuned
model = BertForMultipleChoice.from_pretrained(args.output_dir, num_choices=4)
tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
else:
model = BertForMultipleChoice.from_pretrained(args.bert_model, num_choices=4)
model.to(device)
if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
eval_examples = read_swag_examples(os.path.join(args.data_dir, 'val.csv'), is_training = True)
eval_features = convert_examples_to_features(
eval_examples, tokenizer, args.max_seq_length, True)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
all_input_ids = torch.tensor(select_field(eval_features, 'input_ids'), dtype=torch.long)
all_input_mask = torch.tensor(select_field(eval_features, 'input_mask'), dtype=torch.long)
all_segment_ids = torch.tensor(select_field(eval_features, 'segment_ids'), dtype=torch.long)
all_label = torch.tensor([f.label for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
tmp_eval_loss = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask, labels=label_ids)
logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
logits = logits.detach().cpu().numpy()
label_ids = label_ids.to('cpu').numpy()
tmp_eval_accuracy = accuracy(logits, label_ids)
eval_loss += tmp_eval_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
eval_accuracy = eval_accuracy / nb_eval_examples
result = {'eval_loss': eval_loss,
'eval_accuracy': eval_accuracy,
'global_step': global_step,
'loss': tr_loss/global_step}
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
| [
"torch.distributed.get_world_size",
"torch.utils.data.RandomSampler",
"torch.cuda.is_available",
"torch.nn.DataParallel",
"torch.distributed.init_process_group",
"torch.manual_seed",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.distributed.get_rank",
"torch.device",
"torch.cuda.manual_seed_all",
"torch.utils.data.SequentialSampler",
"torch.cuda.device_count",
"torch.cuda.set_device",
"torch.utils.data.TensorDataset",
"torch.no_grad",
"torch.utils.data.distributed.DistributedSampler"
] | 1.0.0 | nabihach/pytorch-transformers | 4c99a4eda5459e36ebb45355fa789bb6cc0bce71 |
1.0 | import collections
from typing import Iterable, List
import torch
from torch import nn as nn
from torch.distributions import Normal
from torch.nn import ModuleList
from scvi.models.utils import one_hot
def reparameterize_gaussian(mu, var):
return Normal(mu, var.sqrt()).rsample()
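# Note: rsample() uses the reparameterization trick (mu + sqrt(var) * eps, eps ~ N(0, I)),
# so the sample remains differentiable with respect to mu and var.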
class FCLayers(nn.Module):
r"""A helper class to build fully-connected layers for a neural network.
:param n_in: The dimensionality of the input
:param n_out: The dimensionality of the output
:param n_cat_list: A list containing, for each category of interest,
the number of categories. Each category will be
included using a one-hot encoding.
:param n_layers: The number of fully-connected hidden layers
:param n_hidden: The number of nodes per hidden layer
:param dropout_rate: Dropout rate to apply to each of the hidden layers
:param use_batch_norm: Whether to have `BatchNorm` layers or not
:param use_relu: Whether to have `ReLU` layers or not
:param bias: Whether to learn bias in linear layers or not
"""
def __init__(
self,
n_in: int,
n_out: int,
n_cat_list: Iterable[int] = None,
n_layers: int = 1,
n_hidden: int = 128,
dropout_rate: float = 0.1,
use_batch_norm: bool = True,
use_relu: bool = True,
bias: bool = True,
):
super().__init__()
layers_dim = [n_in] + (n_layers - 1) * [n_hidden] + [n_out]
if n_cat_list is not None:
# n_cat = 1 will be ignored
self.n_cat_list = [n_cat if n_cat > 1 else 0 for n_cat in n_cat_list]
else:
self.n_cat_list = []
self.fc_layers = nn.Sequential(
collections.OrderedDict(
[
(
"Layer {}".format(i),
nn.Sequential(
nn.Linear(n_in + sum(self.n_cat_list), n_out, bias=bias),
# Below, 0.01 and 0.001 are the default values for `momentum` and `eps` from
# the tensorflow implementation of batch norm; we're using those settings
# here too so that the results match our old tensorflow code. The default
# setting from pytorch would probably be fine too but we haven't tested that.
nn.BatchNorm1d(n_out, momentum=0.01, eps=0.001)
if use_batch_norm
else None,
nn.ReLU() if use_relu else None,
nn.Dropout(p=dropout_rate) if dropout_rate > 0 else None,
),
)
for i, (n_in, n_out) in enumerate(
zip(layers_dim[:-1], layers_dim[1:])
)
]
)
)
def forward(self, x: torch.Tensor, *cat_list: int, instance_id: int = 0):
r"""Forward computation on ``x``.
:param x: tensor of values with shape ``(n_in,)``
:param cat_list: list of category membership(s) for this sample
:param instance_id: Use a specific conditional instance normalization (batchnorm)
:return: tensor of shape ``(n_out,)``
:rtype: :py:class:`torch.Tensor`
"""
one_hot_cat_list = [] # for generality in this list many indices useless.
assert len(self.n_cat_list) <= len(
cat_list
), "nb. categorical args provided doesn't match init. params."
for n_cat, cat in zip(self.n_cat_list, cat_list):
assert not (
n_cat and cat is None
), "cat not provided while n_cat != 0 in init. params."
if n_cat > 1: # n_cat = 1 will be ignored - no additional information
if cat.size(1) != n_cat:
one_hot_cat = one_hot(cat, n_cat)
else:
one_hot_cat = cat # cat has already been one_hot encoded
one_hot_cat_list += [one_hot_cat]
for layers in self.fc_layers:
for layer in layers:
if layer is not None:
if isinstance(layer, nn.BatchNorm1d):
if x.dim() == 3:
x = torch.cat(
[(layer(slice_x)).unsqueeze(0) for slice_x in x], dim=0
)
else:
x = layer(x)
else:
if isinstance(layer, nn.Linear):
if x.dim() == 3:
one_hot_cat_list = [
o.unsqueeze(0).expand(
(x.size(0), o.size(0), o.size(1))
)
for o in one_hot_cat_list
]
x = torch.cat((x, *one_hot_cat_list), dim=-1)
x = layer(x)
return x
# Encoder
class Encoder(nn.Module):
r"""Encodes data of ``n_input`` dimensions into a latent space of ``n_output``
dimensions using a fully-connected neural network of ``n_hidden`` layers.
:param n_input: The dimensionality of the input (data space)
:param n_output: The dimensionality of the output (latent space)
:param n_cat_list: A list containing the number of categories
for each category of interest. Each category will be
included using a one-hot encoding
:param n_layers: The number of fully-connected hidden layers
:param n_hidden: The number of nodes per hidden layer
    :param dropout_rate: Dropout rate to apply to each of the hidden layers
"""
def __init__(
self,
n_input: int,
n_output: int,
n_cat_list: Iterable[int] = None,
n_layers: int = 1,
n_hidden: int = 128,
dropout_rate: float = 0.1,
):
super().__init__()
self.encoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
)
self.mean_encoder = nn.Linear(n_hidden, n_output)
self.var_encoder = nn.Linear(n_hidden, n_output)
def forward(self, x: torch.Tensor, *cat_list: int):
r"""The forward computation for a single sample.
#. Encodes the data into latent space using the encoder network
        #. Generates a mean \\( q_m \\) and variance \\( q_v \\) (the encoder output is exponentiated, with a small constant added for numerical stability)
#. Samples a new value from an i.i.d. multivariate normal \\( \\sim N(q_m, \\mathbf{I}q_v) \\)
:param x: tensor with shape (n_input,)
:param cat_list: list of category membership(s) for this sample
:return: tensors of shape ``(n_latent,)`` for mean and var, and sample
:rtype: 3-tuple of :py:class:`torch.Tensor`
"""
# Parameters for latent distribution
q = self.encoder(x, *cat_list)
q_m = self.mean_encoder(q)
q_v = torch.exp(self.var_encoder(q)) + 1e-4
latent = reparameterize_gaussian(q_m, q_v)
return q_m, q_v, latent
# Multi-Encoder
class Multi_Encoder(nn.Module):
def __init__(
self,
RNA_input: int,
ATAC_input,
n_output: int,
n_cat_list: Iterable[int] = None,
n_layers: int = 1,
n_hidden: int = 128,
dropout_rate: float = 0.1,
):
super().__init__()
self.scRNA_encoder = FCLayers(
n_in=RNA_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
)
self.scATAC_encoder = FCLayers(
n_in=ATAC_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
)
self.concat1 = nn.Linear(2 * n_hidden, n_hidden)
self.concat2 = nn.Linear(n_hidden, n_hidden)
self.mean_encoder = nn.Linear(n_hidden, n_output)
self.var_encoder = nn.Linear(n_hidden, n_output)
def forward(self, x: list, *cat_list: int):
# Parameters for latent distribution
        if len(x) != 2:
            raise ValueError("Input training data should be 2 data types (RNA and ATAC), "
                             "but input was only {}".format(len(x)))
        if not torch.is_tensor(x[0]):
            raise ValueError("training data should be a tensor!")
q1 = self.scRNA_encoder(x[0], *cat_list)
q2 = self.scATAC_encoder(x[1], *cat_list)
q = self.concat2(self.concat1(torch.cat((q1, q2), 1)))
q_m = self.mean_encoder(q)
q_v = torch.exp(self.var_encoder(q)) + 1e-4
latent = reparameterize_gaussian(q_m, q_v)
return q_m, q_v, latent
# Multi-Decoder
class Multi_Decoder(nn.Module):
def __init__(
self,
n_input: int,
RNA_output: int,
ATAC_output: int,
n_cat_list: Iterable[int] = None,
n_layers: int = 1,
n_hidden: int = 256,
dropout_rate: float = 0,
):
super().__init__()
# RNA-seq decoder
self.scRNA_decoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
)
# mean gamma
self.rna_scale_decoder = nn.Sequential(
nn.Linear(n_hidden, RNA_output), nn.Softmax(dim=-1)
)
# dispersion: here we only deal with gene-cell dispersion case
self.rna_r_decoder = nn.Linear(n_hidden, RNA_output)
# dropout
self.rna_dropout_decoder = nn.Linear(n_hidden, RNA_output)
# ATAC decoder
self.scATAC_decoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
)
        # mean poisson
self.atac_scale_decoder = nn.Sequential(
nn.Linear(n_hidden, ATAC_output), nn.Softmax(dim=-1)
)
# dispersion: here we only deal with gene-cell dispersion case
self.atac_r_decoder = nn.Linear(n_hidden, ATAC_output)
# dropout
self.atac_dropout_decoder = nn.Linear(n_hidden, ATAC_output)
        # library scale for each cell
self.libaray_decoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
)
self.libaray_rna_scale_decoder = nn.Linear(n_hidden, 1)
self.libaray_atac_scale_decoder = nn.Linear(n_hidden, 1)
def forward(self, z: torch.Tensor, z_c: torch.Tensor, *cat_list: int):
# The decoder returns values for the parameters of the ZINB distribution of scRNA-seq
p_rna = self.scRNA_decoder(z, *cat_list)
p_rna_scale = self.rna_scale_decoder(p_rna)
p_rna_dropout = self.rna_dropout_decoder(p_rna)
libaray_temp = self.libaray_decoder(z_c, *cat_list)
libaray_gene = self.libaray_rna_scale_decoder(libaray_temp)
p_rna_rate = torch.exp(libaray_gene.clamp(max=12)) * p_rna_scale # torch.clamp( , max=12)
#p_rna_rate.clamp(max=12) # maybe it is unnecessary
p_rna_r = self.rna_r_decoder(p_rna)
# The decoder returns values for the parameters of the ZIP distribution of scATAC-seq
p_atac = self.scATAC_decoder(z, *cat_list)
p_atac_scale = self.atac_scale_decoder(p_atac)
p_atac_r = self.atac_r_decoder(p_atac)
p_atac_dropout = self.atac_dropout_decoder(p_atac)
libaray_atac = self.libaray_atac_scale_decoder(libaray_temp)
        p_atac_mean = torch.exp(libaray_atac.clamp(max=13)) * p_atac_scale  # upper clamp for stability; for zinp and zip loss
#p_atac_mean = libaray_atac * p_atac_scale # for binary loss
return p_rna_scale, p_rna_r, p_rna_rate, p_rna_dropout, p_atac_scale, p_atac_r, p_atac_mean, p_atac_dropout
# Decoder
class DecoderSCVI(nn.Module):
r"""Decodes data from latent space of ``n_input`` dimensions ``n_output``
dimensions using a fully-connected neural network of ``n_hidden`` layers.
:param n_input: The dimensionality of the input (latent space)
:param n_output: The dimensionality of the output (data space)
:param n_cat_list: A list containing the number of categories
for each category of interest. Each category will be
included using a one-hot encoding
:param n_layers: The number of fully-connected hidden layers
:param n_hidden: The number of nodes per hidden layer
:param dropout_rate: Dropout rate to apply to each of the hidden layers
"""
def __init__(
self,
n_input: int,
n_output: int,
n_cat_list: Iterable[int] = None,
n_layers: int = 1,
n_hidden: int = 128,
):
super().__init__()
self.px_decoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=0,
)
# mean gamma
self.px_scale_decoder = nn.Sequential(
nn.Linear(n_hidden, n_output), nn.Softmax(dim=-1)
)
# dispersion: here we only deal with gene-cell dispersion case
self.px_r_decoder = nn.Linear(n_hidden, n_output)
# dropout
self.px_dropout_decoder = nn.Linear(n_hidden, n_output)
def forward(
self, dispersion: str, z: torch.Tensor, library: torch.Tensor, *cat_list: int
):
r"""The forward computation for a single sample.
#. Decodes the data from the latent space using the decoder network
#. Returns parameters for the ZINB distribution of expression
#. If ``dispersion != 'gene-cell'`` then value for that param will be ``None``
:param dispersion: One of the following
* ``'gene'`` - dispersion parameter of NB is constant per gene across cells
* ``'gene-batch'`` - dispersion can differ between different batches
* ``'gene-label'`` - dispersion can differ between different labels
* ``'gene-cell'`` - dispersion can differ for every gene in every cell
:param z: tensor with shape ``(n_input,)``
:param library: library size
:param cat_list: list of category membership(s) for this sample
:return: parameters for the ZINB distribution of expression
:rtype: 4-tuple of :py:class:`torch.Tensor`
"""
# The decoder returns values for the parameters of the ZINB distribution
px = self.px_decoder(z, *cat_list)
px_scale = self.px_scale_decoder(px)
px_dropout = self.px_dropout_decoder(px)
# Clamp to high value: exp(12) ~ 160000 to avoid nans (computational stability)
px_rate = torch.exp(library) * px_scale # torch.clamp( , max=12)
px_r = self.px_r_decoder(px) if dispersion == "gene-cell" else None
return px_scale, px_r, px_rate, px_dropout
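# A minimal usage sketch (sizes are hypothetical), illustrating the ZINB
# parameters returned for a dispersion mode other than "gene-cell".
def _decoder_scvi_usage_example():
    dec = DecoderSCVI(n_input=10, n_output=100)
    z = torch.rand(8, 10)
    library = torch.rand(8, 1)  # log library size per cell, exponentiated inside forward
    px_scale, px_r, px_rate, px_dropout = dec("gene", z, library)
    # px_r is only computed for the "gene-cell" dispersion mode
    assert px_r is None and px_rate.shape == (8, 100)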
class LinearDecoderSCVI(nn.Module):
def __init__(
self,
n_input: int,
n_output: int,
n_cat_list: Iterable[int] = None,
n_layers: int = 1,
n_hidden: int = 128,
):
super(LinearDecoderSCVI, self).__init__()
# mean gamma
self.n_batches = n_cat_list[0] # Just try a simple case for now
if self.n_batches > 1:
self.batch_regressor = nn.Linear(self.n_batches - 1, n_output, bias=False)
else:
self.batch_regressor = None
self.factor_regressor = nn.Linear(n_input, n_output)
# dropout
self.px_dropout_decoder = nn.Linear(n_input, n_output)
def forward(
self, dispersion: str, z: torch.Tensor, library: torch.Tensor, *cat_list: int
):
# The decoder returns values for the parameters of the ZINB distribution
p1_ = self.factor_regressor(z)
if self.n_batches > 1:
one_hot_cat = one_hot(cat_list[0], self.n_batches)[:, :-1]
p2_ = self.batch_regressor(one_hot_cat)
raw_px_scale = p1_ + p2_
else:
raw_px_scale = p1_
px_scale = torch.softmax(raw_px_scale, dim=-1)
px_dropout = self.px_dropout_decoder(z)
px_rate = torch.exp(library) * px_scale
px_r = None
return px_scale, px_r, px_rate, px_dropout
# Decoder
class Decoder(nn.Module):
r"""Decodes data from latent space of ``n_input`` dimensions to ``n_output``
dimensions using a fully-connected neural network of ``n_hidden`` layers.
Output is the mean and variance of a multivariate Gaussian
:param n_input: The dimensionality of the input (latent space)
:param n_output: The dimensionality of the output (data space)
:param n_cat_list: A list containing the number of categories
for each category of interest. Each category will be
included using a one-hot encoding
:param n_layers: The number of fully-connected hidden layers
:param n_hidden: The number of nodes per hidden layer
:param dropout_rate: Dropout rate to apply to each of the hidden layers
"""
def __init__(
self,
n_input: int,
n_output: int,
n_cat_list: Iterable[int] = None,
n_layers: int = 1,
n_hidden: int = 128,
):
super().__init__()
self.decoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=0,
)
self.mean_decoder = nn.Linear(n_hidden, n_output)
self.var_decoder = nn.Linear(n_hidden, n_output)
def forward(self, x: torch.Tensor, *cat_list: int):
r"""The forward computation for a single sample.
#. Decodes the data from the latent space using the decoder network
#. Returns tensors for the mean and variance of a multivariate distribution
:param x: tensor with shape ``(n_input,)``
:param cat_list: list of category membership(s) for this sample
:return: Mean and variance tensors of shape ``(n_output,)``
:rtype: 2-tuple of :py:class:`torch.Tensor`
"""
# Parameters for latent distribution
p = self.decoder(x, *cat_list)
p_m = self.mean_decoder(p)
p_v = torch.exp(self.var_decoder(p))
return p_m, p_v
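# A minimal usage sketch (sizes are hypothetical): the Decoder returns the mean
# and variance of a Gaussian over the data space.
def _decoder_usage_example():
    dec = Decoder(n_input=10, n_output=50)
    p_m, p_v = dec(torch.rand(8, 10))
    assert p_m.shape == p_v.shape == (8, 50)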
class MultiEncoder(nn.Module):
def __init__(
self,
n_heads: int,
n_input_list: List[int],
n_output: int,
n_hidden: int = 128,
n_layers_individual: int = 1,
n_layers_shared: int = 2,
n_cat_list: Iterable[int] = None,
dropout_rate: float = 0.1,
):
super().__init__()
self.encoders = ModuleList(
[
FCLayers(
n_in=n_input_list[i],
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers_individual,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
use_batch_norm=True,
)
for i in range(n_heads)
]
)
self.encoder_shared = FCLayers(
n_in=n_hidden,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers_shared,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
)
self.mean_encoder = nn.Linear(n_hidden, n_output)
self.var_encoder = nn.Linear(n_hidden, n_output)
def forward(self, x: torch.Tensor, head_id: int, *cat_list: int):
q = self.encoders[head_id](x, *cat_list)
q = self.encoder_shared(q, *cat_list)
q_m = self.mean_encoder(q)
q_v = torch.exp(self.var_encoder(q))
latent = reparameterize_gaussian(q_m, q_v)
return q_m, q_v, latent
class MultiDecoder(nn.Module):
def __init__(
self,
n_input: int,
n_output: int,
n_hidden_conditioned: int = 32,
n_hidden_shared: int = 128,
n_layers_conditioned: int = 1,
n_layers_shared: int = 1,
n_cat_list: Iterable[int] = None,
dropout_rate: float = 0.2,
):
super().__init__()
n_out = n_hidden_conditioned if n_layers_shared else n_hidden_shared
if n_layers_conditioned:
self.px_decoder_conditioned = FCLayers(
n_in=n_input,
n_out=n_out,
n_cat_list=n_cat_list,
n_layers=n_layers_conditioned,
n_hidden=n_hidden_conditioned,
dropout_rate=dropout_rate,
use_batch_norm=True,
)
n_in = n_out
else:
self.px_decoder_conditioned = None
n_in = n_input
if n_layers_shared:
self.px_decoder_final = FCLayers(
n_in=n_in,
n_out=n_hidden_shared,
n_cat_list=[],
n_layers=n_layers_shared,
n_hidden=n_hidden_shared,
dropout_rate=dropout_rate,
use_batch_norm=True,
)
n_in = n_hidden_shared
else:
self.px_decoder_final = None
self.px_scale_decoder = nn.Sequential(
nn.Linear(n_in, n_output), nn.Softmax(dim=-1)
)
self.px_r_decoder = nn.Linear(n_in, n_output)
self.px_dropout_decoder = nn.Linear(n_in, n_output)
def forward(
self,
z: torch.Tensor,
dataset_id: int,
library: torch.Tensor,
dispersion: str,
*cat_list: int
):
px = z
if self.px_decoder_conditioned:
px = self.px_decoder_conditioned(px, *cat_list, instance_id=dataset_id)
if self.px_decoder_final:
px = self.px_decoder_final(px, *cat_list)
px_scale = self.px_scale_decoder(px)
px_dropout = self.px_dropout_decoder(px)
px_rate = torch.exp(library) * px_scale
px_r = self.px_r_decoder(px) if dispersion == "gene-cell" else None
return px_scale, px_r, px_rate, px_dropout
class DecoderTOTALVI(nn.Module):
r"""Decodes data from latent space of ``n_input`` dimensions ``n_output``
dimensions using a linear decoder
:param n_input: The dimensionality of the input (latent space)
:param n_output_genes: The dimensionality of the output (gene space)
:param n_output_proteins: The dimensionality of the output (protein space)
:param n_cat_list: A list containing the number of categories
for each category of interest. Each category will be
included using a one-hot encoding
"""
def __init__(
self,
n_input: int,
n_output_genes: int,
n_output_proteins: int,
n_cat_list: Iterable[int] = None,
n_layers: int = 1,
n_hidden: int = 256,
dropout_rate: float = 0,
):
super().__init__()
self.n_output_genes = n_output_genes
self.n_output_proteins = n_output_proteins
self.px_decoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
)
# mean gamma
self.px_scale_decoder = nn.Sequential(
nn.Linear(n_hidden + n_input, n_output_genes), nn.Softmax(dim=-1)
)
# background mean first decoder
self.py_back_decoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
)
# background mean parameters second decoder
self.py_back_mean_log_alpha = nn.Linear(n_hidden + n_input, n_output_proteins)
self.py_back_mean_log_beta = nn.Linear(n_hidden + n_input, n_output_proteins)
# foreground increment decoder step 1
self.py_fore_decoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
)
# foreground increment decoder step 2
self.py_fore_scale_decoder = nn.Sequential(
nn.Linear(n_hidden + n_input, n_output_proteins), nn.ReLU()
)
# dropout (mixture component for proteins, ZI probability for genes)
self.sigmoid_decoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
)
self.px_dropout_decoder_gene = nn.Linear(n_hidden + n_input, n_output_genes)
self.py_background_decoder = nn.Linear(n_hidden + n_input, n_output_proteins)
def forward(self, z: torch.Tensor, library_gene: torch.Tensor, *cat_list: int):
r"""The forward computation for a single sample.
#. Decodes the data from the latent space using the decoder network
#. Returns local parameters for the ZINB distribution for genes
#. Returns local parameters for the Mixture NB distribution for proteins
We use the dictionary `px_` to contain the parameters of the ZINB/NB for genes.
The rate refers to the mean of the NB, dropout refers to Bernoulli mixing parameters.
        `scale` refers to the quantity upon which differential expression is performed. For genes,
this can be viewed as the mean of the underlying gamma distribution.
We use the dictionary `py_` to contain the parameters of the Mixture NB distribution for proteins.
`rate_fore` refers to foreground mean, while `rate_back` refers to background mean. `scale` refers to
foreground mean adjusted for background probability and scaled to reside in simplex.
`back_alpha` and `back_beta` are the posterior parameters for `rate_back`. `fore_scale` is the scaling
factor that enforces `rate_fore` > `rate_back`.
:param z: tensor with shape ``(n_input,)``
:param library_gene: library size
:param cat_list: list of category membership(s) for this sample
:return: parameters for the ZINB distribution of expression
:rtype: 3-tuple (first 2-tuple :py:class:`dict`, last :py:class:`torch.Tensor`)
"""
px_ = {}
py_ = {}
px = self.px_decoder(z, *cat_list)
px_cat_z = torch.cat([px, z], dim=-1)
px_["scale"] = self.px_scale_decoder(px_cat_z)
px_["rate"] = library_gene * px_["scale"]
py_back = self.py_back_decoder(z, *cat_list)
py_back_cat_z = torch.cat([py_back, z], dim=-1)
py_["back_alpha"] = self.py_back_mean_log_alpha(py_back_cat_z)
py_["back_beta"] = torch.exp(self.py_back_mean_log_beta(py_back_cat_z))
log_pro_back_mean = Normal(py_["back_alpha"], py_["back_beta"]).rsample()
py_["rate_back"] = torch.exp(log_pro_back_mean)
py_fore = self.py_fore_decoder(z, *cat_list)
py_fore_cat_z = torch.cat([py_fore, z], dim=-1)
py_["fore_scale"] = self.py_fore_scale_decoder(py_fore_cat_z) + 1
py_["rate_fore"] = py_["rate_back"] * py_["fore_scale"]
p_mixing = self.sigmoid_decoder(z, *cat_list)
p_mixing_cat_z = torch.cat([p_mixing, z], dim=-1)
px_["dropout"] = self.px_dropout_decoder_gene(p_mixing_cat_z)
py_["mixing"] = self.py_background_decoder(p_mixing_cat_z)
return (px_, py_, log_pro_back_mean)
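# A minimal usage sketch (sizes are hypothetical), showing the gene (`px_`) and
# protein (`py_`) parameter dictionaries produced by the decoder.
def _decoder_totalvi_usage_example():
    dec = DecoderTOTALVI(n_input=20, n_output_genes=100, n_output_proteins=10)
    z = torch.rand(8, 20)
    library_gene = torch.rand(8, 1)
    px_, py_, log_pro_back_mean = dec(z, library_gene)
    assert px_["rate"].shape == (8, 100)
    assert py_["rate_fore"].shape == py_["rate_back"].shape == (8, 10)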
# Encoder
class EncoderTOTALVI(nn.Module):
r"""Encodes data of ``n_input`` dimensions into a latent space of ``n_output``
dimensions using a fully-connected neural network of ``n_hidden`` layers.
:param n_input: The dimensionality of the input (data space)
:param n_output: The dimensionality of the output (latent space)
:param n_cat_list: A list containing the number of categories
for each category of interest. Each category will be
included using a one-hot encoding
:param n_layers: The number of fully-connected hidden layers
:param n_hidden: The number of nodes per hidden layer
    :param dropout_rate: Dropout rate to apply to each of the hidden layers
    :param distribution: Distribution of the latent space, one of
* ``'normal'`` - Normal distribution
* ``'ln'`` - Logistic normal
"""
def __init__(
self,
n_input: int,
n_output: int,
n_cat_list: Iterable[int] = None,
n_layers: int = 2,
n_hidden: int = 256,
dropout_rate: float = 0.1,
distribution: str = "ln",
):
super().__init__()
self.encoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
)
self.z_encoder = nn.Sequential(
nn.Linear(n_hidden, n_hidden),
nn.BatchNorm1d(n_hidden),
nn.ReLU(),
nn.Dropout(p=dropout_rate),
)
self.z_mean_encoder = nn.Linear(n_hidden, n_output)
self.z_var_encoder = nn.Linear(n_hidden, n_output)
self.l_gene_encoder = nn.Sequential(
nn.Linear(n_hidden, n_hidden),
nn.BatchNorm1d(n_hidden),
nn.ReLU(),
nn.Dropout(p=dropout_rate),
)
self.l_gene_mean_encoder = nn.Linear(n_hidden, 1)
self.l_gene_var_encoder = nn.Linear(n_hidden, 1)
self.distribution = distribution
def identity(x):
return x
if distribution == "ln":
self.z_transformation = nn.Softmax(dim=-1)
else:
self.z_transformation = identity
self.l_transformation = torch.exp
def reparameterize_transformation(self, mu, var):
untran_z = Normal(mu, var.sqrt()).rsample()
z = self.z_transformation(untran_z)
return z, untran_z
def forward(self, data: torch.Tensor, *cat_list: int):
r"""The forward computation for a single sample.
#. Encodes the data into latent space using the encoder network
#. Generates a mean \\( q_m \\) and variance \\( q_v \\)
#. Samples a new value from an i.i.d. latent distribution
The dictionary `latent` contains the samples of the latent variables, while `untran_latent`
contains the untransformed versions of these latent variables. For example, the library size is log normally distributed,
so `untran_latent["l"]` gives the normal sample that was later exponentiated to become `latent["l"]`.
The logistic normal distribution is equivalent to applying softmax to a normal sample.
:param data: tensor with shape (n_input,)
:param cat_list: list of category membership(s) for this sample
:return: tensors of shape ``(n_latent,)`` for mean and var, and sample
:rtype: 6-tuple. First 4 of :py:class:`torch.Tensor`, next 2 are `dict` of :py:class:`torch.Tensor`
"""
# Parameters for latent distribution
q = self.encoder(data, *cat_list)
qz = self.z_encoder(q)
qz_m = self.z_mean_encoder(qz)
qz_v = torch.exp(self.z_var_encoder(qz)) + 1e-4
z, untran_z = self.reparameterize_transformation(qz_m, qz_v)
ql_gene = self.l_gene_encoder(q)
ql_m = self.l_gene_mean_encoder(ql_gene)
ql_v = torch.exp(self.l_gene_var_encoder(ql_gene)) + 1e-4
log_library_gene = torch.clamp(reparameterize_gaussian(ql_m, ql_v), max=15)
library_gene = self.l_transformation(log_library_gene)
latent = {}
untran_latent = {}
latent["z"] = z
latent["l"] = library_gene
untran_latent["z"] = untran_z
untran_latent["l"] = log_library_gene
return qz_m, qz_v, ql_m, ql_v, latent, untran_latent
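# A minimal usage sketch (sizes are hypothetical): with the default "ln"
# distribution the latent code is softmax-transformed, so each row of
# latent["z"] lies on the simplex.
def _encoder_totalvi_usage_example():
    enc = EncoderTOTALVI(n_input=120, n_output=20)
    qz_m, qz_v, ql_m, ql_v, latent, untran_latent = enc(torch.rand(8, 120))
    assert latent["z"].shape == (8, 20)
    assert latent["l"].shape == (8, 1)  # per-cell gene library size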
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.Softmax",
"torch.is_tensor",
"torch.distributions.Normal",
"torch.softmax",
"torch.nn.ReLU",
"torch.nn.BatchNorm1d",
"torch.exp"
] | 1.0.1 | lgyzngc/scvi | b4472e7d02a3889c405078cdd7ab4d4378309c2c |
1.4 | import torch
from speechjoey.embeddings import Embeddings
from .test_helpers import TensorTestCase
class TestEmbeddings(TensorTestCase):
def setUp(self):
self.emb_size = 10
self.vocab_size = 11
self.pad_idx = 1
seed = 42
torch.manual_seed(seed)
def test_size(self):
emb = Embeddings(embedding_dim=self.emb_size,
vocab_size=self.vocab_size,
padding_idx=self.pad_idx)
self.assertEqual(emb.lut.weight.shape,
torch.Size([self.vocab_size, self.emb_size]))
def test_pad_zeros(self):
emb = Embeddings(embedding_dim=self.emb_size,
vocab_size=self.vocab_size,
padding_idx=self.pad_idx)
# pad embedding should be zeros
self.assertTensorEqual(emb.lut.weight[self.pad_idx],
torch.zeros([self.emb_size]))
def test_freeze(self):
encoder = Embeddings(embedding_dim=self.emb_size,
vocab_size=self.vocab_size,
padding_idx=self.pad_idx,
freeze=True)
for n, p in encoder.named_parameters():
self.assertFalse(p.requires_grad)
def test_forward(self):
# fix the embedding weights
weights = self._get_random_embedding_weights()
emb = Embeddings(embedding_dim=self.emb_size,
vocab_size=self.vocab_size,
padding_idx=self.pad_idx)
self._fill_embeddings(emb, weights)
indices = torch.Tensor([0, 1, self.pad_idx, 9]).long()
embedded = emb.forward(x=indices)
# embedding operation is just slicing from weights matrix
self.assertTensorEqual(embedded, torch.index_select(input=weights,
index=indices, dim=0))
# after embedding, representations for PAD should still be zero
self.assertTensorEqual(embedded[2], torch.zeros([self.emb_size]))
def test_scale(self):
# fix the embedding weights
weights = self._get_random_embedding_weights()
emb = Embeddings(embedding_dim=self.emb_size,
vocab_size=self.vocab_size,
padding_idx=self.pad_idx,
scale=True)
emb.lut.weight.data = weights
indices = torch.Tensor([0, 1, self.pad_idx, 9]).long()
embedded = emb.forward(x=indices)
# now scaled
self.assertTensorNotEqual(
torch.index_select(input=weights, index=indices, dim=0), embedded)
self.assertTensorEqual(
torch.index_select(input=weights, index=indices, dim=0)*
(self.emb_size**0.5), embedded)
def _fill_embeddings(self, embeddings, weights):
embeddings.lut.weight.data = weights
def _get_random_embedding_weights(self):
weights = torch.rand([self.vocab_size, self.emb_size])
weights[self.pad_idx] = torch.zeros([self.emb_size])
return weights
| [
"torch.Size",
"torch.rand",
"torch.zeros",
"torch.manual_seed",
"torch.index_select",
"torch.Tensor"
] | 1.4.0 | B-Czarnetzki/speechjoey | 97b0b98137bfaf0ffe15db9de6b38e37c7fb5572 |
1.4 | from torch.nn import GRU, LSTM
import torch
from torch import nn
import numpy as np
from speechjoey.encoders import RecurrentEncoder
from .test_helpers import TensorTestCase
from speechjoey.model import build_model
from speechjoey.vocabulary import Vocabulary
import copy
class TestModelInit(TensorTestCase):
def setUp(self):
self.seed = 42
vocab_size = 30
tokens = ["tok{:02d}".format(i) for i in range(vocab_size)]
self.vocab = Vocabulary(tokens=tokens)
self.hidden_size = 64
self.cfg = {
"model": {
"tied_embeddings": False,
"tied_softmax": False,
"encoder": {
"type": "transformer",
"hidden_size": self.hidden_size,
"embeddings": {"embedding_dim": self.hidden_size},
"num_layers": 1,
},
"decoder": {
"type": "transformer",
"hidden_size": self.hidden_size,
"embeddings": {"embedding_dim": self.hidden_size},
"num_layers": 1,
},
}
}
def test_transformer_layer_norm_init(self):
torch.manual_seed(self.seed)
cfg = copy.deepcopy(self.cfg)
src_vocab = trg_vocab = self.vocab
model = build_model(cfg["model"],
src_vocab=src_vocab, trg_vocab=trg_vocab)
def check_layer_norm(m: nn.Module):
for name, child in m.named_children():
if isinstance(child, nn.LayerNorm):
self.assertTensorEqual(child.weight,
torch.ones([self.hidden_size]))
self.assertTensorEqual(child.bias,
torch.zeros([self.hidden_size]))
else:
check_layer_norm(child)
check_layer_norm(model)
| [
"torch.manual_seed",
"torch.zeros",
"torch.ones"
] | 1.4.0 | B-Czarnetzki/speechjoey | 97b0b98137bfaf0ffe15db9de6b38e37c7fb5572 |
1.5 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Modules to compute the matching cost and solve the corresponding LSAP.
"""
import torch
from scipy.optimize import linear_sum_assignment
from torch import nn
from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
from IPython import embed
class HungarianMatcher(nn.Module):
"""This class computes an assignment between the targets and the predictions of the network
For efficiency reasons, the targets don't include the no_object. Because of this, in general,
there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
while the others are un-matched (and thus treated as non-objects).
"""
def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1):
"""Creates the matcher
Params:
cost_class: This is the relative weight of the classification error in the matching cost
cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost
"""
super().__init__()
self.cost_class = cost_class
self.cost_bbox = cost_bbox
self.cost_giou = cost_giou
assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs cant be 0"
@torch.no_grad()
def forward(self, outputs, targets):
""" Performs the matching
Params:
outputs: This is a dict that contains at least these entries:
"pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
"pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates
targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
"labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
objects in the target) containing the class labels
"boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates
Returns:
A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
"""
bs, num_queries = outputs["pred_logits"].shape[:2]
# We flatten to compute the cost matrices in a batch
out_prob = outputs["pred_logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes]
out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4]
# Also concat the target labels and boxes
tgt_ids = torch.cat([v["labels"] for v in targets])
tgt_bbox = torch.cat([v["boxes"] for v in targets])
        # embed(header='matcher')  # IPython debugging hook; kept commented out so the forward pass runs non-interactively
# Compute the classification cost. Contrary to the loss, we don't use the NLL,
# but approximate it in 1 - proba[target class].
        # The 1 is a constant that doesn't change the matching, it can be omitted.
cost_class = -out_prob[:, tgt_ids]
# Compute the L1 cost between boxes
cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)
        # Compute the giou cost between boxes
cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))
# Final cost matrix
C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou
C = C.view(bs, num_queries, -1).cpu()
sizes = [len(v["boxes"]) for v in targets]
indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]
return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
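# A minimal usage sketch with dummy predictions and targets (sizes, class
# indices and cost weights are hypothetical); one (prediction, target) pair is
# returned per ground-truth box.
def _matcher_usage_example():
    matcher = HungarianMatcher(cost_class=1, cost_bbox=5, cost_giou=2)
    outputs = {
        "pred_logits": torch.rand(1, 5, 92),  # 1 image, 5 queries, 92 classes
        "pred_boxes": torch.rand(1, 5, 4),    # boxes in (cx, cy, w, h) format
    }
    targets = [{"labels": torch.tensor([3, 17]), "boxes": torch.rand(2, 4)}]
    indices = matcher(outputs, targets)
    assert len(indices) == 1 and len(indices[0][0]) == 2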
def build_matcher(args):
return HungarianMatcher(cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou)
| [
"torch.cat",
"torch.no_grad",
"torch.as_tensor",
"torch.cdist"
] | 1.5.0 | xieenze/detr | 13bdf0bf59fead571cd793a01eae50e7620fc6a2 |
1.8 | import torch
import torch.nn.functional as F
import cv2 as cv
import numpy as np
import os
from glob import glob
from icecream import ic
from scipy.spatial.transform import Rotation as Rot
from scipy.spatial.transform import Slerp
import pickle
import diffoptics as optics
from diffoptics import Rays
import sys
# Path to your local clone of the magis simulator
SIMULATOR_PATH = '/sdf/home/s/sgaz/Magis-simulator'
sys.path.insert(0, SIMULATOR_PATH)
from magis.main_helpers import make_scene, get_sensor_index_positions, get_positions
from magis.mirror_utils import get_views_given_fixed_mirrors_smooth
class Dataset:
def __init__(self, conf):
super(Dataset, self).__init__()
print('Load data: Begin')
self.device = torch.device('cuda')
self.conf = conf
self.dset_name = conf['dset_path']
self.calib_name = conf['calib_path']
with open(self.dset_name,'rb') as f:
in_dataset = pickle.load(f)
#with open(calib_name, 'rb') as f:
# calib_dict = pickle.load(f)
self.H, self.W = in_dataset.shape[1:3]
self.image_pixels = self.H * self.W
in_dataset = in_dataset.reshape(in_dataset.shape[0], -1, 6)
assert in_dataset.shape == (in_dataset.shape[0], self.H*self.W, 1+1+1+3)
self.in_dataset = torch.from_numpy(in_dataset)
self.n_images = len(self.in_dataset)
#Let's try to put a MAGIS scene in here
# Get mirror parameters
m = conf['m']
f = conf['f']
xm,ym,zm,mirror_radii,angles,theta,phi,foc,obj = get_views_given_fixed_mirrors_smooth(
m = m,
f = f * 100,
fn = 1.23,
sv = 24/10,
sh = 24/10,
skipmirrs=5,
extreme_view_angle = np.radians(55),
window_pos = 3.0,
fixed_radii = [.25],
num_mirrors = [500])
#assert len(angles) == self.in_dataset.shape[0]
normals = torch.zeros((len(angles), 3))
for i in range(len(theta)):
normal_angles = angles
normal = optics.vector(np.cos(normal_angles[i]),
np.cos(theta[i]) * np.sin(normal_angles[i]),
np.sin(theta[i]) * np.sin(normal_angles[i]))
normals[i] = optics.normalize_vector(normal)
mirror_parameters = normals, torch.tensor(xm / 100, dtype=torch.float), torch.tensor(ym / 100, dtype=torch.float), torch.tensor(zm / 100, dtype=torch.float), torch.tensor(mirror_radii / 100, dtype=torch.float)
# @Todo check sensor parameters
pixel_size = conf['pixel_size']
self.scene = make_scene(object_x_pos=obj/100, f=f, m=m, na=1 / 1.4, nb_mirror=None, sensor_resolution=(conf['sensor_resolution_x'],conf['sensor_resolution_y']),
sensor_pixel_size=(pixel_size, pixel_size), poisson_noise_mean=2, quantum_efficiency=0.77,
mirror_parameters=mirror_parameters)
self.continuous_positions = get_positions(self.scene)
rad = conf['rad']
trans_mat = torch.eye(4)
trans_mat[0][3] = -obj/100* 1/rad
scale_mat = torch.eye(4)
scale_mat[0][0] = 1/rad
scale_mat[1][1] = 1/rad
scale_mat[2][2] = 1/rad
full_scale_mat = torch.matmul(trans_mat, scale_mat)[:-1]
self.full_scale_mat = full_scale_mat
object_bbox_min = np.array([-1.01, -1.01, -1.01, 1.0])
object_bbox_max = np.array([ 1.01, 1.01, 1.01, 1.0])
self.object_bbox_min = object_bbox_min[:, None][:3, 0]
self.object_bbox_max = object_bbox_max[:, None][:3, 0]
print('Load data: End')
def gen_rays_at(self, img_idx, resolution_level=1):
"""
Generate rays at world space from one camera.
"""
l = resolution_level
tx = torch.linspace(0, self.W - 1, self.W // l)
ty = torch.linspace(0, self.H - 1, self.H // l)
pixels_x, pixels_y = torch.meshgrid(tx, ty)
p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3
p = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3
rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # W, H, 3
rays_v = torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3
rays_o = self.pose_all[img_idx, None, None, :3, 3].expand(rays_v.shape) # W, H, 3
return rays_o.transpose(0, 1), rays_v.transpose(0, 1)
def gen_random_rays_at(self, img_idx, batch_size):
"""
Generate random rays at world space from one camera.
"""
pixels_x = torch.randint(low=0, high=self.W, size=[batch_size])
pixels_y = torch.randint(low=0, high=self.H, size=[batch_size])
color = self.images[img_idx][(pixels_y, pixels_x)] # batch_size, 3
mask = self.masks[img_idx][(pixels_y, pixels_x)] # batch_size, 3
p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1).float() # batch_size, 3
p = torch.matmul(self.intrinsics_all_inv[img_idx, None, :3, :3], p[:, :, None]).squeeze() # batch_size, 3
rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # batch_size, 3
rays_v = torch.matmul(self.pose_all[img_idx, None, :3, :3], rays_v[:, :, None]).squeeze() # batch_size, 3
rays_o = self.pose_all[img_idx, None, :3, 3].expand(rays_v.shape) # batch_size, 3
return torch.cat([rays_o.cpu(), rays_v.cpu(), color, mask[:, :1]], dim=-1).cuda() # batch_size, 10
def gen_rays_between(self, idx_0, idx_1, ratio, resolution_level=1):
"""
Interpolate pose between two cameras.
"""
l = resolution_level
tx = torch.linspace(0, self.W - 1, self.W // l)
ty = torch.linspace(0, self.H - 1, self.H // l)
pixels_x, pixels_y = torch.meshgrid(tx, ty)
p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3
p = torch.matmul(self.intrinsics_all_inv[0, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3
rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # W, H, 3
trans = self.pose_all[idx_0, :3, 3] * (1.0 - ratio) + self.pose_all[idx_1, :3, 3] * ratio
pose_0 = self.pose_all[idx_0].detach().cpu().numpy()
pose_1 = self.pose_all[idx_1].detach().cpu().numpy()
pose_0 = np.linalg.inv(pose_0)
pose_1 = np.linalg.inv(pose_1)
rot_0 = pose_0[:3, :3]
rot_1 = pose_1[:3, :3]
rots = Rot.from_matrix(np.stack([rot_0, rot_1]))
key_times = [0, 1]
slerp = Slerp(key_times, rots)
rot = slerp(ratio)
pose = np.diag([1.0, 1.0, 1.0, 1.0])
pose = pose.astype(np.float32)
pose[:3, :3] = rot.as_matrix()
pose[:3, 3] = ((1.0 - ratio) * pose_0 + ratio * pose_1)[:3, 3]
pose = np.linalg.inv(pose)
rot = torch.from_numpy(pose[:3, :3]).cuda()
trans = torch.from_numpy(pose[:3, 3]).cuda()
rays_v = torch.matmul(rot[None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3
rays_o = trans[None, None, :3].expand(rays_v.shape) # W, H, 3
return rays_o.transpose(0, 1), rays_v.transpose(0, 1)
def near_far_from_sphere(self, rays_o, rays_d):
a = torch.sum(rays_d**2, dim=-1, keepdim=True)
b = 2.0 * torch.sum(rays_o * rays_d, dim=-1, keepdim=True)
mid = 0.5 * (-b) / a
near = mid - 1.0
far = mid + 1.0
return near, far
def image_at(self, idx, resolution_level):
#img = cv.imread(self.images_lis[idx])
img = self.in_dataset[idx, :, -3:].reshape((self.W,self.W, 3)).numpy()*256
return (cv.resize(img, (self.W // resolution_level, self.W // resolution_level))).clip(0, 255).astype(np.uint8)
#return (cv.resize(img, (self.W // resolution_level, self.H // resolution_level))).clip(0, 255)
def gen_rays_at_magis(self, lens, mirror, data_id, mirror_id):
"""
Generate rays at world space from one camera.
"""
ind = torch.arange(self.H*self.W)
# Sampling rays from the sensor to the lens
origins = torch.zeros(ind.shape[0], 3, device=self.device)
origins[:, 0] = self.continuous_positions[mirror_id][0]
origins[:, 1] = self.in_dataset[data_id, ind, 1]
origins[:, 2] = self.in_dataset[data_id, ind, 2]
points_on_lens = lens.sample_points_on_lens(ind.shape[0], device=self.device)
directions = optics.batch_vector(points_on_lens[:, 0] - origins[:, 0],
points_on_lens[:, 1]*0 - origins[:, 1],
points_on_lens[:, 2]*0 - origins[:, 2])
rays_sensor_to_lens = Rays(origins, directions, device=self.device,
meta = {'target' : self.in_dataset[data_id, ind, -3:].to(self.device),
'ind' : ind})
# Intersection with lens
t1 = lens.get_ray_intersection(rays_sensor_to_lens)
mask_t1 = ~torch.isnan(t1)
ray_lens_to_mirror = lens.intersect(rays_sensor_to_lens.get_at(mask_t1), t1[mask_t1])
# Intersection with mirror
t2 = mirror.get_ray_intersection(ray_lens_to_mirror)
mask = ~torch.isnan(t2)
assert mask.shape[0] == ind[mask_t1].shape[0]
#rays_mirror_to_object = mirror.intersect(ray_lens_to_mirror.get_at(mask), t2[mask])
rays_mirror_to_object = mirror.intersect(ray_lens_to_mirror, t2)
color = self.in_dataset[data_id, ind[mask_t1], -3:]
rays_mirror_to_object.origins = torch.matmul(self.full_scale_mat,
torch.cat((rays_mirror_to_object.origins, torch.ones((rays_mirror_to_object.origins.shape[0],1))), dim=1)[:, :, np.newaxis]).squeeze(dim=-1)
rays_mirror_to_object.directions = torch.matmul(self.full_scale_mat,
torch.cat((rays_mirror_to_object.directions, torch.zeros((rays_mirror_to_object.origins.shape[0],1))), dim=1)[:, :, np.newaxis]).squeeze(dim=-1)
rays_mirror_to_object.directions = rays_mirror_to_object.directions/torch.sqrt(torch.sum(rays_mirror_to_object.directions**2, dim=1, keepdim=True))
return rays_mirror_to_object, color.cuda()
def gen_random_rays_at_magis(self, lens, mirror, ind, data_id, mirror_id):
"""
Generate random rays at world space from one camera.
"""
# Sampling rays from the sensor to the lens
origins = torch.zeros(ind.shape[0], 3, device=self.device)
origins[:, 0] = self.continuous_positions[mirror_id][0]
origins[:, 1] = self.in_dataset[data_id, ind, 1]
origins[:, 2] = self.in_dataset[data_id, ind, 2]
points_on_lens = lens.sample_points_on_lens(ind.shape[0], device=self.device)
directions = optics.batch_vector(points_on_lens[:, 0] - origins[:, 0],
points_on_lens[:, 1]*0 - origins[:, 1],
points_on_lens[:, 2]*0 - origins[:, 2])
rays_sensor_to_lens = Rays(origins, directions, device=self.device,
meta = {'target' : self.in_dataset[data_id, ind, -3:].to(self.device),
'ind' : ind})
# Intersection with lens
t1 = lens.get_ray_intersection(rays_sensor_to_lens)
mask_t1 = ~torch.isnan(t1)
ray_lens_to_mirror = lens.intersect(rays_sensor_to_lens.get_at(mask_t1), t1[mask_t1])
# Intersection with mirror
t2 = mirror.get_ray_intersection(ray_lens_to_mirror)
mask = ~torch.isnan(t2)
assert mask.shape[0] == ind[mask_t1].shape[0]
rays_mirror_to_object = mirror.intersect(ray_lens_to_mirror.get_at(mask), t2[mask])
color = self.in_dataset[data_id, ind[mask_t1][mask], -3:]
rays_mirror_to_object.origins = torch.matmul(self.full_scale_mat,
torch.cat((rays_mirror_to_object.origins, torch.ones((rays_mirror_to_object.origins.shape[0],1))), dim=1)[:, :, np.newaxis]).squeeze(dim=-1)
rays_mirror_to_object.directions = torch.matmul(self.full_scale_mat,
torch.cat((rays_mirror_to_object.directions, torch.zeros((rays_mirror_to_object.origins.shape[0],1))), dim=1)[:, :, np.newaxis]).squeeze(dim=-1)
rays_mirror_to_object.directions = rays_mirror_to_object.directions/torch.sqrt(torch.sum(rays_mirror_to_object.directions**2, dim=1, keepdim=True))
return rays_mirror_to_object, color.cuda()
| [
"torch.isnan",
"torch.ones",
"torch.eye",
"torch.meshgrid",
"torch.sum",
"torch.randint",
"torch.tensor",
"torch.zeros",
"torch.device",
"torch.linspace",
"torch.matmul",
"torch.arange",
"torch.from_numpy",
"torch.ones_like",
"torch.linalg.norm"
] | 1.8.0 | magis-slac/NeuS | f3ef3c089b2076ea8d73679bf37a94ef44a08939 |
1.0 | """AttentionWalk class."""
import torch
import numpy as np
import pandas as pd
from tqdm import trange
from utils import read_graph, feature_calculator, adjacency_opposite_calculator
class AttentionWalkLayer(torch.nn.Module):
"""
Attention Walk Layer.
For details see the paper.
"""
def __init__(self, args, shapes):
"""
Setting up the layer.
:param args: Arguments object.
:param shapes: Shape of the target tensor.
"""
super(AttentionWalkLayer, self).__init__()
self.args = args
self.shapes = shapes
self.define_weights()
self.initialize_weights()
def define_weights(self):
"""
Define the model weights.
"""
half_dim = int(self.args.dimensions/2)
self.left_factors = torch.nn.Parameter(torch.Tensor(self.shapes[1], half_dim))
self.right_factors = torch.nn.Parameter(torch.Tensor(half_dim, self.shapes[1]))
self.attention = torch.nn.Parameter(torch.Tensor(self.shapes[0], 1))
def initialize_weights(self):
"""
Initializing the weights.
"""
torch.nn.init.uniform_(self.left_factors, -0.01, 0.01)
torch.nn.init.uniform_(self.right_factors, -0.01, 0.01)
torch.nn.init.uniform_(self.attention, -0.01, 0.01)
def forward(self, weighted_target_tensor, adjacency_opposite):
"""
Doing a forward propagation pass.
:param weighted_target_tensor: Target tensor factorized.
:param adjacency_opposite: No-edge indicator matrix.
:return loss: Loss being minimized.
"""
self.attention_probs = torch.nn.functional.softmax(self.attention, dim=0)
probs = self.attention_probs.unsqueeze(1).expand_as(weighted_target_tensor)
weighted_target_tensor = weighted_target_tensor * probs
weighted_tar_mat = torch.sum(weighted_target_tensor, dim=0)
weighted_tar_mat = weighted_tar_mat.view(self.shapes[1], self.shapes[2])
estimate = torch.mm(self.left_factors, self.right_factors)
loss_on_target = - weighted_tar_mat* torch.log(torch.sigmoid(estimate))
loss_opposite = -adjacency_opposite * torch.log(1-torch.sigmoid(estimate))
loss_on_mat = self.args.num_of_walks*weighted_tar_mat.shape[0]*loss_on_target+loss_opposite
abs_loss_on_mat = torch.abs(loss_on_mat)
average_loss_on_mat = torch.mean(abs_loss_on_mat)
norms = torch.mean(torch.abs(self.left_factors))+torch.mean(torch.abs(self.right_factors))
loss_on_regularization = self.args.beta * (self.attention.norm(2)**2)
loss = average_loss_on_mat + loss_on_regularization + self.args.gamma*norms
return loss
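# A minimal usage sketch on synthetic tensors (all sizes and hyper-parameters
# are hypothetical): shapes = (window_size, nodes, nodes).
def _attention_walk_layer_example():
    from argparse import Namespace
    args = Namespace(dimensions=8, num_of_walks=80, beta=0.5, gamma=0.5)
    layer = AttentionWalkLayer(args, shapes=(3, 5, 5))
    weighted_target_tensor = torch.rand(3, 5, 5)
    adjacency_opposite = torch.rand(5, 5)
    loss = layer(weighted_target_tensor, adjacency_opposite)
    assert loss.dim() == 0  # scalar loss to be minimized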
class AttentionWalkTrainer(object):
"""
Class for training the AttentionWalk model.
"""
def __init__(self, args):
"""
Initializing the training object.
:param args: Arguments object.
"""
self.args = args
self.graph = read_graph(self.args.edge_path)
self.initialize_model_and_features()
def initialize_model_and_features(self):
"""
        Creating data tensors and factorization model.
"""
self.target_tensor = feature_calculator(self.args, self.graph)
self.target_tensor = torch.FloatTensor(self.target_tensor)
self.adjacency_opposite = adjacency_opposite_calculator(self.graph)
self.adjacency_opposite = torch.FloatTensor(self.adjacency_opposite)
self.model = AttentionWalkLayer(self.args, self.target_tensor.shape)
def fit(self):
"""
Fitting the model
"""
print("\nTraining the model.\n")
self.model.train()
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
self.epochs = trange(self.args.epochs, desc="Loss")
for _ in self.epochs:
self.optimizer.zero_grad()
loss = self.model(self.target_tensor, self.adjacency_opposite)
loss.backward()
self.optimizer.step()
self.epochs.set_description("Attention Walk (Loss=%g)" % round(loss.item(), 4))
def save_model(self):
"""
Saving the embedding and attention vector.
"""
self.save_embedding()
self.save_attention()
def save_embedding(self):
"""
Saving the embedding matrices as one unified embedding.
"""
print("\nSaving the model.\n")
left = self.model.left_factors.detach().numpy()
right = self.model.right_factors.detach().numpy().T
indices = np.array([range(len(self.graph))]).reshape(-1, 1)
embedding = np.concatenate([indices, left, right], axis=1)
columns = ["id"] + ["x_" + str(x) for x in range(self.args.dimensions)]
embedding = pd.DataFrame(embedding, columns=columns)
embedding.to_csv(self.args.embedding_path, index=None)
def save_attention(self):
"""
Saving the attention vector.
"""
attention = self.model.attention_probs.detach().numpy()
indices = np.array([range(self.args.window_size)]).reshape(-1, 1)
attention = np.concatenate([indices, attention], axis=1)
attention = pd.DataFrame(attention, columns=["Order", "Weight"])
attention.to_csv(self.args.attention_path, index=None)
| [
"torch.sigmoid",
"torch.FloatTensor",
"torch.mm",
"torch.abs",
"torch.nn.functional.softmax",
"torch.nn.init.uniform_",
"torch.Tensor",
"torch.mean",
"torch.sum"
] | 1.0.0 | erdiolmezogullari/AttentionWalk | d8c8297018374d965c0a024c3f1833f54347504e |
1.1 | import torch
import torch.nn as nn
import torch.nn.functional as F
class DummyNet(nn.Module):
def __init__(self):
super(DummyNet, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5, padding=2)
self.conv2 = nn.Conv2d(10, 5, kernel_size=5, padding=2)
self.softmax = nn.Softmax2d()
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.conv2(x)
x = self.softmax(x)
return x
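# A minimal usage sketch (input size is hypothetical): the Softmax2d head makes
# the 5 output channels sum to one at every pixel.
def _dummy_net_example():
    net = DummyNet()
    out = net(torch.rand(2, 3, 32, 32))
    assert out.shape == (2, 5, 32, 32)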
| [
"torch.nn.Conv2d",
"torch.nn.Softmax2d"
] | 1.1.0 | LendelTheGreat/weak-segmentation | 0ff6015f1af741cfb50ef8fb6f55cea822f68f7a |
1.6 | from abc import ABCMeta, abstractmethod
from typing import Any, Optional, Sequence
import numpy as np
import torch
from torch.optim import Optimizer
from ...augmentation import AugmentationPipeline
from ...gpu import Device
from ...models.builders import (
create_categorical_policy,
create_squashed_normal_policy,
create_value_function,
)
from ...models.encoders import EncoderFactory
from ...models.optimizers import OptimizerFactory
from ...models.torch import (
CategoricalPolicy,
Policy,
SquashedNormalPolicy,
ValueFunction,
squash_action,
)
from ...preprocessing import ActionScaler, Scaler
from ...torch_utility import augmentation_api, eval_api, torch_api, train_api
from .base import TorchImplBase
class AWRBaseImpl(TorchImplBase, metaclass=ABCMeta):
_actor_learning_rate: float
_critic_learning_rate: float
_actor_optim_factory: OptimizerFactory
_critic_optim_factory: OptimizerFactory
_actor_encoder_factory: EncoderFactory
_critic_encoder_factory: EncoderFactory
_use_gpu: Optional[Device]
_v_func: Optional[ValueFunction]
_policy: Optional[Policy]
_critic_optim: Optional[Optimizer]
_actor_optim: Optional[Optimizer]
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
actor_learning_rate: float,
critic_learning_rate: float,
actor_optim_factory: OptimizerFactory,
critic_optim_factory: OptimizerFactory,
actor_encoder_factory: EncoderFactory,
critic_encoder_factory: EncoderFactory,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
action_scaler: Optional[ActionScaler],
augmentation: AugmentationPipeline,
):
super().__init__(
observation_shape, action_size, scaler, action_scaler, augmentation
)
self._actor_learning_rate = actor_learning_rate
self._critic_learning_rate = critic_learning_rate
self._actor_optim_factory = actor_optim_factory
self._critic_optim_factory = critic_optim_factory
self._actor_encoder_factory = actor_encoder_factory
self._critic_encoder_factory = critic_encoder_factory
self._use_gpu = use_gpu
# initialized in build
self._v_func = None
self._policy = None
self._critic_optim = None
self._actor_optim = None
def build(self) -> None:
# setup torch models
self._build_critic()
self._build_actor()
if self._use_gpu:
self.to_gpu(self._use_gpu)
else:
self.to_cpu()
# setup optimizer after the parameters move to GPU
self._build_critic_optim()
self._build_actor_optim()
def _build_critic(self) -> None:
self._v_func = create_value_function(
self._observation_shape, self._critic_encoder_factory
)
def _build_critic_optim(self) -> None:
assert self._v_func is not None
self._critic_optim = self._critic_optim_factory.create(
self._v_func.parameters(), lr=self._critic_learning_rate
)
@abstractmethod
def _build_actor(self) -> None:
pass
def _build_actor_optim(self) -> None:
assert self._policy is not None
self._actor_optim = self._actor_optim_factory.create(
self._policy.parameters(), lr=self._actor_learning_rate
)
@train_api
@torch_api(scaler_targets=["observation"])
def update_critic(
self, observation: torch.Tensor, value: torch.Tensor
) -> np.ndarray:
assert self._critic_optim is not None
self._critic_optim.zero_grad()
loss = self.compute_critic_loss(observation, value)
loss.backward()
self._critic_optim.step()
return loss.cpu().detach().numpy()
@augmentation_api(targets=["observation"])
def compute_critic_loss(
self, observation: torch.Tensor, value: torch.Tensor
) -> torch.Tensor:
assert self._v_func is not None
return self._v_func.compute_error(observation, value)
@train_api
@torch_api(scaler_targets=["observation"], action_scaler_targets=["action"])
def update_actor(
self,
observation: torch.Tensor,
action: torch.Tensor,
weight: torch.Tensor,
) -> np.ndarray:
assert self._actor_optim is not None
self._actor_optim.zero_grad()
loss = self.compute_actor_loss(observation, action, weight)
loss.backward()
self._actor_optim.step()
return loss.cpu().detach().numpy()
@augmentation_api(targets=["observation"])
def compute_actor_loss(
self,
observation: torch.Tensor,
action: torch.Tensor,
weight: torch.Tensor,
) -> torch.Tensor:
return self._compute_actor_loss(observation, action, weight)
@abstractmethod
def _compute_actor_loss(
self,
observation: torch.Tensor,
action: torch.Tensor,
weight: torch.Tensor,
) -> torch.Tensor:
pass
def _predict_best_action(self, x: torch.Tensor) -> torch.Tensor:
assert self._policy is not None
return self._policy.best_action(x)
def _sample_action(self, x: torch.Tensor) -> torch.Tensor:
assert self._policy is not None
return self._policy.sample(x)
@eval_api
@torch_api(scaler_targets=["x"])
def predict_value(
self, x: torch.Tensor, *args: Any, **kwargs: Any
) -> np.ndarray:
assert self._v_func is not None
with torch.no_grad():
return self._v_func(x).view(-1).cpu().detach().numpy()
class AWRImpl(AWRBaseImpl):
_policy: Optional[SquashedNormalPolicy]
def _build_actor(self) -> None:
self._policy = create_squashed_normal_policy(
self._observation_shape,
self._action_size,
self._actor_encoder_factory,
)
def _compute_actor_loss(
self,
observation: torch.Tensor,
action: torch.Tensor,
weight: torch.Tensor,
) -> torch.Tensor:
assert self._policy is not None
dist = self._policy.dist(observation)
# unnormalize action via inverse tanh function
unnormalized_action = torch.atanh(action.clamp(-0.999999, 0.999999))
# compute log probability
_, log_probs = squash_action(dist, unnormalized_action)
return -(weight * log_probs).mean()
class DiscreteAWRImpl(AWRBaseImpl):
_policy: Optional[CategoricalPolicy]
def _build_actor(self) -> None:
self._policy = create_categorical_policy(
self._observation_shape,
self._action_size,
self._actor_encoder_factory,
)
def _compute_actor_loss(
self,
observation: torch.Tensor,
action: torch.Tensor,
weight: torch.Tensor,
) -> torch.Tensor:
assert self._policy is not None
dist = self._policy.dist(observation)
log_probs = dist.log_prob(action).view(observation.shape[0], -1)
return -(weight * log_probs.sum(dim=1, keepdim=True)).mean()
| [
"torch.no_grad"
] | 1.6.0 | YangRui2015/d3rlpy | da778b2a2b0afbafe25395296baecd0d4d0cd0d5 |
1.6 | import gym
import numpy as np
import pytest
import torch
from d3rlpy.dataset import Episode, MDPDataset
from d3rlpy.preprocessing import (
MinMaxScaler,
PixelScaler,
StandardScaler,
create_scaler,
)
@pytest.mark.parametrize("scaler_type", ["pixel", "min_max", "standard"])
def test_create_scaler(scaler_type):
scaler = create_scaler(scaler_type)
if scaler_type == "pixel":
assert isinstance(scaler, PixelScaler)
elif scaler_type == "min_max":
assert isinstance(scaler, MinMaxScaler)
elif scaler_type == "standard":
assert isinstance(scaler, StandardScaler)
@pytest.mark.parametrize("observation_shape", [(4, 84, 84)])
def test_pixel_scaler(observation_shape):
scaler = PixelScaler()
x = torch.randint(high=255, size=observation_shape)
y = scaler.transform(x)
assert torch.all(y == x.float() / 255.0)
assert scaler.get_type() == "pixel"
assert scaler.get_params() == {}
assert torch.all(scaler.reverse_transform(y) == x)
@pytest.mark.parametrize("observation_shape", [(100,)])
@pytest.mark.parametrize("batch_size", [32])
def test_min_max_scaler(observation_shape, batch_size):
shape = (batch_size,) + observation_shape
observations = np.random.random(shape).astype("f4")
max = observations.max(axis=0)
min = observations.min(axis=0)
scaler = MinMaxScaler(maximum=max, minimum=min)
# check range
y = scaler.transform(torch.tensor(observations))
assert np.all(y.numpy() >= 0.0)
assert np.all(y.numpy() <= 1.0)
x = torch.rand((batch_size,) + observation_shape)
y = scaler.transform(x)
ref_y = (x.numpy() - min.reshape((1, -1))) / (max - min).reshape((1, -1))
assert np.allclose(y.numpy(), ref_y)
assert scaler.get_type() == "min_max"
params = scaler.get_params()
assert np.all(params["minimum"] == min)
assert np.all(params["maximum"] == max)
assert torch.allclose(scaler.reverse_transform(y), x)
@pytest.mark.parametrize("observation_shape", [(100,)])
@pytest.mark.parametrize("batch_size", [32])
def test_min_max_scaler_with_episode(observation_shape, batch_size):
shape = (batch_size,) + observation_shape
observations = np.random.random(shape).astype("f4")
actions = np.random.random((batch_size, 1))
rewards = np.random.random(batch_size)
terminals = np.random.randint(2, size=batch_size)
terminals[-1] = 1.0
dataset = MDPDataset(
observations=observations,
actions=actions,
rewards=rewards,
terminals=terminals,
)
max = observations.max(axis=0)
min = observations.min(axis=0)
scaler = MinMaxScaler()
scaler.fit(dataset.episodes)
x = torch.rand((batch_size,) + observation_shape)
y = scaler.transform(x)
ref_y = (x.numpy() - min.reshape((1, -1))) / (max - min).reshape((1, -1))
assert np.allclose(y.numpy(), ref_y)
def test_min_max_scaler_with_env():
env = gym.make("BreakoutNoFrameskip-v4")
scaler = MinMaxScaler()
scaler.fit_with_env(env)
x = torch.tensor(env.reset().reshape((1,) + env.observation_space.shape))
y = scaler.transform(x)
assert torch.all(x / 255.0 == y)
@pytest.mark.parametrize("observation_shape", [(100,)])
@pytest.mark.parametrize("batch_size", [32])
def test_standard_scaler(observation_shape, batch_size):
shape = (batch_size,) + observation_shape
observations = np.random.random(shape).astype("f4")
mean = observations.mean(axis=0)
std = observations.std(axis=0)
scaler = StandardScaler(mean=mean, std=std)
x = torch.rand((batch_size,) + observation_shape)
y = scaler.transform(x)
ref_y = (x.numpy() - mean.reshape((1, -1))) / std.reshape((1, -1))
assert np.allclose(y.numpy(), ref_y)
assert scaler.get_type() == "standard"
params = scaler.get_params()
assert np.all(params["mean"] == mean)
assert np.all(params["std"] == std)
assert torch.allclose(scaler.reverse_transform(y), x, atol=1e-6)
@pytest.mark.parametrize("observation_shape", [(100,)])
@pytest.mark.parametrize("batch_size", [32])
def test_standard_scaler_with_episode(observation_shape, batch_size):
shape = (batch_size,) + observation_shape
observations = np.random.random(shape).astype("f4")
actions = np.random.random((batch_size, 1)).astype("f4")
rewards = np.random.random(batch_size).astype("f4")
terminals = np.random.randint(2, size=batch_size)
terminals[-1] = 1.0
dataset = MDPDataset(
observations=observations,
actions=actions,
rewards=rewards,
terminals=terminals,
)
mean = observations.mean(axis=0)
std = observations.std(axis=0)
scaler = StandardScaler()
scaler.fit(dataset.episodes)
x = torch.rand((batch_size,) + observation_shape)
y = scaler.transform(x)
ref_y = (x.numpy() - mean.reshape((1, -1))) / std.reshape((1, -1))
assert np.allclose(y.numpy(), ref_y, atol=1e-6)
| [
"torch.rand",
"torch.randint",
"torch.all",
"torch.tensor"
] | 1.6.0 | YangRui2015/d3rlpy | da778b2a2b0afbafe25395296baecd0d4d0cd0d5 |
1.7 | # -*- coding: utf-8 -*-
"""CBOW Embedding"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from models.base_model import BaseModel
from models.networks.cbow_embedder import Net as CBOW
class Net(nn.Module):
"""Network for CBOW"""
""" CBOW """
def __init__(self, embedder):
super().__init__()
"""
Args:
vocab_size
emb_size
"""
self.embedding = embedder.embedding
self.embedding.weight.requires_grad = False
self.emb_size = embedder.emb_size
self.vocab_size = embedder.vocab_size
self.net = nn.Sequential(
nn.Linear(self.emb_size, 128, bias=False),
nn.Dropout(p=0.2, inplace=False),
nn.Linear(128, self.vocab_size, bias=False),
nn.Softmax(dim=-1)
)
def forward(self, x):
x = self.embedding(x)
x = torch.sum(x, dim=1)
x = self.net(x)
return x
class SimpleNN(BaseModel):
"""SimpleNN"""
def __init__(self, cfg: object) -> None:
"""Initialization
Build model.
Args:
cfg: Config.
"""
super().__init__(cfg)
self.embedder = CBOW(vocab_size=self.cfg.model.embedder.vocab_size, emb_size=self.cfg.model.embedder.emb_size)
ckpt_path = self.cfg.model.embedder.initial_ckpt
if torch.cuda.is_available():
ckpt = torch.load(ckpt_path)
else:
ckpt = torch.load(ckpt_path, torch.device('cpu'))
self.embedder.load_state_dict(ckpt['model_state_dict'])
self.num_class = self.cfg.data.dataset.num_class
self.network = Net(embedder=self.embedder)
self.build() | [
"torch.nn.Linear",
"torch.device",
"torch.nn.Dropout",
"torch.nn.Softmax",
"torch.cuda.is_available",
"torch.load",
"torch.sum"
] | 1.7.1 | Piko-Piko-Pon-Taro/navict-recommender | 7eeaf0f77e500c1c0ecb15f9613aa08c2ef5c83c |
1.7 | import torch.nn as nn
class Network(nn.Module):
def __init__(self):
super().__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=8,
kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(num_features=8),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.layer2 = nn.Sequential(
nn.Conv2d(in_channels=8, out_channels=16,
kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(num_features=16),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.fc1 = nn.Sequential(
nn.Linear(in_features=7*7*16, out_features=128),
nn.BatchNorm1d(num_features=128),
nn.Dropout(0.2),
nn.ReLU()
)
self.fc2 = nn.Sequential(
nn.Linear(in_features=128, out_features=64),
nn.BatchNorm1d(num_features=64),
nn.ReLU()
)
self.out = nn.Linear(in_features=64, out_features=10)
def forward(self, t):
t = self.layer1(t)
t = self.layer2(t)
t = t.reshape(t.size(0), -1)
t = self.fc1(t)
t = self.fc2(t)
t = self.out(t)
return t
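# A minimal usage sketch (batch size is hypothetical): 1-channel 28x28 inputs,
# as in Fashion-MNIST, are mapped to 10 class logits.
def _network_example():
    import torch
    net = Network()
    logits = net(torch.rand(4, 1, 28, 28))
    assert logits.shape == (4, 10)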
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d"
] | 1.7.0 | priyavrat-misra/fashion-mnist | 9e9d18612b7556dbff5849be87cb35c296993d9e |
1.1 | from __future__ import print_function, absolute_import
import argparse
import os.path as osp
import random
import numpy as np
import sys
from sklearn.cluster import KMeans
from sklearn.preprocessing import normalize
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
from mmt import datasets
from mmt import models
from mmt.trainers import MMTTrainer
from mmt.evaluators import Evaluator, extract_features
from mmt.utils.data import IterLoader
from mmt.utils.data import transforms as T
from mmt.utils.data.sampler import RandomMultipleGallerySampler
from mmt.utils.data.preprocessor import Preprocessor
from mmt.utils.logging import Logger
from mmt.utils.serialization import load_checkpoint, save_checkpoint, copy_state_dict
import os
best_mAP = 0
def get_data(name, data_dir):
root = osp.join(data_dir, name)
dataset = datasets.create(name, root)
return dataset
def get_train_loader(dataset, height, width, batch_size, workers,
num_instances, iters):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.RandomHorizontalFlip(p=0.5),
T.Pad(10),
T.RandomCrop((height, width)),
T.ToTensor(),
normalizer,
T.RandomErasing(probability=0.5, mean=[0.485, 0.456, 0.406])
])
train_set = sorted(dataset.train, key=lambda x:x[1])
rmgs_flag = num_instances > 0
if rmgs_flag:
sampler = RandomMultipleGallerySampler(train_set, num_instances)
else:
sampler = None
train_loader = IterLoader(
DataLoader(Preprocessor(train_set, root=dataset.images_dir,
transform=train_transformer, mutual=True),
batch_size=batch_size, num_workers=workers, sampler=sampler,
shuffle=not rmgs_flag, pin_memory=True, drop_last=True), length=iters)
return train_loader
def get_test_loader(dataset, height, width, batch_size, workers, testset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
test_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.ToTensor(),
normalizer
])
if (testset is None):
testset = list(set(dataset.query) | set(dataset.gallery))
test_loader = DataLoader(
Preprocessor(testset, root=dataset.images_dir, transform=test_transformer),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
else:
test_loader = DataLoader(
Preprocessor(testset, root=dataset.images_dir, transform=test_transformer, mutual=False, cluster=True),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
return test_loader
def create_model(args):
model_1 = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=args.num_clusters)
model_2 = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=args.num_clusters)
model_1_ema = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=args.num_clusters)
model_2_ema = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=args.num_clusters)
model_1.cuda()
model_2.cuda()
model_1_ema.cuda()
model_2_ema.cuda()
model_1 = nn.DataParallel(model_1)
model_2 = nn.DataParallel(model_2)
model_1_ema = nn.DataParallel(model_1_ema)
model_2_ema = nn.DataParallel(model_2_ema)
initial_weights = load_checkpoint(args.init_1)
copy_state_dict(initial_weights['state_dict'], model_1)
copy_state_dict(initial_weights['state_dict'], model_1_ema)
model_1_ema.module.classifier.weight.data.copy_(model_1.module.classifier.weight.data)
initial_weights = load_checkpoint(args.init_2)
copy_state_dict(initial_weights['state_dict'], model_2)
copy_state_dict(initial_weights['state_dict'], model_2_ema)
model_2_ema.module.classifier.weight.data.copy_(model_2.module.classifier.weight.data)
for param in model_1_ema.parameters():
param.detach_()
for param in model_2_ema.parameters():
param.detach_()
return model_1, model_2, model_1_ema, model_2_ema
def main():
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1' # CUDA environment
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
main_worker(args)
def main_worker(args):
global best_mAP
cudnn.benchmark = True
sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
print("==========\nArgs:{}\n==========".format(args))
# Create data loaders
iters = args.iters if (args.iters>0) else None
dataset_target = get_data(args.dataset_target, args.data_dir)
test_loader_target = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers)
cluster_loader = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers, testset=dataset_target.train)
# Create model
model_1, model_2, model_1_ema, model_2_ema = create_model(args)
# Evaluator
evaluator_1_ema = Evaluator(model_1_ema)
evaluator_2_ema = Evaluator(model_2_ema)
for epoch in range(args.epochs):
dict_f, _ = extract_features(model_1_ema, cluster_loader, print_freq=50)
cf_1 = torch.stack(list(dict_f.values())).numpy()
dict_f, _ = extract_features(model_2_ema, cluster_loader, print_freq=50)
cf_2 = torch.stack(list(dict_f.values())).numpy()
cf = (cf_1+cf_2)/2
print('\n Clustering into {} classes \n'.format(args.num_clusters))
km = KMeans(n_clusters=args.num_clusters, random_state=args.seed, n_jobs=2).fit(cf)
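        # Re-initialize all four classifiers with the L2-normalized k-means centroids,
        # so the pseudo-label ids assigned below line up with classifier output channels.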
model_1.module.classifier.weight.data.copy_(torch.from_numpy(normalize(km.cluster_centers_, axis=1)).float().cuda())
model_2.module.classifier.weight.data.copy_(torch.from_numpy(normalize(km.cluster_centers_, axis=1)).float().cuda())
model_1_ema.module.classifier.weight.data.copy_(torch.from_numpy(normalize(km.cluster_centers_, axis=1)).float().cuda())
model_2_ema.module.classifier.weight.data.copy_(torch.from_numpy(normalize(km.cluster_centers_, axis=1)).float().cuda())
target_label = km.labels_
# change pseudo labels
for i in range(len(dataset_target.train)):
dataset_target.train[i] = list(dataset_target.train[i])
dataset_target.train[i][2] = int(target_label[i]) ## change dataset_target.train[i][1] --> [2]
dataset_target.train[i] = tuple(dataset_target.train[i])
# the place to re-compute cluster centers (e.g.500) with re-assigned pseudo labels
# based on the memory slot which contains the features of each target training images
train_loader_target = get_train_loader(dataset_target, args.height, args.width,
args.batch_size, args.workers, args.num_instances, iters)
# Optimizer
params = []
for key, value in model_1.named_parameters():
if not value.requires_grad:
continue
params += [{"params": [value], "lr": args.lr, "weight_decay": args.weight_decay}]
for key, value in model_2.named_parameters():
if not value.requires_grad:
continue
params += [{"params": [value], "lr": args.lr, "weight_decay": args.weight_decay}]
optimizer = torch.optim.Adam(params)
# Trainer
##
trainer = MMTTrainer(model_1, model_2, model_1_ema, model_2_ema,
num_cluster=args.num_clusters, alpha=args.alpha, cf=cf, f_memory_label=target_label)
##
train_loader_target.new_epoch()
trainer.train(epoch, train_loader_target, optimizer,
ce_soft_weight=args.soft_ce_weight, tri_soft_weight=args.soft_tri_weight,
print_freq=args.print_freq, train_iters=len(train_loader_target))
def save_model(model_ema, is_best, best_mAP, mid):
save_checkpoint({
'state_dict': model_ema.state_dict(),
'epoch': epoch + 1,
'best_mAP': best_mAP,
}, is_best, fpath=osp.join(args.logs_dir, 'model'+str(mid)+'_checkpoint.pth.tar'))
if ((epoch+1)%args.eval_step==0 or (epoch==args.epochs-1)):
mAP_1 = evaluator_1_ema.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=False)
mAP_2 = evaluator_2_ema.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=False)
is_best = (mAP_1>best_mAP) or (mAP_2>best_mAP)
best_mAP = max(mAP_1, mAP_2, best_mAP)
save_model(model_1_ema, (is_best and (mAP_1>mAP_2)), best_mAP, 1)
save_model(model_2_ema, (is_best and (mAP_1<=mAP_2)), best_mAP, 2)
print('\n * Finished epoch {:3d} model no.1 mAP: {:5.1%} model no.2 mAP: {:5.1%} best: {:5.1%}{}\n'.
format(epoch, mAP_1, mAP_2, best_mAP, ' *' if is_best else ''))
print ('Test on the best model.')
checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
model_1_ema.load_state_dict(checkpoint['state_dict'])
evaluator_1_ema.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="MMT Training")
# data
parser.add_argument('-dt', '--dataset-target', type=str, default='market1501',
choices=datasets.names())
parser.add_argument('-b', '--batch-size', type=int, default=64)
parser.add_argument('-j', '--workers', type=int, default=4)
parser.add_argument('--num-clusters', type=int, default=500)
parser.add_argument('--height', type=int, default=256,
help="input height")
parser.add_argument('--width', type=int, default=128,
help="input width")
parser.add_argument('--num-instances', type=int, default=4,
help="each minibatch consist of "
"(batch_size // num_instances) identities, and "
"each identity has num_instances instances, "
"default: 0 (NOT USE)")
# model
parser.add_argument('-a', '--arch', type=str, default='resnet50',
choices=models.names())
parser.add_argument('--features', type=int, default=0)
parser.add_argument('--dropout', type=float, default=0)
# optimizer
parser.add_argument('--lr', type=float, default=0.00035,
help="learning rate of new parameters, for pretrained "
"parameters it is 10 times smaller than this")
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--alpha', type=float, default=0.999)
parser.add_argument('--moving-avg-momentum', type=float, default=0)
parser.add_argument('--weight-decay', type=float, default=5e-4)
parser.add_argument('--soft-ce-weight', type=float, default=0.5)
parser.add_argument('--soft-tri-weight', type=float, default=0.8)
parser.add_argument('--epochs', type=int, default=40)
parser.add_argument('--iters', type=int, default=800)
# training configs
parser.add_argument('--init-1', type=str, default='', metavar='PATH')
parser.add_argument('--init-2', type=str, default='', metavar='PATH')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--print-freq', type=int, default=1)
parser.add_argument('--eval-step', type=int, default=1)
# path
working_dir = osp.dirname(osp.abspath(__file__))
parser.add_argument('--data-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'data'))
parser.add_argument('--logs-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'logs'))
#parser.add_argument('--memory_size', type=int, default=12936)
main()
| [
"torch.optim.Adam",
"torch.manual_seed",
"torch.nn.DataParallel"
] | 1.1.0 | Dingyuan-Zheng/ctf-UDA | 3e3c67f68d7eb0b52a16a259e5a77e153062c4fd |
1.6 | import torch
from torch import nn
from transformers import BertModel, ElectraModel
from transformers.models.bert.modeling_bert import BertLayer
from capreolus import ConfigOption, Dependency
from capreolus.reranker import Reranker
class PTParade_Class(nn.Module):
def __init__(self, extractor, config, *args, **kwargs):
super().__init__(*args, **kwargs)
self.extractor = extractor
self.config = config
if config["pretrained"] == "electra-base-msmarco":
self.bert = ElectraModel.from_pretrained("Capreolus/electra-base-msmarco")
elif config["pretrained"] == "bert-base-msmarco":
self.bert = BertModel.from_pretrained("Capreolus/bert-base-msmarco")
elif config["pretrained"] == "bert-base-uncased":
self.bert = BertModel.from_pretrained("bert-base-uncased")
else:
raise ValueError(
f"unsupported model: {config['pretrained']}; need to ensure correct tokenizers will be used before arbitrary hgf models are supported"
)
self.transformer_layer_1 = BertLayer(self.bert.config)
self.transformer_layer_2 = BertLayer(self.bert.config)
self.num_passages = extractor.config["numpassages"]
self.maxseqlen = extractor.config["maxseqlen"]
self.linear = nn.Linear(self.bert.config.hidden_size, 1)
if config["aggregation"] == "max":
raise NotImplementedError()
elif config["aggregation"] == "avg":
raise NotImplementedError()
elif config["aggregation"] == "attn":
raise NotImplementedError()
elif config["aggregation"] == "transformer":
self.aggregation = self.aggregate_using_transformer
input_embeddings = self.bert.get_input_embeddings()
# TODO hardcoded CLS token id
cls_token_id = torch.tensor([[101]])
self.initial_cls_embedding = input_embeddings(cls_token_id).view(1, self.bert.config.hidden_size)
self.full_position_embeddings = torch.zeros(
(1, self.num_passages + 1, self.bert.config.hidden_size), requires_grad=True, dtype=torch.float
)
torch.nn.init.normal_(self.full_position_embeddings, mean=0.0, std=0.02)
self.initial_cls_embedding = nn.Parameter(self.initial_cls_embedding, requires_grad=True)
self.full_position_embeddings = nn.Parameter(self.full_position_embeddings, requires_grad=True)
else:
raise ValueError(f"unknown aggregation type: {self.config['aggregation']}")
def aggregate_using_transformer(self, cls):
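        # cls: (batch_size * num_passages, hidden) passage-level [CLS] vectors from BERT.
        # Reshape them per document, prepend a learned document-level CLS embedding,
        # add learned position embeddings, run two extra transformer layers, and take
        # position 0 as the aggregated document representation.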
expanded_cls = cls.view(-1, self.num_passages, self.bert.config.hidden_size)
# TODO make sure batch size here is correct
batch_size = expanded_cls.shape[0]
tiled_initial_cls = self.initial_cls_embedding.repeat(batch_size, 1)
merged_cls = torch.cat((tiled_initial_cls.view(batch_size, 1, self.bert.config.hidden_size), expanded_cls), dim=1)
merged_cls = merged_cls + self.full_position_embeddings
(transformer_out_1,) = self.transformer_layer_1(merged_cls, None, None, None)
(transformer_out_2,) = self.transformer_layer_2(transformer_out_1, None, None, None)
aggregated = transformer_out_2[:, 0, :]
return aggregated
def forward(self, doc_input, doc_mask, doc_seg):
batch_size = doc_input.shape[0]
doc_input = doc_input.view((batch_size * self.num_passages, self.maxseqlen))
doc_mask = doc_mask.view((batch_size * self.num_passages, self.maxseqlen))
doc_seg = doc_seg.view((batch_size * self.num_passages, self.maxseqlen))
cls = self.bert(doc_input, attention_mask=doc_mask, token_type_ids=doc_seg)[0][:, 0, :]
aggregated = self.aggregation(cls)
return self.linear(aggregated)
@Reranker.register
class PTParade(Reranker):
"""
PyTorch implementation of PARADE.
PARADE: Passage Representation Aggregation for Document Reranking.
Canjia Li, Andrew Yates, Sean MacAvaney, Ben He, and Yingfei Sun. arXiv 2020.
https://arxiv.org/pdf/2008.09093.pdf
"""
module_name = "ptparade"
dependencies = [
Dependency(key="extractor", module="extractor", name="pooledbertpassage"),
Dependency(key="trainer", module="trainer", name="pytorch"),
]
config_spec = [
ConfigOption(
"pretrained", "bert-base-uncased", "Pretrained model: bert-base-uncased, bert-base-msmarco, or electra-base-msmarco"
),
ConfigOption("aggregation", "transformer"),
]
def build_model(self):
if not hasattr(self, "model"):
self.model = PTParade_Class(self.extractor, self.config)
return self.model
def score(self, d):
return [
self.model(d["pos_bert_input"], d["pos_mask"], d["pos_seg"]).view(-1),
self.model(d["neg_bert_input"], d["neg_mask"], d["neg_seg"]).view(-1),
]
def test(self, d):
return self.model(d["pos_bert_input"], d["pos_mask"], d["pos_seg"]).view(-1)
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Parameter",
"torch.nn.init.normal_",
"torch.tensor"
] | 1.6.0 | nimasadri11/capreolus | 27b081ec1a37d2af6afa6b61eb1cb7cc4ec9db1c |
1.7 |
import os
from base64 import b64encode
import yaml
import numpy as np
import matplotlib.pyplot as plt
import moviepy.editor
from imageio import imread, imsave
from omegaconf import OmegaConf
import torch
import torch.nn.functional as F
from taming.models.vqgan import VQModel  # assumed provider of VQModel used by load_vqgan below
def plot_text(txt, size=224):
fig = plt.figure(figsize=(1,1), dpi=size)
fontsize = size//len(txt) if len(txt) < 15 else 8
plt.text(0.5, 0.5, txt, fontsize=fontsize, ha='center', va='center', wrap=True)
plt.axis('off')
fig.tight_layout(pad=0)
fig.canvas.draw()
img = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))
return img
def txt_clean(txt):
return txt.translate(str.maketrans(dict.fromkeys(list("\n',.—|!?/:;\\"), ""))).replace(' ', '_').replace('"', '')
def basename(file):
return os.path.splitext(os.path.basename(file))[0]
def file_list(path, ext=None, subdir=None):
if subdir is True:
files = [os.path.join(dp, f) for dp, dn, fn in os.walk(path) for f in fn]
else:
files = [os.path.join(path, f) for f in os.listdir(path)]
if ext is not None:
if isinstance(ext, list):
files = [f for f in files if os.path.splitext(f.lower())[1][1:] in ext]
elif isinstance(ext, str):
files = [f for f in files if f.endswith(ext)]
else:
print(' Unknown extension/type for file list!')
return sorted([f for f in files if os.path.isfile(f)])
def img_list(path, subdir=None):
if subdir is True:
files = [os.path.join(dp, f) for dp, dn, fn in os.walk(path) for f in fn]
else:
files = [os.path.join(path, f) for f in os.listdir(path)]
files = [f for f in files if os.path.splitext(f.lower())[1][1:] in ['jpg', 'jpeg', 'png', 'ppm', 'tif']]
return sorted([f for f in files if os.path.isfile(f)])
def img_read(path):
img = imread(path)
    # expand grayscale (2-D or single-channel) images to 3 channels
    if (img.ndim == 2) or (img.shape[2] == 1):
        img = np.dstack((img,img,img))
# rgba to rgb
if img.shape[2] == 4:
img = img[:,:,:3]
return img
def img_save(path, img, norm=True):
    # scale float images (assumed in [0, 1]) to uint8 before saving
    if norm and not np.issubdtype(img.dtype, np.integer):
        img = (img*255).astype(np.uint8)
    imsave(path, img)
def minmax(x, use_torch=True):
    # (min, max) of a tensor as numpy scalars; use_torch selects torch vs numpy reductions
    if use_torch:
        mn = torch.min(x).detach().cpu().numpy()
        mx = torch.max(x).detach().cpu().numpy()
    else:
        mn = np.min(x.detach().cpu().numpy())
        mx = np.max(x.detach().cpu().numpy())
    return (mn, mx)
# Tiles an array around two points, allowing for pad lengths greater than the input length
# NB: if symm=True, every second tile is mirrored = messed up in GAN
# adapted from https://discuss.pytorch.org/t/symmetric-padding/19866/3
def tile_pad(xt, padding, symm=False):
h, w = xt.shape[-2:]
left, right, top, bottom = padding
def tile(x, minx, maxx):
rng = maxx - minx
if symm is True: # triangular reflection
double_rng = 2*rng
mod = np.fmod(x - minx, double_rng)
normed_mod = np.where(mod < 0, mod+double_rng, mod)
out = np.where(normed_mod >= rng, double_rng - normed_mod, normed_mod) + minx
else: # repeating tiles
mod = np.remainder(x - minx, rng)
out = mod + minx
return np.array(out, dtype=x.dtype)
x_idx = np.arange(-left, w+right)
y_idx = np.arange(-top, h+bottom)
x_pad = tile(x_idx, -0.5, w-0.5)
y_pad = tile(y_idx, -0.5, h-0.5)
xx, yy = np.meshgrid(x_pad, y_pad)
return xt[..., yy, xx]
def pad_up_to(x, size, type='centr'):
sh = x.shape[2:][::-1]
if list(x.shape[2:]) == list(size): return x
padding = []
for i, s in enumerate(size[::-1]):
if 'side' in type.lower():
padding = padding + [0, s-sh[i]]
else: # centr
p0 = (s-sh[i]) // 2
p1 = s-sh[i] - p0
padding = padding + [p0,p1]
y = tile_pad(x, padding, symm = ('symm' in type.lower()))
return y
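
# Illustrative sketch (commented): tiling a small 2x2 map up to 4x4 with repeating tiles.
#   xt = torch.arange(4.).view(1, 1, 2, 2)
#   padded = pad_up_to(xt, (4, 4), type='centr')   # -> shape (1, 1, 4, 4)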
def load_config(config_path, display=False):
config = OmegaConf.load(config_path)
if display:
print(yaml.dump(OmegaConf.to_container(config)))
return config
def load_vqgan(config, ckpt_path=None):
model = VQModel(**config.model.params)
if ckpt_path is not None:
sd = torch.load(ckpt_path, map_location="cpu")["state_dict"]
missing, unexpected = model.load_state_dict(sd, strict=False)
return model.eval()
def vqgan_image(model, z):
x = model.decode(z)
x = (x+1.)/2.
return x
def makevid(seq_dir, size=None):
out_video = seq_dir + '.mp4'
moviepy.editor.ImageSequenceClip(img_list(seq_dir), fps=25).write_videofile(out_video, verbose=False)
data_url = "data:video/mp4;base64," + b64encode(open(out_video,'rb').read()).decode()
wh = '' if size is None else 'width=%d height=%d' % (size, size)
return """<video %s controls><source src="%s" type="video/mp4"></video>""" % (wh, data_url) | [
"torch.min",
"torch.max",
"torch.load"
] | 1.7.1 | Adamkomar95/gans-clip-pw | 14694abd3a793b3e0fdfed76e2e12908e91ea484 |
1.7 | import torch
import math
from torch_geometric.nn.pool import fps
from lightconvpoint.knn import knn
import importlib
knn_c_func_spec = importlib.util.find_spec('lightconvpoint.knn_c_func')
if knn_c_func_spec is not None:
knn_c_func = importlib.util.module_from_spec(knn_c_func_spec)
knn_c_func_spec.loader.exec_module(knn_c_func)
def sampling_fps(points: torch.Tensor, nqueries: int):
if knn_c_func_spec is not None:
return knn_c_func.sampling_fps(points, nqueries)
bs, dim, nx = points.shape
ratio = nqueries / nx
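    # torch_geometric's fps operates on a flat (N, dim) point set with a per-point
    # batch index, so flatten the (bs, dim, nx) tensor and tag each point with its batch id.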
batch_x = torch.arange(0, bs, dtype=torch.long, device=points.device).unsqueeze(1).expand(bs,nx)
x = points.transpose(1,2).reshape(-1, dim)
batch_x = batch_x.view(-1)
indices_queries = fps(x, batch_x, ratio)
indices_queries = indices_queries.view(bs, -1)
assert(indices_queries.shape[1] == nqueries)
return indices_queries
def sampling_knn_fps(points: torch.Tensor, nqueries: int, K: int):
if knn_c_func_spec is not None:
return knn_c_func.sampling_knn_fps(points, nqueries, K)
bs, dim, nx = points.shape
ratio = nqueries / nx
batch_x = torch.arange(0, bs, dtype=torch.long, device=points.device).unsqueeze(1).expand(bs,nx)
x = points.transpose(1,2).reshape(-1, dim)
batch_x = batch_x.view(-1)
indices_queries = fps(x, batch_x, ratio)
points_queries = x[indices_queries]
indices_queries = indices_queries.view(bs, -1)
points_queries = points_queries.view(bs,-1,3)
points_queries = points_queries.transpose(1,2)
assert(indices_queries.shape[1] == nqueries)
indices_knn = knn(points, points_queries, K)
return indices_queries, indices_knn, points_queries | [
"torch.arange"
] | 1.7.1 | valeoai/3DGenZ | 3368585e10f127f7a0d71af98994a6cff5235dab |
0.4 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 19 18:40:39 2020
@author: kratochvila
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from base.base_net import BaseNet
class MY_LeNet(BaseNet):
def __init__(self):
super().__init__()
self.rep_dim = 128
self.pool = nn.MaxPool2d(2, 2)
self.conv1 = nn.Conv2d(3, 32, 5, bias=False, padding=2)
self.bn2d1 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
self.conv2 = nn.Conv2d(32, 64, 5, bias=False, padding=2)
self.bn2d2 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
self.conv3 = nn.Conv2d(64, 128, 5, bias=False, padding=2)
self.bn2d3 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
self.fc1 = nn.Linear(128 * 80 * 60, self.rep_dim, bias=False) # 20 * 15
def forward(self, x):
x = self.conv1(x)
x = self.pool(F.leaky_relu(self.bn2d1(x)))
x = self.conv2(x)
x = self.pool(F.leaky_relu(self.bn2d2(x)))
x = self.conv3(x)
x = self.pool(F.leaky_relu(self.bn2d3(x)))
x = x.view(x.size(0), -1)
x = self.fc1(x)
return x
class MY_LeNet_Autoencoder(BaseNet):
def __init__(self):
super().__init__()
self.rep_dim = 128
self.pool = nn.MaxPool2d(2, 2)
# Encoder (must match the Deep SVDD network above)
self.conv1 = nn.Conv2d(3, 32, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.conv1.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn2d1 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
self.conv2 = nn.Conv2d(32, 64, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.conv2.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn2d2 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
self.conv3 = nn.Conv2d(64, 128, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.conv3.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn2d3 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
self.fc1 = nn.Linear(128 * 80 * 60, self.rep_dim, bias=False)
self.bn1d = nn.BatchNorm1d(self.rep_dim, eps=1e-04, affine=False)
# Decoder
self.deconv1 = nn.ConvTranspose2d(int(self.rep_dim / (4 * 4)), 128, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.deconv1.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn2d4 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
self.deconv2 = nn.ConvTranspose2d(128, 64, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.deconv2.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn2d5 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
self.deconv3 = nn.ConvTranspose2d(64, 32, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.deconv3.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn2d6 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
self.deconv4 = nn.ConvTranspose2d(32, 3, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.deconv4.weight, gain=nn.init.calculate_gain('leaky_relu'))
def forward(self, x):
x = self.conv1(x)
x = self.pool(F.leaky_relu(self.bn2d1(x)))
x = self.conv2(x)
x = self.pool(F.leaky_relu(self.bn2d2(x)))
x = self.conv3(x)
x = self.pool(F.leaky_relu(self.bn2d3(x)))
x = x.view(x.size(0), -1)
x = self.bn1d(self.fc1(x))
x = x.view(x.size(0), int(self.rep_dim / (4 * 4)), 4, 4)
x = F.leaky_relu(x)
x = self.deconv1(x)
x = F.interpolate(F.leaky_relu(self.bn2d4(x)), scale_factor=2)
x = self.deconv2(x)
x = F.interpolate(F.leaky_relu(self.bn2d5(x)), scale_factor=2)
x = self.deconv3(x)
x = F.interpolate(F.leaky_relu(self.bn2d6(x)), scale_factor=2)
x = self.deconv4(x)
x = torch.sigmoid(x)
return x
| [
"torch.nn.Linear",
"torch.sigmoid",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d",
"torch.nn.init.calculate_gain",
"torch.nn.functional.leaky_relu"
] | 0.4.1 | LukasKratochvila/Deep-SVDD-PyTorch | a94bd9b6be4d953706daf969b061ddf55d6cbf4c |
1.6 | import unittest
from onmt.translate import GeneratorLM
import torch
class TestGeneratorLM(unittest.TestCase):
def test_split_src_to_prevent_padding_target_prefix_is_none_when_equal_size( # noqa: E501
self,
):
src = torch.randint(0, 10, (5, 6))
src_lengths = 5 * torch.ones(5)
(
src,
src_lengths,
target_prefix,
) = GeneratorLM.split_src_to_prevent_padding(src, src_lengths)
self.assertIsNone(target_prefix)
def test_split_src_to_prevent_padding_target_prefix_is_ok_when_different_size( # noqa: E501
self,
):
default_length = 5
src = torch.randint(0, 10, (default_length, 6))
src_lengths = default_length * torch.ones(6, dtype=torch.int)
new_length = 4
src_lengths[1] = new_length
(
src,
src_lengths,
target_prefix,
) = GeneratorLM.split_src_to_prevent_padding(src, src_lengths)
self.assertTupleEqual(src.shape, (new_length, 6))
self.assertTupleEqual(target_prefix.shape, (1, 6))
self.assertTrue(
src_lengths.equal(new_length * torch.ones(6, dtype=torch.int))
)
| [
"torch.randint",
"torch.ones"
] | 1.6.0 | l-k-11235/OpenNMT-py | 4815f07fcd482af9a1fe1d3b620d144197178bc5 |
1.6 | import torch
class PenaltyBuilder(object):
"""Returns the Length and Coverage Penalty function for Beam Search.
Args:
length_pen (str): option name of length pen
cov_pen (str): option name of cov pen
Attributes:
has_cov_pen (bool): Whether coverage penalty is None (applying it
is a no-op). Note that the converse isn't true. Setting beta
to 0 should force coverage length to be a no-op.
has_len_pen (bool): Whether length penalty is None (applying it
is a no-op). Note that the converse isn't true. Setting alpha
to 1 should force length penalty to be a no-op.
coverage_penalty (callable[[FloatTensor, float], FloatTensor]):
Calculates the coverage penalty.
length_penalty (callable[[int, float], float]): Calculates
the length penalty.
"""
def __init__(self, cov_pen, length_pen):
self.has_cov_pen = not self._pen_is_none(cov_pen)
self.coverage_penalty = self._coverage_penalty(cov_pen)
self.has_len_pen = not self._pen_is_none(length_pen)
self.length_penalty = self._length_penalty(length_pen)
@staticmethod
def _pen_is_none(pen):
return pen == "none" or pen is None
def _coverage_penalty(self, cov_pen):
if cov_pen == "wu":
return self.coverage_wu
elif cov_pen == "summary":
return self.coverage_summary
elif self._pen_is_none(cov_pen):
return self.coverage_none
else:
raise NotImplementedError("No '{:s}' coverage penalty.".format(
cov_pen))
def _length_penalty(self, length_pen):
if length_pen == "wu":
return self.length_wu
elif length_pen == "avg":
return self.length_average
elif self._pen_is_none(length_pen):
return self.length_none
else:
raise NotImplementedError("No '{:s}' length penalty.".format(
length_pen))
# Below are all the different penalty terms implemented so far.
# Subtract coverage penalty from topk log probs.
# Divide topk log probs by length penalty.
def coverage_wu(self, cov, beta=0.):
"""GNMT coverage re-ranking score.
See "Google's Neural Machine Translation System" :cite:`wu2016google`.
``cov`` is expected to be sized ``(*, seq_len)``, where ``*`` is
probably ``batch_size x beam_size`` but could be several
dimensions like ``(batch_size, beam_size)``. If ``cov`` is attention,
then the ``seq_len`` axis probably sums to (almost) 1.
"""
penalty = -torch.min(cov, cov.clone().fill_(1.0)).log().sum(-1)
return beta * penalty
def coverage_summary(self, cov, beta=0.):
"""Our summary penalty."""
penalty = torch.max(cov, cov.clone().fill_(1.0)).sum(-1)
penalty -= cov.size(-1)
return beta * penalty
def coverage_none(self, cov, beta=0.):
"""Returns zero as penalty"""
none = torch.zeros((1,), device=cov.device,
dtype=torch.float)
if cov.dim() == 3:
none = none.unsqueeze(0)
return none
def length_wu(self, cur_len, alpha=0.):
"""GNMT length re-ranking score.
See "Google's Neural Machine Translation System" :cite:`wu2016google`.
"""
return ((5 + cur_len) / 6.0) ** alpha
def length_average(self, cur_len, alpha=0.):
"""Returns the current sequence length."""
return cur_len
def length_none(self, cur_len, alpha=0.):
"""Returns unmodified scores."""
return 1.0
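
# Illustrative usage sketch (assumes only torch and the class above):
if __name__ == "__main__":
    builder = PenaltyBuilder(cov_pen="wu", length_pen="wu")
    attn = torch.full((2, 5, 7), 1.0 / 7)            # fake attention over a 7-token source
    cov = attn.sum(dim=1)                            # accumulated coverage, shape (2, 7)
    print(builder.coverage_penalty(cov, beta=0.2))   # GNMT coverage penalty per beam
    print(builder.length_penalty(5, alpha=0.6))      # GNMT length penalty for length 5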
| [
"torch.zeros"
] | 1.6.0 | l-k-11235/OpenNMT-py | 4815f07fcd482af9a1fe1d3b620d144197178bc5 |
1.0 | from torch import nn
class PositionWise(nn.Module):
def __init__(self, dim_m, dim_i, dropout=0.1):
"""Position-wise Feed-Forward Network.
Args:
dim_m (int): input and output dimension.
dim_i (int): inner dimension.
dropout (float, optional): dropout probability.
Inputs:
- **input** of shape `(batch, *, dim_m)`: a float tensor.
Outputs:
- **output** of shape `(batch, *, dim_m)`: a float tensor.
"""
super(PositionWise, self).__init__()
self.feedforward = nn.Sequential(
nn.Linear(dim_m, dim_i), nn.ReLU(), nn.Linear(dim_i, dim_m),
nn.Dropout(dropout))
self.normalization = nn.LayerNorm(dim_m, eps=1e-12)
def forward(self, input):
# There's nothing difficult here.
residual = input
output = self.feedforward(input)
output = self.normalization(output + residual)
return output
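
# Illustrative usage sketch (assumes only torch and the module above):
if __name__ == "__main__":
    import torch
    ffn = PositionWise(dim_m=16, dim_i=64)
    out = ffn(torch.randn(2, 5, 16))
    print(out.shape)  # torch.Size([2, 5, 16]) -- the input shape is preserved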
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.nn.ReLU"
] | 1.0.0 | khucnam/Efflux_TransVAE | 7da1cc614f016d5520648f4853e34e2362181aa7 |
1.4 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import shutil
import tempfile
import unittest
from test.generic.config_utils import get_fast_test_task_config, get_test_task_config
from test.generic.utils import compare_model_state, compare_samples, compare_states
import torch
from classy_vision.dataset import build_dataset
from classy_vision.generic.util import get_checkpoint_dict
from classy_vision.hooks import CheckpointHook, LossLrMeterLoggingHook
from classy_vision.losses import build_loss
from classy_vision.models import build_model
from classy_vision.optim import build_optimizer
from classy_vision.tasks import ClassificationTask, build_task
from classy_vision.trainer import LocalTrainer
class TestClassificationTask(unittest.TestCase):
def _compare_model_state(self, model_state_1, model_state_2, check_heads=True):
compare_model_state(self, model_state_1, model_state_2, check_heads)
def _compare_samples(self, sample_1, sample_2):
compare_samples(self, sample_1, sample_2)
def _compare_states(self, state_1, state_2, check_heads=True):
compare_states(self, state_1, state_2)
def setUp(self):
# create a base directory to write checkpoints to
self.base_dir = tempfile.mkdtemp()
def tearDown(self):
# delete all the temporary data created
shutil.rmtree(self.base_dir)
def test_build_task(self):
config = get_test_task_config()
task = build_task(config)
self.assertTrue(isinstance(task, ClassificationTask))
def test_hooks_config_builds_correctly(self):
config = get_test_task_config()
config["hooks"] = [{"name": "loss_lr_meter_logging"}]
task = build_task(config)
self.assertTrue(len(task.hooks) == 1)
self.assertTrue(isinstance(task.hooks[0], LossLrMeterLoggingHook))
def test_get_state(self):
config = get_test_task_config()
loss = build_loss(config["loss"])
task = (
ClassificationTask()
.set_num_epochs(1)
.set_loss(loss)
.set_model(build_model(config["model"]))
.set_optimizer(build_optimizer(config["optimizer"]))
)
for phase_type in ["train", "test"]:
dataset = build_dataset(config["dataset"][phase_type])
task.set_dataset(dataset, phase_type)
task.prepare()
task = build_task(config)
task.prepare()
def test_synchronize_losses_non_distributed(self):
"""
Tests that synchronize losses has no side effects in a non-distributed setting.
"""
test_config = get_fast_test_task_config()
task = build_task(test_config)
task.prepare()
old_losses = copy.deepcopy(task.losses)
task.synchronize_losses()
self.assertEqual(old_losses, task.losses)
def test_synchronize_losses_when_losses_empty(self):
config = get_fast_test_task_config()
task = build_task(config)
task.prepare()
task.set_use_gpu(torch.cuda.is_available())
# Losses should be empty when creating task
self.assertEqual(len(task.losses), 0)
task.synchronize_losses()
def test_checkpointing(self):
"""
Tests checkpointing by running train_steps to make sure the train_steps
run the same way after loading from a checkpoint.
"""
config = get_fast_test_task_config()
task = build_task(config).set_hooks([LossLrMeterLoggingHook()])
task_2 = build_task(config).set_hooks([LossLrMeterLoggingHook()])
task.set_use_gpu(torch.cuda.is_available())
# prepare the tasks for the right device
task.prepare()
# test in both train and test mode
for _ in range(2):
task.advance_phase()
# set task's state as task_2's checkpoint
task_2._set_checkpoint_dict(get_checkpoint_dict(task, {}, deep_copy=True))
task_2.prepare()
# task 2 should have the same state
self._compare_states(task.get_classy_state(), task_2.get_classy_state())
# this tests that both states' iterators return the same samples
sample = next(task.get_data_iterator())
sample_2 = next(task_2.get_data_iterator())
self._compare_samples(sample, sample_2)
# test that the train step runs the same way on both states
# and the loss remains the same
task.train_step()
task_2.train_step()
self._compare_states(task.get_classy_state(), task_2.get_classy_state())
def test_final_train_checkpoint(self):
"""Test that a train phase checkpoint with a where of 1.0 can be loaded"""
config = get_fast_test_task_config()
task = build_task(config).set_hooks(
[CheckpointHook(self.base_dir, {}, phase_types=["train"])]
)
task_2 = build_task(config)
task.set_use_gpu(torch.cuda.is_available())
trainer = LocalTrainer()
trainer.train(task)
# make sure fetching the where raises an exception, which means that
# where is >= 1.0
with self.assertRaises(Exception):
task.where
# set task_2's state as task's final train checkpoint
task_2.set_checkpoint(self.base_dir)
task_2.prepare()
# we should be able to train the task
trainer.train(task_2)
def test_test_only_checkpointing(self):
"""
Tests checkpointing by running train_steps to make sure the
train_steps run the same way after loading from a training
task checkpoint on a test_only task.
"""
train_config = get_fast_test_task_config()
train_config["num_epochs"] = 10
test_config = get_fast_test_task_config()
test_config["test_only"] = True
train_task = build_task(train_config).set_hooks([LossLrMeterLoggingHook()])
test_only_task = build_task(test_config).set_hooks([LossLrMeterLoggingHook()])
# prepare the tasks for the right device
train_task.prepare()
# test in both train and test mode
trainer = LocalTrainer()
trainer.train(train_task)
# set task's state as task_2's checkpoint
test_only_task._set_checkpoint_dict(
get_checkpoint_dict(train_task, {}, deep_copy=True)
)
test_only_task.prepare()
test_state = test_only_task.get_classy_state()
# We expect the phase idx to be different for a test only task
self.assertEqual(test_state["phase_idx"], -1)
# We expect that test only state is test, no matter what train state is
self.assertFalse(test_state["train"])
# Num updates should be 0
self.assertEqual(test_state["num_updates"], 0)
# train_phase_idx should -1
self.assertEqual(test_state["train_phase_idx"], -1)
# Verify task will run
trainer = LocalTrainer()
trainer.train(test_only_task)
def test_test_only_task(self):
"""
Tests the task in test mode by running train_steps
to make sure the train_steps run as expected on a
test_only task
"""
test_config = get_fast_test_task_config()
test_config["test_only"] = True
# delete train dataset
del test_config["dataset"]["train"]
test_only_task = build_task(test_config).set_hooks([LossLrMeterLoggingHook()])
test_only_task.prepare()
test_state = test_only_task.get_classy_state()
# We expect that test only state is test, no matter what train state is
self.assertFalse(test_state["train"])
# Num updates should be 0
self.assertEqual(test_state["num_updates"], 0)
# Verify task will run
trainer = LocalTrainer()
trainer.train(test_only_task)
def test_train_only_task(self):
"""
Tests that the task runs when only a train dataset is specified.
"""
test_config = get_fast_test_task_config()
# delete the test dataset from the config
del test_config["dataset"]["test"]
task = build_task(test_config).set_hooks([LossLrMeterLoggingHook()])
task.prepare()
# verify the the task can still be trained
trainer = LocalTrainer()
trainer.train(task)
@unittest.skipUnless(torch.cuda.is_available(), "This test needs a gpu to run")
def test_checkpointing_different_device(self):
config = get_fast_test_task_config()
task = build_task(config)
task_2 = build_task(config)
for use_gpu in [True, False]:
task.set_use_gpu(use_gpu)
task.prepare()
# set task's state as task_2's checkpoint
task_2._set_checkpoint_dict(get_checkpoint_dict(task, {}, deep_copy=True))
# we should be able to run the trainer using state from a different device
trainer = LocalTrainer()
task_2.set_use_gpu(not use_gpu)
trainer.train(task_2)
| [
"torch.cuda.is_available"
] | 1.4 | jlin27/ClassyVision-1 | 113ddb0b66471eb84add9af53751d9067786a7f0 |
1.6 | import os
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader
from pl_bolts.datasets.imagenet_dataset import UnlabeledImagenet
from pl_bolts.transforms.dataset_normalizations import imagenet_normalization
from pl_bolts.utils import _TORCHVISION_AVAILABLE
from pl_bolts.utils.warnings import warn_missing_pkg
if _TORCHVISION_AVAILABLE:
from torchvision import transforms as transform_lib
else:
warn_missing_pkg('torchvision') # pragma: no-cover
class SSLImagenetDataModule(LightningDataModule): # pragma: no cover
name = 'imagenet'
def __init__(
self,
data_dir,
meta_dir=None,
num_workers=16,
batch_size: int = 32,
shuffle: bool = False,
pin_memory: bool = False,
drop_last: bool = False,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
if not _TORCHVISION_AVAILABLE:
raise ModuleNotFoundError( # pragma: no-cover
'You want to use ImageNet dataset loaded from `torchvision` which is not installed yet.'
)
self.data_dir = data_dir
self.num_workers = num_workers
self.meta_dir = meta_dir
self.batch_size = batch_size
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
@property
def num_classes(self):
return 1000
def _verify_splits(self, data_dir, split):
dirs = os.listdir(data_dir)
if split not in dirs:
raise FileNotFoundError(
f'a {split} Imagenet split was not found in {data_dir}, make sure the'
f' folder contains a subfolder named {split}'
)
def prepare_data(self):
# imagenet cannot be downloaded... must provide path to folder with the train/val splits
self._verify_splits(self.data_dir, 'train')
self._verify_splits(self.data_dir, 'val')
for split in ['train', 'val']:
files = os.listdir(os.path.join(self.data_dir, split))
if 'meta.bin' not in files:
raise FileNotFoundError(
"""
no meta.bin present. Imagenet is no longer automatically downloaded by PyTorch.
To get imagenet:
1. download yourself from http://www.image-net.org/challenges/LSVRC/2012/downloads
2. download the devkit (ILSVRC2012_devkit_t12.tar.gz)
3. generate the meta.bin file using the devkit
4. copy the meta.bin file into both train and val split folders
To generate the meta.bin do the following:
                    from pl_bolts.datasets.imagenet_dataset import UnlabeledImagenet
path = '/path/to/folder/with/ILSVRC2012_devkit_t12.tar.gz/'
UnlabeledImagenet.generate_meta_bins(path)
"""
)
def train_dataloader(self, num_images_per_class=-1, add_normalize=False):
transforms = self._default_transforms() if self.train_transforms is None else self.train_transforms
dataset = UnlabeledImagenet(
self.data_dir,
num_imgs_per_class=num_images_per_class,
meta_dir=self.meta_dir,
split='train',
transform=transforms
)
loader = DataLoader(
dataset,
batch_size=self.batch_size,
shuffle=self.shuffle,
num_workers=self.num_workers,
drop_last=self.drop_last,
pin_memory=self.pin_memory
)
return loader
def val_dataloader(self, num_images_per_class=50, add_normalize=False):
transforms = self._default_transforms() if self.val_transforms is None else self.val_transforms
dataset = UnlabeledImagenet(
self.data_dir,
num_imgs_per_class_val_split=num_images_per_class,
meta_dir=self.meta_dir,
split='val',
transform=transforms
)
loader = DataLoader(
dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
drop_last=self.drop_last,
pin_memory=self.pin_memory
)
return loader
def test_dataloader(self, num_images_per_class, add_normalize=False):
transforms = self._default_transforms() if self.test_transforms is None else self.test_transforms
dataset = UnlabeledImagenet(
self.data_dir,
num_imgs_per_class=num_images_per_class,
meta_dir=self.meta_dir,
split='test',
transform=transforms
)
loader = DataLoader(
dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
drop_last=self.drop_last,
pin_memory=self.pin_memory
)
return loader
    def _default_transforms(self):
        imagenet_transforms = transform_lib.Compose([transform_lib.ToTensor(), imagenet_normalization()])
        return imagenet_transforms
| [
"torch.utils.data.DataLoader"
] | 1.6 | btwardow/pytorch-lightning-bolts | 4a7b6ffe0fcbeee37f8bac6af1e926469b2052bf |
1.8 | import torch.nn as nn
import torch.nn.functional as F
import torch
def get_loss(loss_type):
if loss_type == 'focal_loss':
return FocalLoss(ignore_index=255, size_average=True)
elif loss_type == 'cross_entropy':
return nn.CrossEntropyLoss(ignore_index=255, reduction='mean')
class FocalLoss(nn.Module):
def __init__(self, alpha=1, gamma=2, size_average=True, ignore_index=255):
super(FocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.ignore_index=ignore_index
self.size_average=size_average
def forward(self, inputs, targets):
ce_loss = F.cross_entropy(inputs, targets, reduction='none', ignore_index=self.ignore_index)
pt = torch.exp(-ce_loss)
focal_loss = self.alpha * (1-pt)**self.gamma * ce_loss
if self.size_average:
return focal_loss.mean()
else:
return focal_loss.sum()
class BCEWithLogitsLossWithIgnoreIndex(nn.Module):
def __init__(self, reduction='mean', ignore_index=255):
super().__init__()
self.reduction = reduction
self.ignore_index = ignore_index
def forward(self, inputs, targets):
# inputs of size B x C x H x W
n_cl = torch.tensor(inputs.shape[1]).to(inputs.device)
labels_new = torch.where(targets != self.ignore_index, targets, n_cl)
# replace ignore with numclasses + 1 (to enable one hot and then remove it)
targets = F.one_hot(labels_new, inputs.shape[1] + 1).float().permute(0, 3, 1, 2)
targets = targets[:, :inputs.shape[1], :, :] # remove 255 from 1hot
# targets is B x C x H x W so shape[1] is C
loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction='none')
# loss has shape B x C x H x W
loss = loss.sum(dim=1) # sum the contributions of the classes
if self.reduction == 'mean':
# if targets have only zeros, we skip them
return torch.masked_select(loss, targets.sum(dim=1) != 0).mean()
elif self.reduction == 'sum':
return torch.masked_select(loss, targets.sum(dim=1) != 0).sum()
else:
return loss * targets.sum(dim=1)
class IcarlLoss(nn.Module):
def __init__(self, reduction='mean', ignore_index=255, bkg=False):
super().__init__()
self.reduction = reduction
self.ignore_index = ignore_index
self.bkg = bkg
def forward(self, inputs, targets, output_old):
# inputs of size B x C x H x W
n_cl = torch.tensor(inputs.shape[1]).to(inputs.device)
labels_new = torch.where(targets != self.ignore_index, targets, n_cl)
# replace ignore with numclasses + 1 (to enable one hot and then remove it)
targets = F.one_hot(labels_new, inputs.shape[1] + 1).float().permute(0, 3, 1, 2)
targets = targets[:, :inputs.shape[1], :, :] # remove 255 from 1hot
if self.bkg:
targets[:, 1:output_old.shape[1], :, :] = output_old[:, 1:, :, :]
else:
targets[:, :output_old.shape[1], :, :] = output_old
# targets is B x C x H x W so shape[1] is C
loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction='none')
# loss has shape B x C x H x W
loss = loss.sum(dim=1) # sum the contributions of the classes
if self.reduction == 'mean':
# if targets have only zeros, we skip them
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
else:
return loss
class UnbiasedCrossEntropy(nn.Module):
def __init__(self, old_cl=None, reduction='mean', ignore_index=255):
super().__init__()
self.reduction = reduction
self.ignore_index = ignore_index
self.old_cl = old_cl
def forward(self, inputs, targets):
old_cl = self.old_cl
outputs = torch.zeros_like(inputs) # B, C (1+V+N), H, W
den = torch.logsumexp(inputs, dim=1) # B, H, W den of softmax
outputs[:, 0] = torch.logsumexp(inputs[:, 0:old_cl], dim=1) - den # B, H, W p(O)
outputs[:, old_cl:] = inputs[:, old_cl:] - den.unsqueeze(dim=1) # B, N, H, W p(N_i)
labels = targets.clone() # B, H, W
labels[targets < old_cl] = 0 # just to be sure that all labels old belongs to zero
loss = F.nll_loss(outputs, labels, ignore_index=self.ignore_index, reduction=self.reduction)
return loss
class UnbiasedFocalLoss(nn.Module):
def __init__(self, old_cl=None, reduction="mean", ignore_index=255, alpha=1, gamma=2):
super().__init__()
self.reduction = reduction
self.ignore_index = ignore_index
self.old_cl = old_cl
self.alpha = alpha
self.gamma = gamma
def forward(self, inputs, targets):
old_cl = self.old_cl
outputs = torch.zeros_like(inputs) # B, C (1+V+N), H, W
den = torch.logsumexp(inputs, dim=1) # B, H, W den of softmax
outputs[:, 0] = torch.logsumexp(inputs[:, 0:old_cl], dim=1) - den # B, H, W p(O)
outputs[:, old_cl:] = inputs[:, old_cl:] - den.unsqueeze(dim=1) # B, N, H, W p(N_i)
labels = targets.clone() # B, H, W
labels[targets < old_cl] = 0 # just to be sure that all labels old belongs to zero
ce = F.nll_loss(outputs, labels, ignore_index=self.ignore_index, reduction="none")
pt = torch.exp(-ce)
loss = self.alpha * (1-pt)**self.gamma * ce
return loss
class KnowledgeDistillationLoss(nn.Module):
def __init__(self, reduction='mean', alpha=1.):
super().__init__()
self.reduction = reduction
self.alpha = alpha
def forward(self, inputs, targets, mask=None):
inputs = inputs.narrow(1, 0, targets.shape[1])
outputs = torch.log_softmax(inputs, dim=1)
labels = torch.softmax(targets * self.alpha, dim=1)
loss = (outputs * labels).mean(dim=1)
if mask is not None:
loss = loss * mask.float()
if self.reduction == 'mean':
outputs = -torch.mean(loss)
elif self.reduction == 'sum':
outputs = -torch.sum(loss)
else:
outputs = -loss
return outputs
class UnbiasedKnowledgeDistillationLoss(nn.Module):
def __init__(self, reduction='mean', alpha=1.):
super().__init__()
self.reduction = reduction
self.alpha = alpha
def forward(self, inputs, targets, mask=None):
new_cl = inputs.shape[1] - targets.shape[1]
targets = targets * self.alpha
new_bkg_idx = torch.tensor([0] + [x for x in range(targets.shape[1], inputs.shape[1])]).to(inputs.device)
den = torch.logsumexp(inputs, dim=1) # B, H, W
outputs_no_bgk = inputs[:, 1:-new_cl] - den.unsqueeze(dim=1) # B, OLD_CL, H, W
outputs_bkg = torch.logsumexp(torch.index_select(inputs, index=new_bkg_idx, dim=1), dim=1) - den # B, H, W
labels = torch.softmax(targets, dim=1) # B, BKG + OLD_CL, H, W
# make the average on the classes 1/n_cl \sum{c=1..n_cl} L_c
loss = (labels[:, 0] * outputs_bkg + (labels[:, 1:] * outputs_no_bgk).sum(dim=1)) / targets.shape[1]
if mask is not None:
loss = loss * mask.float()
if self.reduction == 'mean':
outputs = -torch.mean(loss)
elif self.reduction == 'sum':
outputs = -torch.sum(loss)
else:
outputs = -loss
return outputs
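
# Illustrative shape check (assumes only torch and the classes above):
# UnbiasedCrossEntropy expects B x C x H x W logits and B x H x W integer labels;
# with old_cl=5, any label below 5 is folded into the background logit group.
if __name__ == "__main__":
    logits = torch.randn(2, 8, 4, 4)          # 8 classes in total, the first 5 are "old"
    labels = torch.randint(0, 8, (2, 4, 4))
    print(UnbiasedCrossEntropy(old_cl=5)(logits, labels).item())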
| [
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.nn.functional.one_hot",
"torch.log_softmax",
"torch.softmax",
"torch.logsumexp",
"torch.sum",
"torch.mean",
"torch.nn.functional.cross_entropy",
"torch.tensor",
"torch.zeros_like",
"torch.nn.functional.nll_loss",
"torch.exp",
"torch.nn.CrossEntropyLoss",
"torch.where",
"torch.index_select"
] | 1.8.1 | edornd/satellite-mib | a4423dc866ecfb77dc62548764917c048006dd8a |
0.4 | import os
import sys
import yaml
import time
import shutil
import torch
import random
import argparse
import datetime
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
# import torchvision.models as models
# import torchvision
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from mpl_toolkits.mplot3d import Axes3D
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from PIL import Image
# from visdom import Visdom
_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'utils')
sys.path.append(_path)
from torch.utils import data
from tqdm import tqdm
from data import create_dataset
from models import create_model
from utils.utils import get_logger
from augmentations import get_composed_augmentations
from models.adaptation_model import CustomModel, CustomMetrics
from optimizers import get_optimizer
from schedulers import get_scheduler
from metrics import runningScore, averageMeter
from loss import get_loss_function
from utils import sync_batchnorm
from tensorboardX import SummaryWriter
def test(cfg, writer, logger):
torch.manual_seed(cfg.get('seed', 1337))
torch.cuda.manual_seed(cfg.get('seed', 1337))
np.random.seed(cfg.get('seed', 1337))
random.seed(cfg.get('seed', 1337))
## create dataset
default_gpu = cfg['model']['default_gpu']
device = torch.device("cuda:{}".format(default_gpu) if torch.cuda.is_available() else 'cpu')
datasets = create_dataset(cfg, writer, logger) #source_train\ target_train\ source_valid\ target_valid + _loader
model = CustomModel(cfg, writer, logger)
running_metrics_val = runningScore(cfg['data']['target']['n_class'])
source_running_metrics_val = runningScore(cfg['data']['target']['n_class'])
val_loss_meter = averageMeter()
source_val_loss_meter = averageMeter()
time_meter = averageMeter()
loss_fn = get_loss_function(cfg)
path = cfg['test']['path']
checkpoint = torch.load(path)
model.adaptive_load_nets(model.BaseNet, checkpoint['DeepLab']['model_state'])
validation(
model, logger, writer, datasets, device, running_metrics_val, val_loss_meter, loss_fn,\
source_val_loss_meter, source_running_metrics_val, iters = model.iter
)
def validation(model, logger, writer, datasets, device, running_metrics_val, val_loss_meter, loss_fn,\
source_val_loss_meter, source_running_metrics_val, iters):
iters = iters
_k = -1
model.eval(logger=logger)
torch.cuda.empty_cache()
with torch.no_grad():
validate(
datasets.target_valid_loader, device, model, running_metrics_val,
val_loss_meter, loss_fn
)
writer.add_scalar('loss/val_loss', val_loss_meter.avg, iters+1)
logger.info("Iter %d Loss: %.4f" % (iters + 1, val_loss_meter.avg))
writer.add_scalar('loss/source_val_loss', source_val_loss_meter.avg, iters+1)
logger.info("Iter %d Source Loss: %.4f" % (iters + 1, source_val_loss_meter.avg))
score, class_iou = running_metrics_val.get_scores()
for k, v in score.items():
print(k, v)
logger.info('{}: {}'.format(k, v))
writer.add_scalar('val_metrics/{}'.format(k), v, iters+1)
for k, v in class_iou.items():
logger.info('{}: {}'.format(k, v))
writer.add_scalar('val_metrics/cls_{}'.format(k), v, iters+1)
val_loss_meter.reset()
running_metrics_val.reset()
source_val_loss_meter.reset()
source_running_metrics_val.reset()
torch.cuda.empty_cache()
return score["Mean IoU : \t"]
def validate(valid_loader, device, model, running_metrics_val, val_loss_meter, loss_fn):
for (images_val, labels_val, filename) in tqdm(valid_loader):
images_val = images_val.to(device)
labels_val = labels_val.to(device)
_, _, feat_cls, outs = model.forward(images_val)
outputs = F.interpolate(outs, size=images_val.size()[2:], mode='bilinear', align_corners=True)
val_loss = loss_fn(input=outputs, target=labels_val)
pred = outputs.data.max(1)[1].cpu().numpy()
gt = labels_val.data.cpu().numpy()
running_metrics_val.update(gt, pred)
val_loss_meter.update(val_loss.item())
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="config")
parser.add_argument(
"--config",
nargs="?",
type=str,
# default="configs/pspnet_cityscapes.yml",
# default="configs/pspnet_gta5.yml",
default='configs/test_from_gta_to_city.yml',
help="Configuration file to use"
)
args = parser.parse_args()
with open(args.config) as fp:
        cfg = yaml.load(fp, Loader=yaml.FullLoader)
run_id = random.randint(1, 100000)
# path = cfg['training']['save_path']
logdir = os.path.join('runs', os.path.basename(args.config)[:-4], str(run_id))
writer = SummaryWriter(log_dir=logdir)
print('RUNDIR: {}'.format(logdir))
shutil.copy(args.config, logdir)
logger = get_logger(logdir)
logger.info('Let the games begin')
# train(cfg, writer, logger)
test(cfg, writer, logger)
| [
"torch.cuda.empty_cache",
"torch.cuda.is_available",
"torch.no_grad",
"torch.load"
] | 0.4.1 | RogerZhangzz/CAG_UDA | 422f99e2e0a5cb26a40d4f17ee5832f81580f7f0 |
0.4 | import os
import abc
import sys
from copy import deepcopy
from functools import reduce
import numpy as np
import torch
from torchvision import utils as vutils
from tqdm.autonotebook import tqdm
from autokeras.constant import Constant
from autokeras.utils import get_device
class ModelTrainerBase(abc.ABC):
def __init__(self,
loss_function,
train_data,
test_data=None,
metric=None,
verbose=False):
self.device = get_device()
self.metric = metric
self.verbose = verbose
self.loss_function = loss_function
self.train_loader = train_data
self.test_loader = test_data
@abc.abstractmethod
def train_model(self,
max_iter_num=Constant.MAX_ITER_NUM,
max_no_improvement_num=Constant.MAX_NO_IMPROVEMENT_NUM):
pass
class ModelTrainer(ModelTrainerBase):
"""A class that is used to train the model.
This class can train a Pytorch model with the given data loaders.
The metric, loss_function, and model must be compatible with each other.
Please see the details in the Attributes.
Attributes:
device: A string. Indicating the device to use. 'cuda' or 'cpu'.
model: An instance of Pytorch Module. The model that will be trained.
train_loader: Training data wrapped in batches in Pytorch Dataloader.
test_loader: Testing data wrapped in batches in Pytorch Dataloader.
loss_function: A function with two parameters (prediction, target).
There is no specific requirement for the types of the parameters,
as long as they are compatible with the model and the data loaders.
The prediction should be the output of the model for a batch.
The target should be a batch of targets packed in the data loaders.
        optimizer: The Pytorch Adam optimizer used to train the model.
early_stop: An instance of class EarlyStop.
metric: It should be a subclass of class autokeras.metric.Metric.
In the compute(prediction, target) function, prediction and targets are
all numpy arrays converted from the output of the model and the targets packed in the data loaders.
verbose: Verbosity mode.
"""
def __init__(self, model, path, **kwargs):
"""Init the ModelTrainer with `model`, `x_train`, `y_train`, `x_test`, `y_test`, `verbose`"""
super().__init__(**kwargs)
self.model = model
self.model.to(self.device)
self.optimizer = None
self.early_stop = None
self.current_epoch = 0
self.current_metric_value = 0
self.temp_model_path = os.path.join(path, 'temp_model')
def train_model(self,
max_iter_num=None,
max_no_improvement_num=None):
"""Train the model.
Args:
max_iter_num: An integer. The maximum number of epochs to train the model.
The training will stop when this number is reached.
max_no_improvement_num: An integer. The maximum number of epochs when the loss value doesn't decrease.
The training will stop when this number is reached.
"""
if max_iter_num is None:
max_iter_num = Constant.MAX_ITER_NUM
if max_no_improvement_num is None:
max_no_improvement_num = Constant.MAX_NO_IMPROVEMENT_NUM
self.early_stop = EarlyStop(max_no_improvement_num)
self.early_stop.on_train_begin()
test_metric_value_list = []
test_loss_list = []
self.optimizer = torch.optim.Adam(self.model.parameters())
for epoch in range(max_iter_num):
self._train()
test_loss, metric_value = self._test()
self.current_metric_value = metric_value
test_metric_value_list.append(metric_value)
test_loss_list.append(test_loss)
decreasing = self.early_stop.on_epoch_end(test_loss)
if self.early_stop.no_improvement_count == 0:
self._save_model()
if not decreasing:
if self.verbose:
print('\nNo loss decrease after {} epochs.\n'.format(max_no_improvement_num))
self._load_model()
break
last_num = min(max_no_improvement_num, max_iter_num)
return (sum(test_loss_list[-last_num:]) / last_num,
sum(test_metric_value_list[-last_num:]) / last_num)
def _train(self):
self.model.train()
loader = self.train_loader
self.current_epoch += 1
if self.verbose:
progress_bar = tqdm(total=len(loader),
desc='Epoch-'
+ str(self.current_epoch)
+ ', Current Metric - '
+ str(self.current_metric_value),
file=sys.stdout,
leave=False,
ncols=100,
position=0,
unit=' batch')
else:
progress_bar = None
for batch_idx, (inputs, targets) in enumerate(deepcopy(loader)):
inputs, targets = inputs.to(self.device), targets.to(self.device)
self.optimizer.zero_grad()
outputs = self.model(inputs)
loss = self.loss_function(outputs, targets)
loss.backward()
self.optimizer.step()
if self.verbose:
if batch_idx % 10 == 0:
progress_bar.update(10)
if self.verbose:
progress_bar.close()
def _test(self):
self.model.eval()
test_loss = 0
all_targets = []
all_predicted = []
loader = self.test_loader
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(deepcopy(loader)):
inputs, targets = inputs.to(self.device), targets.to(self.device)
outputs = self.model(inputs)
# cast tensor to float
test_loss += float(self.loss_function(outputs, targets))
all_predicted.append(outputs.cpu().numpy())
all_targets.append(targets.cpu().numpy())
all_predicted = reduce(lambda x, y: np.concatenate((x, y)), all_predicted)
all_targets = reduce(lambda x, y: np.concatenate((x, y)), all_targets)
return test_loss, self.metric.compute(all_predicted, all_targets)
def _save_model(self):
torch.save(self.model.state_dict(), self.temp_model_path)
def _load_model(self):
self.model.load_state_dict(torch.load(self.temp_model_path))
class GANModelTrainer(ModelTrainerBase):
def __init__(self,
g_model,
d_model,
train_data,
loss_function,
verbose,
gen_training_result=None):
"""Init the ModelTrainer with `model`, `x_train`, `y_train`, `x_test`, `y_test`, `verbose`"""
super().__init__(loss_function, train_data, verbose=verbose)
self.d_model = d_model
self.g_model = g_model
self.d_model.to(self.device)
self.g_model.to(self.device)
self.outf = None
self.out_size = 0
if gen_training_result is not None:
self.outf, self.out_size = gen_training_result
self.sample_noise = torch.randn(self.out_size,
self.g_model.nz,
1, 1, device=self.device)
self.optimizer_d = None
self.optimizer_g = None
def train_model(self,
max_iter_num=Constant.MAX_ITER_NUM,
max_no_improvement_num=Constant.MAX_NO_IMPROVEMENT_NUM):
self.optimizer_d = torch.optim.Adam(self.d_model.parameters())
self.optimizer_g = torch.optim.Adam(self.g_model.parameters())
if self.verbose:
progress_bar = tqdm(total=max_iter_num,
desc=' Model ',
file=sys.stdout,
ncols=75,
position=1,
unit=' epoch')
else:
progress_bar = None
for epoch in range(max_iter_num):
self._train(epoch)
if self.verbose:
progress_bar.update(1)
if self.verbose:
progress_bar.close()
def _train(self, epoch):
# put model into train mode
self.d_model.train()
# TODO: why?
cp_loader = deepcopy(self.train_loader)
if self.verbose:
progress_bar = tqdm(total=len(cp_loader),
desc='Current Epoch',
file=sys.stdout,
leave=False,
ncols=75,
position=0,
unit=' Batch')
else:
progress_bar = None
real_label = 1
fake_label = 0
for batch_idx, inputs in enumerate(cp_loader):
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
# train with real
self.optimizer_d.zero_grad()
inputs = inputs.to(self.device)
batch_size = inputs.size(0)
outputs = self.d_model(inputs)
label = torch.full((batch_size,), real_label, device=self.device)
loss_d_real = self.loss_function(outputs, label)
loss_d_real.backward()
# train with fake
noise = torch.randn((batch_size, self.g_model.nz, 1, 1,), device=self.device)
fake_outputs = self.g_model(noise)
label.fill_(fake_label)
outputs = self.d_model(fake_outputs.detach())
loss_g_fake = self.loss_function(outputs, label)
loss_g_fake.backward()
self.optimizer_d.step()
# (2) Update G network: maximize log(D(G(z)))
self.g_model.zero_grad()
label.fill_(real_label)
outputs = self.d_model(fake_outputs)
loss_g = self.loss_function(outputs, label)
loss_g.backward()
self.optimizer_g.step()
if self.verbose:
if batch_idx % 10 == 0:
progress_bar.update(10)
if self.outf is not None and batch_idx % 100 == 0:
fake = self.g_model(self.sample_noise)
vutils.save_image(
fake.detach(),
'%s/fake_samples_epoch_%03d.png' % (self.outf, epoch),
normalize=True)
if self.verbose:
progress_bar.close()
class EarlyStop:
def __init__(self, max_no_improvement_num=Constant.MAX_NO_IMPROVEMENT_NUM, min_loss_dec=Constant.MIN_LOSS_DEC):
super().__init__()
self.training_losses = []
self.minimum_loss = None
self.no_improvement_count = 0
self._max_no_improvement_num = max_no_improvement_num
self._done = False
self._min_loss_dec = min_loss_dec
def on_train_begin(self):
self.training_losses = []
self.no_improvement_count = 0
self._done = False
self.minimum_loss = float('inf')
def on_epoch_end(self, loss):
self.training_losses.append(loss)
if self._done and loss > (self.minimum_loss - self._min_loss_dec):
return False
if loss > (self.minimum_loss - self._min_loss_dec):
self.no_improvement_count += 1
else:
self.no_improvement_count = 0
self.minimum_loss = loss
if self.no_improvement_count > self._max_no_improvement_num:
self._done = True
return True
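# Hedged usage sketch (not part of the original module): drives EarlyStop by hand,
# the same way ModelTrainer.train_model does above. The loss values and the
# patience of 2 epochs are made up purely for illustration.
if __name__ == '__main__':
    stopper = EarlyStop(max_no_improvement_num=2, min_loss_dec=1e-4)
    stopper.on_train_begin()
    for epoch, loss in enumerate([1.0, 0.8, 0.8, 0.8, 0.8, 0.8]):
        if not stopper.on_epoch_end(loss):
            print('No loss decrease for too long, stopping at epoch', epoch)
            break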
| [
"torch.no_grad",
"torch.full",
"torch.load",
"torch.randn"
] | 0.4.1 | wpsliu123/AUTOKERAS | 172fb3cf705126e4c3d86b41292463e30ecf3c15 |
1.6 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
import torch.nn as nn
class MLPRepresentation(nn.Module):
"""
Deep Q network.
Choose multi-layer full connection with dropout as the basic network architecture.
"""
def __init__(self, name: str, input_dim: int, hidden_dims: [int], output_dim: int, dropout_p: float):
"""
Init deep Q network.
Args:
name (str): Network name.
input_dim (int): Network input dimension.
hidden_dims ([int]): Hidden layer dimensions. The length of `hidden_dims` gives the
number of hidden layers and is expected to be greater than 1.
output_dim (int): Network output dimension.
dropout_p (float): Dropout parameter.
"""
super().__init__()
self._name = name
self._dropout_p = dropout_p
self._input_dim = input_dim
self._hidden_dims = hidden_dims if hidden_dims is not None else []
self._output_dim = output_dim
self._layers = self._build_layers([input_dim] + self._hidden_dims)
if len(self._hidden_dims) == 0:
self._head = nn.Linear(self._input_dim, self._output_dim)
else:
self._head = nn.Linear(hidden_dims[-1], self._output_dim)
self._device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self._net = nn.Sequential(*self._layers, self._head).to(self._device)
def forward(self, x):
return self._net(x.to(self._device)).double()
@property
def input_dim(self):
return self._input_dim
@property
def name(self):
return self._name
@property
def output_dim(self):
return self._output_dim
def _build_basic_layer(self, input_dim, output_dim):
"""
Build basic layer.
Linear -> LeakyReLU -> Dropout
"""
return nn.Sequential(nn.Linear(input_dim, output_dim),
nn.LeakyReLU(),
nn.Dropout(p=self._dropout_p))
def _build_layers(self, layer_dims: []):
"""
Build multi basic layer.
BasicLayer1 -> BasicLayer2 -> ...
"""
layers = []
for input_dim, output_dim in zip(layer_dims, layer_dims[1:]):
layers.append(self._build_basic_layer(input_dim, output_dim))
return layers
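# Hedged usage sketch (illustrative only, not part of the original file): the network
# name, layer sizes and dropout rate below are placeholders chosen for demonstration.
if __name__ == '__main__':
    net = MLPRepresentation(name='dqn', input_dim=8, hidden_dims=[64, 32], output_dim=4, dropout_p=0.1)
    q_values = net(torch.rand(16, 8))  # -> shape (16, 4), dtype float64
    print(q_values.shape)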
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.Sequential",
"torch.nn.LeakyReLU",
"torch.cuda.is_available"
] | 1.6.0 | zhawan/maro | d8c98deea4296cdcb90efd1fb59bc571cec3a2ef |
1.4 | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT-2 model."""
import logging
import math
import os
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.functional import gelu
from transformers.configuration_gpt2 import GPT2Config
from transformers.file_utils import add_start_docstrings
from transformers.modeling_utils import Conv1D, PreTrainedModel, SequenceSummary, prune_conv1d_layer
logger = logging.getLogger(__name__)
GPT2_PRETRAINED_MODEL_ARCHIVE_MAP = {
"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-pytorch_model.bin",
"gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-pytorch_model.bin",
"gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-pytorch_model.bin",
"gpt2-xl": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-xl-pytorch_model.bin",
"distilgpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-pytorch_model.bin",
}
def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import re
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(gpt2_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array.squeeze())
for name, array in zip(names, arrays):
name = name[6:] # skip "model/"
name = name.split("/")
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+\d+", m_name):
scope_names = re.split(r"(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "w" or scope_names[0] == "g":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "b":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "wpe" or scope_names[0] == "wte":
pointer = getattr(pointer, scope_names[0])
pointer = getattr(pointer, "weight")
else:
pointer = getattr(pointer, scope_names[0])
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False):
super().__init__()
self.output_attentions = config.output_attentions
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, nx)
self.c_proj = Conv1D(n_state, nx)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
mask = torch.ones(self.n_head, self.split_size // self.n_head)
heads = set(heads) - self.pruned_heads # Convert to set and remove already pruned heads
for head in heads:
# Compute how many pruned heads are before the head and move the index accordingly
head = head - sum(1 if h < head else 0 for h in self.pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
# Prune conv1d layers
self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
# Update hyper params
self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
self.n_head = self.n_head - len(heads)
self.pruned_heads = self.pruned_heads.union(heads)
def _attn(self, q, k, v, attention_mask=None, head_mask=None):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
nd, ns = w.size(-2), w.size(-1)
b = self.bias[:, :, ns - nd : ns, :ns]
w = w * b - 1e4 * (1 - b)
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = [torch.matmul(w, v)]
if self.output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)
else:
return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
def forward(self, x, layer_past=None, attention_mask=None, head_mask=None):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
if layer_past is not None:
past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1] # transpose back cf below
key = torch.cat((past_key, key), dim=-1)
value = torch.cat((past_value, value), dim=-2)
present = torch.stack((key.transpose(-2, -1), value)) # transpose to have same shapes for stacking
attn_outputs = self._attn(query, key, value, attention_mask, head_mask)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
outputs = [a, present] + attn_outputs[1:]
return outputs # a, present, (attentions)
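# Hedged illustration (not part of the original modeling file): a tiny standalone
# reproduction of the causal-mask trick used in Attention._attn above. The 4x4 size
# is arbitrary; the real model uses an n_ctx x n_ctx buffer registered as `bias`.
def _example_causal_mask(n_ctx=4):
    bias = torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx)
    scores = torch.zeros(1, 1, n_ctx, n_ctx)  # stands in for the raw attention scores
    b = bias[:, :, :n_ctx, :n_ctx]
    # allowed positions keep their score, future positions are pushed to -1e4 before softmax
    return scores * b - 1e4 * (1 - b)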
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super().__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = gelu
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False):
super().__init__()
nx = config.n_embd
self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
self.attn = Attention(nx, n_ctx, config, scale)
self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
self.mlp = MLP(4 * nx, config)
def forward(self, x, layer_past=None, attention_mask=None, head_mask=None):
output_attn = self.attn(
self.ln_1(x), layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask
)
a = output_attn[0] # output_attn: a, present, (attentions)
x = x + a
m = self.mlp(self.ln_2(x))
x = x + m
outputs = [x] + output_attn[1:]
return outputs # x, present, (attentions)
class GPT2PreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = GPT2Config
pretrained_model_archive_map = GPT2_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_gpt2
base_model_prefix = "transformer"
def __init__(self, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
def _init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
GPT2_START_DOCSTRING = r""" OpenAI GPT-2 model was proposed in
`Language Models are Unsupervised Multitask Learners`_
by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
It's a causal (unidirectional) transformer pre-trained using language modeling on a very large
corpus of ~40 GB of text data.
This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
refer to the PyTorch documentation for all matter related to general usage and behavior.
.. _`Language Models are Unsupervised Multitask Learners`:
https://openai.com/blog/better-language-models/
.. _`torch.nn.Module`:
https://pytorch.org/docs/stable/nn.html#module
Parameters:
config (:class:`~transformers.GPT2Config`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
GPT2_INPUTS_DOCSTRING = r""" Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
GPT-2 is a model with absolute position embeddings so it's usually advised to pad the inputs on
the right rather than the left.
Indices can be obtained using :class:`transformers.GPT2Tokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**past**:
list of ``torch.FloatTensor`` (one for each layer):
that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `past` output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
A parallel sequence of tokens (can be used to indicate various portions of the inputs).
The embeddings from these tokens will be summed with the respective token embeddings.
Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices).
**position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
**inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``:
Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
"The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.",
GPT2_START_DOCSTRING,
GPT2_INPUTS_DOCSTRING,
)
class GPT2Model(GPT2PreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the last layer of the model.
**past**:
list of ``torch.FloatTensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``:
that contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2Model.from_pretrained('gpt2')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
def __init__(self, config):
super().__init__(config)
self.output_hidden_states = config.output_hidden_states
self.output_attentions = config.output_attentions
self.output_past = config.output_past
self.wte = nn.Embedding(config.vocab_size, config.n_embd)
self.wpe = nn.Embedding(config.n_positions, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.init_weights()
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, new_embeddings):
self.wte = new_embeddings
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.h[layer].attn.prune_heads(heads)
def forward(
self,
input_ids=None,
past=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, input_shape[-1])
if position_ids is not None:
position_ids = position_ids.view(-1, input_shape[-1])
if past is None:
past_length = 0
past = [None] * len(self.h)
else:
past_length = past[0][0].size(-2)
if position_ids is None:
device = input_ids.device if input_ids is not None else inputs_embeds.device
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
# Attention mask.
if attention_mask is not None:
attention_mask = attention_mask.view(-1, input_shape[-1])
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# head_mask has shape n_layer x batch x n_heads x N x N
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
) # switch to fload if need + fp16 compatibility
else:
head_mask = [None] * self.config.n_layer
if inputs_embeds is None:
inputs_embeds = self.wte(input_ids)
position_embeds = self.wpe(position_ids)
if token_type_ids is not None:
token_type_embeds = self.wte(token_type_ids)
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
presents = ()
all_attentions = []
all_hidden_states = ()
for i, (block, layer_past) in enumerate(zip(self.h, past)):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)
outputs = block(
hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask[i]
)
hidden_states, present = outputs[:2]
if self.output_past:
presents = presents + (present,)
if self.output_attentions:
all_attentions.append(outputs[2])
hidden_states = self.ln_f(hidden_states)
hidden_states = hidden_states.view(*output_shape)
# Add last hidden state
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_past:
outputs = outputs + (presents,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
# let the number of heads free (-1) so we can extract attention even after head pruning
attention_output_shape = input_shape[:-1] + (-1,) + all_attentions[0].shape[-2:]
all_attentions = tuple(t.view(*attention_output_shape) for t in all_attentions)
outputs = outputs + (all_attentions,)
return outputs # last hidden state, (presents), (all hidden_states), (attentions)
@add_start_docstrings(
"""The GPT2 Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """,
GPT2_START_DOCSTRING,
GPT2_INPUTS_DOCSTRING,
)
class GPT2LMHeadModel(GPT2PreTrainedModel):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for language modeling.
Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids``
Indices are selected in ``[-100, 0, ..., config.vocab_size]``
All labels set to ``-100`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Language modeling loss.
**prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**past**:
list of ``torch.FloatTensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``:
that contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=input_ids)
loss, logits = outputs[:2]
"""
def __init__(self, config):
super().__init__(config)
self.transformer = GPT2Model(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head
def prepare_inputs_for_generation(self, input_ids, **kwargs):
# only last token for inputs_ids if past is defined in kwargs
if "past" in kwargs and kwargs["past"]:
input_ids = input_ids[:, -1].unsqueeze(-1)
inputs = {"input_ids": input_ids}
inputs.update(kwargs)
return inputs
def forward(
self,
input_ids=None,
past=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
transformer_outputs = self.transformer(
input_ids,
past=past,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
outputs = (lm_logits,) + transformer_outputs[1:]
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
# print('shift_logits {}'.format(shift_logits))
shift_labels = labels[..., 1:].contiguous()
# print('shift_labels {}'.format(shift_labels))
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), lm_logits, presents, (all hidden_states), (attentions)
@add_start_docstrings(
"""The GPT2 Model transformer with a language modeling and a multiple-choice classification
head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
The language modeling head has its weights tied to the input embeddings,
the classification head takes as input the input of a specified classification token index in the input sequence).
""",
GPT2_START_DOCSTRING,
GPT2_INPUTS_DOCSTRING,
)
class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
r"""
**mc_token_ids**: (`optional`, default to index of the last token of the input) ``torch.LongTensor`` of shape ``(batch_size, num_choices)``:
Index of the classification token in each input sequence.
Selected in the range ``[0, input_ids.size(-1) - 1[``.
**lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for language modeling.
Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
Indices are selected in ``[-100, 0, ..., config.vocab_size]``
All labels set to ``-100`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
**mc_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size)``:
Labels for computing the multiple choice classification loss.
Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**lm_loss**: (`optional`, returned when ``lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Language modeling loss.
**mc_loss**: (`optional`, returned when ``multiple_choice_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Multiple choice classification loss.
**lm_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**mc_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)``
Prediction scores of the multiplechoice classification head (scores for each choice before SoftMax).
**past**:
list of ``torch.FloatTensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``:
that contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import torch
from transformers import GPT2Tokenizer, GPT2DoubleHeadsModel
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2DoubleHeadsModel.from_pretrained('gpt2')
# Add a [CLS] to the vocabulary (we should train it also!)
tokenizer.add_special_tokens({'cls_token': '[CLS]'})
model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size
print(tokenizer.cls_token_id, len(tokenizer)) # The newly added token is the last token of the vocabulary
choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
encoded_choices = [tokenizer.encode(s) for s in choices]
cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]
input_ids = torch.tensor(encoded_choices).unsqueeze(0) # Batch size: 1, number of choices: 2
mc_token_ids = torch.tensor([cls_token_location]) # Batch size: 1
outputs = model(input_ids, mc_token_ids=mc_token_ids)
lm_prediction_scores, mc_prediction_scores = outputs[:2]
"""
def __init__(self, config):
super().__init__(config)
config.num_labels = 1
self.transformer = GPT2Model(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) #(n_embd, vocab_size)
self.multiple_choice_head = SequenceSummary(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head
def forward(
self,
input_ids=None,
past=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
mc_token_ids=None,
lm_labels=None,
mc_labels=None,
):
transformer_outputs = self.transformer(
input_ids,
past=past,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states) # Language Model Head
mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1) # Multiple choice classification head
outputs = (lm_logits, mc_logits) + transformer_outputs[1:]
if mc_labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
outputs = (loss,) + outputs
if lm_labels is not None:
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
outputs = (loss,) + outputs
return outputs # (lm loss), (mc loss), lm logits, mc logits, presents, (all hidden_states), (attentions)
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.cat",
"torch.nn.LayerNorm",
"torch.nn.Softmax",
"torch.arange",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.from_numpy",
"torch.matmul",
"torch.nn.Embedding"
] | 1.4.0 | kimhyoil/KoGPT2_Ai_Eassay | da7d160f6815dc8ec3dfd635495978409c2a897c |
1.2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
''' Utilities file
This file contains utility functions for bookkeeping, logging, and data loading.
Methods which directly affect training should either go in layers, the model,
or train_fns.py.
'''
from __future__ import print_function
import sys
import os
import numpy as np
import time
import datetime
import json
import pickle
from argparse import ArgumentParser
import animal_hash
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import datasets as dset
def prepare_parser():
usage = 'Parser for all scripts.'
parser = ArgumentParser(description=usage)
### Dataset/Dataloader stuff ###
parser.add_argument(
'--dataset', type=str, default='I128_hdf5',
help='Which Dataset to train on, out of I128, I256, C10, C100;'
'Append "_hdf5" to use the hdf5 version for ISLVRC '
'(default: %(default)s)')
parser.add_argument(
'--augment', action='store_true', default=False,
help='Augment with random crops and flips (default: %(default)s)')
parser.add_argument(
'--num_workers', type=int, default=8,
help='Number of dataloader workers; consider using less for HDF5 '
'(default: %(default)s)')
parser.add_argument(
'--no_pin_memory', action='store_false', dest='pin_memory', default=True,
help='Pin data into memory through dataloader? (default: %(default)s)')
parser.add_argument(
'--shuffle', action='store_true', default=True,#TODOFalse,
help='Shuffle the data (strongly recommended)? (default: %(default)s)')
parser.add_argument(
'--load_in_mem', action='store_true', default=False,
help='Load all data into memory? (default: %(default)s)')
parser.add_argument(
'--use_multiepoch_sampler', action='store_true', default=True,#TODOFalse,
help='Use the multi-epoch sampler for dataloader? (default: %(default)s)')
### Model stuff ###
parser.add_argument(
'--model', type=str, default='BigGAN',
help='Name of the model module (default: %(default)s)')
parser.add_argument(
'--G_param', type=str, default='SN',
help='Parameterization style to use for G, spectral norm (SN) or SVD (SVD)'
' or None (default: %(default)s)')
parser.add_argument(
'--D_param', type=str, default='SN',
help='Parameterization style to use for D, spectral norm (SN) or SVD (SVD)'
' or None (default: %(default)s)')
parser.add_argument(
'--G_ch', type=int, default=96, #TODO 64,
help='Channel multiplier for G (default: %(default)s)')
parser.add_argument(
'--D_ch', type=int, default=96, #TODO64,
help='Channel multiplier for D (default: %(default)s)')
parser.add_argument(
'--G_depth', type=int, default=1,
help='Number of resblocks per stage in G? (default: %(default)s)')
parser.add_argument(
'--D_depth', type=int, default=1,
help='Number of resblocks per stage in D? (default: %(default)s)')
parser.add_argument(
'--D_thin', action='store_false', dest='D_wide', default=True,
help='Use the SN-GAN channel pattern for D? (default: %(default)s)')
parser.add_argument(
'--G_shared', action='store_true', default=True,#TODOFalse,
help='Use shared embeddings in G? (default: %(default)s)')
parser.add_argument(
'--shared_dim', type=int, default=128,#TODO0,
help='G''s shared embedding dimensionality; if 0, will be equal to dim_z. '
'(default: %(default)s)')
parser.add_argument(
'--dim_z', type=int, default=120,#TODO128,
help='Noise dimensionality: %(default)s)')
parser.add_argument(
'--z_var', type=float, default=1.0,
help='Noise variance: %(default)s)')
parser.add_argument(
'--hier', action='store_true', default=True,#TODOFalse,
help='Use hierarchical z in G? (default: %(default)s)')
parser.add_argument(
'--cross_replica', action='store_true', default=False,
help='Cross_replica batchnorm in G?(default: %(default)s)')
parser.add_argument(
'--mybn', action='store_true', default=False,
help='Use my batchnorm (which supports standing stats?) %(default)s)')
parser.add_argument(
'--G_nl', type=str, default='inplace_relu',#TODO'relu',
help='Activation function for G (default: %(default)s)')
parser.add_argument(
'--D_nl', type=str, default='inplace_relu',#TODO'relu',
help='Activation function for D (default: %(default)s)')
parser.add_argument(
'--G_attn', type=str, default='64',
help='What resolutions to use attention on for G (underscore separated) '
'(default: %(default)s)')
parser.add_argument(
'--D_attn', type=str, default='64',
help='What resolutions to use attention on for D (underscore separated) '
'(default: %(default)s)')
parser.add_argument(
'--norm_style', type=str, default='bn',
help='Normalizer style for G, one of bn [batchnorm], in [instancenorm], '
'ln [layernorm], gn [groupnorm] (default: %(default)s)')
### Model init stuff ###
parser.add_argument(
'--seed', type=int, default=0,
help='Random seed to use; affects both initialization and '
' dataloading. (default: %(default)s)')
parser.add_argument(
'--G_init', type=str, default='ortho',
help='Init style to use for G (default: %(default)s)')
parser.add_argument(
'--D_init', type=str, default='ortho',
help='Init style to use for D(default: %(default)s)')
parser.add_argument(
'--skip_init', action='store_true', default=True,#TODOFalse,
help='Skip initialization, ideal for testing when ortho init was used '
'(default: %(default)s)')
### Optimizer stuff ###
parser.add_argument(
'--G_lr', type=float, default=1e-4,#TODO5e-5,
help='Learning rate to use for Generator (default: %(default)s)')
parser.add_argument(
'--D_lr', type=float, default=4e-4,#TODO2e-4,
help='Learning rate to use for Discriminator (default: %(default)s)')
parser.add_argument(
'--G_B1', type=float, default=0.0,
help='Beta1 to use for Generator (default: %(default)s)')
parser.add_argument(
'--D_B1', type=float, default=0.0,
help='Beta1 to use for Discriminator (default: %(default)s)')
parser.add_argument(
'--G_B2', type=float, default=0.999,
help='Beta2 to use for Generator (default: %(default)s)')
parser.add_argument(
'--D_B2', type=float, default=0.999,
help='Beta2 to use for Discriminator (default: %(default)s)')
### Batch size, parallel, and precision stuff ###
parser.add_argument(
'--batch_size', type=int, default=256,#TODO64,
help='Default overall batchsize (default: %(default)s)')
parser.add_argument(
'--G_batch_size', type=int, default=512,#TODO0,
help='Batch size to use for G; if 0, same as D (default: %(default)s)')
parser.add_argument(
'--num_G_accumulations', type=int, default=8,#TODO1,
help='Number of passes to accumulate G''s gradients over '
'(default: %(default)s)')
parser.add_argument(
'--num_D_steps', type=int, default=1,#TODO2,
help='Number of D steps per G step (default: %(default)s)')
parser.add_argument(
'--num_D_accumulations', type=int, default=8,#TODO1,
help='Number of passes to accumulate D''s gradients over '
'(default: %(default)s)')
parser.add_argument(
'--split_D', action='store_true', default=False,
help='Run D twice rather than concatenating inputs? (default: %(default)s)')
parser.add_argument(
'--num_epochs', type=int, default=100,
help='Number of epochs to train for (default: %(default)s)')
parser.add_argument(
'--parallel', action='store_true', default=True, #TODOFalse,
help='Train with multiple GPUs (default: %(default)s)')
parser.add_argument(
'--G_fp16', action='store_true', default=False,
help='Train with half-precision in G? (default: %(default)s)')
parser.add_argument(
'--D_fp16', action='store_true', default=False,
help='Train with half-precision in D? (default: %(default)s)')
parser.add_argument(
'--D_mixed_precision', action='store_true', default=False,
help='Train with half-precision activations but fp32 params in D? '
'(default: %(default)s)')
parser.add_argument(
'--G_mixed_precision', action='store_true', default=False,
help='Train with half-precision activations but fp32 params in G? '
'(default: %(default)s)')
parser.add_argument(
'--accumulate_stats', action='store_true', default=False,
help='Accumulate "standing" batchnorm stats? (default: %(default)s)')
parser.add_argument(
'--num_standing_accumulations', type=int, default=16,
help='Number of forward passes to use in accumulating standing stats? '
'(default: %(default)s)')
### Bookkeping stuff ###
parser.add_argument(
'--G_eval_mode', action='store_true', default=True,#TODOFalse,
help='Run G in eval mode (running/standing stats?) at sample/test time? '
'(default: %(default)s)')
parser.add_argument(
'--save_every', type=int, default=1000,#TODO2000,
help='Save every X iterations (default: %(default)s)')
parser.add_argument(
'--num_save_copies', type=int, default=2,
help='How many copies to save (default: %(default)s)')
parser.add_argument(
'--num_best_copies', type=int, default=5,#TODO2,
help='How many previous best checkpoints to save (default: %(default)s)')
parser.add_argument(
'--which_best', type=str, default='IS',
help='Which metric to use to determine when to save new "best"'
'checkpoints, one of IS or FID (default: %(default)s)')
parser.add_argument(
'--no_fid', action='store_true', default=False,
help='Calculate IS only, not FID? (default: %(default)s)')
parser.add_argument(
'--test_every', type=int, default=2000,#TODO5000,
help='Test every X iterations (default: %(default)s)')
parser.add_argument(
'--num_inception_images', type=int, default=50000,
help='Number of samples to compute inception metrics with '
'(default: %(default)s)')
parser.add_argument(
'--hashname', action='store_true', default=False,
help='Use a hash of the experiment name instead of the full config '
'(default: %(default)s)')
parser.add_argument(
'--base_root', type=str, default='',
help='Default location to store all weights, samples, data, and logs '
' (default: %(default)s)')
parser.add_argument(
'--data_root', type=str, default='data',
help='Default location where data is stored (default: %(default)s)')
parser.add_argument(
'--weights_root', type=str, default='weights',
help='Default location to store weights (default: %(default)s)')
parser.add_argument(
'--logs_root', type=str, default='logs',
help='Default location to store logs (default: %(default)s)')
parser.add_argument(
'--samples_root', type=str, default='samples',
help='Default location to store samples (default: %(default)s)')
parser.add_argument(
'--pbar', type=str, default='mine',
help='Type of progressbar to use; one of "mine" or "tqdm" '
'(default: %(default)s)')
parser.add_argument(
'--name_suffix', type=str, default='',
help='Suffix for experiment name for loading weights for sampling '
'(consider "best0") (default: %(default)s)')
parser.add_argument(
'--experiment_name', type=str, default='',
help='Optionally override the automatic experiment naming with this arg. '
'(default: %(default)s)')
parser.add_argument(
'--config_from_name', action='store_true', default=False,
help='Use a hash of the experiment name instead of the full config '
'(default: %(default)s)')
### EMA Stuff ###
parser.add_argument(
'--ema', action='store_true', default=True,#TODOFalse,
help='Keep an ema of G''s weights? (default: %(default)s)')
parser.add_argument(
'--ema_decay', type=float, default=0.9999,
help='EMA decay rate (default: %(default)s)')
parser.add_argument(
'--use_ema', action='store_true', default=True,#TODOFalse,
help='Use the EMA parameters of G for evaluation? (default: %(default)s)')
parser.add_argument(
'--ema_start', type=int, default=20000,#TODO0,
help='When to start updating the EMA weights (default: %(default)s)')
### Numerical precision and SV stuff ###
parser.add_argument(
'--adam_eps', type=float, default=1e-6,#TODO1e-8,
help='epsilon value to use for Adam (default: %(default)s)')
parser.add_argument(
'--BN_eps', type=float, default=1e-5,
help='epsilon value to use for BatchNorm (default: %(default)s)')
parser.add_argument(
'--SN_eps', type=float, default=1e-6,#TODO1e-8,
help='epsilon value to use for Spectral Norm(default: %(default)s)')
parser.add_argument(
'--num_G_SVs', type=int, default=1,
help='Number of SVs to track in G (default: %(default)s)')
parser.add_argument(
'--num_D_SVs', type=int, default=1,
help='Number of SVs to track in D (default: %(default)s)')
parser.add_argument(
'--num_G_SV_itrs', type=int, default=1,
help='Number of SV itrs in G (default: %(default)s)')
parser.add_argument(
'--num_D_SV_itrs', type=int, default=1,
help='Number of SV itrs in D (default: %(default)s)')
### Ortho reg stuff ###
parser.add_argument(
'--G_ortho', type=float, default=0.0, # 1e-4 is default for BigGAN
help='Modified ortho reg coefficient in G(default: %(default)s)')
parser.add_argument(
'--D_ortho', type=float, default=0.0,
help='Modified ortho reg coefficient in D (default: %(default)s)')
parser.add_argument(
'--toggle_grads', action='store_true', default=True,
help='Toggle D and G''s "requires_grad" settings when not training them? '
' (default: %(default)s)')
### Which train function ###
parser.add_argument(
'--which_train_fn', type=str, default='GAN',
help='How2trainyourbois (default: %(default)s)')
### Resume training stuff
parser.add_argument(
'--load_weights', type=str, default='',
help='Suffix for which weights to load (e.g. best0, copy0) '
'(default: %(default)s)')
parser.add_argument(
'--resume', action='store_true', default=False,
help='Resume training? (default: %(default)s)')
### Log stuff ###
parser.add_argument(
'--logstyle', type=str, default='%3.3e',
help='What style to use when logging training metrics?'
'One of: %#.#f/ %#.#e (float/exp, text),'
'pickle (python pickle),'
'npz (numpy zip),'
'mat (MATLAB .mat file) (default: %(default)s)')
parser.add_argument(
'--log_G_spectra', action='store_true', default=False,
help='Log the top 3 singular values in each SN layer in G? '
'(default: %(default)s)')
parser.add_argument(
'--log_D_spectra', action='store_true', default=False,
help='Log the top 3 singular values in each SN layer in D? '
'(default: %(default)s)')
parser.add_argument(
'--sv_log_interval', type=int, default=10,
help='Iteration interval for logging singular values '
' (default: %(default)s)')
return parser
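# Hedged usage sketch (not part of the original file): the training scripts that go with
# this parser typically turn the parsed namespace into a plain config dict; the empty
# argument list is a placeholder so that the defaults above are used.
def _example_default_config():
    return vars(prepare_parser().parse_args([]))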
# Arguments for sample.py; not presently used in train.py
def add_sample_parser(parser):
parser.add_argument(
'--sample_npz', action='store_true', default=True,#TODOFalse,
help='Sample "sample_num_npz" images and save to npz? '
'(default: %(default)s)')
parser.add_argument(
'--sample_num_npz', type=int, default=50000,
help='Number of images to sample when sampling NPZs '
'(default: %(default)s)')
parser.add_argument(
'--sample_sheets', action='store_true', default=True,#TODOFalse,
help='Produce class-conditional sample sheets and stick them in '
'the samples root? (default: %(default)s)')
parser.add_argument(
'--sample_interps', action='store_true', default=True,#TODOFalse,
help='Produce interpolation sheets and stick them in '
'the samples root? (default: %(default)s)')
parser.add_argument(
'--sample_sheet_folder_num', type=int, default=-1,
help='Number to use for the folder for these sample sheets '
'(default: %(default)s)')
parser.add_argument(
'--sample_random', action='store_true', default=True,#TODOFalse,
help='Produce a single random sheet? (default: %(default)s)')
parser.add_argument(
'--sample_trunc_curves', type=str, default='0.05_0.05_1.0',#TODO'',
help='Get inception metrics with a range of variances?'
'To use this, specify a startpoint, step, and endpoint, e.g. '
'--sample_trunc_curves 0.2_0.1_1.0 for a startpoint of 0.2, '
'endpoint of 1.0, and stepsize of 1.0. Note that this is '
'not exactly identical to using tf.truncated_normal, but should '
'have approximately the same effect. (default: %(default)s)')
parser.add_argument(
'--sample_inception_metrics', action='store_true', default=True,#TODOFalse,
help='Calculate Inception metrics with sample.py? (default: %(default)s)')
return parser
def make_sure_dir(file_or_path, str_type='dir'):
if str_type == 'file':
file_or_path = os.path.dirname(os.path.abspath(file_or_path))
if not os.path.exists(file_or_path):
os.makedirs(file_or_path)
def save_config_to_json(config, filename):
'''
Save the dictionary config to a json file at the given path
Args:
config: dict, to be saved
filename: str, the path to save
'''
assert filename.endswith('.json'), 'the filename for saving the config should end with .json'
make_sure_dir(filename, str_type='file')
config_ = {}
for k in config:
if isinstance(config[k], (str, list, int, float, bool)):
config_[k] = config[k]
else:
config_[k] = str(config[k])
with open(filename, 'w') as f:
# f.write(json.dumps(config_))
json.dump(config_, f, indent=4)
print('Config file saved to {}'.format(filename))
# Convenience dicts
dset_dict = {'I32': dset.ImageFolder, 'I64': dset.ImageFolder,
'I128': dset.ImageFolder, 'I256': dset.ImageFolder,
'I32_hdf5': dset.ILSVRC_HDF5, 'I64_hdf5': dset.ILSVRC_HDF5,
'I128_hdf5': dset.ILSVRC_HDF5, 'I256_hdf5': dset.ILSVRC_HDF5,
'C10': dset.CIFAR10, 'C100': dset.CIFAR100}
imsize_dict = {'I32': 32, 'I32_hdf5': 32,
'I64': 64, 'I64_hdf5': 64,
'I128': 128, 'I128_hdf5': 128,
'I256': 256, 'I256_hdf5': 256,
'C10': 32, 'C100': 32}
root_dict = {'I32': 'ImageNet', 'I32_hdf5': 'ILSVRC32.hdf5',
'I64': 'ImageNet', 'I64_hdf5': 'ILSVRC64.hdf5',
'I128': 'ImageNet', 'I128_hdf5': 'ILSVRC128.hdf5',
'I256': 'ImageNet', 'I256_hdf5': 'ILSVRC256.hdf5',
'C10': 'cifar', 'C100': 'cifar'}
nclass_dict = {'I32': 1000, 'I32_hdf5': 1000,
'I64': 1000, 'I64_hdf5': 1000,
'I128': 1000, 'I128_hdf5': 1000,
'I256': 1000, 'I256_hdf5': 1000,
'C10': 10, 'C100': 100}
# Number of classes to put per sample sheet
classes_per_sheet_dict = {'I32': 50, 'I32_hdf5': 50,
'I64': 50, 'I64_hdf5': 50,
'I128': 20, 'I128_hdf5': 20,
'I256': 20, 'I256_hdf5': 20,
'C10': 10, 'C100': 100}
activation_dict = {'inplace_relu': nn.ReLU(inplace=True),
'relu': nn.ReLU(inplace=False),
'ir': nn.ReLU(inplace=True),}
class CenterCropLongEdge(object):
"""Crops the given PIL Image on the long edge.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
"""
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
"""
return transforms.functional.center_crop(img, min(img.size))
def __repr__(self):
return self.__class__.__name__
class RandomCropLongEdge(object):
"""Crops the given PIL Image on the long edge with a random start point.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
"""
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
"""
size = (min(img.size), min(img.size))
# Only step forward along this edge if it's the long edge
i = (0 if size[0] == img.size[0]
else np.random.randint(low=0,high=img.size[0] - size[0]))
j = (0 if size[1] == img.size[1]
else np.random.randint(low=0,high=img.size[1] - size[1]))
return transforms.functional.crop(img, i, j, size[0], size[1])
def __repr__(self):
return self.__class__.__name__
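# Hedged illustration (not in the original utils): shows how the two crop transforms above
# are meant to be composed with standard torchvision transforms; the image size and the
# normalization stats are placeholders mirroring get_data_loaders further down.
def _example_train_transform(image_size=128):
    return transforms.Compose([RandomCropLongEdge(),
                               transforms.Resize(image_size),
                               transforms.RandomHorizontalFlip(),
                               transforms.ToTensor(),
                               transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])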
# multi-epoch Dataset sampler to avoid memory leakage and enable resumption of
# training from the same sample regardless of whether we stop mid-epoch
class MultiEpochSampler(torch.utils.data.Sampler):
r"""Samples elements randomly over multiple epochs
Arguments:
data_source (Dataset): dataset to sample from
num_epochs (int) : Number of times to loop over the dataset
start_itr (int) : which iteration to begin from
"""
def __init__(self, data_source, num_epochs, start_itr=0, batch_size=128):
self.data_source = data_source
self.num_samples = len(self.data_source)
self.num_epochs = num_epochs
self.start_itr = start_itr
self.batch_size = batch_size
if not isinstance(self.num_samples, int) or self.num_samples <= 0:
raise ValueError("num_samples should be a positive integeral "
"value, but got num_samples={}".format(self.num_samples))
def __iter__(self):
n = len(self.data_source)
# Determine number of epochs
num_epochs = int(np.ceil((n * self.num_epochs
- (self.start_itr * self.batch_size)) / float(n)))
# Sample all the indices, and then grab the last num_epochs index sets;
# This ensures if we're starting at epoch 4, we're still grabbing epoch 4's
# indices
out = [torch.randperm(n) for epoch in range(self.num_epochs)][-num_epochs:]
# Ignore the first start_itr % n indices of the first epoch
out[0] = out[0][(self.start_itr * self.batch_size % n):]
# if self.replacement:
# return iter(torch.randint(high=n, size=(self.num_samples,), dtype=torch.int64).tolist())
# return iter(.tolist())
output = torch.cat(out).tolist()
print('Length dataset output is %d' % len(output))
return iter(output)
def __len__(self):
return len(self.data_source) * self.num_epochs - self.start_itr * self.batch_size
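# Minimal illustrative sketch (not part of the original script): how the sampler
# above is typically paired with a DataLoader when resuming training from
# iteration `start_itr`. The dataset, batch size and worker count are placeholder
# values; DataLoader is assumed to be imported at the top of this file, as the
# loaders below already rely on it.
def _example_multiepoch_loader(train_set, batch_size=64, num_epochs=100, start_itr=5000):
  sampler = MultiEpochSampler(train_set, num_epochs, start_itr, batch_size)
  return DataLoader(train_set, batch_size=batch_size, sampler=sampler,
                    num_workers=8, pin_memory=True)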
# Convenience function to centralize all data loaders
def get_data_loaders(dataset, data_root=None, augment=False, batch_size=64,
num_workers=8, shuffle=True, load_in_mem=False, hdf5=False,
pin_memory=True, drop_last=True, start_itr=0,
num_epochs=500, use_multiepoch_sampler=False,
**kwargs):
# Append /FILENAME.hdf5 to root if using hdf5
data_root += '/%s' % root_dict[dataset]
print('Using dataset root location %s' % data_root)
which_dataset = dset_dict[dataset]
norm_mean = [0.5,0.5,0.5]
norm_std = [0.5,0.5,0.5]
image_size = imsize_dict[dataset]
# For image folder datasets, name of the file where we store the precomputed
# image locations to avoid having to walk the dirs every time we load.
dataset_kwargs = {'index_filename': '%s_imgs.npz' % dataset}
  # HDF5 datasets have their own inbuilt transform, so no separate train_transform is needed
if 'hdf5' in dataset:
train_transform = None
else:
if augment:
print('Data will be augmented...')
if dataset in ['C10', 'C100']:
train_transform = [transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip()]
else:
train_transform = [RandomCropLongEdge(),
transforms.Resize(image_size),
transforms.RandomHorizontalFlip()]
else:
print('Data will not be augmented...')
if dataset in ['C10', 'C100']:
train_transform = []
else:
train_transform = [CenterCropLongEdge(), transforms.Resize(image_size)]
# train_transform = [transforms.Resize(image_size), transforms.CenterCrop]
train_transform = transforms.Compose(train_transform + [
transforms.ToTensor(),
transforms.Normalize(norm_mean, norm_std)])
train_set = which_dataset(root=data_root, transform=train_transform,
load_in_mem=load_in_mem, **dataset_kwargs)
# Prepare loader; the loaders list is for forward compatibility with
# using validation / test splits.
loaders = []
if use_multiepoch_sampler:
print('Using multiepoch sampler from start_itr %d...' % start_itr)
loader_kwargs = {'num_workers': num_workers, 'pin_memory': pin_memory}
sampler = MultiEpochSampler(train_set, num_epochs, start_itr, batch_size)
train_loader = DataLoader(train_set, batch_size=batch_size,
sampler=sampler, **loader_kwargs)
else:
loader_kwargs = {'num_workers': num_workers, 'pin_memory': pin_memory,
'drop_last': drop_last} # Default, drop last incomplete batch
train_loader = DataLoader(train_set, batch_size=batch_size,
shuffle=shuffle, **loader_kwargs)
loaders.append(train_loader)
return loaders
# Utility file to seed rngs
def seed_rng(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
# Utility to peg all roots to a base root
# If a base root folder is provided, peg all other root folders to it.
def update_config_roots(config):
if config['base_root']:
print('Pegging all root folders to base root %s' % config['base_root'])
for key in ['data', 'weights', 'logs', 'samples']:
config['%s_root' % key] = '%s/%s' % (config['base_root'], key)
return config
# Utility to prepare root folders if they don't exist; parent folder must exist
def prepare_root(config):
for key in ['weights_root', 'logs_root', 'samples_root']:
if not os.path.exists(config[key]):
print('Making directory %s for %s...' % (config[key], key))
os.mkdir(config[key])
# Simple wrapper that applies EMA to a model. Could be better done in 1.0 using
# the parameters() and buffers() module functions, but for now this works
# with state_dicts using .copy_
class ema(object):
def __init__(self, source, target, decay=0.9999, start_itr=0):
self.source = source
self.target = target
self.decay = decay
# Optional parameter indicating what iteration to start the decay at
self.start_itr = start_itr
# Initialize target's params to be source's
self.source_dict = self.source.state_dict()
self.target_dict = self.target.state_dict()
print('Initializing EMA parameters to be source parameters...')
with torch.no_grad():
for key in self.source_dict:
self.target_dict[key].data.copy_(self.source_dict[key].data)
# target_dict[key].data = source_dict[key].data # Doesn't work!
def update(self, itr=None):
# If an iteration counter is provided and itr is less than the start itr,
# peg the ema weights to the underlying weights.
if itr and itr < self.start_itr:
decay = 0.0
else:
decay = self.decay
with torch.no_grad():
for key in self.source_dict:
self.target_dict[key].data.copy_(self.target_dict[key].data * decay
+ self.source_dict[key].data * (1 - decay))
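# Minimal illustrative sketch (not part of the original script): the EMA wrapper
# above is constructed once and then updated after every generator step. `G` and
# `G_ema` are hypothetical generator instances supplied by the caller; the actual
# forward/backward/optimizer logic is elided.
def _example_ema_loop(G, G_ema, num_itrs, decay=0.9999, start_itr=0):
  ema_helper = ema(G, G_ema, decay=decay, start_itr=start_itr)
  for itr in range(num_itrs):
    # ... forward / backward / optimizer step on G would go here ...
    ema_helper.update(itr)
  return ema_helper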
# Apply modified ortho reg to a model
# This function is an optimized version that directly computes the gradient,
# instead of computing and then differentiating the loss.
def ortho(model, strength=1e-4, blacklist=[]):
with torch.no_grad():
for param in model.parameters():
# Only apply this to parameters with at least 2 axes, and not in the blacklist
if len(param.shape) < 2 or any([param is item for item in blacklist]):
continue
w = param.view(param.shape[0], -1)
grad = (2 * torch.mm(torch.mm(w, w.t())
* (1. - torch.eye(w.shape[0], device=w.device)), w))
param.grad.data += strength * grad.view(param.shape)
# Default ortho reg
# This function is an optimized version that directly computes the gradient,
# instead of computing and then differentiating the loss.
def default_ortho(model, strength=1e-4, blacklist=[]):
with torch.no_grad():
for param in model.parameters():
# Only apply this to parameters with at least 2 axes & not in blacklist
if len(param.shape) < 2 or param in blacklist:
continue
w = param.view(param.shape[0], -1)
grad = (2 * torch.mm(torch.mm(w, w.t())
- torch.eye(w.shape[0], device=w.device), w))
param.grad.data += strength * grad.view(param.shape)
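# Illustrative sketch (not part of the original script) of where the ortho
# regularizers above are applied: after loss.backward() has populated param.grad
# and before the optimizer step. `G`, `G_optim` and the blacklist contents are
# hypothetical placeholders mirroring how BigGAN-style training loops call ortho().
def _example_apply_ortho(G, G_optim, strength=1e-4, blacklist=None):
  ortho(G, strength, blacklist=blacklist or [])
  G_optim.step()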
# Convenience utility to switch off requires_grad
def toggle_grad(model, on_or_off):
for param in model.parameters():
param.requires_grad = on_or_off
# Function to join strings or ignore them
# Base string is the string to link "strings," while strings
# is a list of strings or Nones.
def join_strings(base_string, strings):
return base_string.join([item for item in strings if item])
# Save a model's weights, optimizer, and the state_dict
def save_weights(G, D, state_dict, weights_root, experiment_name,
name_suffix=None, G_ema=None):
root = '/'.join([weights_root, experiment_name])
if not os.path.exists(root):
os.mkdir(root)
if name_suffix:
print('Saving weights to %s/%s...' % (root, name_suffix))
else:
print('Saving weights to %s...' % root)
torch.save(G.state_dict(),
'%s/%s.pth' % (root, join_strings('_', ['G', name_suffix])))
torch.save(G.optim.state_dict(),
'%s/%s.pth' % (root, join_strings('_', ['G_optim', name_suffix])))
torch.save(D.state_dict(),
'%s/%s.pth' % (root, join_strings('_', ['D', name_suffix])))
torch.save(D.optim.state_dict(),
'%s/%s.pth' % (root, join_strings('_', ['D_optim', name_suffix])))
torch.save(state_dict,
'%s/%s.pth' % (root, join_strings('_', ['state_dict', name_suffix])))
if G_ema is not None:
torch.save(G_ema.state_dict(),
'%s/%s.pth' % (root, join_strings('_', ['G_ema', name_suffix])))
def load_state_dict(model, state_dict, strict=True):
keys_p = list(state_dict.keys())
keys_r = list(model.state_dict().keys())
keys_r_miss = [k for k in keys_r if k not in keys_p]
keys_p_miss = [k for k in keys_p if k not in keys_r]
model.load_state_dict(state_dict, strict=strict)
if len(keys_r_miss) > 0:
print("No param in provided state dict: {}".format(str(keys_r_miss)))
if len(keys_p_miss) > 0:
print("No param in the model: {}".format(str(keys_p_miss)))
# Load a model's weights, optimizer, and the state_dict
def load_weights(G, D, state_dict, weights_root, experiment_name,
name_suffix=None, G_ema=None, strict=True, load_optim=True):
root = '/'.join([weights_root, experiment_name])
if name_suffix:
print('Loading %s weights from %s...' % (name_suffix, root))
else:
print('Loading weights from %s...' % root)
if G is not None:
G.load_state_dict(
torch.load('%s/%s.pth' % (root, join_strings('_', ['G', name_suffix]))),
strict=strict)
if load_optim:
G.optim.load_state_dict(
torch.load('%s/%s.pth' % (root, join_strings('_', ['G_optim', name_suffix]))))
if D is not None:
D.load_state_dict(
torch.load('%s/%s.pth' % (root, join_strings('_', ['D', name_suffix]))),
strict=strict)
if load_optim:
D.optim.load_state_dict(
torch.load('%s/%s.pth' % (root, join_strings('_', ['D_optim', name_suffix]))))
# Load state dict
for item in state_dict:
state_dict[item] = torch.load('%s/%s.pth' % (root, join_strings('_', ['state_dict', name_suffix])))[item]
if G_ema is not None:
G_ema.load_state_dict(
torch.load('%s/%s.pth' % (root, join_strings('_', ['G_ema', name_suffix]))),
strict=strict)
''' MetricsLogger originally stolen from VoxNet source code.
Used for logging inception metrics'''
class MetricsLogger(object):
def __init__(self, fname, reinitialize=False):
self.fname = fname
self.reinitialize = reinitialize
if os.path.exists(self.fname):
if self.reinitialize:
print('{} exists, deleting...'.format(self.fname))
os.remove(self.fname)
def log(self, record=None, **kwargs):
"""
Assumption: no newlines in the input.
"""
if record is None:
record = {}
record.update(kwargs)
record['_stamp'] = time.time()
with open(self.fname, 'a') as f:
f.write(json.dumps(record, ensure_ascii=True) + '\n')
# Logstyle is either:
# '%#.#f' for floating point representation in text
# '%#.#e' for exponent representation in text
# 'npz' for output to npz # NOT YET SUPPORTED
# 'pickle' for output to a python pickle # NOT YET SUPPORTED
# 'mat' for output to a MATLAB .mat file # NOT YET SUPPORTED
class MyLogger(object):
def __init__(self, fname, reinitialize=False, logstyle='%3.3f'):
self.root = fname
if not os.path.exists(self.root):
os.mkdir(self.root)
self.reinitialize = reinitialize
self.metrics = []
self.logstyle = logstyle # One of '%3.3f' or like '%3.3e'
# Delete log if re-starting and log already exists
def reinit(self, item):
if os.path.exists('%s/%s.log' % (self.root, item)):
if self.reinitialize:
        # Only print the removal message once for singular value logs
        if 'sv' in item:
          if not any('sv' in metric for metric in self.metrics):
            print('Deleting singular value logs...')
        else:
          print('{} exists, deleting...'.format('%s/%s.log' % (self.root, item)))
        os.remove('%s/%s.log' % (self.root, item))
# Log in plaintext; this is designed for being read in MATLAB(sorry not sorry)
def log(self, itr, **kwargs):
for arg in kwargs:
if arg not in self.metrics:
if self.reinitialize:
self.reinit(arg)
self.metrics += [arg]
if self.logstyle == 'pickle':
print('Pickle not currently supported...')
# with open('%s/%s.log' % (self.root, arg), 'a') as f:
# pickle.dump(kwargs[arg], f)
elif self.logstyle == 'mat':
print('.mat logstyle not currently supported...')
else:
with open('%s/%s.log' % (self.root, arg), 'a') as f:
f.write('%d: %s\n' % (itr, self.logstyle % kwargs[arg]))
# Write some metadata to the logs directory
def write_metadata(logs_root, experiment_name, config, state_dict):
with open(('%s/%s/metalog.txt' %
(logs_root, experiment_name)), 'w') as writefile:
writefile.write('datetime: %s\n' % str(datetime.datetime.now()))
writefile.write('config: %s\n' % str(config))
writefile.write('state: %s\n' %str(state_dict))
"""
Very basic progress indicator to wrap an iterable in.
Author: Jan Schlüter
Andy's adds: time elapsed in addition to ETA, makes it possible to add
estimated time to 1k iters instead of estimated time to completion.
"""
def progress(items, desc='', total=None, min_delay=0.1, displaytype='s1k'):
"""
Returns a generator over `items`, printing the number and percentage of
items processed and the estimated remaining processing time before yielding
the next item. `total` gives the total number of items (required if `items`
has no length), and `min_delay` gives the minimum time in seconds between
subsequent prints. `desc` gives an optional prefix text (end with a space).
"""
total = total or len(items)
t_start = time.time()
t_last = 0
for n, item in enumerate(items):
t_now = time.time()
if t_now - t_last > min_delay:
print("\r%s%d/%d (%6.2f%%)" % (
desc, n+1, total, n / float(total) * 100), end=" ")
if n > 0:
if displaytype == 's1k': # minutes/seconds for 1000 iters
next_1000 = n + (1000 - n%1000)
t_done = t_now - t_start
t_1k = t_done / n * next_1000
outlist = list(divmod(t_done, 60)) + list(divmod(t_1k - t_done, 60))
print("(TE/ET1k: %d:%02d / %d:%02d)" % tuple(outlist), end=" ")
else:# displaytype == 'eta':
t_done = t_now - t_start
t_total = t_done / n * total
outlist = list(divmod(t_done, 60)) + list(divmod(t_total - t_done, 60))
print("(TE/ETA: %d:%02d / %d:%02d)" % tuple(outlist), end=" ")
sys.stdout.flush()
t_last = t_now
yield item
t_total = time.time() - t_start
print("\r%s%d/%d (100.00%%) (took %d:%02d)" % ((desc, total, total) +
divmod(t_total, 60)))
# Sample function for use with inception metrics
def sample(G, z_, y_, config):
with torch.no_grad():
z_.sample_()
y_.sample_()
if config['parallel']:
G_z = nn.parallel.data_parallel(G, (z_, G.shared(y_)))
else:
G_z = G(z_, G.shared(y_))
return G_z, y_
# Sample function for sample sheets
def sample_sheet(G, classes_per_sheet, num_classes, samples_per_class, parallel,
samples_root, experiment_name, folder_number, z_=None):
# Prepare sample directory
if not os.path.isdir('%s/%s' % (samples_root, experiment_name)):
os.mkdir('%s/%s' % (samples_root, experiment_name))
if not os.path.isdir('%s/%s/%d' % (samples_root, experiment_name, folder_number)):
os.mkdir('%s/%s/%d' % (samples_root, experiment_name, folder_number))
# loop over total number of sheets
for i in range(num_classes // classes_per_sheet):
ims = []
y = torch.arange(i * classes_per_sheet, (i + 1) * classes_per_sheet, device='cuda')
for j in range(samples_per_class):
if (z_ is not None) and hasattr(z_, 'sample_') and classes_per_sheet <= z_.size(0):
z_.sample_()
else:
z_ = torch.randn(classes_per_sheet, G.dim_z, device='cuda')
with torch.no_grad():
if parallel:
o = nn.parallel.data_parallel(G, (z_[:classes_per_sheet], G.shared(y)))
else:
o = G(z_[:classes_per_sheet], G.shared(y))
ims += [o.data.cpu()]
# This line should properly unroll the images
out_ims = torch.stack(ims, 1).view(-1, ims[0].shape[1], ims[0].shape[2],
ims[0].shape[3]).data.float().cpu()
# The path for the samples
image_filename = '%s/%s/%d/samples%d.jpg' % (samples_root, experiment_name,
folder_number, i)
torchvision.utils.save_image(out_ims, image_filename,
nrow=samples_per_class, normalize=True)
# Interp function; expects x0 and x1 to be of shape (shape0, 1, rest_of_shape..)
def interp(x0, x1, num_midpoints):
"""
x0: [bs, 1, d]
x1: [bs, 1, d]
"""
lerp = torch.linspace(0, 1.0, num_midpoints + 2, device='cuda').to(x0.dtype) # [num_midpoints+2]
return ((x0 * (1 - lerp.view(1, -1, 1))) + (x1 * lerp.view(1, -1, 1)))
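# Quick illustrative check of the interp helper above (not part of the original
# script): two batches of latents of shape [bs, 1, d] are blended into
# [bs, num_midpoints + 2, d]; the sizes and the CUDA device are assumptions
# matching interp's hard-coded device.
def _example_interp(bs=4, d=128, num_midpoints=8):
  x0 = torch.randn(bs, 1, d, device='cuda')
  x1 = torch.randn(bs, 1, d, device='cuda')
  return interp(x0, x1, num_midpoints)  # shape: [bs, num_midpoints + 2, d]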
# interp sheet function
# Supports full, class-wise and intra-class interpolation
def interp_sheet(G, num_per_sheet, num_midpoints, num_classes, parallel,
samples_root, experiment_name, folder_number, sheet_number=0,
fix_z=False, fix_y=False, device='cuda'):
# Prepare zs and ys
if fix_z: # If fix Z, only sample 1 z per row
zs = torch.randn(num_per_sheet, 1, G.dim_z, device=device)
zs = zs.repeat(1, num_midpoints + 2, 1).view(-1, G.dim_z)
else:
zs = interp(torch.randn(num_per_sheet, 1, G.dim_z, device=device),
torch.randn(num_per_sheet, 1, G.dim_z, device=device),
num_midpoints).view(-1, G.dim_z)
if fix_y: # If fix y, only sample 1 z per row
ys = sample_1hot(num_per_sheet, num_classes)
ys = G.shared(ys).view(num_per_sheet, 1, -1)
ys = ys.repeat(1, num_midpoints + 2, 1).view(num_per_sheet * (num_midpoints + 2), -1)
else:
ys = interp(G.shared(sample_1hot(num_per_sheet, num_classes)).view(num_per_sheet, 1, -1),
G.shared(sample_1hot(num_per_sheet, num_classes)).view(num_per_sheet, 1, -1),
num_midpoints).view(num_per_sheet * (num_midpoints + 2), -1)
# Run the net--note that we've already passed y through G.shared.
if G.fp16:
zs = zs.half()
with torch.no_grad():
if parallel:
out_ims = nn.parallel.data_parallel(G, (zs, ys)).data.cpu()
else:
out_ims = G(zs, ys).data.cpu()
interp_style = '' + ('Z' if not fix_z else '') + ('Y' if not fix_y else '')
image_filename = '%s/%s/%d/interp%s%d.jpg' % (samples_root, experiment_name,
folder_number, interp_style,
sheet_number)
torchvision.utils.save_image(out_ims, image_filename,
nrow=num_midpoints + 2, normalize=True)
# Convenience debugging function to print out gradnorms and shape from each layer
# May need to rewrite this so we can actually see which parameter is which
def print_grad_norms(net):
gradsums = [[float(torch.norm(param.grad).item()),
float(torch.norm(param).item()), param.shape]
for param in net.parameters()]
order = np.argsort([item[0] for item in gradsums])
print(['%3.3e,%3.3e, %s' % (gradsums[item_index][0],
gradsums[item_index][1],
str(gradsums[item_index][2]))
for item_index in order])
# Get singular values to log. This will use the state dict to find them
# and substitute underscores for dots.
def get_SVs(net, prefix):
d = net.state_dict()
return {('%s_%s' % (prefix, key)).replace('.', '_') :
float(d[key].item())
for key in d if 'sv' in key}
# Name an experiment based on its config
def name_from_config(config):
name = '_'.join([
item for item in [
'Big%s' % config['which_train_fn'],
config['dataset'],
config['model'] if config['model'] != 'BigGAN' else None,
'seed%d' % config['seed'],
'Gch%d' % config['G_ch'],
'Dch%d' % config['D_ch'],
'Gd%d' % config['G_depth'] if config['G_depth'] > 1 else None,
'Dd%d' % config['D_depth'] if config['D_depth'] > 1 else None,
'bs%d' % config['batch_size'],
'Gfp16' if config['G_fp16'] else None,
'Dfp16' if config['D_fp16'] else None,
'nDs%d' % config['num_D_steps'] if config['num_D_steps'] > 1 else None,
'nDa%d' % config['num_D_accumulations'] if config['num_D_accumulations'] > 1 else None,
'nGa%d' % config['num_G_accumulations'] if config['num_G_accumulations'] > 1 else None,
'Glr%2.1e' % config['G_lr'],
'Dlr%2.1e' % config['D_lr'],
'GB%3.3f' % config['G_B1'] if config['G_B1'] !=0.0 else None,
'GBB%3.3f' % config['G_B2'] if config['G_B2'] !=0.999 else None,
'DB%3.3f' % config['D_B1'] if config['D_B1'] !=0.0 else None,
'DBB%3.3f' % config['D_B2'] if config['D_B2'] !=0.999 else None,
'Gnl%s' % config['G_nl'],
'Dnl%s' % config['D_nl'],
'Ginit%s' % config['G_init'],
'Dinit%s' % config['D_init'],
'G%s' % config['G_param'] if config['G_param'] != 'SN' else None,
'D%s' % config['D_param'] if config['D_param'] != 'SN' else None,
'Gattn%s' % config['G_attn'] if config['G_attn'] != '0' else None,
'Dattn%s' % config['D_attn'] if config['D_attn'] != '0' else None,
'Gortho%2.1e' % config['G_ortho'] if config['G_ortho'] > 0.0 else None,
'Dortho%2.1e' % config['D_ortho'] if config['D_ortho'] > 0.0 else None,
config['norm_style'] if config['norm_style'] != 'bn' else None,
'cr' if config['cross_replica'] else None,
'Gshared' if config['G_shared'] else None,
'hier' if config['hier'] else None,
'ema' if config['ema'] else None,
config['name_suffix'] if config['name_suffix'] else None,
]
if item is not None])
# dogball
if config['hashname']:
return hashname(name)
else:
return name
# A simple function to produce a unique experiment name from the animal hashes.
def hashname(name):
h = hash(name)
a = h % len(animal_hash.a)
h = h // len(animal_hash.a)
b = h % len(animal_hash.b)
  h = h // len(animal_hash.b)
c = h % len(animal_hash.c)
return animal_hash.a[a] + animal_hash.b[b] + animal_hash.c[c]
# Get GPU memory, -i is the index
def query_gpu(indices):
  index_str = ','.join(str(i) for i in indices) if hasattr(indices, '__iter__') else str(indices)
  os.system('nvidia-smi -i %s --query-gpu=memory.free --format=csv' % index_str)
# Convenience function to count the number of parameters in a module
def count_parameters(module):
print('Number of parameters: {}'.format(
sum([p.data.nelement() for p in module.parameters()])))
# Convenience function to sample an index, not actually a 1-hot
def sample_1hot(batch_size, num_classes, device='cuda'):
return torch.randint(low=0, high=num_classes, size=(batch_size,),
device=device, dtype=torch.int64, requires_grad=False)
# A highly simplified convenience class for sampling from distributions
# One could also use PyTorch's inbuilt distributions package.
# Note that this class requires initialization to proceed as
# x = Distribution(torch.randn(size))
# x.init_distribution(dist_type, **dist_kwargs)
# x = x.to(device,dtype)
# This is partially based on https://discuss.pytorch.org/t/subclassing-torch-tensor/23754/2
class Distribution(torch.Tensor):
# Init the params of the distribution
def init_distribution(self, dist_type, **kwargs):
self.dist_type = dist_type
self.dist_kwargs = kwargs
if self.dist_type == 'normal':
self.mean, self.var = kwargs['mean'], kwargs['var']
elif self.dist_type == 'categorical':
self.num_categories = kwargs['num_categories']
      # if given the number of category ids and the number of samples per category,
      # the conditional y will be generated in the given manner
if 'num_categories_to_sample' in kwargs and 'per_category_to_sample' in kwargs:
if kwargs['num_categories_to_sample'] is not None and kwargs['per_category_to_sample'] is not None:
self.num_categories_to_sample = kwargs['num_categories_to_sample']
self.per_category_to_sample = kwargs['per_category_to_sample']
if self.num_categories_to_sample <= 0:
self.categories_to_sample = list(range(self.num_categories))
else:
categories = list(range(self.num_categories))
np.random.shuffle(categories)
self.categories_to_sample = categories[:self.num_categories_to_sample]
self.count = 0
self.total_count = len(self.categories_to_sample) * self.per_category_to_sample
self.next = True
def sample_(self):
if self.dist_type == 'normal':
self.normal_(self.mean, self.var)
elif self.dist_type == 'categorical':
if hasattr(self, 'categories_to_sample') and hasattr(self, 'per_category_to_sample'):
batch_size = self.shape[0]
count_cur = self.count + batch_size
cate_idx_pre = self.count // self.per_category_to_sample
cate_idx_cur = count_cur // self.per_category_to_sample
cate_id = self.categories_to_sample[cate_idx_pre:cate_idx_cur+1]
cate_id = torch.tensor(cate_id).unsqueeze(dim=1).repeat(1, self.per_category_to_sample).view(-1)
start_idx = self.count - self.per_category_to_sample * max(0, cate_idx_pre)
end_idx = start_idx + batch_size
if end_idx > cate_id.shape[0]:
cate_id = torch.cat((cate_id, cate_id), dim=0)
self.copy_(cate_id[start_idx:end_idx])
self.count = count_cur
self.next = self.count < self.total_count
else: # generate the category label randomly
self.random_(0, self.num_categories)
# return self.variable
# Silly hack: overwrite the to() method to wrap the new object
# in a distribution as well
def to(self, *args, **kwargs):
new_obj = Distribution(self)
new_obj.init_distribution(self.dist_type, **self.dist_kwargs)
new_obj.data = super().to(*args, **kwargs)
return new_obj
# Convenience function to prepare a z and y vector
def prepare_z_y(G_batch_size, dim_z, nclasses, device='cuda',
fp16=False,z_var=1.0, per_category_to_sample=None,
num_categories_to_sample=None):
z_ = Distribution(torch.randn(G_batch_size, dim_z, requires_grad=False))
z_.init_distribution('normal', mean=0, var=z_var)
z_ = z_.to(device,torch.float16 if fp16 else torch.float32)
if fp16:
z_ = z_.half()
y_ = Distribution(torch.zeros(G_batch_size, requires_grad=False))
y_.init_distribution('categorical',num_categories=nclasses,
per_category_to_sample=per_category_to_sample,
num_categories_to_sample=num_categories_to_sample)
y_ = y_.to(device, torch.int64)
return z_, y_
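# Illustrative sketch (not part of the original script) of the sampling protocol
# the Distribution comments above describe: prepare_z_y returns Distribution
# tensors that are refreshed in place with sample_() before each generator call.
# `G` is a hypothetical generator exposing dim_z and a shared class embedding;
# `n_classes` is a placeholder.
def _example_sample_batch(G, n_classes, batch_size=16):
  z_, y_ = prepare_z_y(batch_size, G.dim_z, n_classes)
  z_.sample_()
  y_.sample_()
  with torch.no_grad():
    return G(z_, G.shared(y_))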
def initiate_standing_stats(net):
for module in net.modules():
if hasattr(module, 'accumulate_standing'):
module.reset_stats()
module.accumulate_standing = True
def accumulate_standing_stats(net, z, y, nclasses, num_accumulations=16):
initiate_standing_stats(net)
net.train()
for i in range(num_accumulations):
with torch.no_grad():
z.normal_()
y.random_(0, nclasses)
x = net(z, net.shared(y)) # No need to parallelize here unless using syncbn
# Set to eval mode
net.eval()
# This version of Adam keeps an fp32 copy of the parameters and
# does all of the parameter updates in fp32, while still doing the
# forwards and backwards passes using fp16 (i.e. fp16 copies of the
# parameters and fp16 activations).
#
# Note that this calls .float().cuda() on the params.
import math
from torch.optim.optimizer import Optimizer
class Adam16(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,weight_decay=0):
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay)
params = list(params)
super(Adam16, self).__init__(params, defaults)
# Safety modification to make sure we floatify our state
def load_state_dict(self, state_dict):
super(Adam16, self).load_state_dict(state_dict)
for group in self.param_groups:
for p in group['params']:
self.state[p]['exp_avg'] = self.state[p]['exp_avg'].float()
self.state[p]['exp_avg_sq'] = self.state[p]['exp_avg_sq'].float()
self.state[p]['fp32_p'] = self.state[p]['fp32_p'].float()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = grad.new().resize_as_(grad).zero_()
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = grad.new().resize_as_(grad).zero_()
# Fp32 copy of the weights
state['fp32_p'] = p.data.float()
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
if group['weight_decay'] != 0:
grad = grad.add(group['weight_decay'], state['fp32_p'])
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
state['fp32_p'].addcdiv_(-step_size, exp_avg, denom)
p.data = state['fp32_p'].half()
return loss
| [
"torch.cat",
"torch.cuda.manual_seed",
"torch.stack",
"torch.randperm",
"torch.eye",
"torch.nn.parallel.data_parallel",
"torch.norm",
"torch.manual_seed",
"torch.randint",
"torch.utils.data.DataLoader",
"torch.tensor",
"torch.zeros",
"torch.linspace",
"torch.nn.ReLU",
"torch.arange",
"torch.no_grad",
"torch.randn"
] | 1.2.0 | liuqk3/BigGAN-PyTorch | 9b4491f5d68f34a1fe55bc0e8171fa3d3ad7bb08 |
1.6 | import logging
from typing import List, Dict, Any, Optional, TYPE_CHECKING
import torch
from allennlp.training.callbacks.callback import TrainerCallback
from allennlp.training.util import get_train_and_validation_metrics
from allennlp.data import TensorDict
if TYPE_CHECKING:
from allennlp.training.trainer import GradientDescentTrainer
logger = logging.getLogger(__name__)
@TrainerCallback.register("console_logger")
class ConsoleLoggerCallback(TrainerCallback):
def __init__(
self,
serialization_dir: str,
should_log_inputs: bool = False,
) -> None:
super().__init__(serialization_dir)
self._should_log_inputs = should_log_inputs
def on_batch(
self,
trainer: "GradientDescentTrainer",
batch_inputs: List[TensorDict],
batch_outputs: List[Dict[str, Any]],
batch_metrics: Dict[str, Any],
epoch: int,
batch_number: int,
is_training: bool,
is_primary: bool = True,
batch_grad_norm: Optional[float] = None,
**kwargs,
) -> None:
if not is_primary:
return None
# We only want to do this for the first batch in the first epoch.
if batch_number == 1 and epoch == 0 and self._should_log_inputs:
logger.info("Batch inputs")
for b, batch in enumerate(batch_inputs):
self._log_fields(batch, log_prefix="batch_input") # type: ignore
def _log_fields(self, fields: Dict, log_prefix: str = ""):
for key, val in fields.items():
key = log_prefix + "/" + key
if isinstance(val, dict):
self._log_fields(val, key)
elif isinstance(val, torch.Tensor):
torch.set_printoptions(threshold=2)
logger.info("%s (Shape: %s)\n%s", key, " x ".join([str(x) for x in val.shape]), val)
torch.set_printoptions(threshold=1000)
            elif isinstance(val, list):
logger.info('Field : "%s" : (Length %d of type "%s")', key, len(val), type(val[0]))
elif isinstance(val, str):
logger.info('Field : "{}" : "{:20.20} ..."'.format(key, val))
else:
logger.info('Field : "%s" : %s', key, val)
def on_epoch(
self,
trainer: "GradientDescentTrainer",
metrics: Dict[str, Any],
epoch: int,
is_primary: bool = True,
**kwargs,
) -> None:
if not is_primary:
return None
train_metrics, val_metrics = get_train_and_validation_metrics(metrics)
metric_names = set(train_metrics.keys())
if val_metrics is not None:
metric_names.update(val_metrics.keys())
val_metrics = val_metrics or {}
dual_message_template = "%s | %8.3f | %8.3f"
no_val_message_template = "%s | %8.3f | %8s"
no_train_message_template = "%s | %8s | %8.3f"
header_template = "%s | %-10s"
name_length = max(len(x) for x in metric_names)
logger.info(header_template, "Training".rjust(name_length + 13), "Validation")
for name in sorted(metric_names):
train_metric = train_metrics.get(name)
val_metric = val_metrics.get(name)
if val_metric is not None and train_metric is not None:
logger.info(
dual_message_template, name.ljust(name_length), train_metric, val_metric
)
elif val_metric is not None:
logger.info(no_train_message_template, name.ljust(name_length), "N/A", val_metric)
elif train_metric is not None:
logger.info(no_val_message_template, name.ljust(name_length), train_metric, "N/A")
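# Minimal illustrative sketch (not part of the original file): constructing the
# callback directly and exercising on_epoch with a hand-built metrics dict. The
# serialization directory and metric names/values are placeholder assumptions;
# in a real run the trainer supplies them.
def _example_console_logger(serialization_dir: str = "/tmp/example_run") -> ConsoleLoggerCallback:
    callback = ConsoleLoggerCallback(serialization_dir, should_log_inputs=False)
    callback.on_epoch(
        trainer=None,  # on_epoch does not use the trainer argument
        metrics={"training_loss": 0.52, "validation_loss": 0.61},
        epoch=0,
    )
    return callback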
| [
"torch.set_printoptions"
] | 1.6.0 | jbrry/allennlp | d906175d953bebcc177567ec0157220c3bd1b9ad |
1.6 | # The MIT License
#
# Copyright (c) 2020 Vincent Liu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import torch
import torch.nn as nn
from modules.loss import GinormousCompositeLoss
from modules.layers import AdaptiveInstanceNorm2d, LayerNorm2d
class ResidualBlock(nn.Module):
''' Implements a residual block with (Adaptive) Instance Normalization '''
def __init__(self, channels, s_dim=None, h_dim=None):
super().__init__()
self.conv1 = nn.Sequential(
nn.ReflectionPad2d(1),
nn.utils.spectral_norm(
nn.Conv2d(channels, channels, kernel_size=3)
),
)
self.conv2 = nn.Sequential(
nn.ReflectionPad2d(1),
nn.utils.spectral_norm(
nn.Conv2d(channels, channels, kernel_size=3)
),
)
self.use_style = s_dim is not None and h_dim is not None
if self.use_style:
self.norm1 = AdaptiveInstanceNorm2d(channels, s_dim, h_dim)
self.norm2 = AdaptiveInstanceNorm2d(channels, s_dim, h_dim)
else:
self.norm1 = nn.InstanceNorm2d(channels)
self.norm2 = nn.InstanceNorm2d(channels)
self.activation = nn.ReLU()
def forward(self, x, s=None):
x_id = x
x = self.conv1(x)
x = self.norm1(x, s) if self.use_style else self.norm1(x)
x = self.activation(x)
x = self.conv2(x)
x = self.norm2(x, s) if self.use_style else self.norm2(x)
return x + x_id
class ContentEncoder(nn.Module):
''' Implements a MUNIT encoder for content '''
def __init__(self, base_channels=64, n_downsample=2, n_res_blocks=4):
super().__init__()
channels = base_channels
# input convolutional layer
layers = [
nn.ReflectionPad2d(3),
nn.utils.spectral_norm(
nn.Conv2d(3, channels, kernel_size=7)
),
nn.InstanceNorm2d(channels),
nn.ReLU(inplace=True),
]
# downsampling layers
for i in range(n_downsample):
layers += [
nn.ReflectionPad2d(1),
nn.utils.spectral_norm(
nn.Conv2d(channels, 2 * channels, kernel_size=4, stride=2)
),
nn.InstanceNorm2d(2 * channels),
nn.ReLU(inplace=True),
]
channels *= 2
# residual blocks with non-adaptive instance normalization
layers += [
ResidualBlock(channels) for _ in range(n_res_blocks)
]
self.layers = nn.Sequential(*layers)
self.out_channels = channels
def forward(self, x):
return self.layers(x)
@property
def channels(self):
return self.out_channels
class StyleEncoder(nn.Module):
''' Implements a MUNIT encoder for style '''
n_deepen_layers = 2
def __init__(self, base_channels=64, n_downsample=4, s_dim=8):
super().__init__()
channels = base_channels
# input convolutional layer
layers = [
nn.ReflectionPad2d(3),
nn.utils.spectral_norm(
nn.Conv2d(3, channels, kernel_size=7, padding=0)
),
nn.ReLU(inplace=True),
]
# downsampling layers
for i in range(self.n_deepen_layers):
layers += [
nn.ReflectionPad2d(1),
nn.utils.spectral_norm(
nn.Conv2d(channels, 2 * channels, kernel_size=4, stride=2)
),
nn.ReLU(inplace=True),
]
channels *= 2
for i in range(n_downsample - self.n_deepen_layers):
layers += [
nn.ReflectionPad2d(1),
nn.utils.spectral_norm(
nn.Conv2d(channels, channels, kernel_size=4, stride=2)
),
nn.ReLU(inplace=True),
]
# apply global pooling and pointwise convolution to style_channels
layers += [
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(channels, s_dim, kernel_size=1),
]
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
class Decoder(nn.Module):
'''
Decoder Class
Values:
in_channels: number of channels from encoder output, a scalar
n_upsample: number of upsampling layers, a scalar
n_res_blocks: number of residual blocks, a scalar
s_dim: the dimension of the style tensor (s), a scalar
h_dim: the hidden dimension of the MLP, a scalar
'''
def __init__(self, in_channels, n_upsample=2, n_res_blocks=4, s_dim=8, h_dim=256):
super().__init__()
channels = in_channels
# residual blocks with adaptive instance norm
self.res_blocks = nn.ModuleList([
            ResidualBlock(channels, s_dim, h_dim) for _ in range(n_res_blocks)
])
# upsampling blocks
layers = []
for i in range(n_upsample):
layers += [
nn.Upsample(scale_factor=2),
nn.ReflectionPad2d(2),
nn.utils.spectral_norm(
nn.Conv2d(channels, channels // 2, kernel_size=5)
),
LayerNorm2d(channels // 2),
]
channels //= 2
layers += [
nn.ReflectionPad2d(3),
nn.utils.spectral_norm(
nn.Conv2d(channels, 3, kernel_size=7)
),
nn.Tanh(),
]
self.layers = nn.Sequential(*layers)
def forward(self, x, s):
for res_block in self.res_blocks:
x = res_block(x, s=s)
x = self.layers(x)
return x
class Generator(nn.Module):
''' Implements a MUNIT generator '''
def __init__(
self,
base_channels: int = 64,
n_c_downsample: int = 2,
n_s_downsample: int = 4,
n_res_blocks: int = 4,
s_dim: int = 8,
h_dim: int = 256,
):
super().__init__()
self.c_enc = ContentEncoder(
base_channels=base_channels, n_downsample=n_c_downsample, n_res_blocks=n_res_blocks,
)
self.s_enc = StyleEncoder(
base_channels=base_channels, n_downsample=n_s_downsample, s_dim=s_dim,
)
self.dec = Decoder(
self.c_enc.channels, n_upsample=n_c_downsample, n_res_blocks=n_res_blocks, s_dim=s_dim, h_dim=h_dim,
)
def encode(self, x):
content = self.c_enc(x)
style = self.s_enc(x)
return (content, style)
def decode(self, content, style):
return self.dec(content, style)
class Discriminator(nn.Module):
''' Implements a MUNIT discriminator '''
def __init__(
self,
base_channels: int = 64,
n_layers: int = 3,
n_discriminators: int = 3,
):
super().__init__()
self.discriminators = nn.ModuleList([
self.patchgan_discriminator(base_channels, n_layers) for _ in range(n_discriminators)
])
self.downsample = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False)
@staticmethod
def patchgan_discriminator(base_channels, n_layers):
'''
Function that constructs and returns one PatchGAN discriminator module.
'''
channels = base_channels
# input convolutional layer
layers = [
nn.ReflectionPad2d(1),
nn.utils.spectral_norm(
nn.Conv2d(3, channels, kernel_size=4, stride=2),
),
nn.LeakyReLU(0.2, inplace=True),
]
# hidden convolutional layers
for _ in range(n_layers):
layers += [
nn.ReflectionPad2d(1),
nn.utils.spectral_norm(
nn.Conv2d(channels, 2 * channels, kernel_size=4, stride=2)
),
nn.LeakyReLU(0.2, inplace=True),
]
channels *= 2
# output projection layer
layers += [
nn.utils.spectral_norm(
nn.Conv2d(channels, 1, kernel_size=1)
),
]
return nn.Sequential(*layers)
def forward(self, x):
outputs = []
for discriminator in self.discriminators:
outputs.append(discriminator(x))
x = self.downsample(x)
return outputs
class MUNIT(nn.Module):
''' Implements the MUNIT model in full '''
def __init__(
self,
gen_channels: int = 64,
n_c_downsample: int = 2,
n_s_downsample: int = 4,
n_res_blocks: int = 4,
s_dim: int = 8,
h_dim: int = 256,
dis_channels: int = 64,
n_layers: int = 3,
n_discriminators: int = 3,
scale_loss_weights_to_one: bool = True,
):
super().__init__()
self.gen_a = Generator(
base_channels=gen_channels, n_c_downsample=n_c_downsample, n_s_downsample=n_s_downsample, n_res_blocks=n_res_blocks, s_dim=s_dim, h_dim=h_dim,
)
self.gen_b = Generator(
base_channels=gen_channels, n_c_downsample=n_c_downsample, n_s_downsample=n_s_downsample, n_res_blocks=n_res_blocks, s_dim=s_dim, h_dim=h_dim,
)
self.dis_a = Discriminator(
base_channels=dis_channels, n_layers=n_layers, n_discriminators=n_discriminators,
)
self.dis_b = Discriminator(
base_channels=dis_channels, n_layers=n_layers, n_discriminators=n_discriminators,
)
self.s_dim = s_dim
self.loss = GinormousCompositeLoss
self.scale_loss_weights_to_one = scale_loss_weights_to_one
def forward(self, x_a: torch.tensor, x_b: torch.tensor):
s_a = torch.randn(x_a.size(0), self.s_dim, 1, 1, device=x_a.device).to(x_a.dtype)
s_b = torch.randn(x_b.size(0), self.s_dim, 1, 1, device=x_b.device).to(x_b.dtype)
# encode real x and compute image reconstruction loss
x_a_loss, c_a, s_a_fake = self.loss.image_recon_loss(x_a, self.gen_a)
x_b_loss, c_b, s_b_fake = self.loss.image_recon_loss(x_b, self.gen_b)
# decode real (c, s) and compute latent reconstruction loss
c_b_loss, s_a_loss, x_ba = self.loss.latent_recon_loss(c_b, s_a, self.gen_a)
c_a_loss, s_b_loss, x_ab = self.loss.latent_recon_loss(c_a, s_b, self.gen_b)
# compute adversarial losses
gen_a_adv_loss = self.loss.adversarial_loss(x_ba, self.dis_a, False)
gen_b_adv_loss = self.loss.adversarial_loss(x_ab, self.dis_b, False)
# sum up losses for gen
gen_loss = (
10 * x_a_loss + c_b_loss + s_a_loss + gen_a_adv_loss + \
10 * x_b_loss + c_a_loss + s_b_loss + gen_b_adv_loss
)
if self.scale_loss_weights_to_one:
gen_loss = gen_loss * 0.1
# sum up losses for dis
dis_loss = (
self.loss.adversarial_loss(x_ba.detach(), self.dis_a, False) + \
self.loss.adversarial_loss(x_a.detach(), self.dis_a, True) + \
self.loss.adversarial_loss(x_ab.detach(), self.dis_b, False) + \
self.loss.adversarial_loss(x_b.detach(), self.dis_b, True)
)
return gen_loss, dis_loss, x_ab, x_ba
def infer(self, x_a: torch.tensor, x_b: torch.tensor, encode_style: bool = True):
self.eval()
if not encode_style:
            s_a = torch.ones(x_a.size(0), self.s_dim, 1, 1, device=x_a.device).to(x_a.dtype)
            s_b = torch.ones(x_b.size(0), self.s_dim, 1, 1, device=x_b.device).to(x_b.dtype)
c_a, _ = self.gen_a.encode(x_a)
c_b, _ = self.gen_b.encode(x_b)
else:
c_a, s_a = self.gen_a.encode(x_a)
c_b, s_b = self.gen_b.encode(x_b)
x_ba = self.gen_a.decode(c_b, s_a)
x_ab = self.gen_b.decode(c_a, s_b)
return x_ba, x_ab
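# Minimal illustrative sketch (not part of the original module): one combined
# generator/discriminator update with the MUNIT wrapper above. The image batches
# x_a / x_b and the two optimizers are hypothetical placeholders.
def _example_munit_step(model, x_a, x_b, gen_opt, dis_opt):
    gen_loss, dis_loss, x_ab, x_ba = model(x_a, x_b)
    gen_opt.zero_grad()
    gen_loss.backward()
    gen_opt.step()
    dis_opt.zero_grad()
    dis_loss.backward()
    dis_opt.step()
    return x_ab, x_ba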
| [
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.Tanh",
"torch.nn.LeakyReLU",
"torch.ones",
"torch.nn.ReLU",
"torch.nn.Upsample",
"torch.nn.Conv2d",
"torch.nn.ReflectionPad2d",
"torch.nn.InstanceNorm2d",
"torch.nn.AdaptiveAvgPool2d"
] | 1.6.0 | vliu15/munit | 5789d96590519d729f89c9501eba7692fa7054ef |
0.1 | # -*- coding: utf-8 -*-
import os
from glob import glob
from os.path import join
from datetime import datetime
import torch
import torchvision
import transformers
import more_itertools
import numpy as np
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torchvision.transforms as T
from tqdm.auto import tqdm
from einops import rearrange
from . import utils
from .model.utils import get_attention_mask, get_t2t_attention_mask
def generate_codebooks(
text,
tokenizer,
model,
top_k, top_p, images_num,
image_prompts=None,
temperature=1.0, bs=8,
seed=None, use_cache=True,
):
# TODO docstring
if seed is not None:
utils.seed_everything(seed)
else:
seed = int((datetime.utcnow().timestamp() * 10 ** 6) % (2 ** 32 - 1))
utils.seed_everything(seed)
vocab_size = model.get_param('vocab_size')
l_text_seq_length = model.get_param('l_text_seq_length')
r_text_seq_length = model.get_param('r_text_seq_length')
image_seq_length = model.get_param('image_seq_length')
total_seq_length = l_text_seq_length + image_seq_length + r_text_seq_length
device = model.get_param('device')
text = text.lower().strip()
encoded = tokenizer.encode_text(text, text_seq_length=r_text_seq_length)
codebooks = []
for chunk in more_itertools.chunked(range(images_num), bs):
chunk_bs = len(chunk)
with torch.no_grad():
attention_mask = torch.tril(torch.ones((chunk_bs, 1, total_seq_length, total_seq_length), device=device))
out = encoded.unsqueeze(0).repeat(chunk_bs, 1).to(device)
has_cache = False
if image_prompts is not None:
prompts_idx, prompts = image_prompts.image_prompts_idx, image_prompts.image_prompts
prompts = prompts.repeat(chunk_bs, 1)
for idx in tqdm(range(l_text_seq_length, l_text_seq_length + image_seq_length)):
idx -= l_text_seq_length
if image_prompts is not None and idx in prompts_idx:
out = torch.cat((out, prompts[:, idx].unsqueeze(1)), dim=-1)
else:
logits, has_cache = model(out, attention_mask,
has_cache=has_cache, use_cache=use_cache, return_loss=False)
logits = logits[:, -1, vocab_size:]
logits /= temperature
filtered_logits = transformers.top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
probs = torch.nn.functional.softmax(filtered_logits, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
codebooks.append(out[:, -image_seq_length:])
return torch.cat(codebooks)
def generate_captions(
pil_img,
tokenizer,
model,
vae,
template='',
top_k=32, top_p=0.6, captions_num=128,
temperature=1.0, bs=64,
seed=None, use_cache=True,
):
if seed is None:
seed = int((datetime.utcnow().timestamp() * 10 ** 6) % (2 ** 32 - 1))
utils.seed_everything(seed)
vocab_size = model.get_param('vocab_size')
image_tokens_per_dim = model.get_param('image_tokens_per_dim')
l_text_seq_length = model.get_param('l_text_seq_length')
r_text_seq_length = model.get_param('r_text_seq_length')
image_seq_length = model.get_param('image_seq_length')
device = model.get_param('device')
image_transform = T.Compose([
T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
T.RandomResizedCrop(image_tokens_per_dim * 8,
scale=(1., 1.),
ratio=(1., 1.)),
T.ToTensor()
])
img = image_transform(pil_img)
template = template.lower().strip()
template_encoded = tokenizer.encode_text(template, text_seq_length=r_text_seq_length)
template_size = (template_encoded != 0).sum() - 1 # eos
template_encoded = template_encoded[:template_size]
generated_tokens = []
for chunk in more_itertools.chunked(range(captions_num), bs):
chunk_bs = len(chunk)
with torch.no_grad():
masks = torch.ones(chunk_bs, r_text_seq_length, dtype=torch.int32)
attention_mask = get_attention_mask(masks, chunk_bs, l_text_seq_length, image_tokens_per_dim,
r_text_seq_length, device)
images = img.unsqueeze(0).repeat((chunk_bs, 1, 1, 1)).to(device)
image_input_ids = vae.get_codebook_indices(images)
out = torch.cat((
torch.zeros((chunk_bs, l_text_seq_length), dtype=torch.int64).to(device),
image_input_ids,
template_encoded.repeat(chunk_bs, 1).to(device),
), dim=1)
has_cache = False
for _ in tqdm(range(
l_text_seq_length + image_seq_length + template_size,
l_text_seq_length + image_seq_length + r_text_seq_length
)):
logits, has_cache = model(out, attention_mask,
has_cache=has_cache, use_cache=use_cache, return_loss=False)
logits = logits[:, -1, :vocab_size]
logits /= temperature
filtered_logits = transformers.top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
probs = torch.nn.functional.softmax(filtered_logits, dim=-1)
sample = torch.multinomial(probs, 1)
indexes = torch.where(sample >= vocab_size - l_text_seq_length)
sample[indexes] = 3
out = torch.cat((out, sample), dim=-1)
generated_tokens.append(out[:, -r_text_seq_length:])
generated_tokens = torch.cat(generated_tokens)
texts = set()
for tokens in generated_tokens:
end = torch.where(tokens == 3)[0].shape[0] or tokens.shape[0]
text = tokenizer.decode_text(tokens[:end]).strip()
if text:
texts.add(text)
return list(texts)
def show(pil_images, nrow=4, size=14, save_dir=None, show=True):
"""
:param pil_images: list of images in PIL
:param nrow: number of rows
:param size: size of the images
:param save_dir: dir for separately saving of images, example: save_dir='./pics'
"""
if save_dir is not None:
os.makedirs(save_dir, exist_ok=True)
count = len(glob(join(save_dir, 'img_*.png')))
for i, pil_image in enumerate(pil_images):
pil_image.save(join(save_dir, f'img_{count+i}.png'))
pil_images = [pil_image.convert('RGB') for pil_image in pil_images]
imgs = torchvision.utils.make_grid(utils.pil_list_to_torch_tensors(pil_images), nrow=nrow)
if not isinstance(imgs, list):
imgs = [imgs.cpu()]
fix, axs = plt.subplots(ncols=len(imgs), squeeze=False, figsize=(size, size))
for i, img in enumerate(imgs):
img = img.detach()
img = torchvision.transforms.functional.to_pil_image(img)
if save_dir is not None:
count = len(glob(join(save_dir, 'group_*.png')))
img.save(join(save_dir, f'group_{count+i}.png'))
if show:
axs[0, i].imshow(np.asarray(img))
axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
if show:
fix.show()
plt.show()
def self_reranking_by_text(
text,
codebooks,
tokenizer,
model,
bs=64,
):
vocab_size = model.get_param('vocab_size')
l_text_seq_length = model.get_param('l_text_seq_length')
r_text_seq_length = model.get_param('r_text_seq_length')
image_seq_length = model.get_param('image_seq_length')
image_tokens_per_dim = model.get_param('image_tokens_per_dim')
device = model.get_param('device')
text = text.lower().strip()
encoded = tokenizer.encode_text(text, text_seq_length=r_text_seq_length)
mask = torch.zeros(r_text_seq_length, dtype=torch.int64)
mask[encoded != 0] = 1
ppl_text, ppl_image = [], []
for chunk in more_itertools.chunked(codebooks, bs):
chunk_bs = len(chunk)
with torch.no_grad():
attention_mask = get_attention_mask(
mask.unsqueeze(0).repeat(chunk_bs, 1).to(device),
chunk_bs, l_text_seq_length, image_tokens_per_dim, r_text_seq_length, device
)
input_ids = torch.cat((
torch.zeros((chunk_bs, l_text_seq_length), dtype=torch.int64).to(device),
torch.stack(chunk),
encoded.unsqueeze(0).repeat(chunk_bs, 1).to(device),
), dim=1)
logits, _ = model(input_ids, attention_mask, has_cache=False, use_cache=False, return_loss=False)
logits = rearrange(logits, 'b n c -> b c n')
image_logits = logits[:, vocab_size:,
l_text_seq_length:l_text_seq_length + image_seq_length - 1].contiguous().float()
r_text_logits = logits[:, :vocab_size, -r_text_seq_length:-1].contiguous().float()
input_ids = input_ids.contiguous().long()
ppl_image.append(
ce_to_ppl(F.cross_entropy(
image_logits,
input_ids[:, l_text_seq_length + 1:l_text_seq_length + image_seq_length],
reduction='none',
))
)
ppl_text.append(
ce_to_ppl(F.cross_entropy(
r_text_logits,
input_ids[:, -(r_text_seq_length - 1):],
ignore_index=0,
reduction='none',
))
)
return torch.cat(ppl_text), torch.cat(ppl_image)
def self_reranking_by_image(
texts,
pil_img,
tokenizer,
model,
vae,
bs=64,
seed=42,
):
if seed is not None:
utils.seed_everything(seed)
vocab_size = model.get_param('vocab_size')
l_text_seq_length = model.get_param('l_text_seq_length')
r_text_seq_length = model.get_param('r_text_seq_length')
image_seq_length = model.get_param('image_seq_length')
image_tokens_per_dim = model.get_param('image_tokens_per_dim')
device = model.get_param('device')
image_transform = T.Compose([
T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
T.RandomResizedCrop(image_tokens_per_dim * 8,
scale=(1., 1.),
ratio=(1., 1.)),
T.ToTensor()
])
img = image_transform(pil_img)
ppl_text, ppl_image = [], []
for chunk in more_itertools.chunked(texts, bs):
chunk_bs = len(chunk)
with torch.no_grad():
chunk_encoded, masks = [], []
for text in chunk:
text = text.lower().strip()
encoded = tokenizer.encode_text(text, text_seq_length=r_text_seq_length)
mask = torch.zeros(r_text_seq_length, dtype=torch.int64)
mask[encoded != 0] = 1
chunk_encoded.append(encoded)
masks.append(mask)
chunk_encoded = torch.stack(chunk_encoded)
masks = torch.stack(masks)
attention_mask = get_attention_mask(
masks.to(device),
chunk_bs, l_text_seq_length, image_tokens_per_dim, r_text_seq_length, device
)
images = img.unsqueeze(0).repeat((chunk_bs, 1, 1, 1)).to(device)
image_input_ids = vae.get_codebook_indices(images)
input_ids = torch.cat((
chunk_encoded.to(device),
image_input_ids,
chunk_encoded.to(device),
), dim=1)
logits, _ = model(input_ids, attention_mask, has_cache=False, use_cache=False, return_loss=False)
logits = rearrange(logits, 'b n c -> b c n')
image_logits = logits[:, vocab_size:,
l_text_seq_length:l_text_seq_length + image_seq_length - 1].contiguous().float()
l_text_logits = logits[:, :vocab_size, :l_text_seq_length - 1].contiguous().float()
input_ids = input_ids.contiguous().long()
ppl_image.append(
ce_to_ppl(F.cross_entropy(
image_logits,
input_ids[:, l_text_seq_length + 1:l_text_seq_length + image_seq_length],
reduction='none',
))
)
ppl_text.append(
ce_to_ppl(F.cross_entropy(
l_text_logits,
input_ids[:, 1:l_text_seq_length],
ignore_index=0,
reduction='none',
))
)
ppl_text = torch.cat(ppl_text)
ppl_image = torch.cat(ppl_image)
return ppl_text, ppl_image
def zs_clf(pil_img, classes, model, tokenizer, vae, bs=8, template=None):
"""
classes - list of strings
template - prefix template
"""
template = template or '{}'
vocab_size = model.get_param('vocab_size')
image_tokens_per_dim = model.get_param('image_tokens_per_dim')
l_text_seq_length = model.get_param('l_text_seq_length')
r_text_seq_length = model.get_param('r_text_seq_length')
device = model.get_param('device')
image_transform = T.Compose([
T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
T.RandomResizedCrop(image_tokens_per_dim * 8,
scale=(1., 1.),
ratio=(1., 1.)),
T.ToTensor()
])
encoded, masks = [], []
for _class in classes:
text = template.format(_class).lower().strip()
class_encoded = tokenizer.encode_text(text, text_seq_length=r_text_seq_length)
encoded.append(class_encoded)
mask = torch.zeros(r_text_seq_length, dtype=torch.int64)
mask[class_encoded != 0] = 1
masks.append(mask)
encoded = torch.stack(encoded, 0)
masks = torch.stack(masks, 0)
with torch.no_grad():
img = image_transform(pil_img)
images = img.unsqueeze(0).to(device)
image_input_ids = vae.get_codebook_indices(images)
ppl_text, ppl_image = [], [] # noqa
for indexes in more_itertools.chunked(range(len(classes)), bs):
chunk_encoded = encoded[indexes]
chunk_masks = masks[indexes]
chunk_bs = chunk_encoded.shape[0]
attention_mask = get_attention_mask(chunk_masks, chunk_bs, l_text_seq_length,
image_tokens_per_dim, r_text_seq_length, device)
input_ids = torch.cat((
torch.zeros(l_text_seq_length, dtype=torch.int64).repeat(chunk_bs, 1).to(device),
image_input_ids.repeat(chunk_bs, 1),
chunk_encoded.to(device),
), dim=1)
logits, _ = model(input_ids, attention_mask, has_cache=False, use_cache=False, return_loss=False)
logits = rearrange(logits, 'b n c -> b c n')
r_text_logits = logits[:, :vocab_size, -r_text_seq_length:-1].contiguous()
chunk_ppl_text = ce_to_ppl(F.cross_entropy(
r_text_logits[:, :, :],
input_ids[:, -(r_text_seq_length - 1):],
ignore_index=0,
reduction='none',
))
ppl_text.append(chunk_ppl_text)
ppl_text = torch.cat(ppl_text)
ppl_text = ppl_text / ppl_text.norm(dim=0, keepdim=True)
scores = ppl_text.softmax(0)
pred = scores.argmin().item()
return {
'label': pred,
'class': classes[pred],
'scores': scores.cpu().numpy(),
}
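# Illustrative call of the zero-shot classifier above (not part of the original
# API): `model`, `tokenizer` and `vae` are assumed to be already-loaded RuDOLPH
# components, and the class names / template are arbitrary examples.
def example_zs_clf(pil_img, model, tokenizer, vae):
    return zs_clf(pil_img, ['cat', 'dog', 'car'], model, tokenizer, vae,
                  bs=8, template='a photo of a {}')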
def generate_texts(
tokenizer,
model,
template='',
top_k=32, top_p=0.8, texts_num=128,
temperature=1.0, bs=64,
seed=None, use_cache=True,
):
if seed is None:
seed = int((datetime.utcnow().timestamp() * 10 ** 6) % (2 ** 32 - 1))
utils.seed_everything(seed)
vocab_size = model.get_param('vocab_size')
image_tokens_per_dim = model.get_param('image_tokens_per_dim')
l_text_seq_length = model.get_param('l_text_seq_length')
r_text_seq_length = model.get_param('r_text_seq_length')
device = model.get_param('device')
template = template.lower().strip()
template_encoded = tokenizer.encode_text(template, text_seq_length=l_text_seq_length)
template_size = (template_encoded != 0).sum() - 1 # eos
template_encoded = template_encoded[:template_size]
generated_tokens = []
for chunk in more_itertools.chunked(range(texts_num), bs):
chunk_bs = len(chunk)
with torch.no_grad():
attention_mask = get_t2t_attention_mask(chunk_bs, l_text_seq_length, image_tokens_per_dim,
r_text_seq_length, device)
out = template_encoded.repeat(chunk_bs, 1).to(device)
has_cache = False
for _ in tqdm(range(template_size, l_text_seq_length)):
logits, has_cache = model(out, attention_mask,
has_cache=has_cache, use_cache=use_cache, return_loss=False)
logits = logits[:, -1, :vocab_size]
logits /= temperature
filtered_logits = transformers.top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
probs = torch.nn.functional.softmax(filtered_logits, dim=-1)
sample = torch.multinomial(probs, 1)
indexes = torch.where(sample > vocab_size - l_text_seq_length)
sample[indexes] = 3
out = torch.cat((out, sample), dim=-1)
generated_tokens.append(out[:, :l_text_seq_length])
generated_tokens = torch.cat(generated_tokens)
texts = set()
for tokens in generated_tokens:
end = torch.where(tokens == 3)[0].shape[0] or tokens.shape[0]
text = tokenizer.decode_text(tokens[:end]).strip()
if text:
texts.add(text)
texts = list(texts)
ppl_text = []
for chunk in more_itertools.chunked(texts, bs):
chunk_bs = len(chunk)
with torch.no_grad():
chunk_encoded = []
for text in chunk:
text = text.lower().strip()
encoded = tokenizer.encode_text(text, text_seq_length=l_text_seq_length)
chunk_encoded.append(encoded)
chunk_encoded = torch.stack(chunk_encoded)
attention_mask = get_t2t_attention_mask(
chunk_bs, l_text_seq_length, image_tokens_per_dim, r_text_seq_length, device
)
input_ids = chunk_encoded.to(device)
logits, _ = model(input_ids, attention_mask, has_cache=False, use_cache=False, return_loss=False)
logits = rearrange(logits, 'b n c -> b c n')
l_text_logits = logits[:, :vocab_size, :l_text_seq_length - 1].contiguous().float()
input_ids = input_ids.contiguous().long()
ppl_text.append(
ce_to_ppl(F.cross_entropy(
l_text_logits,
input_ids[:, 1:l_text_seq_length],
ignore_index=0,
reduction='none',
))
)
ppl_text = torch.cat(ppl_text)
result = []
for idx in ppl_text.argsort():
idx = idx.item()
result.append({
'text': texts[idx],
'ppl': round(ppl_text[idx].item(), 2),
})
return result
def ce_to_ppl(ce):
indexes = torch.where(ce)
ce[indexes] = torch.exp(ce[indexes])
ppl = ce.sum(1) / torch.unique(indexes[0], return_counts=True)[1]
return ppl
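# Tiny illustrative check of ce_to_ppl (not part of the original API): for a
# batch of per-token cross-entropy values where zeros mark padded positions,
# each row's result is the mean of exp(ce) over its non-zero entries. Note the
# input tensor is modified in place.
def example_ce_to_ppl():
    ce = torch.tensor([[0.5, 0.25, 0.0],
                       [1.0, 0.0, 0.0]])
    return ce_to_ppl(ce)  # row 0: (e**0.5 + e**0.25) / 2, row 1: e**1.0 / 1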
| [
"torch.zeros",
"torch.cat",
"torch.stack",
"torch.unique",
"torch.no_grad",
"torch.ones",
"torch.multinomial",
"torch.nn.functional.cross_entropy",
"torch.nn.functional.softmax",
"torch.exp",
"torch.where"
] | 0.1.3 | WildGenie/ru-dolph | c80a320a60dcb60ccb66b86c3421e16e33235d97 |
1.7 | import os
import json
import torch
import onir
from onir import util, spec, predictors, datasets
from onir.interfaces import trec, plaintext
@predictors.register('reranker')
class Reranker(predictors.BasePredictor):
name = None
@staticmethod
def default_config():
return {
'batch_size': 64,
'gpu': True,
'gpu_determ': True,
'preload': False,
'run_threshold': 0,
'measures': 'map,ndcg,p@20,ndcg@20,mrr',
'source': 'run'
}
def __init__(self, config, ranker, trainer, dataset, vocab, logger, random):
self.config = config
self.ranker = ranker
self.trainer = trainer
self.dataset = dataset
self.logger = logger
self.vocab = vocab
self.random = random
self.input_spec = ranker.input_spec()
def _iter_batches(self, device):
fields = set(self.input_spec['fields']) | {'query_id', 'doc_id'}
it = datasets.record_iter(self.dataset,
fields=fields,
source=self.config['source'],
run_threshold=self.config['run_threshold'],
minrel=None,
shuf=False,
random=self.random,
inf=False)
for batch_items in util.chunked(it, self.config['batch_size']):
batch = {}
for record in batch_items:
for k, seq in record.items():
batch.setdefault(k, []).append(seq)
batch = spec.apply_spec_batch(batch, self.input_spec, device)
# ship 'em
yield batch
def _preload_batches(self, device):
with self.logger.duration('loading evaluation data'):
batches = list(self.logger.pbar(self._iter_batches(device), desc='preloading eval data (batches)'))
while True:
yield batches
def _reload_batches(self, device):
while True:
it = self._iter_batches(device)
yield it
def pred_ctxt(self):
device = util.device(self.config, self.logger)
if self.config['preload']:
datasource = self._preload_batches(device)
else:
datasource = self._reload_batches(device)
return PredictorContext(self, datasource, device)
def iter_scores(self, ranker, datasource, device):
if ranker.name == 'trivial' and not ranker.config['neg'] and not ranker.config['qsum'] and not ranker.config['max']:
for qid, values in self.dataset.run().items():
for did, score in values.items():
yield qid, did, score
return
if ranker.name == 'trivial' and not ranker.config['neg'] and not ranker.config['qsum'] and ranker.config['max']:
qrels = self.dataset.qrels()
for qid, values in self.dataset.run().items():
q_qrels = qrels.get(qid, {})
for did in values:
yield qid, did, q_qrels.get(did, -1)
return
with torch.no_grad():
ranker.eval()
ds = next(datasource, None)
total = None
if isinstance(ds, list):
total = sum(len(d['query_id']) for d in ds)
elif self.config['source'] == 'run':
if self.config['run_threshold'] > 0:
total = sum(min(len(v), self.config['run_threshold']) for v in self.dataset.run().values())
else:
total = sum(len(v) for v in self.dataset.run().values())
elif self.config['source'] == 'qrels':
total = sum(len(v) for v in self.dataset.qrels().values())
with self.logger.pbar_raw(total=total, desc='pred', quiet=True) as pbar:
for batch in util.background(ds):
batch = {k: (v.to(device) if torch.is_tensor(v) else v) for k, v in batch.items()}
rel_scores = self.ranker(**batch).cpu()
if len(rel_scores.shape) == 2:
rel_scores = rel_scores[:, 0]
triples = list(zip(batch['query_id'], batch['doc_id'], rel_scores))
for qid, did, score in triples:
yield qid, did, score.item()
pbar.update(len(batch['query_id']))
def rerank_dict(self, ranker, device):
datasource = self._reload_batches(device)
result = {}
for qid, did, score in self.iter_scores(ranker, datasource, device):
result.setdefault(qid, {})[did] = score
return result
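# (Added note, not in the original source) rerank_dict streams the (query_id, doc_id, score)
# triples from iter_scores into the nested {qid: {did: score}} run-dict layout used elsewhere in
# onir (e.g. by trec.write_run_dict). A rough usage sketch, assuming a configured predictor and
# ranker instance:
#   run = predictor.rerank_dict(ranker, util.device(predictor.config, predictor.logger))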
class PredictorContext:
def __init__(self, pred, datasource, device):
self.pred = pred
self.datasource = datasource
self.device = device
def __call__(self, ctxt):
cached = True
epoch = ctxt['epoch']
base_path = os.path.join(ctxt['base_path'], self.pred.dataset.path_segment())
if self.pred.config['source'] == 'run' and self.pred.config['run_threshold'] > 0:
base_path = '{p}_runthreshold-{run_threshold}'.format(p=base_path, **self.pred.config)
os.makedirs(os.path.join(base_path, 'runs'), exist_ok=True)
with open(os.path.join(base_path, 'config.json'), 'wt') as f:
json.dump(self.pred.dataset.config, f)
run_path = os.path.join(base_path, 'runs', f'{epoch}.run')
if os.path.exists(run_path):
run = trec.read_run_dict(run_path)
else:
if self.pred.config['source'] == 'run' and self.pred.config['run_threshold'] > 0:
official_run = self.pred.dataset.run('dict')
else:
official_run = {}
run = {}
ranker = ctxt['ranker']().to(self.device)
this_qid = None
these_docs = {}
with util.finialized_file(run_path, 'wt') as f:
#print(ranker, self.datasource)
for qid, did, score in self.pred.iter_scores(ranker, self.datasource, self.device):
if qid != this_qid:
if this_qid is not None:
these_docs = self._apply_threshold(these_docs, official_run.get(this_qid, {}))
trec.write_run_dict(f, {this_qid: these_docs})
this_qid = qid
these_docs = {}
these_docs[did] = score
if this_qid is not None:
these_docs = self._apply_threshold(these_docs, official_run.get(this_qid, {}))
trec.write_run_dict(f, {this_qid: these_docs})
cached = False
result = {
'epoch': epoch,
'run': run,
'run_path': run_path,
'base_path': base_path,
'cached': cached
}
result['metrics'] = {m: None for m in self.pred.config['measures'].split(',') if m}
result['metrics_by_query'] = {m: None for m in result['metrics']}
#print(result['metrics'])
missing_metrics = self.load_metrics(result)
if missing_metrics:
#print("MISSING",missing_metrics)
measures = set(missing_metrics)
result['cached'] = False
qrels = self.pred.dataset.qrels()
calculated_metrics = onir.metrics.calc(qrels, run_path, measures)
result['metrics_by_query'].update(calculated_metrics)
result['metrics'].update(onir.metrics.mean(calculated_metrics))
self.write_missing_metrics(result, missing_metrics)
try:
if ctxt['ranker']().config.get('add_runscore'):
result['metrics']['runscore_alpha'] = torch.sigmoid(ctxt['ranker']().runscore_alpha).item()
rs_alpha_f = os.path.join(ctxt['base_path'], 'runscore_alpha.txt')
with open(rs_alpha_f, 'at') as f:
                    plaintext.write_tsv(f, [(str(epoch), str(result['metrics']['runscore_alpha']))])
except FileNotFoundError:
pass # model may no longer exist, ignore
return result
def load_metrics(self, ctxt):
missing = set()
epoch = ctxt['epoch']
for metric in list(ctxt['metrics']):
path_agg = os.path.join(ctxt['base_path'], metric, 'agg.txt')
path_epoch = os.path.join(ctxt['base_path'], metric, f'{epoch}.txt')
if os.path.exists(path_agg) and os.path.exists(path_epoch):
ctxt['metrics'][metric] = [float(v) for k, v in plaintext.read_tsv(path_agg) if int(k) == epoch][0]
ctxt['metrics_by_query'][metric] = {k: float(v) for k, v in plaintext.read_tsv(path_epoch)}
else:
#print(os.path.exists(path_agg), path_agg)
#print(os.path.exists(path_epoch), path_epoch)
missing.add(metric)
return missing
def write_missing_metrics(self, ctxt, missing_metrics):
epoch = ctxt['epoch']
for metric in missing_metrics:
os.makedirs(os.path.join(ctxt['base_path'], metric), exist_ok=True)
path_agg = os.path.join(ctxt['base_path'], metric, 'agg.txt')
path_epoch = os.path.join(ctxt['base_path'], metric, f'{epoch}.txt')
with open(path_agg, 'at') as f:
plaintext.write_tsv(f, [(str(epoch), str(ctxt['metrics'][metric]))])
plaintext.write_tsv(path_epoch, ctxt['metrics_by_query'][metric].items())
def _apply_threshold(self, these_docs, original_scores):
min_score = min(these_docs.values())
missing_docs = original_scores.keys() - these_docs.keys()
for i, did in enumerate(sorted(missing_docs, key=lambda did: original_scores[did], reverse=True)):
these_docs[did] = min_score - i - 1
return these_docs
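# (Added note, not in the original source) _apply_threshold backfills documents that appear in the
# official run but were not re-scored (they fell below run_threshold): they are appended below the
# minimum reranked score, in their original ranking order, so the written run still covers every
# document for the query.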
| [
"torch.is_tensor",
"torch.no_grad"
] | 1.7.1 | tgeral68/OpenNIR | 225b26185bd67fdc00f24de3ef70d35768e22243 |
1.6 | import numpy as np
import torch
import pytest
import copy
from unittest.mock import Mock
from d3rlpy.algos.torch.utility import soft_sync, hard_sync
from d3rlpy.algos.torch.utility import set_eval_mode, set_train_mode
from d3rlpy.algos.torch.utility import freeze, unfreeze
from d3rlpy.algos.torch.utility import torch_api, train_api, eval_api
from d3rlpy.algos.torch.utility import map_location
from d3rlpy.algos.torch.utility import get_state_dict, set_state_dict
from d3rlpy.algos.torch.utility import compute_augmentation_mean
@pytest.mark.parametrize('tau', [0.05])
@pytest.mark.parametrize('input_size', [32])
@pytest.mark.parametrize('output_size', [32])
def test_soft_sync(tau, input_size, output_size):
module = torch.nn.Linear(input_size, output_size)
targ_module = torch.nn.Linear(input_size, output_size)
original = copy.deepcopy(targ_module)
soft_sync(targ_module, module, tau)
module_params = module.parameters()
targ_params = targ_module.parameters()
original_params = original.parameters()
for p, targ_p, orig_p in zip(module_params, targ_params, original_params):
assert torch.allclose(p * tau + orig_p * (1.0 - tau), targ_p)
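# (Added note, not in the original source) soft_sync is the Polyak-style target update
# targ_p <- tau * p + (1 - tau) * targ_p, which is exactly what the assertion above checks
# against the pre-update copy of the target network.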
@pytest.mark.parametrize('input_size', [32])
@pytest.mark.parametrize('output_size', [32])
def test_hard_sync(input_size, output_size):
module = torch.nn.Linear(input_size, output_size)
targ_module = torch.nn.Linear(input_size, output_size)
hard_sync(targ_module, module)
for p, targ_p in zip(module.parameters(), targ_module.parameters()):
assert torch.allclose(targ_p, p)
def test_map_location_with_cpu():
assert map_location('cpu:0') == 'cpu'
def test_map_location_with_cuda():
fn = map_location('cuda:0')
dummy = Mock()
dummy.cuda = Mock()
fn(dummy, '')
dummy.cuda.assert_called_with('cuda:0')
class DummyImpl:
def __init__(self):
self.fc1 = torch.nn.Linear(100, 100)
self.fc2 = torch.nn.Linear(100, 100)
self.optim = torch.optim.Adam(self.fc1.parameters())
self.device = 'cpu:0'
@torch_api()
def torch_api_func(self, x):
assert isinstance(x, torch.Tensor)
@torch_api(scaler_targets=['x'])
def torch_api_func_with_scaler(self, x, y, ref_x, ref_y):
assert isinstance(x, torch.Tensor)
assert torch.allclose(x, torch.tensor(ref_x, dtype=torch.float32))
assert torch.allclose(y, torch.tensor(ref_y, dtype=torch.float32))
@train_api
def train_api_func(self):
assert self.fc1.training
assert self.fc2.training
@eval_api
def eval_api_func(self):
assert not self.fc1.training
assert not self.fc2.training
def check_if_same_dict(a, b):
for k, v in a.items():
if isinstance(v, torch.Tensor):
assert (b[k] == v).all()
else:
assert b[k] == v
def test_get_state_dict():
impl = DummyImpl()
state_dict = get_state_dict(impl)
check_if_same_dict(state_dict['fc1'], impl.fc1.state_dict())
check_if_same_dict(state_dict['fc2'], impl.fc2.state_dict())
check_if_same_dict(state_dict['optim'], impl.optim.state_dict())
def test_set_state_dict():
impl1 = DummyImpl()
impl2 = DummyImpl()
impl1.optim.step()
assert not (impl1.fc1.weight == impl2.fc1.weight).all()
assert not (impl1.fc1.bias == impl2.fc1.bias).all()
assert not (impl1.fc2.weight == impl2.fc2.weight).all()
assert not (impl1.fc2.bias == impl2.fc2.bias).all()
chkpt = get_state_dict(impl1)
set_state_dict(impl2, chkpt)
assert (impl1.fc1.weight == impl2.fc1.weight).all()
assert (impl1.fc1.bias == impl2.fc1.bias).all()
assert (impl1.fc2.weight == impl2.fc2.weight).all()
assert (impl1.fc2.bias == impl2.fc2.bias).all()
def test_eval_mode():
impl = DummyImpl()
impl.fc1.train()
impl.fc2.train()
set_eval_mode(impl)
assert not impl.fc1.training
assert not impl.fc2.training
def test_train_mode():
impl = DummyImpl()
impl.fc1.eval()
impl.fc2.eval()
set_train_mode(impl)
assert impl.fc1.training
assert impl.fc2.training
@pytest.mark.skip(reason='no way to test this')
def test_to_cuda():
pass
@pytest.mark.skip(reason='no way to test this')
def test_to_cpu():
pass
def test_freeze():
impl = DummyImpl()
freeze(impl)
for p in impl.fc1.parameters():
assert not p.requires_grad
for p in impl.fc2.parameters():
assert not p.requires_grad
def test_unfreeze():
impl = DummyImpl()
freeze(impl)
unfreeze(impl)
for p in impl.fc1.parameters():
assert p.requires_grad
for p in impl.fc2.parameters():
assert p.requires_grad
def test_compute_augmentation_mean():
class DummyAugmentation:
def __init__(self):
self.n = 1
def transform(self, x):
y = x + self.n
self.n += 1
return y
aug = DummyAugmentation()
def func(x):
return x
x = np.random.random((100, 100))
y = compute_augmentation_mean(aug, 2, func, {'x': x}, 'x')
assert np.allclose(y, x + 1.5)
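# (Added note, not in the original source) the dummy augmentation shifts its input by 1 on the
# first call and by 2 on the second, so averaging the identity func over n_augmentations=2 gives
# ((x + 1) + (x + 2)) / 2 = x + 1.5, which the assertion above verifies.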
def test_torch_api():
impl = DummyImpl()
impl.scaler = None
x = np.random.random((100, 100))
impl.torch_api_func(x)
def test_torch_api_with_scaler():
impl = DummyImpl()
class DummyScaler:
def transform(self, x):
return x + 0.1
scaler = DummyScaler()
impl.scaler = scaler
x = np.random.random((100, 100))
y = np.random.random((100, 100))
impl.torch_api_func_with_scaler(x, y, ref_x=x + 0.1, ref_y=y)
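# (Added note, not in the original source) torch_api(scaler_targets=['x']) applies the scaler only
# to the arguments named in scaler_targets, so the wrapped call receives x + 0.1 while y is passed
# through unchanged -- exactly the reference values asserted inside torch_api_func_with_scaler.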
def test_train_api():
impl = DummyImpl()
impl.fc1.eval()
impl.fc2.eval()
impl.train_api_func()
def test_eval_api():
impl = DummyImpl()
impl.fc1.train()
impl.fc2.train()
impl.eval_api_func()
| [
"torch.nn.Linear",
"torch.tensor",
"torch.allclose"
] | 1.6.0 | alxlampe/d3rlpy | af7e6bd018a51f95138d121f59c50dc36ec87e3a |
1.0 | # coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers model from scratch or finetune it on a new task.
"""
import collections
import gc
import inspect
import math
import os
import re
import shutil
import sys
import time
import warnings
from logging import StreamHandler
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
# Integrations must be imported before ML frameworks:
from .integrations import ( # isort: split
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
run_hp_search_optuna,
run_hp_search_ray,
init_deepspeed,
)
import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from .file_utils import (
WEIGHTS_NAME,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_sagemaker_distributed_available,
is_torch_tpu_available,
is_training_run_on_sagemaker,
)
from .modeling_utils import PreTrainedModel, unwrap_model
from .optimization import Adafactor, AdamW, get_scheduler
from .tokenization_utils_base import PreTrainedTokenizerBase
from .trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from .trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
DistributedTensorGatherer,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
distributed_broadcast_scalars,
distributed_concat,
get_parameter_names,
nested_concat,
nested_detach,
nested_numpify,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from .trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalPrediction,
HPSearchBackend,
PredictionOutput,
ShardedDDPOption,
TrainerMemoryTracker,
TrainOutput,
default_compute_objective,
default_hp_space,
denumpify_detensorize,
get_last_checkpoint,
set_seed,
speed_metrics,
)
from .training_args import ParallelMode, TrainingArguments
from .utils import logging
from .utils.modeling_auto_mapping import MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
_is_native_amp_available = False
DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback
if is_in_notebook():
from .utils.notebook import NotebookProgressCallback
DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
if is_apex_available():
from apex import amp
if version.parse(torch.__version__) >= version.parse("1.6"):
_is_native_amp_available = True
from torch.cuda.amp import autocast
if is_datasets_available():
import datasets
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
if is_fairscale_available():
import fairscale
from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
from fairscale.optim import OSS
from fairscale.optim.grad_scaler import ShardedGradScaler
if version.parse(fairscale.__version__) >= version.parse("0.3"):
from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP
from fairscale.nn.wrap import auto_wrap
else:
FullyShardedDDP = None
if is_sagemaker_distributed_available():
import smdistributed.dataparallel.torch.distributed as dist
from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP
else:
import torch.distributed as dist
if is_training_run_on_sagemaker():
logging.add_handler(StreamHandler(sys.stdout))
if TYPE_CHECKING:
import optuna
logger = logging.get_logger(__name__)
class Trainer:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):
The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.
.. note::
:class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`
provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as
they work the same way as the 🤗 Transformers models.
args (:class:`~transformers.TrainingArguments`, `optional`):
The arguments to tweak for training. Will default to a basic instance of
:class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in
the current directory if not provided.
data_collator (:obj:`DataCollator`, `optional`):
The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.
Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of
:func:`~transformers.DataCollatorWithPadding` otherwise.
train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):
            The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to the
            maximum length when batching inputs, and it will be saved along with the model to make it easier to rerun an
interrupted training or reuse the fine-tuned model.
model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):
A function that instantiates the model to be used. If provided, each call to
:meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.
            The function may have zero arguments, or a single one containing the optuna/Ray Tune trial object, to be
able to choose different architectures according to hyper parameters (such as layer count, sizes of inner
layers, dropout probabilities etc).
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary string to metric values.
callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
detailed in :doc:`here <callback>`.
If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.
        optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, `optional`): A tuple
containing the optimizer and the scheduler to use. Will default to an instance of
:class:`~transformers.AdamW` on your model and a scheduler given by
:func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
Important attributes:
- **model** -- Always points to the core model. If using a transformers model, it will be a
:class:`~transformers.PreTrainedModel` subclass.
- **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,
the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the
inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.
- **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
data parallelism, this means some of the model layers are split on different GPUs).
- **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
to :obj:`False` if model parallel or deepspeed is used, or if the default
      ``TrainingArguments.place_model_on_device`` is overridden to return :obj:`False`.
- **is_in_train** -- Whether or not a model is currently running ``train`` (e.g. when ``evaluate`` is called
while in ``train``)
"""
from .trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state
def __init__(
self,
model: Union[PreTrainedModel, torch.nn.Module] = None,
args: TrainingArguments = None,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Dataset] = None,
tokenizer: Optional["PreTrainedTokenizerBase"] = None,
model_init: Callable[[], PreTrainedModel] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
callbacks: Optional[List[TrainerCallback]] = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
):
if args is None:
output_dir = "tmp_trainer"
logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
args = TrainingArguments(output_dir=output_dir)
self.args = args
        # Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
self.hp_name = None
self.deepspeed = None
self.is_in_train = False
# memory metrics - must set up as early as possible
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
# force device and distributed setup init explicitly
args._setup_devices
if model is None:
if model_init is not None:
self.model_init = model_init
model = self.call_model_init()
else:
raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
else:
if model_init is not None:
warnings.warn(
"`Trainer` requires either a `model` or `model_init` argument, but not both. "
"`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.",
FutureWarning,
)
self.model_init = model_init
if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
self.is_model_parallel = True
else:
self.is_model_parallel = False
# Setup Sharded DDP training
self.sharded_ddp = None
if len(args.sharded_ddp) > 0:
if args.deepspeed:
raise ValueError(
"Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags."
)
if args.local_rank == -1:
raise ValueError("Using sharded DDP only works in distributed training.")
elif not is_fairscale_available():
raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None:
raise ImportError(
"Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found "
f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`."
)
elif ShardedDDPOption.SIMPLE in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.SIMPLE
elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_2
elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_3
# one place to sort out whether to place the model on device or not
self.place_model_on_device = args.place_model_on_device
if (
self.is_model_parallel
or (args.deepspeed and args.do_train)
or (args.fp16_full_eval and not args.do_train)
or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3])
):
self.place_model_on_device = False
default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
self.data_collator = data_collator if data_collator is not None else default_collator
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.tokenizer = tokenizer
# postpone switching model to cuda when:
# 1. MP - since we are trying to fit a much bigger than 1 gpu model
# 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,
# and we only use deepspeed for training at the moment
if self.place_model_on_device:
model = model.to(args.device)
# Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
if self.is_model_parallel:
self.args._n_gpu = 1
# later use `self.model is self.model_wrapped` to check if it's wrapped or not
self.model_wrapped = model
self.model = model
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
raise RuntimeError(
"Passing a `model_init` is incompatible with providing the `optimizers` argument."
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
)
default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
self.callback_handler = CallbackHandler(
callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
)
self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
# Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
self._loggers_initialized = False
# Create output directory if needed
if self.is_world_process_zero():
os.makedirs(self.args.output_dir, exist_ok=True)
if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
if args.max_steps > 0:
logger.info("max_steps is given, it will override any value given in num_train_epochs")
# Enforce rules on using datasets with no __len__
if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:
raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
self._signature_columns = None
if is_datasets_available():
if isinstance(train_dataset, datasets.Dataset):
self._remove_unused_columns(self.train_dataset, description="training")
if isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(self.eval_dataset, description="evaluation")
# Mixed precision setup
self.use_apex = False
self.use_amp = False
self.fp16_backend = None
if args.fp16:
if args.fp16_backend == "auto":
self.fp16_backend = "amp" if _is_native_amp_available else "apex"
else:
self.fp16_backend = args.fp16_backend
logger.info(f"Using {self.fp16_backend} fp16 backend")
if args.fp16 and not args.deepspeed: # deepspeed manages its own fp16
if self.fp16_backend == "amp":
self.use_amp = True
self.scaler = ShardedGradScaler() if self.sharded_ddp is not None else torch.cuda.amp.GradScaler()
else:
if not is_apex_available():
raise ImportError(
"Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex."
)
self.use_apex = True
# Label smoothing
if self.args.label_smoothing_factor != 0:
self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
else:
self.label_smoother = None
self.state = TrainerState()
self.control = TrainerControl()
# Internal variable for total_flos used to count as tensors (for distributed + TPU), will be sent in the
# state at each call to self.log.
self._total_flos = None
self.hp_search_backend = None
self.use_tune_checkpoints = False
default_label_names = (
["start_positions", "end_positions"]
if type(self.model).__name__ in MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES.values()
else ["labels"]
)
self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
# very last
self._memory_tracker.stop_and_update_metrics()
def add_callback(self, callback):
"""
Add a callback to the current list of :class:`~transformer.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will instantiate a member of that class.
"""
self.callback_handler.add_callback(callback)
def pop_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformer.TrainerCallback` and returns it.
If the callback is not found, returns :obj:`None` (and no error is raised).
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will pop the first member of that class found in the list of callbacks.
Returns:
:class:`~transformer.TrainerCallback`: The callback removed, if found.
"""
return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformer.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will remove the first member of that class found in the list of callbacks.
"""
self.callback_handler.remove_callback(callback)
def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
if not self.args.remove_unused_columns:
return
if self._signature_columns is None:
# Inspect model forward signature to keep only the arguments it accepts.
signature = inspect.signature(self.model.forward)
self._signature_columns = list(signature.parameters.keys())
# Labels may be named label or label_ids, the default data collator handles that.
self._signature_columns += ["label", "label_ids"]
columns = [k for k in self._signature_columns if k in dataset.column_names]
ignored_columns = list(set(dataset.column_names) - set(self._signature_columns))
if len(ignored_columns) > 0:
dset_description = "" if description is None else f"in the {description} set "
logger.info(
f"The following columns {dset_description} don't have a corresponding argument in "
f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
)
dataset.set_format(type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"])
def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance(
self.train_dataset, collections.abc.Sized
):
return None
# Build the sampler.
if self.args.group_by_length:
model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None
if self.args.world_size <= 1:
return LengthGroupedSampler(
self.train_dataset, self.args.train_batch_size, model_input_name=model_input_name
)
else:
return DistributedLengthGroupedSampler(
self.train_dataset,
self.args.train_batch_size,
num_replicas=self.args.world_size,
rank=self.args.process_index,
model_input_name=model_input_name,
)
else:
if self.args.world_size <= 1:
return RandomSampler(self.train_dataset)
elif self.args.parallel_mode == ParallelMode.TPU and not self.args.dataloader_drop_last:
# Use a loop for TPUs when drop_last is False to have all batches have the same size.
return DistributedSamplerWithLoop(
self.train_dataset,
batch_size=self.args.per_device_train_batch_size,
num_replicas=self.args.world_size,
rank=self.args.process_index,
)
else:
return DistributedSampler(
self.train_dataset, num_replicas=self.args.world_size, rank=self.args.process_index
)
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training :class:`~torch.utils.data.DataLoader`.
Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted
to distributed training if necessary) otherwise.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
train_sampler = self._get_train_sampler()
return DataLoader(
self.train_dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
if is_torch_tpu_available():
return SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
elif self.args.local_rank != -1:
return SequentialDistributedSampler(eval_dataset)
else:
return SequentialSampler(eval_dataset)
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Returns the evaluation :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not
accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
elif eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
elif is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(eval_dataset, description="evaluation")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
eval_sampler = self._get_eval_sampler(eval_dataset)
return DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
"""
Returns the test :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
elif is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
self._remove_unused_columns(test_dataset, description="test")
test_sampler = self._get_eval_sampler(test_dataset)
# We use the same batch_size as for eval.
return DataLoader(
test_dataset,
sampler=test_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
pin_memory=self.args.dataloader_pin_memory,
)
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
        Trainer's init through :obj:`optimizers`, or subclass and override this method.
"""
if self.optimizer is None:
decay_parameters = get_parameter_names(self.model, [torch.nn.LayerNorm])
decay_parameters = [name for name in decay_parameters if "bias" not in name]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if n in decay_parameters],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if n not in decay_parameters],
"weight_decay": 0.0,
},
]
if self.args.adafactor:
optimizer_cls = Adafactor
optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
else:
optimizer_cls = AdamW
optimizer_kwargs = {
"betas": (self.args.adam_beta1, self.args.adam_beta2),
"eps": self.args.adam_epsilon,
}
optimizer_kwargs["lr"] = self.args.learning_rate
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer = OSS(
params=optimizer_grouped_parameters,
optim=optimizer_cls,
**optimizer_kwargs,
)
else:
self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if self.lr_scheduler is None:
warmup_steps = (
self.args.warmup_steps
if self.args.warmup_steps > 0
else math.ceil(num_training_steps * self.args.warmup_ratio)
)
self.lr_scheduler = get_scheduler(
self.args.lr_scheduler_type,
self.optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=num_training_steps,
)
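        # (Added note) warmup_steps takes precedence when it is positive; otherwise warmup is
        # derived from warmup_ratio, e.g. num_training_steps=1000 and warmup_ratio=0.1 give
        # math.ceil(1000 * 0.1) = 100 warmup steps.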
def num_examples(self, dataloader: DataLoader) -> int:
"""
Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.
        Will raise an exception if the underlying dataset does not implement the method :obj:`__len__`
"""
return len(dataloader.dataset)
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
""" HP search setup code """
self._trial = trial
if self.hp_search_backend is None or trial is None:
return
params = self.hp_space(trial) if self.hp_search_backend == HPSearchBackend.OPTUNA else trial
for key, value in params.items():
if not hasattr(self.args, key):
raise AttributeError(
f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
)
old_attr = getattr(self.args, key, None)
# Casting value to the proper type
if old_attr is not None:
value = type(old_attr)(value)
setattr(self.args, key, value)
if self.hp_search_backend == HPSearchBackend.OPTUNA:
logger.info("Trial:", trial.params)
def _report_to_hp_search(
self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
):
if self.hp_search_backend is None or trial is None:
return
self.objective = self.compute_objective(metrics.copy())
if self.hp_search_backend == HPSearchBackend.OPTUNA:
import optuna
trial.report(self.objective, epoch)
if trial.should_prune():
raise optuna.TrialPruned()
elif self.hp_search_backend == HPSearchBackend.RAY:
from ray import tune
if self.control.should_save:
self._tune_save_checkpoint()
tune.report(objective=self.objective, **metrics)
def _tune_save_checkpoint(self):
from ray import tune
if not self.use_tune_checkpoints:
return
with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
self.save_model(output_dir)
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
def call_model_init(self, trial=None):
model_init_argcount = len(inspect.signature(self.model_init).parameters)
if model_init_argcount == 0:
model = self.model_init()
elif model_init_argcount == 1:
model = self.model_init(trial)
else:
raise RuntimeError("model_init should have 0 or 1 argument.")
if model is None:
raise RuntimeError("model_init should not return None.")
return model
def _wrap_model(self, model, training=True):
# already initialized its own DDP and AMP
if self.deepspeed:
return self.deepspeed
# train/eval could be run multiple-times - if already wrapped, don't re-wrap it again
if unwrap_model(model) is not model:
return model
# Mixed precision training with apex (torch < 1.6)
if self.use_apex and training:
model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
# Multi-gpu training (should be after apex fp16 initialization)
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Note: in torch.distributed mode, there's no point in wrapping the model
# inside a DistributedDataParallel as we'll be under `no_grad` anyways.
if not training:
return model
# Distributed training (should be after apex fp16 initialization)
if self.sharded_ddp is not None:
# Sharded DDP!
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
model = ShardedDDP(model, self.optimizer)
else:
mixed_precision = self.args.fp16
cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp
zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3
# XXX: Breaking the self.model convention but I see no way around it for now.
if ShardedDDPOption.AUTO_WRAP in self.args.sharded_ddp:
model = auto_wrap(model)
self.model = model = FullyShardedDDP(
model,
mixed_precision=mixed_precision,
reshard_after_forward=zero_3,
cpu_offload=cpu_offload,
).to(self.args.device)
elif is_sagemaker_distributed_available():
model = DDP(model, device_ids=[dist.get_local_rank()], broadcast_buffers=False)
elif self.args.local_rank != -1:
if self.args.ddp_find_unused_parameters is not None:
find_unused_parameters = self.args.ddp_find_unused_parameters
elif isinstance(model, PreTrainedModel):
# find_unused_parameters breaks checkpointing as per
# https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
find_unused_parameters = not getattr(model.config, "gradient_checkpointing", False)
else:
find_unused_parameters = True
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[self.args.local_rank],
output_device=self.args.local_rank,
find_unused_parameters=find_unused_parameters,
)
return model
def train(
self,
resume_from_checkpoint: Optional[Union[str, bool]] = None,
trial: Union["optuna.Trial", Dict[str, Any]] = None,
**kwargs,
):
"""
Main training entry point.
Args:
resume_from_checkpoint (:obj:`str` or :obj:`bool`, `optional`):
If a :obj:`str`, local path to a saved checkpoint as saved by a previous instance of
:class:`~transformers.Trainer`. If a :obj:`bool` and equals `True`, load the last checkpoint in
`args.output_dir` as saved by a previous instance of :class:`~transformers.Trainer`. If present,
training will resume from the model/optimizer/scheduler states loaded here.
trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
The trial run or the hyperparameter dictionary for hyperparameter search.
kwargs:
Additional keyword arguments used to hide deprecated arguments
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
self.is_in_train = True
if "model_path" in kwargs:
resume_from_checkpoint = kwargs.pop("model_path")
warnings.warn(
"`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
"instead.",
FutureWarning,
)
if len(kwargs) > 0:
raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
# This might change the seed so needs to run first.
self._hp_search_setup(trial)
# Model re-init
model_reloaded = False
if self.model_init is not None:
# Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
self.model = self.call_model_init(trial)
model_reloaded = True
# Reinitializes optimizer and scheduler
self.optimizer, self.lr_scheduler = None, None
# Load potential model checkpoint
if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
resume_from_checkpoint = get_last_checkpoint(self.args.output_dir)
if resume_from_checkpoint is None:
raise ValueError(f"No valid checkpoint found in output directory ({self.args.output_dir})")
if resume_from_checkpoint is not None and os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
logger.info(f"Loading model from {resume_from_checkpoint}).")
if isinstance(self.model, PreTrainedModel):
self.model = self.model.from_pretrained(resume_from_checkpoint)
model_reloaded = True
else:
state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME))
self.model.load_state_dict(state_dict)
# If model was re-initialized, put it on the right device and update self.model_wrapped
if model_reloaded:
if self.place_model_on_device:
self.model = self.model.to(self.args.device)
self.model_wrapped = self.model
        # Keeping track of whether we can call len() on the dataset or not
train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)
# Data loader and number of training steps
train_dataloader = self.get_train_dataloader()
# Setting up training control variables:
# number of training epochs: num_train_epochs
# number of training steps per epoch: num_update_steps_per_epoch
# total number of training steps to execute: max_steps
if train_dataset_is_sized:
num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
if self.args.max_steps > 0:
max_steps = self.args.max_steps
num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int(
self.args.max_steps % num_update_steps_per_epoch > 0
)
else:
max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch)
num_train_epochs = math.ceil(self.args.num_train_epochs)
else:
# see __init__. max_steps is set when the dataset has no __len__
max_steps = self.args.max_steps
num_train_epochs = 1
num_update_steps_per_epoch = max_steps
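        # (Added note) worked example of the bookkeeping above, assuming a sized dataset: with a
        # dataloader of 1000 batches and gradient_accumulation_steps=4 there are 250 update steps
        # per epoch, so num_train_epochs=3 (and max_steps<=0) yields max_steps = ceil(3 * 250) = 750.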
delay_optimizer_creation = self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE
if self.args.deepspeed:
model, optimizer, lr_scheduler = init_deepspeed(self, num_training_steps=max_steps)
self.model = model.module
self.model_wrapped = model # will get further wrapped in DDP
self.deepspeed = model # DeepSpeedEngine object
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
elif not delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
self.state = TrainerState()
self.state.is_hyper_param_search = trial is not None
model = self._wrap_model(self.model_wrapped)
# for the rest of this function `model` is the outside model, whether it was wrapped or not
if model is not self.model:
self.model_wrapped = model
if delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
# Check if saved optimizer or scheduler states exist
self._load_optimizer_and_scheduler(resume_from_checkpoint)
# important: at this point:
# self.model is the Transformers Model
# self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.
# Train!
if is_torch_tpu_available():
world_size = xm.xrt_world_size()
elif self.args.local_rank != -1:
world_size = dist.get_world_size()
else:
world_size = 1
total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps * world_size
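        # (Added note) illustrative arithmetic: train_batch_size=8, gradient_accumulation_steps=4
        # and world_size=2 give an effective batch of 8 * 4 * 2 = 64 samples per optimizer update.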
num_examples = (
self.num_examples(train_dataloader)
if train_dataset_is_sized
else total_train_batch_size * self.args.max_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {self.args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
logger.info(f" Gradient Accumulation steps = {self.args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {max_steps}")
self.state.epoch = 0
start_time = time.time()
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if resume_from_checkpoint is not None and os.path.isfile(
os.path.join(resume_from_checkpoint, "trainer_state.json")
):
self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, "trainer_state.json"))
epochs_trained = self.state.global_step // num_update_steps_per_epoch
if not self.args.ignore_data_skip:
steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
steps_trained_in_current_epoch *= self.args.gradient_accumulation_steps
else:
steps_trained_in_current_epoch = 0
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {self.state.global_step}")
if not self.args.ignore_data_skip:
logger.info(
f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
"batches in the first epoch."
)
# Update the references
self.callback_handler.model = self.model
self.callback_handler.optimizer = self.optimizer
self.callback_handler.lr_scheduler = self.lr_scheduler
self.callback_handler.train_dataloader = train_dataloader
self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
self.state.trial_params = hp_params(trial) if trial is not None else None
# This should be the same if the state has been saved but in case the training arguments changed, it's safer
# to set this after the load.
self.state.max_steps = max_steps
self.state.num_train_epochs = num_train_epochs
self.state.is_local_process_zero = self.is_local_process_zero()
self.state.is_world_process_zero = self.is_world_process_zero()
# tr_loss is a tensor to avoid synchronization of TPUs through .item()
tr_loss = torch.tensor(0.0).to(self.args.device)
        # _total_loss_scalar is updated every time .item() has to be called on tr_loss and stores the sum of all losses
self._total_loss_scalar = 0.0
self._globalstep_last_logged = self.state.global_step
self._total_flos = self.state.total_flos
model.zero_grad()
self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)
# Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
if not self.args.ignore_data_skip:
for epoch in range(epochs_trained):
# We just need to begin an iteration to create the randomization of the sampler.
for _ in train_dataloader:
break
for epoch in range(epochs_trained, num_train_epochs):
if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch)
if is_torch_tpu_available():
parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(
self.args.device
)
epoch_iterator = parallel_loader
else:
epoch_iterator = train_dataloader
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
steps_in_epoch = (
len(epoch_iterator)
if train_dataset_is_sized
else self.args.max_steps * self.args.gradient_accumulation_steps
)
self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)
for step, inputs in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
if (step + 1) % self.args.gradient_accumulation_steps == 0:
self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control)
if (
((step + 1) % self.args.gradient_accumulation_steps != 0)
and self.args.local_rank != -1
and self.args._no_sync_in_gradient_accumulation
):
# Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
with model.no_sync():
tr_loss += self.training_step(model, inputs)
else:
tr_loss += self.training_step(model, inputs)
self._total_flos += float(self.floating_point_ops(inputs))
# Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps
if self.deepspeed:
self.deepspeed.step()
if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
steps_in_epoch <= self.args.gradient_accumulation_steps
and (step + 1) == steps_in_epoch
):
# Gradient clipping
if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0 and not self.deepspeed:
# deepspeed does its own clipping
if self.use_amp:
# AMP: gradients need unscaling
self.scaler.unscale_(self.optimizer)
if hasattr(self.optimizer, "clip_grad_norm"):
# Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
self.optimizer.clip_grad_norm(self.args.max_grad_norm)
elif hasattr(model, "clip_grad_norm_"):
# Some models (like FullyShardedDDP) have a specific way to do gradient clipping
model.clip_grad_norm_(self.args.max_grad_norm)
else:
# Revert to normal clipping otherwise, handling Apex or full precision
torch.nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
self.args.max_grad_norm,
)
# Optimizer step
if self.deepspeed:
pass # called outside the loop
elif is_torch_tpu_available():
xm.optimizer_step(self.optimizer)
elif self.use_amp:
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.optimizer.step()
if not self.deepspeed:
self.lr_scheduler.step()
model.zero_grad()
self.state.global_step += 1
self.state.epoch = epoch + (step + 1) / steps_in_epoch
self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.control.should_epoch_stop or self.control.should_training_stop:
break
self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.args.tpu_metrics_debug or self.args.debug:
if is_torch_tpu_available():
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
else:
logger.warning(
"You enabled PyTorch/XLA debug metrics but you don't have a TPU "
"configured. Check your training configuration if this is unexpected."
)
if self.control.should_training_stop:
break
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
            # Wait for everyone to get here so we are sure the model has been saved by process 0.
if is_torch_tpu_available():
xm.rendezvous("load_best_model_at_end")
elif self.args.local_rank != -1:
dist.barrier()
logger.info(
f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
)
if isinstance(self.model, PreTrainedModel):
self.model = self.model.from_pretrained(self.state.best_model_checkpoint)
if self.place_model_on_device:
self.model = self.model.to(self.args.device)
else:
state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME))
self.model.load_state_dict(state_dict)
if self.deepspeed:
self.deepspeed.load_checkpoint(
self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
)
metrics = speed_metrics("train", start_time, self.state.max_steps)
if self._total_flos is not None:
self.store_flos()
metrics["total_flos"] = self.state.total_flos
self.log(metrics)
self.control = self.callback_handler.on_train_end(self.args, self.state, self.control)
# add remaining tr_loss
self._total_loss_scalar += tr_loss.item()
if self.deepspeed:
# free up any memory that might be useful for eval
self.deepspeed = None
self.optimizer = None
self.lr_scheduler = None
self.model_wrapped = self.model
gc.collect() # force memory release
            # to restore normal behavior outside of train, replay the place_model_on_device logic w/o deepspeed
self.place_model_on_device = self.args.place_model_on_device
if self.is_model_parallel:
self.place_model_on_device = False
self.is_in_train = False
self._memory_tracker.stop_and_update_metrics(metrics)
return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)
def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):
if self.control.should_log:
logs: Dict[str, float] = {}
tr_loss_scalar = tr_loss.item()
# reset tr_loss to zero
tr_loss -= tr_loss
logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
logs["learning_rate"] = self._get_learning_rate()
self._total_loss_scalar += tr_loss_scalar
self._globalstep_last_logged = self.state.global_step
self.log(logs)
metrics = None
if self.control.should_evaluate:
metrics = self.evaluate()
self._report_to_hp_search(trial, epoch, metrics)
if self.control.should_save:
self._save_checkpoint(model, trial, metrics=metrics)
self.control = self.callback_handler.on_save(self.args, self.state, self.control)
def _save_checkpoint(self, model, trial, metrics=None):
# In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
# want to save, except in the FullyShardedDDP case.
# assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"
# Save model checkpoint
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
if self.hp_search_backend is not None and trial is not None:
if self.hp_search_backend == HPSearchBackend.OPTUNA:
run_id = trial.number
else:
from ray import tune
run_id = tune.get_trial_id()
run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
run_dir = os.path.join(self.args.output_dir, run_name)
else:
run_dir = self.args.output_dir
self.store_flos()
output_dir = os.path.join(run_dir, checkpoint_folder)
self.save_model(output_dir)
if self.deepspeed:
self.deepspeed.save_checkpoint(output_dir)
# Save optimizer and scheduler
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer.consolidate_state_dict()
if is_torch_tpu_available():
xm.rendezvous("saving_optimizer_states")
xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
elif self.is_world_process_zero() and not self.deepspeed:
# deepspeed.save_checkpoint above saves model/optim/sched
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
# Determine the new best metric / best model checkpoint
if metrics is not None and self.args.metric_for_best_model is not None:
metric_to_check = self.args.metric_for_best_model
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics[metric_to_check]
operator = np.greater if self.args.greater_is_better else np.less
if (
self.state.best_metric is None
or self.state.best_model_checkpoint is None
or operator(metric_value, self.state.best_metric)
):
self.state.best_metric = metric_value
self.state.best_model_checkpoint = output_dir
# Save the Trainer state
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
# Maybe delete some older checkpoints.
if self.is_world_process_zero():
self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
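# A minimal sketch of how the checkpointing and best-model tracking above is typically driven from
# TrainingArguments. This is not part of this module; the output dir, limits and metric name are
# hypothetical example values.
#
#     from transformers import TrainingArguments
#
#     args = TrainingArguments(
#         output_dir="out",
#         evaluation_strategy="epoch",      # triggers control.should_evaluate each epoch
#         save_total_limit=3,               # lets _rotate_checkpoints delete older folders
#         load_best_model_at_end=True,      # reloads state.best_model_checkpoint after training
#         metric_for_best_model="eval_loss",
#         greater_is_better=False,
#     )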
def _load_optimizer_and_scheduler(self, checkpoint):
"""If optimizer and scheduler states exist, load them."""
if checkpoint is None:
return
if os.path.isfile(os.path.join(checkpoint, "optimizer.pt")) and os.path.isfile(
os.path.join(checkpoint, "scheduler.pt")
):
# Load in optimizer and scheduler states
if is_torch_tpu_available():
# On TPU we have to take some extra precautions to properly load the states on the right device.
optimizer_state = torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location="cpu")
with warnings.catch_warnings(record=True) as caught_warnings:
lr_scheduler_state = torch.load(os.path.join(checkpoint, "scheduler.pt"), map_location="cpu")
reissue_pt_warnings(caught_warnings)
xm.send_cpu_data_to_device(optimizer_state, self.args.device)
xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)
self.optimizer.load_state_dict(optimizer_state)
self.lr_scheduler.load_state_dict(lr_scheduler_state)
else:
self.optimizer.load_state_dict(
torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location=self.args.device)
)
with warnings.catch_warnings(record=True) as caught_warnings:
self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, "scheduler.pt")))
reissue_pt_warnings(caught_warnings)
if self.deepspeed:
# There is no clean way to check whether a saved deepspeed checkpoint exists, but `load_checkpoint` simply returns None when it cannot find one, so this acts as a check-and-load in a single call.
self.deepspeed.load_checkpoint(checkpoint, load_optimizer_states=True, load_lr_scheduler_states=True)
def hyperparameter_search(
self,
hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
n_trials: int = 20,
direction: str = "minimize",
backend: Optional[Union["str", HPSearchBackend]] = None,
hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
**kwargs,
) -> BestRun:
"""
Launch a hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by
:obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is
provided, the sum of all metrics otherwise.
.. warning::
To use this method, you need to have provided a ``model_init`` when initializing your
:class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.
Args:
hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
A function that defines the hyperparameter search space. Will default to
:func:`~transformers.trainer_utils.default_hp_space_optuna` or
:func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.
compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
A function computing the objective to minimize or maximize from the metrics returned by the
:obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
n_trials (:obj:`int`, `optional`, defaults to 20):
The number of trial runs to test.
direction (:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
Whether to optimize for a greater or lower objective value. Can be :obj:`"minimize"` or :obj:`"maximize"`; you should
pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when optimizing one or
several metrics.
backend (:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which
one is installed. If both are installed, will default to optuna.
kwargs:
Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
more information see:
- the documentation of `optuna.create_study
<https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html>`__
- the documentation of `tune.run
<https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__
Returns:
:class:`transformers.trainer_utils.BestRun`: All the information about the best run.
"""
if backend is None:
backend = default_hp_search_backend()
if backend is None:
raise RuntimeError(
"At least one of optuna or ray should be installed. "
"To install optuna run `pip install optuna`."
"To install ray run `pip install ray[tune]`."
)
backend = HPSearchBackend(backend)
if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
if backend == HPSearchBackend.RAY and not is_ray_tune_available():
raise RuntimeError(
"You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
)
self.hp_search_backend = backend
if self.model_init is None:
raise RuntimeError(
"To use hyperparameter search, you need to pass your model through a model_init function."
)
self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
self.hp_name = hp_name
self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
best_run = run_hp_search(self, n_trials, direction, **kwargs)
self.hp_search_backend = None
return best_run
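# Hedged usage sketch for `hyperparameter_search` with the optuna backend. The search space and
# `trainer` are hypothetical; the Trainer must have been created with a `model_init` function.
#
#     def optuna_hp_space(trial):
#         return {
#             "learning_rate": trial.suggest_float("learning_rate", 1e-5, 5e-5, log=True),
#             "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5),
#         }
#
#     best_run = trainer.hyperparameter_search(
#         hp_space=optuna_hp_space,
#         n_trials=10,
#         direction="minimize",
#     )
#     print(best_run.run_id, best_run.objective, best_run.hyperparameters)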
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if self.state.epoch is not None:
logs["epoch"] = round(self.state.epoch, 2)
output = {**logs, **{"step": self.state.global_step}}
self.state.log_history.append(output)
self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
"""
Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
"""
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
inputs[k] = v.to(self.args.device)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
return inputs
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
"""
Perform a training step on a batch of inputs.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to train.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
Return:
:obj:`torch.Tensor`: The tensor with training loss on this batch.
"""
model.train()
inputs = self._prepare_inputs(inputs)
if self.use_amp:
with autocast():
loss = self.compute_loss(model, inputs)
else:
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:
# deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
loss = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(loss).backward()
elif self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
# loss gets scaled under gradient_accumulation_steps in deepspeed
loss = self.deepspeed.backward(loss)
else:
loss.backward()
return loss.detach()
def compute_loss(self, model, inputs, return_outputs=False):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
"""
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
loss = self.label_smoother(outputs, labels)
else:
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
return (loss, outputs) if return_outputs else loss
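# A common customization point: subclass Trainer and override `compute_loss` for a custom objective.
# A sketch only; `class_weights` is a hypothetical tensor the user defines elsewhere, and `torch` is
# already imported in this module.
#
#     class WeightedLossTrainer(Trainer):
#         def compute_loss(self, model, inputs, return_outputs=False):
#             labels = inputs.pop("labels")
#             outputs = model(**inputs)
#             logits = outputs["logits"] if isinstance(outputs, dict) else outputs[0]
#             loss_fct = torch.nn.CrossEntropyLoss(weight=class_weights)
#             loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
#             return (loss, outputs) if return_outputs else loss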
def is_local_process_zero(self) -> bool:
"""
Whether or not this process is the local main process (e.g., the main process on one machine when training
in a distributed fashion across several machines).
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=True)
else:
return self.args.local_rank in [-1, 0]
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be :obj:`True` for one process).
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=False)
else:
return self.args.local_rank == -1 or dist.get_rank() == 0
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
Will only save from the main process.
"""
if is_torch_tpu_available():
self._save_tpu(output_dir)
elif (
ShardedDDPOption.ZERO_DP_2 in self.args.sharded_ddp or ShardedDDPOption.ZERO_DP_3 in self.args.sharded_ddp
):
state_dict = self.model.state_dict()
if self.is_world_process_zero():
self._save(output_dir, state_dict=state_dict)
elif self.is_world_process_zero():
self._save(output_dir)
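# `save_model` writes the weights with `save_pretrained()` from the main process only, so the result can be
# reloaded later. A hedged sketch; the path and model class below are placeholders:
#
#     trainer.save_model("out/final")
#     from transformers import AutoModelForSequenceClassification
#     model = AutoModelForSequenceClassification.from_pretrained("out/final")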
def _save_tpu(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info("Saving model checkpoint to %s", output_dir)
if xm.is_master_ordinal():
os.makedirs(output_dir, exist_ok=True)
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
xm.rendezvous("saving_checkpoint")
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
unwrap_model(self.model).save_pretrained(
output_dir,
save_config=self.is_world_process_zero(),
state_dict=self.model.state_dict(),
save_function=xm.save,
)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
state_dict = self.model.state_dict()
xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir, save_config=self.is_world_process_zero(), save_function=xm.save)
if self.tokenizer is not None and self.is_world_process_zero():
self.tokenizer.save_pretrained(output_dir)
def _save(self, output_dir: Optional[str] = None, state_dict=None):
# If we are executing this function, we are the process zero, so we don't check for that.
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info("Saving model checkpoint to %s", output_dir)
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
if state_dict is None:
state_dict = self.model.state_dict()
unwrap_model(self.model).save_pretrained(output_dir, state_dict=state_dict)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
if state_dict is None:
state_dict = self.model.state_dict()
torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir, state_dict=state_dict)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def store_flos(self):
# Storing the number of floating-point operations that went into the model
if self._total_flos is not None:
if self.args.local_rank != -1:
self.state.total_flos = distributed_broadcast_scalars([self._total_flos]).sum().item()
else:
self.state.total_flos = self._total_flos
def _sorted_checkpoints(
self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False
) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*")]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
# Make sure we don't delete the best model.
if self.state.best_model_checkpoint is not None:
best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
checkpoints_sorted[best_model_index], checkpoints_sorted[-1] = (
checkpoints_sorted[-1],
checkpoints_sorted[best_model_index],
)
return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init :obj:`compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (:obj:`Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,
columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the
:obj:`__len__` method.
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metric "bleu" will be named
"eval_bleu" if the prefix is "eval" (the default).
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
dictionary also contains the epoch number which comes from the training state.
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
eval_dataloader = self.get_eval_dataloader(eval_dataset)
start_time = time.time()
output = self.prediction_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if self.compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))
self.log(output.metrics)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
self._memory_tracker.stop_and_update_metrics(output.metrics)
return output.metrics
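# Hedged usage sketch for `evaluate`; `trainer` and `my_eval_dataset` are placeholders. Metric keys come
# back prefixed with `metric_key_prefix`, e.g. "eval_loss", plus the speed metrics added above.
#
#     metrics = trainer.evaluate(eval_dataset=my_eval_dataset)
#     print(metrics["eval_loss"], metrics.get("eval_runtime"))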
def predict(
self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval"
) -> PredictionOutput:
"""
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:obj:`Dataset`):
Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metric "bleu" will be named
"eval_bleu" if the prefix is "eval" (the default)
.. note::
If your predictions or labels have different sequence length (for instance because you're doing dynamic
padding in a token classification task) the predictions will be padded (on the right) to allow for
concatenation into one array. The padding index is -100.
Returns: `NamedTuple` A namedtuple with the following keys:
- predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.
- label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).
- metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset
contained labels).
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
if test_dataset is not None and not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
test_dataloader = self.get_test_dataloader(test_dataset)
start_time = time.time()
output = self.prediction_loop(
test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, len(test_dataset)))
self._memory_tracker.stop_and_update_metrics(output.metrics)
return output
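# Hedged usage sketch for `predict` on a labeled test set; the dataset name is a placeholder and the
# argmax assumes a classification head returning a single logits array.
#
#     import numpy as np
#     output = trainer.predict(test_dataset=my_test_dataset)
#     preds = np.argmax(output.predictions, axis=-1)
#     print(output.metrics)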
def prediction_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
Works both with or without labels.
"""
if not isinstance(dataloader.dataset, collections.abc.Sized):
raise ValueError("dataset must implement __len__")
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
if self.args.deepspeed and not self.args.do_train:
# no harm, but flagging to the user that deepspeed config is ignored for eval
# flagging only for when --do_train wasn't passed as only then it's redundant
logger.info("Detected the deepspeed argument but it will not be used for evaluation")
model = self._wrap_model(self.model, training=False)
# If full fp16 is wanted for eval and this ``evaluate`` or ``predict`` isn't called while
# ``train`` is running, cast the model to half precision first and then put it on the device
if not self.is_in_train and self.args.fp16_full_eval:
model = model.half().to(self.args.device)
batch_size = dataloader.batch_size
num_examples = self.num_examples(dataloader)
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", num_examples)
logger.info(" Batch size = %d", batch_size)
losses_host: torch.Tensor = None
preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
world_size = max(1, self.args.world_size)
eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
if not prediction_loss_only:
preds_gatherer = DistributedTensorGatherer(world_size, num_examples)
labels_gatherer = DistributedTensorGatherer(world_size, num_examples)
model.eval()
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
if self.args.past_index >= 0:
self._past = None
self.callback_handler.eval_dataloader = dataloader
for step, inputs in enumerate(dataloader):
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
if loss is not None:
losses = loss.repeat(batch_size)
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if logits is not None:
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
if labels is not None:
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
# Set back to None to begin a new accumulation
losses_host, preds_host, labels_host = None, None, None
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
eval_loss = eval_losses_gatherer.finalize()
preds = preds_gatherer.finalize() if not prediction_loss_only else None
label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
# To be JSON-serializable, we need to remove numpy types or zero-d tensors
metrics = denumpify_detensorize(metrics)
if eval_loss is not None:
metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def _gather_and_numpify(self, tensors, name):
"""
Gather the values of `tensors` (a tensor or a list/tuple of nested tensors) and convert them to numpy
arrays before concatenating them.
"""
if tensors is None:
return
if is_torch_tpu_available():
tensors = nested_xla_mesh_reduce(tensors, name)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return nested_numpify(tensors)
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
Perform an evaluation step on :obj:`model` using :obj:`inputs`.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to evaluate.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (:obj:`bool`):
Whether or not to return the loss only.
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
Return:
Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
labels (each being optional).
"""
has_labels = all(inputs.get(k) is not None for k in self.label_names)
inputs = self._prepare_inputs(inputs)
if ignore_keys is None:
if hasattr(self.model, "config"):
ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
# labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
if has_labels:
labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
if len(labels) == 1:
labels = labels[0]
else:
labels = None
with torch.no_grad():
if has_labels:
loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
loss = loss.mean().detach()
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
else:
logits = outputs[1:]
else:
loss = None
if self.use_amp:
with autocast():
outputs = model(**inputs)
else:
outputs = model(**inputs)
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
else:
logits = outputs
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index - 1]
if prediction_loss_only:
return (loss, None, None)
logits = nested_detach(logits)
if len(logits) == 1:
logits = logits[0]
return (loss, logits, labels)
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
"""
For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of
floating point operations for every backward + forward pass. If using another model, either implement such a
method in the model or subclass and override this method.
Args:
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
:obj:`int`: The number of floating-point operations.
"""
if hasattr(self.model, "floating_point_ops"):
return self.model.floating_point_ops(inputs)
else:
return 0
| [
"torch.distributed.get_world_size",
"torch.cat",
"torch.utils.data.sampler.RandomSampler",
"torch.utils.data.dataloader.DataLoader",
"torch.cuda.amp.autocast",
"torch.no_grad",
"torch.nn.parallel.DistributedDataParallel",
"torch.utils.data.sampler.SequentialSampler",
"torch.tensor",
"torch.utils.data.distributed.DistributedSampler",
"torch.cuda.amp.GradScaler",
"torch.distributed.get_rank",
"torch.distributed.barrier",
"torch.distributed.get_local_rank",
"torch.nn.DataParallel"
] | 1.0 | rmroczkowski/transformers | c988db5af2a5f1ccfcb5ad19bd735b6a77516637 |