version (string, 21 classes) | code (string, 225-174k chars) | apis (sequence) | full_version (string, 1-6 chars) | repo_name (string, 10-107 chars) | hexsha (string, 40 chars)
---|---|---|---|---|---|
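Each row pairs a PyTorch source file (`code`) with the `torch.*` calls it references (`apis`), the major `version` and exact `full_version` of PyTorch it targets, the `repo_name`, and the commit `hexsha`. A minimal sketch of how such rows could be consumed, assuming the table is published as a Hugging Face dataset (the dataset id below is a placeholder, not the real name):

```python
# Minimal sketch, assuming these rows are available as a Hugging Face dataset.
# "user/pytorch-api-usage" is a placeholder id, not the actual dataset name.
from datasets import load_dataset

ds = load_dataset("user/pytorch-api-usage", split="train")  # placeholder id
for row in ds.select(range(3)):
    # columns mirror the header above
    print(row["full_version"], row["repo_name"], row["hexsha"])
    print(sorted(row["apis"]))    # torch.* APIs referenced by this file
    print(row["code"][:120])      # first characters of the stored source
```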
0.4 | import sys
sys.path.append("..")
import torch
from torch import nn
import torch.nn.functional as F
import random
import argparse
try:
from torchqrnn import QRNN
except:
# the torchqrnn import can fail on Python 3.7.x; fall back to None
QRNN = None
from models.EncoderDecoder import (
EncoderModel,
DecoderModel,
EncoderDecoderModel,
DecoderOutputType,
)
from models.components.attention import (
AttentionModule
)
from vocab import Vocabulary
from constants import (
UNKNOWN_TOKEN,
PAD_TOKEN,
)
class EncoderQRNN(EncoderModel):
def __init__(
self,
src_vocab: Vocabulary,
hidden_size: int,
num_layers: int,
dropout: float,
):
super(EncoderQRNN, self).__init__()
self.input_size = len(src_vocab)
self.hidden_size = hidden_size
self.num_layers = num_layers
self.dropout = dropout
self.embedding = nn.Embedding(
len(src_vocab),
hidden_size,
)
self.lstm = QRNN(
input_size=hidden_size,
hidden_size=hidden_size,
num_layers=num_layers,
)
def forward(
self,
src_tokens: torch.Tensor,
src_lengths: torch.Tensor,
hidden: torch.Tensor = None,
) -> torch.Tensor:
embedded = self.embedding(src_tokens)
# print(embedded.shape)
#packed = nn.utils.rnn.pack_padded_sequence(embedded, src_lengths, batch_first=True)
#packed = packed.t()
embedded = embedded.transpose(0, 1)
outputs, hidden = self.lstm(embedded, hidden)
outputs = outputs.transpose(0, 1)
#outputs, outputs_length = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True)
# sum up bidirectional outputs to keep hidden size the same
#outputs = outputs[:, :, :self.hidden_size] + outputs[:, : ,self.hidden_size:]
# print('output: ', outputs.shape)
return outputs, hidden
class AttentionDecoderQRNN(DecoderModel):
def __init__(
self,
trg_vocab: Vocabulary,
hidden_size: int,
num_layers: int,
dropout: float,
teacher_student_ratio: float,
):
super(AttentionDecoderQRNN, self).__init__()
self.hidden_size = hidden_size
self.output_size = len(trg_vocab)
self.num_layers = num_layers
self.dropout = dropout
self.teacher_student_ratio = teacher_student_ratio
self.trg_vocab = trg_vocab
# layers
self.embedding = nn.Embedding(
len(trg_vocab),
hidden_size,
)
self.dropout = nn.Dropout(dropout)
self.attn = AttentionModule('general', hidden_size)
self.lstm = QRNN(
input_size=hidden_size * 2,
hidden_size=hidden_size,
num_layers=num_layers,
)
self.out = nn.Linear(
hidden_size,
len(trg_vocab),
)
def forward(
self,
prev_tokens: torch.Tensor,
encoder_out: tuple,
) -> torch.Tensor:
encoder_outputs, last_hidden = encoder_out
batch_size, seq_len = prev_tokens.shape
if random.random() <= self.teacher_student_ratio:
return self.teacher_forward(
last_hidden,
encoder_outputs,
prev_tokens,
)
else:
return self.student_forward(
last_hidden,
encoder_outputs,
seq_len,
)
def forward_eval(
self,
prev_tokens: torch.Tensor,
encoder_out: tuple,
intermediate: torch.Tensor,
) -> torch.Tensor:
encoder_outputs, last_hidden = encoder_out
return self.teacher_forward(
last_hidden if intermediate is None else intermediate,
encoder_outputs,
prev_tokens,
)
def teacher_forward(
self,
final_hidden: torch.Tensor,
encoder_outputs: torch.Tensor,
prev_tokens: torch.Tensor,
) -> torch.Tensor:
batch_size, seq_len = prev_tokens.shape
final_hidden = final_hidden[:self.num_layers]
final_encoder_hidden = final_hidden
# embedded_prev_tokens: (batch, seq_len, hidden_size)
embedded_prev_tokens = self.embedding(prev_tokens)
embedded_prev_tokens = self.dropout(embedded_prev_tokens)
decoder_outputs = []
last_hidden = final_hidden
for i in range(seq_len):
attn_weights = self.attn(last_hidden[-1], encoder_outputs)
# encoder_outputs: (batch, seq_len, dim)
# attn_weights = (batch, seq_len)
context = attn_weights.transpose(1,2).bmm(encoder_outputs)
#print(encoder_outputs.shape)
#print(embedded_prev_tokens.shape, context.shape)
lstm_input = torch.cat((embedded_prev_tokens[:, i:i+1, :], context), dim=2)
lstm_input = lstm_input.transpose(0, 1)
output, last_hidden = self.lstm(lstm_input, last_hidden)
output = output.transpose(0, 1)
decoder_outputs.append(output)
decoder_outputs = torch.cat(decoder_outputs, dim=1)
out = self.out(decoder_outputs)
return out, last_hidden
def student_forward(
self,
final_hidden: torch.Tensor,
encoder_outputs: torch.Tensor,
seq_len: int,
) -> torch.Tensor:
batch_size = encoder_outputs.shape[0]
final_hidden = final_hidden[:self.num_layers]
device = final_hidden.device
prev_output = torch.zeros((batch_size, 1)).long().to(device)
prev_output[:, 0] = self.trg_vocab.stoi['<sos>']
final_encoder_hidden = final_hidden
decoder_outputs = []
last_hidden = final_hidden
for i in range(seq_len):
attn_weights = self.attn(last_hidden[-1], encoder_outputs)
# encoder_outputs: (batch, seq_len, dim)
# attn_weights = (batch, seq_len)
context = attn_weights.transpose(1,2).bmm(encoder_outputs)
embedded_prev_tokens = self.embedding(prev_output)
embedded_prev_tokens = self.dropout(embedded_prev_tokens)
lstm_input = torch.cat((embedded_prev_tokens, context), dim=2)
output, last_hidden = self.lstm(lstm_input, last_hidden)
output = self.out(output)
decoder_outputs.append(output)
topi = output.data.max(2)[1]
prev_output = topi
decoder_outputs = torch.cat(decoder_outputs, dim=1)
return decoder_outputs, last_hidden
def build_model(
src_vocab: Vocabulary,
trg_vocab: Vocabulary,
encoder_embed_dim: int,
encoder_hidden_dim: int,
encoder_dropout: float,
encoder_num_layers: int,
decoder_embed_dim: int,
decoder_hidden_dim: int,
decoder_dropout: float,
decoder_num_layers: int,
teacher_student_ratio: float,
) -> nn.Module:
encoder = EncoderQRNN(
src_vocab=src_vocab,
hidden_size=encoder_hidden_dim,
num_layers=encoder_num_layers,
dropout=encoder_dropout,
)
decoder = AttentionDecoderQRNN(
trg_vocab=trg_vocab,
hidden_size=decoder_hidden_dim,
num_layers=decoder_num_layers,
dropout=decoder_dropout,
teacher_student_ratio=teacher_student_ratio,
)
return EncoderDecoderModel(
encoder,
decoder,
src_vocab,
trg_vocab,
)
def add_args(parser: argparse.ArgumentParser) -> None:
parser.add_argument('--encoder_embed_dim', type=int, default=512, help='Embedding dimension for the encoder')
parser.add_argument('--encoder_hidden_dim', type=int, default=512, help='The hidden (feature size) for the encoder')
parser.add_argument('--encoder_dropout', type=float, default=0.2, help='the encoder dropout to apply')
parser.add_argument('--decoder_embed_dim', type=int, default=512, help='the decoder embedding dimension')
parser.add_argument('--decoder_hidden_dim', type=int, default=512, help='the hidden (feature size) for the decoder')
parser.add_argument('--decoder_dropout', type=float, default=0.2, help='the decoder dropout')
parser.add_argument('--encoder_layers', type=int, default=4, help='the number of layers in the encoder')
parser.add_argument('--decoder_layers', type=int, default=4, help='the number of layers in the decoder')
parser.add_argument('--teacher_student_ratio', type=float, default=1.0, help='the ratio of teacher to student to use')
| [
"torch.zeros",
"torch.nn.Dropout",
"torch.cat"
] | 0.4.1 | AkshatSh/BinarizedNMT | 7fa15149fdfcad6b1fd0956157c3730f3dcd781f |
1.9 | import torch
import torch.backends.cudnn as cudnn
from collections import OrderedDict
from .modules.utils import yaml_loader, create_model_for_provider
from .modules.craft import CRAFT
def copy_state_dict(state_dict):
if list(state_dict.keys())[0].startswith("module"):
start_idx = 1
else:
start_idx = 0
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = ".".join(k.split(".")[start_idx:])
new_state_dict[name] = v
return new_state_dict
def load_craft(config_file, model_pth):
cfg = yaml_loader(config_file)
net = CRAFT()
print("Loading weights from checkpoint (" + model_pth + ")")
if cfg["cuda"]:
net.load_state_dict(copy_state_dict(torch.load(model_pth)))
else:
net.load_state_dict(copy_state_dict(torch.load(model_pth, map_location="cpu")))
if cfg["cuda"]:
net = net.cuda()
net = torch.nn.DataParallel(net)
cudnn.benchmark = False
net.eval()
return cfg, net
def load_craft_onnx(config_file, model_pth):
cfg = yaml_loader(config_file)
device = "CUDAExecutionProvider" if torch.cuda.is_available() else "CPUExecutionProvider"
print("Loading weights from checkpoint (" + model_pth + ")")
net = create_model_for_provider(model_pth, device)
return cfg, net
| [
"torch.cuda.is_available",
"torch.nn.DataParallel",
"torch.load"
] | 1.9.1 | jakartaresearch/receipt-ocr | 003e067eb7d80495226ad15235fa1d626a09103e |
1.9 | """
Copyright (c) 2019-present NAVER Corp.
MIT License
"""
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from ..basenet.vgg16_bn import init_weights
class RefineNet(nn.Module):
def __init__(self):
super(RefineNet, self).__init__()
self.last_conv = nn.Sequential(
nn.Conv2d(34, 64, kernel_size=3, padding=1), nn.BatchNorm2d(
64), nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(
64), nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(
64), nn.ReLU(inplace=True)
)
self.aspp1 = nn.Sequential(
nn.Conv2d(64, 128, kernel_size=3, dilation=6,
padding=6), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
nn.Conv2d(128, 128, kernel_size=1), nn.BatchNorm2d(
128), nn.ReLU(inplace=True),
nn.Conv2d(128, 1, kernel_size=1)
)
self.aspp2 = nn.Sequential(
nn.Conv2d(64, 128, kernel_size=3, dilation=12,
padding=12), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
nn.Conv2d(128, 128, kernel_size=1), nn.BatchNorm2d(
128), nn.ReLU(inplace=True),
nn.Conv2d(128, 1, kernel_size=1)
)
self.aspp3 = nn.Sequential(
nn.Conv2d(64, 128, kernel_size=3, dilation=18,
padding=18), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
nn.Conv2d(128, 128, kernel_size=1), nn.BatchNorm2d(
128), nn.ReLU(inplace=True),
nn.Conv2d(128, 1, kernel_size=1)
)
self.aspp4 = nn.Sequential(
nn.Conv2d(64, 128, kernel_size=3, dilation=24,
padding=24), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
nn.Conv2d(128, 128, kernel_size=1), nn.BatchNorm2d(
128), nn.ReLU(inplace=True),
nn.Conv2d(128, 1, kernel_size=1)
)
init_weights(self.last_conv.modules())
init_weights(self.aspp1.modules())
init_weights(self.aspp2.modules())
init_weights(self.aspp3.modules())
init_weights(self.aspp4.modules())
def forward(self, y, upconv4):
refine = torch.cat([y.permute(0, 3, 1, 2), upconv4], dim=1)
refine = self.last_conv(refine)
aspp1 = self.aspp1(refine)
aspp2 = self.aspp2(refine)
aspp3 = self.aspp3(refine)
aspp4 = self.aspp4(refine)
# out = torch.add([aspp1, aspp2, aspp3, aspp4], dim=1)
out = aspp1 + aspp2 + aspp3 + aspp4
return out.permute(0, 2, 3, 1) # , refine.permute(0,2,3,1)
| [
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d"
] | 1.9.1 | jakartaresearch/receipt-ocr | 003e067eb7d80495226ad15235fa1d626a09103e |
1.2 | '''
Copyright 2019 The Microsoft DeepSpeed Team
'''
import time
import psutil
import torch
from deepspeed.pt.log_utils import logger
def print_rank_0(message):
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
logger.info(message)
else:
logger.info(message)
class SynchronizedWallClockTimer:
"""Group of timers. Borrowed from Nvidia Megatron code"""
class Timer:
"""Timer."""
def __init__(self, name):
self.name_ = name
self.elapsed_ = 0.0
self.started_ = False
self.start_time = time.time()
def start(self):
"""Start the timer."""
assert not self.started_, 'timer has already been started'
torch.cuda.synchronize()
self.start_time = time.time()
self.started_ = True
def stop(self):
"""Stop the timer."""
assert self.started_, 'timer is not started'
torch.cuda.synchronize()
self.elapsed_ += (time.time() - self.start_time)
self.started_ = False
def reset(self):
"""Reset timer."""
self.elapsed_ = 0.0
self.started_ = False
def elapsed(self, reset=True):
"""Calculate the elapsed time."""
started_ = self.started_
# If timing is in progress, end it first.
if self.started_:
self.stop()
# Get the elapsed time.
elapsed_ = self.elapsed_
# Reset the elapsed time
if reset:
self.reset()
# If timing was in progress, set it back.
if started_:
self.start()
return elapsed_
def __init__(self):
self.timers = {}
def __call__(self, name):
if name not in self.timers:
self.timers[name] = self.Timer(name)
return self.timers[name]
@staticmethod
def memory_usage():
alloc = "mem_allocated: {:.4f} GB".format(torch.cuda.memory_allocated() /
(1024 * 1024 * 1024))
max_alloc = "max_mem_allocated: {:.4f} GB".format(
torch.cuda.max_memory_allocated() / (1024 * 1024 * 1024))
cache = "cache_allocated: {:.4f} GB".format(torch.cuda.memory_cached() /
(1024 * 1024 * 1024))
max_cache = "max_cache_allocated: {:.4f} GB".format(
torch.cuda.max_memory_cached() / (1024 * 1024 * 1024))
return " | {} | {} | {} | {}".format(alloc, max_alloc, cache, max_cache)
def log(self, names, normalizer=1.0, reset=True, memory_breakdown=False):
"""Log a group of timers."""
assert normalizer > 0.0
string = 'time (ms)'
for name in names:
elapsed_time = self.timers[name].elapsed(reset=reset) * 1000.0 / normalizer
string += ' | {}: {:.2f}'.format(name, elapsed_time)
if memory_breakdown:
string += self.memory_usage()
print_rank_0(string)
class ThroughputTimer():
def __init__(self,
batch_size,
num_workers,
start_step=2,
steps_per_output=50,
monitor_memory=True,
logging_fn=None):
self.start_time = 0
self.end_time = 0
self.started = False
self.batch_size = batch_size
if batch_size is None:
self.batch_size = 1
self.num_workers = num_workers
self.start_step = start_step
self.epoch_count = 0
self.local_step_count = 0
self.total_step_count = 0
self.total_elapsed_time = 0
self.steps_per_output = steps_per_output
self.monitor_memory = monitor_memory
self.logging = logging_fn
if self.logging is None:
self.logging = logger.info
self.initialized = False
def update_epoch_count(self):
self.epoch_count += 1
self.local_step_count = 0
def _init_timer(self):
self.initialized = True
def start(self):
self._init_timer()
self.started = True
if self.total_step_count >= self.start_step:
torch.cuda.synchronize()
self.start_time = time.time()
def stop(self, report_speed=True):
if not self.started:
return
self.started = False
self.total_step_count += 1
self.local_step_count += 1
if self.total_step_count > self.start_step:
torch.cuda.synchronize()
self.end_time = time.time()
duration = self.end_time - self.start_time
self.total_elapsed_time += duration
if self.local_step_count % self.steps_per_output == 0:
if report_speed:
self.logging("{}/{}, SamplesPerSec={}".format(
self.epoch_count,
self.local_step_count,
self.avg_samples_per_sec()))
if self.monitor_memory:
virt_mem = psutil.virtual_memory()
swap = psutil.swap_memory()
self.logging("{}/{}, vm percent: {}, swap percent: {}".format(
self.epoch_count,
self.local_step_count,
virt_mem.percent,
swap.percent))
def avg_samples_per_sec(self):
if self.total_step_count > 0:
samples_per_step = self.batch_size * self.num_workers
total_step_offset = self.total_step_count - self.start_step
avg_time_per_step = self.total_elapsed_time / total_step_offset
# training samples per second
return samples_per_step / avg_time_per_step
return float("-inf")
| [
"torch.cuda.max_memory_cached",
"torch.cuda.synchronize",
"torch.cuda.max_memory_allocated",
"torch.cuda.memory_allocated",
"torch.cuda.memory_cached",
"torch.distributed.is_initialized",
"torch.distributed.get_rank"
] | 1.2 | MannyKayy/DeepSpeed | 67821f95e4ee04f65965eac4ecc1ffacab4302e6 |
1.4 | import os
import cv2
import time
import argparse
import torch
import warnings
import numpy as np
from detector import build_detector
from deep_sort import build_tracker
from utils.draw import draw_boxes
from utils.parser import get_config
from utils.log import get_logger
from utils.io import write_results
class VideoTracker(object):
def __init__(self, cfg, args, video_path):
self.cfg = cfg
self.args = args
self.video_path = video_path
self.logger = get_logger("root")
self.video_name = video_path.split("/")[-1].split(".")[0]
use_cuda = args.use_cuda and torch.cuda.is_available()
if not use_cuda:
warnings.warn("Running in cpu mode which maybe very slow!", UserWarning)
if args.display:
cv2.namedWindow("test", cv2.WINDOW_NORMAL)
cv2.resizeWindow("test", args.display_width, args.display_height)
if args.cam != -1:
print("Using webcam " + str(args.cam))
self.vdo = cv2.VideoCapture(args.cam)
else:
self.vdo = cv2.VideoCapture()
self.detector = build_detector(cfg, use_cuda=use_cuda)
self.deepsort = build_tracker(cfg, use_cuda=use_cuda)
self.class_names = self.detector.class_names
print("Class name: ", self.class_names)
def __enter__(self):
if self.args.cam != -1:
ret, frame = self.vdo.read()
assert ret, "Error: Camera error"
self.im_width = frame.shape[1]
self.im_height = frame.shape[0]
else:
assert os.path.isfile(self.video_path), "Path error"
self.vdo.open(self.video_path)
self.im_width = int(self.vdo.get(cv2.CAP_PROP_FRAME_WIDTH))
self.im_height = int(self.vdo.get(cv2.CAP_PROP_FRAME_HEIGHT))
assert self.vdo.isOpened()
if self.args.save_path:
os.makedirs(self.args.save_path, exist_ok=True)
# path of saved video and results
self.save_video_path = os.path.join(self.args.save_path, self.video_name + "_results.avi")
self.save_results_path = os.path.join(self.args.save_path, self.video_name + "_results.txt")
# create video writer
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
self.writer = cv2.VideoWriter(self.save_video_path, fourcc, 20, (self.im_width, self.im_height))
# logging
self.logger.info("Save results to {}".format(self.args.save_path))
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
if exc_type:
print(exc_type, exc_value, exc_traceback)
def run(self):
results = []
idx_frame = 0
while self.vdo.grab():
idx_frame += 1
if idx_frame % self.args.frame_interval:
continue
start = time.time()
_, ori_im = self.vdo.retrieve()
im = cv2.cvtColor(ori_im, cv2.COLOR_BGR2RGB)
# do detection
bbox_xywh, cls_conf, cls_ids = self.detector(im)
# keep detections with class id < 7 (person and vehicle classes)
mask = cls_ids < 7
bbox_xywh = bbox_xywh[mask]
# bbox dilation just in case bbox too small, delete this line if using a better pedestrian detector
bbox_xywh[:, 3:] *= 1.2
cls_conf = cls_conf[mask]
# do tracking
outputs = self.deepsort.update(bbox_xywh, cls_conf, im)
# draw boxes for visualization
if len(outputs) > 0:
bbox_tlwh = []
bbox_xyxy = outputs[:, :4]
identities = outputs[:, -1]
ori_im = draw_boxes(ori_im, bbox_xyxy, identities)
for bb_xyxy in bbox_xyxy:
bbox_tlwh.append(self.deepsort._xyxy_to_tlwh(bb_xyxy))
results.append((idx_frame - 1, bbox_tlwh, identities))
end = time.time()
if self.args.display:
cv2.imshow("test", ori_im)
cv2.waitKey(1)
if self.args.save_path:
self.writer.write(ori_im)
# save results
write_results(self.save_results_path, results, 'mot')
# logging
self.logger.info("time: {:.03f}s, fps: {:.03f}, detection numbers: {}, tracking numbers: {}" \
.format(end - start, 1 / (end - start), bbox_xywh.shape[0], len(outputs)))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("VIDEO_PATH", type=str)
parser.add_argument("--config_detection", type=str, default="./configs/yolov3.yaml")
parser.add_argument("--config_deepsort", type=str, default="./configs/deep_sort.yaml")
# parser.add_argument("--ignore_display", dest="display", action="store_false", default=True)
parser.add_argument("--display", action="store_true")
parser.add_argument("--frame_interval", type=int, default=1)
parser.add_argument("--display_width", type=int, default=800)
parser.add_argument("--display_height", type=int, default=600)
parser.add_argument("--save_path", type=str, default="./output/")
parser.add_argument("--cpu", dest="use_cuda", action="store_false", default=True)
parser.add_argument("--camera", action="store", dest="cam", type=int, default="-1")
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
cfg = get_config()
cfg.merge_from_file(args.config_detection)
cfg.merge_from_file(args.config_deepsort)
with VideoTracker(cfg, args, video_path=args.VIDEO_PATH) as vdo_trk:
vdo_trk.run()
| [
"torch.cuda.is_available"
] | 1.4.0 | LeDuySon/Vehicle-tracking-deepsort | ab03375d11d83def0452260d7071e9c1cc7406c2 |
1.4 | import os
import time
import math
import torch
import numpy as np
from PIL import Image, ImageDraw
import struct # get_image_size
import imghdr # get_image_size
def sigmoid(x):
return 1.0 / (math.exp(-x) + 1.)
def softmax(x):
x = torch.exp(x - torch.max(x))
x /= x.sum()
return x
def bbox_iou(box1, box2, x1y1x2y2=True):
if x1y1x2y2:
x1_min = min(box1[0], box2[0])
x2_max = max(box1[2], box2[2])
y1_min = min(box1[1], box2[1])
y2_max = max(box1[3], box2[3])
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
else:
w1, h1 = box1[2], box1[3]
w2, h2 = box2[2], box2[3]
x1_min = min(box1[0] - w1 / 2.0, box2[0] - w2 / 2.0)
x2_max = max(box1[0] + w1 / 2.0, box2[0] + w2 / 2.0)
y1_min = min(box1[1] - h1 / 2.0, box2[1] - h2 / 2.0)
y2_max = max(box1[1] + h1 / 2.0, box2[1] + h2 / 2.0)
w_union = x2_max - x1_min
h_union = y2_max - y1_min
w_cross = w1 + w2 - w_union
h_cross = h1 + h2 - h_union
carea = 0
if w_cross <= 0 or h_cross <= 0:
return 0.0
area1 = w1 * h1
area2 = w2 * h2
carea = w_cross * h_cross
uarea = area1 + area2 - carea
return float(carea / uarea)
def multi_bbox_ious(boxes1, boxes2, x1y1x2y2=True):
if x1y1x2y2:
x1_min = torch.min(boxes1[0], boxes2[0])
x2_max = torch.max(boxes1[2], boxes2[2])
y1_min = torch.min(boxes1[1], boxes2[1])
y2_max = torch.max(boxes1[3], boxes2[3])
w1, h1 = boxes1[2] - boxes1[0], boxes1[3] - boxes1[1]
w2, h2 = boxes2[2] - boxes2[0], boxes2[3] - boxes2[1]
else:
w1, h1 = boxes1[2], boxes1[3]
w2, h2 = boxes2[2], boxes2[3]
x1_min = torch.min(boxes1[0] - w1 / 2.0, boxes2[0] - w2 / 2.0)
x2_max = torch.max(boxes1[0] + w1 / 2.0, boxes2[0] + w2 / 2.0)
y1_min = torch.min(boxes1[1] - h1 / 2.0, boxes2[1] - h2 / 2.0)
y2_max = torch.max(boxes1[1] + h1 / 2.0, boxes2[1] + h2 / 2.0)
w_union = x2_max - x1_min
h_union = y2_max - y1_min
w_cross = w1 + w2 - w_union
h_cross = h1 + h2 - h_union
mask = (((w_cross <= 0) + (h_cross <= 0)) > 0)
area1 = w1 * h1
area2 = w2 * h2
carea = w_cross * h_cross
carea[mask] = 0
uarea = area1 + area2 - carea
return carea / uarea
from nms import boxes_nms
def post_process(boxes, num_classes, conf_thresh=0.01, nms_thresh=0.45, obj_thresh=0.3):
batch_size = boxes.size(0)
# nms
results_boxes = []
for batch_id in range(batch_size):
processed_boxes = []
for cls_id in range(num_classes):
mask = (boxes[batch_id, :, -1] == cls_id) * (boxes[batch_id, :, 4] > obj_thresh)
masked_boxes = boxes[batch_id, mask]
keep = boxes_nms(masked_boxes[:, :4], masked_boxes[:, 5], nms_thresh)
nmsed_boxes = masked_boxes[keep, :]
processed_boxes.append(nmsed_boxes)
processed_boxes = torch.cat(processed_boxes, dim=0)
results_boxes.append(processed_boxes)
return results_boxes
def xywh_to_xyxy(boxes_xywh):
boxes_xyxy = boxes_xywh.copy()
boxes_xyxy[:, 0] = boxes_xywh[:, 0] - boxes_xywh[:, 2] / 2.
boxes_xyxy[:, 1] = boxes_xywh[:, 1] - boxes_xywh[:, 3] / 2.
boxes_xyxy[:, 2] = boxes_xywh[:, 0] + boxes_xywh[:, 2] / 2.
boxes_xyxy[:, 3] = boxes_xywh[:, 1] + boxes_xywh[:, 3] / 2.
return boxes_xyxy
def xyxy_to_xywh(boxes_xyxy):
if isinstance(boxes_xyxy, torch.Tensor):
boxes_xywh = boxes_xyxy.clone()
elif isinstance(boxes_xyxy, np.ndarray):
boxes_xywh = boxes_xyxy.copy()
boxes_xywh[:, 0] = (boxes_xyxy[:, 0] + boxes_xyxy[:, 2]) / 2.
boxes_xywh[:, 1] = (boxes_xyxy[:, 1] + boxes_xyxy[:, 3]) / 2.
boxes_xywh[:, 2] = boxes_xyxy[:, 2] - boxes_xyxy[:, 0]
boxes_xywh[:, 3] = boxes_xyxy[:, 3] - boxes_xyxy[:, 1]
return boxes_xywh
def nms(boxes, nms_thresh):
if len(boxes) == 0:
return boxes
det_confs = torch.zeros(len(boxes))
for i in range(len(boxes)):
det_confs[i] = boxes[i][4]
_, sortIds = torch.sort(det_confs, descending=True)
out_boxes = []
for i in range(len(boxes)):
box_i = boxes[sortIds[i]]
if box_i[4] > 0:
out_boxes.append(box_i)
for j in range(i + 1, len(boxes)):
box_j = boxes[sortIds[j]]
if bbox_iou(box_i, box_j, x1y1x2y2=False) > nms_thresh:
# print(box_i, box_j, bbox_iou(box_i, box_j, x1y1x2y2=False))
box_j[4] = 0
return out_boxes
def convert2cpu(gpu_matrix):
return torch.FloatTensor(gpu_matrix.size()).copy_(gpu_matrix)
def convert2cpu_long(gpu_matrix):
return torch.LongTensor(gpu_matrix.size()).copy_(gpu_matrix)
def get_all_boxes(output, conf_thresh, num_classes, only_objectness=1, validation=False, use_cuda=True):
# total number of inputs (batch size)
# first element (x) for first tuple (x, anchor_mask, num_anchor)
batchsize = output[0]['x'].data.size(0)
all_boxes = []
for i in range(len(output)):
pred, anchors, num_anchors = output[i]['x'].data, output[i]['a'], output[i]['n'].item()
boxes = get_region_boxes(pred, conf_thresh, num_classes, anchors, num_anchors, \
only_objectness=only_objectness, validation=validation, use_cuda=use_cuda)
all_boxes.append(boxes)
return torch.cat(all_boxes, dim=1)
def get_region_boxes(output, obj_thresh, num_classes, anchors, num_anchors, only_objectness=1, validation=False,
use_cuda=True):
device = torch.device("cuda" if use_cuda else "cpu")
anchors = anchors.to(device)
anchor_step = anchors.size(0) // num_anchors
if output.dim() == 3:
output = output.unsqueeze(0)
batch = output.size(0)
assert (output.size(1) == (5 + num_classes) * num_anchors)
h = output.size(2)
w = output.size(3)
cls_anchor_dim = batch * num_anchors * h * w
# all_boxes = []
output = output.view(batch * num_anchors, 5 + num_classes, h * w).transpose(0, 1).contiguous().view(5 + num_classes,
cls_anchor_dim)
grid_x = torch.linspace(0, w - 1, w).repeat(batch * num_anchors, h, 1).view(cls_anchor_dim).to(device)
grid_y = torch.linspace(0, h - 1, h).repeat(w, 1).t().repeat(batch * num_anchors, 1, 1).view(cls_anchor_dim).to(
device)
ix = torch.LongTensor(range(0, 2)).to(device)
anchor_w = anchors.view(num_anchors, anchor_step).index_select(1, ix[0]).repeat(1, batch, h * w).view(
cls_anchor_dim)
anchor_h = anchors.view(num_anchors, anchor_step).index_select(1, ix[1]).repeat(1, batch, h * w).view(
cls_anchor_dim)
xs, ys = torch.sigmoid(output[0]) + grid_x, torch.sigmoid(output[1]) + grid_y
ws, hs = torch.exp(output[2]) * anchor_w.detach(), torch.exp(output[3]) * anchor_h.detach()
det_confs = torch.sigmoid(output[4])
# by ysyun, dim=1 means input is 2D or even dimension else dim=0
cls_confs = torch.nn.Softmax(dim=1)(output[5:5 + num_classes].transpose(0, 1)).detach()
cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)
cls_max_confs = cls_max_confs.view(-1)
cls_max_ids = cls_max_ids.view(-1).float()
# sz_hw = h*w
# sz_hwa = sz_hw*num_anchors
# det_confs = convert2cpu(det_confs)
# cls_max_confs = convert2cpu(cls_max_confs)
# cls_max_ids = convert2cpu_long(cls_max_ids)
# xs, ys = convert2cpu(xs), convert2cpu(ys)
# ws, hs = convert2cpu(ws), convert2cpu(hs)
cls_confs = det_confs * cls_max_confs
# boxes = [xs/w, ys/h, ws/w, hs/h, det_confs, cls_confs, cls_max_ids]
xs, ys, ws, hs = xs / w, ys / h, ws / w, hs / h
x1, y1, x2, y2 = torch.clamp_min(xs - ws / 2., 0.), torch.clamp_min(ys - hs / 2., 0.), torch.clamp_max(xs + ws / 2.,
1.), torch.clamp_max(
ys + hs / 2., 1.)
boxes = [x1, y1, x2, y2, det_confs, cls_confs, cls_max_ids]
boxes = list(map(lambda x: x.view(batch, -1), boxes))
boxes = torch.stack(boxes, dim=2)
# for b in range(batch):
# boxes = []
# for cy in range(h):
# for cx in range(w):
# for i in range(num_anchors):
# ind = b*sz_hwa + i*sz_hw + cy*w + cx
# det_conf = det_confs[ind]
# if only_objectness:
# conf = det_confs[ind]
# else:
# conf = det_confs[ind] * cls_max_confs[ind]
# if conf > conf_thresh:
# bcx = xs[ind]
# bcy = ys[ind]
# bw = ws[ind]
# bh = hs[ind]
# cls_max_conf = cls_max_confs[ind]
# cls_max_id = cls_max_ids[ind]
# box = [bcx/w, bcy/h, bw/w, bh/h, det_conf, cls_max_conf, cls_max_id]
# boxes.append(box)
# all_boxes.append(boxes)
return boxes
# def get_all_boxes(output, conf_thresh, num_classes, only_objectness=1, validation=False, use_cuda=True):
# # total number of inputs (batch size)
# # first element (x) for first tuple (x, anchor_mask, num_anchor)
# tot = output[0]['x'].data.size(0)
# all_boxes = [[] for i in range(tot)]
# for i in range(len(output)):
# pred, anchors, num_anchors = output[i]['x'].data, output[i]['a'], output[i]['n'].item()
# b = get_region_boxes(pred, conf_thresh, num_classes, anchors, num_anchors, \
# only_objectness=only_objectness, validation=validation, use_cuda=use_cuda)
# for t in range(tot):
# all_boxes[t] += b[t]
# return all_boxes
# def get_region_boxes(output, conf_thresh, num_classes, anchors, num_anchors, only_objectness=1, validation=False, use_cuda=True):
# device = torch.device("cuda" if use_cuda else "cpu")
# anchors = anchors.to(device)
# anchor_step = anchors.size(0)//num_anchors
# if output.dim() == 3:
# output = output.unsqueeze(0)
# batch = output.size(0)
# assert(output.size(1) == (5+num_classes)*num_anchors)
# h = output.size(2)
# w = output.size(3)
# cls_anchor_dim = batch*num_anchors*h*w
# t0 = time.time()
# all_boxes = []
# output = output.view(batch*num_anchors, 5+num_classes, h*w).transpose(0,1).contiguous().view(5+num_classes, cls_anchor_dim)
# grid_x = torch.linspace(0, w-1, w).repeat(batch*num_anchors, h, 1).view(cls_anchor_dim).to(device)
# grid_y = torch.linspace(0, h-1, h).repeat(w,1).t().repeat(batch*num_anchors, 1, 1).view(cls_anchor_dim).to(device)
# ix = torch.LongTensor(range(0,2)).to(device)
# anchor_w = anchors.view(num_anchors, anchor_step).index_select(1, ix[0]).repeat(1, batch, h*w).view(cls_anchor_dim)
# anchor_h = anchors.view(num_anchors, anchor_step).index_select(1, ix[1]).repeat(1, batch, h*w).view(cls_anchor_dim)
# xs, ys = torch.sigmoid(output[0]) + grid_x, torch.sigmoid(output[1]) + grid_y
# ws, hs = torch.exp(output[2]) * anchor_w.detach(), torch.exp(output[3]) * anchor_h.detach()
# det_confs = torch.sigmoid(output[4])
# # by ysyun, dim=1 means input is 2D or even dimension else dim=0
# cls_confs = torch.nn.Softmax(dim=1)(output[5:5+num_classes].transpose(0,1)).detach()
# cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)
# cls_max_confs = cls_max_confs.view(-1)
# cls_max_ids = cls_max_ids.view(-1)
# t1 = time.time()
# sz_hw = h*w
# sz_hwa = sz_hw*num_anchors
# det_confs = convert2cpu(det_confs)
# cls_max_confs = convert2cpu(cls_max_confs)
# cls_max_ids = convert2cpu_long(cls_max_ids)
# xs, ys = convert2cpu(xs), convert2cpu(ys)
# ws, hs = convert2cpu(ws), convert2cpu(hs)
# if validation:
# cls_confs = convert2cpu(cls_confs.view(-1, num_classes))
# t2 = time.time()
# for b in range(batch):
# boxes = []
# for cy in range(h):
# for cx in range(w):
# for i in range(num_anchors):
# ind = b*sz_hwa + i*sz_hw + cy*w + cx
# det_conf = det_confs[ind]
# if only_objectness:
# conf = det_confs[ind]
# else:
# conf = det_confs[ind] * cls_max_confs[ind]
# if conf > conf_thresh:
# bcx = xs[ind]
# bcy = ys[ind]
# bw = ws[ind]
# bh = hs[ind]
# cls_max_conf = cls_max_confs[ind]
# cls_max_id = cls_max_ids[ind]
# box = [bcx/w, bcy/h, bw/w, bh/h, det_conf, cls_max_conf, cls_max_id]
# if (not only_objectness) and validation:
# for c in range(num_classes):
# tmp_conf = cls_confs[ind][c]
# if c != cls_max_id and det_confs[ind]*tmp_conf > conf_thresh:
# box.append(tmp_conf)
# box.append(c)
# boxes.append(box)
# all_boxes.append(boxes)
# t3 = time.time()
# if False:
# print('---------------------------------')
# print('matrix computation : %f' % (t1-t0))
# print(' gpu to cpu : %f' % (t2-t1))
# print(' boxes filter : %f' % (t3-t2))
# print('---------------------------------')
# return all_boxes
def plot_boxes_cv2(img, boxes, savename=None, class_names=None, color=None):
import cv2
colors = torch.FloatTensor([[1, 0, 1], [0, 0, 1], [0, 1, 1], [0, 1, 0], [1, 1, 0], [1, 0, 0]])
def get_color(c, x, max_val):
ratio = float(x) / max_val * 5
i = int(math.floor(ratio))
j = int(math.ceil(ratio))
ratio -= i
r = (1 - ratio) * colors[i][c] + ratio * colors[j][c]
return int(r * 255)
width = img.shape[1]
height = img.shape[0]
for i in range(len(boxes)):
box = boxes[i]
x1 = int(round((box[0] - box[2] / 2.0) * width))
y1 = int(round((box[1] - box[3] / 2.0) * height))
x2 = int(round((box[0] + box[2] / 2.0) * width))
y2 = int(round((box[1] + box[3] / 2.0) * height))
if color:
rgb = color
else:
rgb = (255, 0, 0)
if len(box) >= 7 and class_names:
cls_conf = box[5]
cls_id = box[6]
# print('%s: %f' % (class_names[cls_id], cls_conf))
classes = len(class_names)
offset = cls_id * 123457 % classes
red = get_color(2, offset, classes)
green = get_color(1, offset, classes)
blue = get_color(0, offset, classes)
if color is None:
rgb = (red, green, blue)
img = cv2.putText(img, class_names[cls_id], (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1.2, rgb, 1)
img = cv2.rectangle(img, (x1, y1), (x2, y2), rgb, 1)
if savename:
print("save plot results to %s" % savename)
cv2.imwrite(savename, img)
return img
def plot_boxes(img, boxes, savename=None, class_names=None):
colors = torch.FloatTensor([[1, 0, 1], [0, 0, 1], [0, 1, 1], [0, 1, 0], [1, 1, 0], [1, 0, 0]])
def get_color(c, x, max_val):
ratio = float(x) / max_val * 5
i = int(math.floor(ratio))
j = int(math.ceil(ratio))
ratio -= i
r = (1 - ratio) * colors[i][c] + ratio * colors[j][c]
return int(r * 255)
width = img.width
height = img.height
draw = ImageDraw.Draw(img)
print("%d box(es) is(are) found" % len(boxes))
for i in range(len(boxes)):
box = boxes[i]
x1 = (box[0] - box[2] / 2.0) * width
y1 = (box[1] - box[3] / 2.0) * height
x2 = (box[0] + box[2] / 2.0) * width
y2 = (box[1] + box[3] / 2.0) * height
rgb = (255, 0, 0)
if len(box) >= 7 and class_names:
cls_conf = box[5]
cls_id = box[6]
print('%s: %f' % (class_names[cls_id], cls_conf))
classes = len(class_names)
offset = cls_id * 123457 % classes
red = get_color(2, offset, classes)
green = get_color(1, offset, classes)
blue = get_color(0, offset, classes)
rgb = (red, green, blue)
draw.text((x1, y1), class_names[cls_id], fill=rgb)
draw.rectangle([x1, y1, x2, y2], outline=rgb)
if savename:
print("save plot results to %s" % savename)
img.save(savename)
return img
def read_truths(lab_path):
if not os.path.exists(lab_path):
return np.array([])
if os.path.getsize(lab_path):
truths = np.loadtxt(lab_path)
truths = truths.reshape(truths.size // 5, 5) # to avoid single truth problem
return truths
else:
return np.array([])
def read_truths_args(lab_path, min_box_scale):
truths = read_truths(lab_path)
new_truths = []
for i in range(truths.shape[0]):
if truths[i][3] < min_box_scale:
continue
new_truths.append([truths[i][0], truths[i][1], truths[i][2], truths[i][3], truths[i][4]])
return np.array(new_truths)
def load_class_names(namesfile):
class_names = []
with open(namesfile, 'r', encoding='utf8') as fp:
lines = fp.readlines()
for line in lines:
class_names.append(line.strip())
return class_names
def image2torch(img):
if isinstance(img, Image.Image):
width = img.width
height = img.height
img = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))
img = img.view(height, width, 3).transpose(0, 1).transpose(0, 2).contiguous()
img = img.view(1, 3, height, width)
img = img.float().div(255.0)
elif type(img) == np.ndarray: # cv2 image
img = torch.from_numpy(img.transpose(2, 0, 1)).float().div(255.0).unsqueeze(0)
else:
print("unknown image type")
exit(-1)
return img
def do_detect(model, img, conf_thresh, nms_thresh, use_cuda=True):
model.eval()
t0 = time.time()
img = image2torch(img)
t1 = time.time()
img = img.to(torch.device("cuda" if use_cuda else "cpu"))
t2 = time.time()
out_boxes = model(img)
boxes = get_all_boxes(out_boxes, conf_thresh, model.num_classes, use_cuda=use_cuda)[0]
t3 = time.time()
boxes = nms(boxes, nms_thresh)
t4 = time.time()
if False:
print('-----------------------------------')
print(' image to tensor : %f' % (t1 - t0))
print(' tensor to cuda : %f' % (t2 - t1))
print(' predict : %f' % (t3 - t2))
print(' nms : %f' % (t4 - t3))
print(' total : %f' % (t4 - t0))
print('-----------------------------------')
return boxes
def read_data_cfg(datacfg):
options = dict()
options['gpus'] = '0,1,2,3'
options['num_workers'] = '10'
with open(datacfg) as fp:
lines = fp.readlines()
for line in lines:
line = line.strip()
if line == '':
continue
key, value = line.split('=')
key = key.strip()
value = value.strip()
options[key] = value
return options
def scale_bboxes(bboxes, width, height):
import copy
dets = copy.deepcopy(bboxes)
for i in range(len(dets)):
dets[i][0] = dets[i][0] * width
dets[i][1] = dets[i][1] * height
dets[i][2] = dets[i][2] * width
dets[i][3] = dets[i][3] * height
return dets
def file_lines(thefilepath):
count = 0
thefile = open(thefilepath, 'rb')
while True:
buffer = thefile.read(8192 * 1024)
if not buffer:
break
count += buffer.count(b'\n')
thefile.close()
return count
def get_image_size(fname):
"""
Determine the image type of fhandle and return its size.
from draco
"""
with open(fname, 'rb') as fhandle:
head = fhandle.read(24)
if len(head) != 24:
return
if imghdr.what(fname) == 'png':
check = struct.unpack('>i', head[4:8])[0]
if check != 0x0d0a1a0a:
return
width, height = struct.unpack('>ii', head[16:24])
elif imghdr.what(fname) == 'gif':
width, height = struct.unpack('<HH', head[6:10])
elif imghdr.what(fname) == 'jpeg' or imghdr.what(fname) == 'jpg':
try:
fhandle.seek(0) # Read 0xff next
size = 2
ftype = 0
while not 0xc0 <= ftype <= 0xcf:
fhandle.seek(size, 1)
byte = fhandle.read(1)
while ord(byte) == 0xff:
byte = fhandle.read(1)
ftype = ord(byte)
size = struct.unpack('>H', fhandle.read(2))[0] - 2
# We are at a SOFn block
fhandle.seek(1, 1) # Skip `precision' byte.
height, width = struct.unpack('>HH', fhandle.read(4))
except Exception: # IGNORE:W0703
return
else:
return
return width, height
def logging(message):
print('%s %s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), message))
| [
"torch.device",
"torch.cat",
"torch.sigmoid",
"torch.stack",
"torch.min",
"torch.max",
"torch.clamp_max",
"torch.nn.Softmax",
"torch.FloatTensor",
"torch.linspace",
"torch.exp",
"torch.sort",
"torch.clamp_min"
] | 1.4.0 | LeDuySon/Vehicle-tracking-deepsort | ab03375d11d83def0452260d7071e9c1cc7406c2 |
0.4 | import ai.causalcell.utils.configuration as configuration
import ai.causalcell.datasets.synthetic_dataset as sd
import logging
import numpy as np
import torch
import random
import os
import copy
import dill as pickle
import skopt
from collections import OrderedDict
# from ai.causalcell.datasets.synthetic_dataset import global_graph
_LOG = logging.getLogger(__name__)
def set_seed(seed, cuda=False):
"""
Fix the seed for numpy, python random, and pytorch.
"""
print('pytorch/random seed: {}'.format(seed))
# Numpy, python, pytorch (cpu), pytorch (gpu).
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if cuda:
torch.cuda.manual_seed_all(seed)
def save_results(results, output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Save best model
output_name = "best_model_{}.pth.tar".format(results["exp_id"])
torch.save(results["best_model"].state_dict(), os.path.join(output_dir, output_name))
# Save last model
output_name = "last_model_{}.pth.tar".format(results["exp_id"])
torch.save(results["last_model"].state_dict(), os.path.join(output_dir, output_name))
# Save the rest of the results dictionary
del results["best_model"]
del results["last_model"]
output_name = "results_{}.pkl".format(results["exp_id"])
with open(os.path.join(output_dir, output_name), 'wb') as f:
pickle.dump(results, f, protocol=pickle.HIGHEST_PROTOCOL)
def train_epoch(model, device, train_loader, epoch):
model.train()
all_loss, all_losses = [], None
for batch_idx, data in enumerate(train_loader):
x, fingerprint, compound, line = data
x = x.to(device)
fingerprint = fingerprint.to(device)
# Expected to return a dictionary of outputs.
loss, losses = model.forward_backward_update(x, fingerprint, compound, line, device=device)
if all_losses is None:
all_losses = {i: [losses[i].detach().cpu().item()] for i in losses.keys()}
else:
for i in losses.keys():
all_losses[i].append(losses[i].detach().cpu().item())
all_loss.append(loss.detach())
all_loss = float(torch.mean(torch.tensor(all_loss)).detach().numpy())
print('epoch {} Mean train loss: {:.4f}'.format(epoch, all_loss))
return all_loss, all_losses
def evaluate_epoch(model, device, data_loader, epoch):
"""Evaluates a given model on given data."""
model.eval()
all_loss, all_losses = [], None
with torch.no_grad():
for batch_idx, data in enumerate(data_loader):
x, fingerprint, compound, line = data
x = x.to(device)
fingerprint = fingerprint.to(device)
# Expected to return a dictionary of outputs.
loss, losses = model.forward_loss(x, fingerprint, compound, line, device=device)
if all_losses is None:
all_losses = {i: [losses[i].detach().cpu().item()] for i in losses.keys()}
else:
for i in losses.keys():
all_losses[i].append(losses[i].detach().cpu().item())
# Sum up batch loss.
loss = sum(losses.values())
all_loss.append(loss)
all_loss = float(torch.mean(torch.tensor(all_loss)).detach().numpy())
print('epoch {} Mean valid loss: {:.4f}'.format(epoch, all_loss))
return all_loss, all_losses
def train(cfg):
"""
Trains a model on a dataset given the supplied configuration.
save is by default True and will result in the model's performance being
saved to a handy pickle file, as well as the best-performing model being
saved. Set this to False when doing an outer loop of hyperparameter
optimization.
"""
exp_name = cfg['experiment_name']
exp_id = cfg['exp_id']
n_epochs = cfg['n_epochs']
seed = cfg['seed']
output_dir = os.path.join('results', cfg['experiment_name'])
early_stopping = cfg['early_stopping']
patience_max = cfg['patience_max']
patience = 0
set_seed(seed)
# dataloader
valid_loader = configuration.setup_dataloader(cfg, 'valid')
train_loader = configuration.setup_dataloader(cfg, 'train')
device = 'cuda' if cfg['cuda'] else 'cpu'
model = configuration.setup_model(cfg).to(device)
print('model: \n{}'.format(model))
best_valid_loss = np.inf
best_model, best_epoch = None, None
all_train_losses, all_valid_losses = [], []
for epoch in range(n_epochs):
train_loss, train_losses = train_epoch(model=model, device=device, train_loader=train_loader,
epoch=epoch)
valid_loss, valid_losses = evaluate_epoch(model=model, device=device, data_loader=valid_loader, epoch=epoch)
all_train_losses.append(train_losses)
all_valid_losses.append(valid_losses)
if valid_loss < best_valid_loss:
best_model = copy.deepcopy(model)
best_epoch = epoch
best_valid_loss = valid_loss
else:
patience += 1
if early_stopping and patience > patience_max:
break
results = {"exp_name": exp_name,
"config": cfg,
"data_graph": sd.global_graph,
"seed": seed,
"exp_id": exp_id,
"n_envs_in_split": {"train": train_loader.batch_sampler.n_envs_in_split,
"valid": valid_loader.batch_sampler.n_envs_in_split},
"n_samples_in_split": {"train": train_loader.batch_sampler.n_samples,
"valid": valid_loader.batch_sampler.n_samples},
"losses": {"train": all_train_losses, "valid": all_valid_losses},
"best_epoch": best_epoch,
"best_model": best_model.to('cpu'),
"last_model": model.to('cpu')}
save_results(results, output_dir)
| [
"torch.cuda.manual_seed_all",
"torch.no_grad",
"torch.manual_seed",
"torch.tensor"
] | 0.4.1 | Bertinus/causal_cell_embedding | 417b55749130fc7b7832fd3ee4c49feff4a04593 |
1.1 | import os
import pytest
import torch
import tests.base.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.core import memory
from pytorch_lightning.trainer.distrib_parts import (
parse_gpu_ids,
determine_root_gpu_device,
)
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.base import LightningTestModel
PRETEND_N_OF_GPUS = 16
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_multi_gpu_model_ddp2(tmpdir):
"""Make sure DDP2 works."""
tutils.reset_seed()
tutils.set_random_master_port()
model, hparams = tutils.get_default_model()
trainer_options = dict(
default_save_path=tmpdir,
show_progress_bar=True,
max_epochs=1,
train_percent_check=0.4,
val_percent_check=0.2,
gpus=2,
weights_summary=None,
distributed_backend='ddp2'
)
tutils.run_model_test(trainer_options, model)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_multi_gpu_model_ddp(tmpdir):
"""Make sure DDP works."""
tutils.reset_seed()
tutils.set_random_master_port()
model, hparams = tutils.get_default_model()
trainer_options = dict(
default_save_path=tmpdir,
show_progress_bar=False,
max_epochs=1,
train_percent_check=0.4,
val_percent_check=0.2,
gpus=[0, 1],
distributed_backend='ddp'
)
tutils.run_model_test(trainer_options, model)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_ddp_all_dataloaders_passed_to_fit(tmpdir):
"""Make sure DDP works with dataloaders passed to fit()"""
tutils.reset_seed()
tutils.set_random_master_port()
model, hparams = tutils.get_default_model()
trainer_options = dict(default_save_path=tmpdir,
show_progress_bar=False,
max_epochs=1,
train_percent_check=0.4,
val_percent_check=0.2,
gpus=[0, 1],
distributed_backend='ddp')
fit_options = dict(train_dataloader=model.train_dataloader(),
val_dataloaders=model.val_dataloader())
trainer = Trainer(**trainer_options)
result = trainer.fit(model, **fit_options)
assert result == 1, "DDP doesn't work with dataloaders passed to fit()."
def test_optimizer_return_options():
tutils.reset_seed()
trainer = Trainer()
model, hparams = tutils.get_default_model()
# single optimizer
opt_a = torch.optim.Adam(model.parameters(), lr=0.002)
opt_b = torch.optim.SGD(model.parameters(), lr=0.002)
scheduler_a = torch.optim.lr_scheduler.StepLR(opt_a, 10)
scheduler_b = torch.optim.lr_scheduler.StepLR(opt_b, 10)
# single optimizer
optim, lr_sched, freq = trainer.init_optimizers(opt_a)
assert len(optim) == 1 and len(lr_sched) == 0 and len(freq) == 0
# opt tuple
opts = (opt_a, opt_b)
optim, lr_sched, freq = trainer.init_optimizers(opts)
assert len(optim) == 2 and optim[0] == opts[0] and optim[1] == opts[1]
assert len(lr_sched) == 0 and len(freq) == 0
# opt list
opts = [opt_a, opt_b]
optim, lr_sched, freq = trainer.init_optimizers(opts)
assert len(optim) == 2 and optim[0] == opts[0] and optim[1] == opts[1]
assert len(lr_sched) == 0 and len(freq) == 0
# opt tuple of 2 lists
opts = ([opt_a], [scheduler_a])
optim, lr_sched, freq = trainer.init_optimizers(opts)
assert len(optim) == 1 and len(lr_sched) == 1 and len(freq) == 0
assert optim[0] == opts[0][0]
assert lr_sched[0] == dict(scheduler=scheduler_a, interval='epoch',
frequency=1, reduce_on_plateau=False, monitor='val_loss')
# opt single dictionary
opts = {"optimizer": opt_a, "lr_scheduler": scheduler_a}
optim, lr_sched, freq = trainer.init_optimizers(opts)
assert len(optim) == 1 and len(lr_sched) == 1 and len(freq) == 0
assert optim[0] == opt_a
assert lr_sched[0] == dict(scheduler=scheduler_a, interval='epoch',
frequency=1, reduce_on_plateau=False, monitor='val_loss')
# opt multiple dictionaries with frequencies
opts = (
{"optimizer": opt_a, "lr_scheduler": scheduler_a, "frequency": 1},
{"optimizer": opt_b, "lr_scheduler": scheduler_b, "frequency": 5},
)
optim, lr_sched, freq = trainer.init_optimizers(opts)
assert len(optim) == 2 and len(lr_sched) == 2 and len(freq) == 2
assert optim[0] == opt_a
assert lr_sched[0] == dict(scheduler=scheduler_a, interval='epoch',
frequency=1, reduce_on_plateau=False, monitor='val_loss')
assert freq == [1, 5]
def test_cpu_slurm_save_load(tmpdir):
"""Verify model save/load/checkpoint on CPU."""
tutils.reset_seed()
hparams = tutils.get_default_hparams()
model = LightningTestModel(hparams)
# logger file to get meta
logger = tutils.get_default_testtube_logger(tmpdir, False)
version = logger.version
trainer_options = dict(
max_epochs=1,
logger=logger,
checkpoint_callback=ModelCheckpoint(tmpdir)
)
# fit model
trainer = Trainer(**trainer_options)
result = trainer.fit(model)
real_global_step = trainer.global_step
# training complete
assert result == 1, 'amp + ddp model failed to complete'
# predict with trained model before saving
# make a prediction
dataloaders = model.test_dataloader()
if not isinstance(dataloaders, list):
dataloaders = [dataloaders]
for dataloader in dataloaders:
for batch in dataloader:
break
x, y = batch
x = x.view(x.size(0), -1)
model.eval()
pred_before_saving = model(x)
# test HPC saving
# simulate snapshot on slurm
saved_filepath = trainer.hpc_save(tmpdir, logger)
assert os.path.exists(saved_filepath)
# new logger file to get meta
logger = tutils.get_default_testtube_logger(tmpdir, False, version=version)
trainer_options = dict(
max_epochs=1,
logger=logger,
checkpoint_callback=ModelCheckpoint(tmpdir),
)
trainer = Trainer(**trainer_options)
model = LightningTestModel(hparams)
# set the epoch start hook so we can predict before the model does the full training
def assert_pred_same():
assert trainer.global_step == real_global_step and trainer.global_step > 0
# predict with loaded model to make sure answers are the same
trainer.model.eval()
new_pred = trainer.model(x)
assert torch.all(torch.eq(pred_before_saving, new_pred)).item() == 1
model.on_epoch_start = assert_pred_same
# by calling fit again, we trigger training, loading weights from the cluster
# and our hook to predict using current model before any more weight updates
trainer.fit(model)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_multi_gpu_none_backend(tmpdir):
"""Make sure when using multiple GPUs the user can't use `distributed_backend = None`."""
tutils.reset_seed()
model, hparams = tutils.get_default_model()
trainer_options = dict(
default_save_path=tmpdir,
show_progress_bar=False,
max_epochs=1,
train_percent_check=0.1,
val_percent_check=0.1,
gpus='-1'
)
with pytest.warns(UserWarning):
tutils.run_model_test(trainer_options, model)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_multi_gpu_model_dp(tmpdir):
"""Make sure DP works."""
tutils.reset_seed()
model, hparams = tutils.get_default_model()
trainer_options = dict(
default_save_path=tmpdir,
show_progress_bar=False,
distributed_backend='dp',
max_epochs=1,
train_percent_check=0.1,
val_percent_check=0.1,
gpus='-1'
)
tutils.run_model_test(trainer_options, model)
# test memory helper functions
memory.get_memory_profile('min_max')
@pytest.fixture
def mocked_device_count(monkeypatch):
def device_count():
return PRETEND_N_OF_GPUS
monkeypatch.setattr(torch.cuda, 'device_count', device_count)
@pytest.fixture
def mocked_device_count_0(monkeypatch):
def device_count():
return 0
monkeypatch.setattr(torch.cuda, 'device_count', device_count)
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(["gpus", "expected_num_gpus", "distributed_backend"], [
pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
pytest.param(0, 0, None, id="Oth gpu, expect 1 gpu to use."),
pytest.param(1, 1, None, id="1st gpu, expect 1 gpu to use."),
pytest.param(-1, PRETEND_N_OF_GPUS, "ddp", id="-1 - use all gpus"),
pytest.param('-1', PRETEND_N_OF_GPUS, "ddp", id="'-1' - use all gpus"),
pytest.param(3, 3, "ddp", id="3rd gpu - 1 gpu to use (backend:ddp)")
])
def test_trainer_gpu_parse(mocked_device_count, gpus, expected_num_gpus, distributed_backend):
assert Trainer(gpus=gpus, distributed_backend=distributed_backend).num_gpus == expected_num_gpus
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(["gpus", "expected_num_gpus", "distributed_backend"], [
pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
pytest.param(None, 0, "ddp", id="None - expect 0 gpu to use."),
])
def test_trainer_num_gpu_0(mocked_device_count_0, gpus, expected_num_gpus, distributed_backend):
assert Trainer(gpus=gpus, distributed_backend=distributed_backend).num_gpus == expected_num_gpus
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus', 'expected_root_gpu', "distributed_backend"], [
pytest.param(None, None, "ddp", id="None is None"),
pytest.param(0, None, "ddp", id="O gpus, expect gpu root device to be None."),
pytest.param(1, 0, "ddp", id="1 gpu, expect gpu root device to be 0."),
pytest.param(-1, 0, "ddp", id="-1 - use all gpus, expect gpu root device to be 0."),
pytest.param('-1', 0, "ddp", id="'-1' - use all gpus, expect gpu root device to be 0."),
pytest.param(3, 0, "ddp", id="3 gpus, expect gpu root device to be 0.(backend:ddp)")
])
def test_root_gpu_property(mocked_device_count, gpus, expected_root_gpu, distributed_backend):
assert Trainer(gpus=gpus, distributed_backend=distributed_backend).root_gpu == expected_root_gpu
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize([
'gpus', 'expected_root_gpu', "distributed_backend"], [
pytest.param(None, None, None, id="None is None"),
pytest.param(None, None, "ddp", id="None is None"),
pytest.param(0, None, "ddp", id="None is None"),
])
def test_root_gpu_property_0_passing(
mocked_device_count_0, gpus, expected_root_gpu, distributed_backend):
assert Trainer(gpus=gpus, distributed_backend=distributed_backend).root_gpu == expected_root_gpu
# Asking for a gpu when none are available will result in a MisconfigurationException
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize([
'gpus', 'expected_root_gpu', "distributed_backend"], [
pytest.param(1, None, "ddp"),
pytest.param(3, None, "ddp"),
pytest.param(3, None, "ddp"),
pytest.param([1, 2], None, "ddp"),
pytest.param([0, 1], None, "ddp"),
pytest.param(-1, None, "ddp"),
pytest.param('-1', None, "ddp")
])
def test_root_gpu_property_0_raising(
mocked_device_count_0, gpus, expected_root_gpu, distributed_backend):
with pytest.raises(MisconfigurationException):
Trainer(gpus=gpus, distributed_backend=distributed_backend).root_gpu
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus', 'expected_root_gpu'], [
pytest.param(None, None, id="No gpus, expect gpu root device to be None"),
pytest.param([0], 0, id="Oth gpu, expect gpu root device to be 0."),
pytest.param([1], 1, id="1st gpu, expect gpu root device to be 1."),
pytest.param([3], 3, id="3rd gpu, expect gpu root device to be 3."),
pytest.param([1, 2], 1, id="[1, 2] gpus, expect gpu root device to be 1."),
])
def test_determine_root_gpu_device(gpus, expected_root_gpu):
assert determine_root_gpu_device(gpus) == expected_root_gpu
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus', 'expected_gpu_ids'], [
pytest.param(None, None),
pytest.param(0, None),
pytest.param(1, [0]),
pytest.param(3, [0, 1, 2]),
pytest.param(-1, list(range(PRETEND_N_OF_GPUS)), id="-1 - use all gpus"),
pytest.param([0], [0]),
pytest.param([1, 3], [1, 3]),
pytest.param('0', [0]),
pytest.param('3', [3]),
pytest.param('1, 3', [1, 3]),
pytest.param('-1', list(range(PRETEND_N_OF_GPUS)), id="'-1' - use all gpus"),
])
def test_parse_gpu_ids(mocked_device_count, gpus, expected_gpu_ids):
assert parse_gpu_ids(gpus) == expected_gpu_ids
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus'], [
pytest.param(0.1),
pytest.param(-2),
pytest.param(False),
pytest.param([]),
pytest.param([-1]),
pytest.param([None]),
pytest.param(['0']),
pytest.param((0, 1)),
])
def test_parse_gpu_fail_on_unsupported_inputs(mocked_device_count, gpus):
with pytest.raises(MisconfigurationException):
parse_gpu_ids(gpus)
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize("gpus", [''])
def test_parse_gpu_fail_on_empty_string(mocked_device_count, gpus):
# This currently results in a ValueError instead of MisconfigurationException
with pytest.raises(ValueError):
parse_gpu_ids(gpus)
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize("gpus", [[1, 2, 19], -1, '-1'])
def test_parse_gpu_fail_on_non_existant_id(mocked_device_count_0, gpus):
with pytest.raises(MisconfigurationException):
parse_gpu_ids(gpus)
@pytest.mark.gpus_param_tests
def test_parse_gpu_fail_on_non_existant_id_2(mocked_device_count):
with pytest.raises(MisconfigurationException):
parse_gpu_ids([1, 2, 19])
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize("gpus", [-1, '-1'])
def test_parse_gpu_returns_None_when_no_devices_are_available(mocked_device_count_0, gpus):
with pytest.raises(MisconfigurationException):
parse_gpu_ids(gpus)
# if __name__ == '__main__':
# pytest.main([__file__])
| [
"torch.optim.lr_scheduler.StepLR",
"torch.eq",
"torch.cuda.device_count"
] | 1.1 | igor-krawczuk/pytorch-lightning | 7de51f78ac2ec09b230e1cb8a786f872de3b861f |
1.5 | import argparse
import os, sys
from abc import ABC, abstractmethod
import torch
import models
import datasets
class BaseOptions(ABC):
"""This class is an abstract base class (ABC) for options.
To create a subclass, you need to implement the following five functions:
-- <__init__>: initialize the class; first call BaseOptions.__init__(self, opt).
-- <initialize>: initialize the option's arguments.
-- <parse>: parse the option's arguments.
"""
def __init__(self):
pass
@abstractmethod
def initialize(self, parser):
pass
@abstractmethod
def parse(self):
pass
class BasicOptions(BaseOptions):
"""This class defines options used during both training and test time.
It also implements several helper functions such as parsing, printing, and saving the options.
It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
"""
def __init__(self):
"""Reset the class; indicates the class hasn't been initailized"""
self.initialized = False
def initialize(self, parser):
"""Define the common options that are used in both training and test."""
# basic parameters
parser.add_argument('--data_root', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
        parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids, e.g. 0 or 0,1,2 or 0,2; use -1 for CPU')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | recyc_gan]')
parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
parser.add_argument('--netG', type=str, default='resnet_9bs', help='specify generator architecture [resnet_9bs | resnet_6bs | unet_256 | unet_128]')
parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
        # vgg parameters for perceptual loss
        parser.add_argument('--vgg', type=float, default=0, help='use perceptual loss')
        parser.add_argument('--vgg_mean', action='store_true', help='subtract mean in vgg loss')
        parser.add_argument('--vgg_choose', type=str, default='relu5_3', help='choose layer for vgg')
        parser.add_argument('--no_vgg_instance', action='store_true', help='do not use instance normalization for vgg features')
parser.add_argument('--vgg_maxpooling', action='store_true', help='normalize attention map')
# dataset parameters
parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | degraded]')
        parser.add_argument('--degraded_mode', type=str, default='colorization', help='type of degradation used by the degraded dataset mode. [colorization | super_resolution | denoising | restoration]')
parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--num_threads', default=0 if sys.platform.startswith('win') else 4, type=int, help='# threads for loading data')
parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
parser.add_argument('--load_size', type=int, default=286, help='scale images to this size')
parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size')
parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
# additional parameters
parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
self.initialized = True
return parser
def gather_options(self):
"""Initialize our parser with basic options(only once).
"""
if not self.initialized: # check if it has been initialized
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# get the basic options
opt, _ = parser.parse_known_args()
# modify model-related parser options
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.is_train)
opt, _ = parser.parse_known_args() # parse again with new defaults
# modify dataset-related parser options
dataset_name = opt.dataset_mode
dataset_option_setter = datasets.get_option_setter(dataset_name)
parser = dataset_option_setter(parser, self.is_train)
opt, _ = parser.parse_known_args() # parse again with new defaults
# save and return the parser
self.parser = parser
return parser.parse_args()
def print_options(self, opt):
"""Print and save options
It will print both current options and default values(if different).
        It will save options into a text file: [checkpoints_dir]/[name]/[phase]_opt.txt
"""
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
# save to the disk
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
os.makedirs(expr_dir, exist_ok=True)
file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
with open(file_name, 'wt') as opt_file:
opt_file.write(message)
opt_file.write('\n')
def parse(self):
"""Parse our options, create checkpoints directory suffix, and set up gpu device."""
opt = self.gather_options()
opt.is_train = self.is_train # train or test
# process opt.suffix
if opt.suffix:
suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
opt.name = opt.name + suffix
self.print_options(opt)
# set gpu ids
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
opt.gpu_ids.append(id)
if len(opt.gpu_ids) > 0:
torch.cuda.set_device(opt.gpu_ids[0])
self.opt = opt
return self.opt
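# Hedged sketch (editor's illustration; this subclass is hypothetical and not part
# of the original file): a training-time options class would typically extend
# BasicOptions, add its own flags, and set `is_train`, which gather_options()
# and parse() rely on.
class _ExampleTrainOptions(BasicOptions):
    def initialize(self, parser):
        parser = BasicOptions.initialize(self, parser)
        parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
        self.is_train = True
        return parser
# Typical call site (needs --data_root plus valid --model / --dataset_mode values):
#   opt = _ExampleTrainOptions().parse()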
| [
"torch.cuda.set_device"
] | 1.5.0 | atomicoo/EnhanceIMG | 8c009fbb6c5461ff6d7f30bdacec72232639c7f2 |
1.3 | # -*- coding: utf-8 -*-
import torch
from caption.tokenizers import TextEncoderBase
def mask_fill(
    fill_value: float,
    tokens: torch.Tensor,
    embeddings: torch.Tensor,
    padding_index: int,
) -> torch.Tensor:
"""
Function that masks embeddings representing padded elements.
:param fill_value: the value to fill the embeddings belonging to padded tokens.
:param tokens: The input sequences [bsz x seq_len].
:param embeddings: word embeddings [bsz x seq_len x hiddens].
:param padding_index: Index of the padding token.
"""
padding_mask = tokens.eq(padding_index).unsqueeze(-1)
return embeddings.float().masked_fill_(padding_mask, fill_value).type_as(embeddings)
def mask_tokens(
    inputs: torch.Tensor,
    tokenizer: TextEncoderBase,
    mlm_probability: float = 0.15,
    ignore_index: int = -100,
):
""" Mask tokens function from Hugging Face that prepares masked tokens inputs/labels for
masked language modeling.
:param inputs: Input tensor to be masked.
:param tokenizer: COMET text encoder.
:param mlm_probability: Probability of masking a token (default: 15%).
:param ignore_index: Specifies a target value that is ignored and does not contribute to
the input gradient (default: -100).
Returns:
- Tuple with input to the model and the target.
"""
if tokenizer.mask_index is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language"
"modeling. Remove the --mlm flag if you want to use this tokenizer."
)
labels = inputs.clone()
probability_matrix = torch.full(labels.shape, mlm_probability)
special_tokens_mask = [
tokenizer.get_special_tokens_mask(val) for val in labels.tolist()
]
probability_matrix.masked_fill_(
torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0
)
padding_mask = labels.eq(tokenizer.padding_index)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
labels[~masked_indices] = ignore_index # We only compute loss on masked tokens
    # 80% of the time, we replace masked input tokens with the mask token ([MASK])
indices_replaced = (
torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
)
inputs[indices_replaced] = tokenizer.mask_index
    # 10% of the time, we replace masked input tokens with a random word
indices_random = (
torch.bernoulli(torch.full(labels.shape, 0.5)).bool()
& masked_indices
& ~indices_replaced
)
random_words = torch.randint(tokenizer.vocab_size, labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
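if __name__ == "__main__":
    # Hedged usage sketch (editor's illustration, not part of the original module).
    # mask_fill: zero out embedding rows that correspond to padding tokens.
    # All sizes and the padding index below are arbitrary assumptions.
    toy_tokens = torch.tensor([[5, 7, 1, 0, 0]])           # bsz=1, seq_len=5, pad=0
    toy_embeddings = torch.randn(1, 5, 8)                  # bsz x seq_len x hidden
    masked_emb = mask_fill(0.0, toy_tokens, toy_embeddings, padding_index=0)
    assert torch.all(masked_emb[0, 3:] == 0.0)             # padded positions were filled
    # mask_tokens: demonstrated with a minimal stand-in object that exposes only the
    # attributes the function reads; a real TextEncoderBase would be used in practice.
    from types import SimpleNamespace
    toy_tokenizer = SimpleNamespace(
        mask_index=4,
        padding_index=0,
        vocab_size=10,
        get_special_tokens_mask=lambda ids: [1 if t in (0, 1, 2) else 0 for t in ids],
    )
    batch = torch.randint(5, 10, (2, 6))                   # avoid pad/special/mask ids
    mlm_inputs, mlm_labels = mask_tokens(batch.clone(), toy_tokenizer, mlm_probability=0.5)
    print(mlm_inputs)
    print(mlm_labels)                                      # ignore_index (-100) except at masked positions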
| [
"torch.bernoulli",
"torch.randint",
"torch.full",
"torch.tensor"
] | 1.3.1 | Unbabel/caption | 90725dbf5bc3809e0364d20d0837c58968ceb2b1 |
1.1 | #coding=utf-8
"""
Implementation of some commonly used losses.
"""
# python 2.X, 3.X compatibility
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
#import os
#import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class BCECrossEntropyLoss(nn.Module):
"""
sigmoid with binary cross entropy loss.
consider the multiclass task as multi binary classification problem.
one-vs-rest way.
SUM over the channel.
"""
def __init__(self,
num_classes=21,
ignore_index=255):
super(BCECrossEntropyLoss, self).__init__()
self.num_classes = num_classes
self.ignore_index = ignore_index
def forward(self, logits_4D, labels_4D):
"""
Args:
logits_4D : [N, C, H, W], dtype=float32
labels_4D : [N, H, W], dtype=long
"""
label_flat = labels_4D.view(-1).requires_grad_(False)
label_mask_flat = label_flat < self.num_classes
onehot_label_flat = F.one_hot(label_flat * label_mask_flat.long(), num_classes=self.num_classes).float()
onehot_label_flat = onehot_label_flat.requires_grad_(False)
logits_flat = logits_4D.permute(0, 2, 3, 1).contiguous().view([-1, self.num_classes])
# binary loss, multiplied by the not_ignore_mask
label_mask_flat = label_mask_flat.float()
valid_pixels = torch.sum(label_mask_flat)
binary_loss = F.binary_cross_entropy_with_logits(logits_flat,
target=onehot_label_flat,
weight=label_mask_flat.unsqueeze(dim=1),
reduction='sum')
bce_loss = torch.div(binary_loss, valid_pixels + 1.0)
return bce_loss
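if __name__ == "__main__":
    # Hedged usage sketch (editor's illustration, not part of the original module):
    # a 2x3 label map with 4 classes, where one pixel carries the ignore label 255.
    criterion = BCECrossEntropyLoss(num_classes=4, ignore_index=255)
    toy_logits = torch.randn(1, 4, 2, 3)                       # [N, C, H, W]
    toy_labels = torch.tensor([[[0, 1, 255], [3, 2, 1]]])      # [N, H, W], dtype=long
    print(criterion(toy_logits, toy_labels))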
| [
"torch.div",
"torch.sum"
] | 1.1.0 | mzhaoshuai/RMI | 10a40cdbeb58bdd1bd7125fde73b48b12f9452c7 |
1.1 | import argparse
from pathlib import Path
import numpy as np
import h5py
from scipy.io import loadmat
import torch
from tqdm import tqdm
import logging
import pickle
import cv2
import pycolmap
from .utils.parsers import parse_retrieval, names_to_pair
def interpolate_scan(scan, kp):
h, w, c = scan.shape
kp = kp / np.array([[w-1, h-1]]) * 2 - 1
assert np.all(kp > -1) and np.all(kp < 1)
scan = torch.from_numpy(scan).permute(2, 0, 1)[None]
kp = torch.from_numpy(kp)[None, None]
grid_sample = torch.nn.functional.grid_sample
# To maximize the number of points that have depth:
# do bilinear interpolation first and then nearest for the remaining points
interp_lin = grid_sample(
scan, kp, align_corners=True, mode='bilinear')[0, :, 0]
    interp_nn = grid_sample(scan, kp, align_corners=True, mode='nearest')[0, :, 0]
interp = torch.where(torch.isnan(interp_lin), interp_nn, interp_lin)
valid = ~torch.any(torch.isnan(interp), 0)
kp3d = interp.T.numpy()
valid = valid.numpy()
return kp3d, valid
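def _demo_interpolate_scan():
    """Hedged sketch (editor's illustration, not part of the original script): run
    interpolate_scan on a tiny synthetic scan where one pixel has no depth (NaN),
    showing the nearest-neighbour fallback. Call manually, e.g. from a REPL."""
    scan = np.ones((4, 4, 3), dtype=np.float32)
    scan[0, 0] = np.nan                                         # simulate missing depth
    kp = np.array([[0.6, 0.6], [2.0, 2.0]], dtype=np.float32)   # (x, y) pixel coords
    kp3d, valid = interpolate_scan(scan, kp)
    print(kp3d, valid)  # first keypoint recovers a value via the nearest-neighbour fallback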
def get_scan_pose(dataset_dir, rpath):
split_image_rpath = rpath.split('/')
floor_name = split_image_rpath[-3]
scan_id = split_image_rpath[-2]
image_name = split_image_rpath[-1]
building_name = image_name[:3]
path = Path(
dataset_dir, 'database/alignments', floor_name,
f'transformations/{building_name}_trans_{scan_id}.txt')
with open(path) as f:
raw_lines = f.readlines()
P_after_GICP = np.array([
np.fromstring(raw_lines[7], sep=' '),
np.fromstring(raw_lines[8], sep=' '),
np.fromstring(raw_lines[9], sep=' '),
np.fromstring(raw_lines[10], sep=' ')
])
return P_after_GICP
def pose_from_cluster(dataset_dir, q, retrieved, feature_file, match_file,
skip=None):
height, width = cv2.imread(str(dataset_dir / q)).shape[:2]
cx = .5 * width
cy = .5 * height
focal_length = 4032. * 28. / 36.
all_mkpq = []
all_mkpr = []
all_mkp3d = []
all_indices = []
kpq = feature_file[q]['keypoints'].__array__()
num_matches = 0
for i, r in enumerate(retrieved):
kpr = feature_file[r]['keypoints'].__array__()
pair = names_to_pair(q, r)
m = match_file[pair]['matches0'].__array__()
v = (m > -1)
if skip and (np.count_nonzero(v) < skip):
continue
mkpq, mkpr = kpq[v], kpr[m[v]]
num_matches += len(mkpq)
scan_r = loadmat(Path(dataset_dir, r + '.mat'))["XYZcut"]
mkp3d, valid = interpolate_scan(scan_r, mkpr)
Tr = get_scan_pose(dataset_dir, r)
mkp3d = (Tr[:3, :3] @ mkp3d.T + Tr[:3, -1:]).T
all_mkpq.append(mkpq[valid])
all_mkpr.append(mkpr[valid])
all_mkp3d.append(mkp3d[valid])
all_indices.append(np.full(np.count_nonzero(valid), i))
all_mkpq = np.concatenate(all_mkpq, 0)
all_mkpr = np.concatenate(all_mkpr, 0)
all_mkp3d = np.concatenate(all_mkp3d, 0)
all_indices = np.concatenate(all_indices, 0)
cfg = {
'model': 'SIMPLE_PINHOLE',
'width': width,
'height': height,
'params': [focal_length, cx, cy]
}
ret = pycolmap.absolute_pose_estimation(
all_mkpq, all_mkp3d, cfg, 48.00)
ret['cfg'] = cfg
return ret, all_mkpq, all_mkpr, all_mkp3d, all_indices, num_matches
def main(dataset_dir, retrieval, features, matches, results,
skip_matches=None):
assert retrieval.exists(), retrieval
assert features.exists(), features
assert matches.exists(), matches
retrieval_dict = parse_retrieval(retrieval)
queries = list(retrieval_dict.keys())
feature_file = h5py.File(features, 'r')
match_file = h5py.File(matches, 'r')
poses = {}
logs = {
'features': features,
'matches': matches,
'retrieval': retrieval,
'loc': {},
}
logging.info('Starting localization...')
for q in tqdm(queries):
db = retrieval_dict[q]
ret, mkpq, mkpr, mkp3d, indices, num_matches = pose_from_cluster(
dataset_dir, q, db, feature_file, match_file, skip_matches)
poses[q] = (ret['qvec'], ret['tvec'])
logs['loc'][q] = {
'db': db,
'PnP_ret': ret,
'keypoints_query': mkpq,
'keypoints_db': mkpr,
'3d_points': mkp3d,
'indices_db': indices,
'num_matches': num_matches,
}
logging.info(f'Writing poses to {results}...')
with open(results, 'w') as f:
for q in queries:
qvec, tvec = poses[q]
qvec = ' '.join(map(str, qvec))
tvec = ' '.join(map(str, tvec))
name = q.split("/")[-1]
f.write(f'{name} {qvec} {tvec}\n')
logs_path = f'{results}_logs.pkl'
logging.info(f'Writing logs to {logs_path}...')
with open(logs_path, 'wb') as f:
pickle.dump(logs, f)
logging.info('Done!')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_dir', type=Path, required=True)
parser.add_argument('--retrieval', type=Path, required=True)
parser.add_argument('--features', type=Path, required=True)
parser.add_argument('--matches', type=Path, required=True)
parser.add_argument('--results', type=Path, required=True)
parser.add_argument('--skip_matches', type=int)
args = parser.parse_args()
main(**args.__dict__)
| [
"torch.isnan",
"torch.from_numpy",
"torch.nn.functional.grid_sample"
] | 1.1 | patelajaychh/Hierarchical-Localization | d3f155d0587376a6fd0395ea36125016160fa448 |
1.0 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import math
import os
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from .modeling_utils import PreTrainedModel, prune_linear_layer
from .configuration_bert import BertConfig
from .file_utils import add_start_docstrings
logger = logging.getLogger(__name__)
BERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin",
'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin",
}
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model.
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error("Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split('/')
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
logger.info("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
l = re.split(r'_(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'kernel' or l[0] == 'gamma':
pointer = getattr(pointer, 'weight')
elif l[0] == 'output_bias' or l[0] == 'beta':
pointer = getattr(pointer, 'bias')
elif l[0] == 'output_weights':
pointer = getattr(pointer, 'weight')
elif l[0] == 'squad':
pointer = getattr(pointer, 'classifier')
else:
try:
pointer = getattr(pointer, l[0])
except AttributeError:
logger.info("Skipping {}".format("/".join(name)))
continue
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
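# Hedged illustration (editor's sketch, not part of the original file): the tanh
# approximation quoted in the gelu docstring; it tracks the exact erf-based gelu
# to within ~1e-3 over typical activation ranges.
def _gelu_tanh_approximation(x):
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
# Example comparison (uncomment to run):
#   x = torch.linspace(-3.0, 3.0, steps=7)
#   print((gelu(x) - _gelu_tanh_approximation(x)).abs().max())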
def swish(x):
return x * torch.sigmoid(x)
def get_weighted_loss(loss_fct, inputs, labels, weights):
    # Per-sample weighted loss: example i contributes with weight (weights[i] + 1.0),
    # and the sum is normalised by the total weight, so the result reduces to the
    # plain mean loss when all weights are zero.
    loss = 0.0
    for i in range(weights.shape[0]):
        loss += (weights[i] + 1.0) * loss_fct(inputs[i:i + 1], labels[i:i + 1])
    return loss / (sum(weights) + weights.shape[0])
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except (ImportError, AttributeError) as e:
logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
BertLayerNorm = torch.nn.LayerNorm
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None, position_ids=None):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.output_attentions = config.output_attentions
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
    def transpose_for_scores(self, x):
        # (batch, seq_len, hidden) -> (batch, num_heads, seq_len, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask, head_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
return outputs
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
        heads = set(heads) - self.pruned_heads  # Convert to set and remove already pruned heads
for head in heads:
# Compute how many pruned heads are before the head and move the index accordingly
head = head - sum(1 if h < head else 0 for h in self.pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, input_tensor, attention_mask, head_mask=None):
self_outputs = self.self(input_tensor, attention_mask, head_mask)
attention_output = self.output(self_outputs[0], input_tensor)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask, head_mask=None):
attention_outputs = self.attention(hidden_states, attention_mask, head_mask)
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
return outputs
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, head_mask=None):
all_hidden_states = ()
all_attentions = ()
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i])
hidden_states = layer_outputs[0]
if self.output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs # last-layer hidden state, (all hidden states), (all attentions)
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size,
config.vocab_size,
bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(BertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
config_class = BertConfig
pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_bert
base_model_prefix = "bert"
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
BERT_START_DOCSTRING = r""" The BERT model was proposed in
`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_
by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer
pre-trained using a combination of masked language modeling objective and next sentence prediction
on a large corpus comprising the Toronto Book Corpus and Wikipedia.
This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
refer to the PyTorch documentation for all matter related to general usage and behavior.
.. _`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`:
https://arxiv.org/abs/1810.04805
.. _`torch.nn.Module`:
https://pytorch.org/docs/stable/nn.html#module
Parameters:
config (:class:`~pytorch_transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~pytorch_transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
BERT_INPUTS_DOCSTRING = r"""
Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:
(a) For sequence pairs:
``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``
(b) For single sequences:
``tokens: [CLS] the dog is hairy . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0``
Bert is a model with absolute position embeddings so it's usually advised to pad the inputs on
the right rather than the left.
Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`.
See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
:func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
(see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
**position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""
@add_start_docstrings("The bare Bert Model transformer outputing raw hidden-states without any specific head on top.",
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertModel(BertPreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the output of the last layer of the model.
**pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during Bert pretraining. This output is usually *not* a good summary
of the semantic content of the input, you're often better with averaging or pooling
the sequence of hidden-states for the whole input sequence.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.init_weights()
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.embeddings.word_embeddings
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.embeddings.word_embeddings = new_embeddings
return self.embeddings.word_embeddings
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer
                head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids)
encoder_outputs = self.encoder(embedding_output,
extended_attention_mask,
head_mask=head_mask)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
outputs = (sequence_output, pooled_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with two heads on top as done during the pre-training:
a `masked language modeling` head and a `next sentence prediction (classification)` head. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForPreTraining(BertPreTrainedModel):
r"""
**masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the masked language modeling loss.
Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
**next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
Indices should be in ``[0, 1]``.
``0`` indicates sequence B is a continuation of sequence A,
``1`` indicates sequence B is a random sequence.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when both ``masked_lm_labels`` and ``next_sentence_label`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
**prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, 2)``
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForPreTraining.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
prediction_scores, seq_relationship_scores = outputs[:2]
"""
def __init__(self, config):
super(BertForPreTraining, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config)
self.init_weights()
self.tie_weights()
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.cls.predictions.decoder,
self.bert.embeddings.word_embeddings)
def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
masked_lm_labels=None, next_sentence_label=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
outputs = (prediction_scores, seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here
if masked_lm_labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
outputs = (total_loss,) + outputs
return outputs # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):
r"""
**masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the masked language modeling loss.
Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Masked language modeling loss.
**prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForMaskedLM.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
outputs = model(input_ids, masked_lm_labels=input_ids)
loss, prediction_scores = outputs[:2]
"""
def __init__(self, config):
super(BertForMaskedLM, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyMLMHead(config)
self.init_weights()
self.tie_weights()
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.cls.predictions.decoder,
self.bert.embeddings.word_embeddings)
def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
masked_lm_labels=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
outputs = (masked_lm_loss,) + outputs
return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a `next sentence prediction (classification)` head on top. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForNextSentencePrediction(BertPreTrainedModel):
r"""
**next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
Indices should be in ``[0, 1]``.
``0`` indicates sequence B is a continuation of sequence A,
``1`` indicates sequence B is a random sequence.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``next_sentence_label`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Next sequence prediction (classification) loss.
        **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, 2)``
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
seq_relationship_scores = outputs[0]
"""
def __init__(self, config):
super(BertForNextSentencePrediction, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
self.init_weights()
def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
next_sentence_label=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
pooled_output = outputs[1]
seq_relationship_score = self.cls(pooled_output)
outputs = (seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here
if next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
outputs = (next_sentence_loss,) + outputs
return outputs # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForSequenceClassification(BertPreTrainedModel):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the sequence classification/regression loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification (or regression if config.num_labels==1) loss.
**logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
Classification (or regression if config.num_labels==1) scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, logits = outputs[:2]
"""
def __init__(self, config):
super(BertForSequenceClassification, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.init_weights()
def forward(self, input_ids, attention_mask=None, token_type_ids=None,
position_ids=None, head_mask=None, labels=None, weights=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
if weights is None:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
else:
loss = get_weighted_loss(loss_fct,
logits.view(-1, self.num_labels),
labels.view(-1), weights)
outputs = (loss,) + outputs
        return outputs  # (loss), logits, (hidden_states), (attentions)
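# Illustrative note (not part of the original file): when ``config.num_labels == 1``
# the same head performs regression with an MSE loss on float labels, e.g. a sketch
# assuming a config constructed with num_labels=1:
#     model = BertForSequenceClassification(config)            # config.num_labels == 1
#     outputs = model(input_ids, labels=torch.tensor([0.7]))   # outputs[0] is the MSE loss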
@add_start_docstrings("""Bert Model with a multiple choice classification head on top (a linear layer on top of
the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForMultipleChoice(BertPreTrainedModel):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices - 1]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification loss.
**classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above).
Classification scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForMultipleChoice.from_pretrained('bert-base-uncased')
choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
labels = torch.tensor(1).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, classification_scores = outputs[:2]
"""
def __init__(self, config):
super(BertForMultipleChoice, self).__init__(config)
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
def forward(self, input_ids, attention_mask=None, token_type_ids=None,
position_ids=None, head_mask=None, labels=None):
num_choices = input_ids.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1))
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
outputs = (loss,) + outputs
return outputs # (loss), reshaped_logits, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForTokenClassification(BertPreTrainedModel):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the token classification loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification loss.
**scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)``
Classification scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForTokenClassification.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, scores = outputs[:2]
"""
def __init__(self, config):
super(BertForTokenClassification, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(self, input_ids, attention_mask=None, token_type_ids=None,
position_ids=None, head_mask=None, labels=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), scores, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForQuestionAnswering(BertPreTrainedModel):
r"""
**start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
**end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
**start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
Span-start scores (before SoftMax).
**end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
Span-end scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForQuestionAnswering.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
start_positions = torch.tensor([1])
end_positions = torch.tensor([3])
outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
        loss, start_scores, end_scores = outputs[:3]
"""
def __init__(self, config):
super(BertForQuestionAnswering, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
start_positions=None, end_positions=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (start_logits, end_logits,) + outputs[2:]
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the position tensors may carry an extra dimension; squeeze it away
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,) + outputs
return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
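# Illustrative decoding sketch (not part of the original file): at inference time the
# predicted answer span can be read off the two logit vectors returned above, e.g.:
#     start_logits, end_logits = model(input_ids)[:2]
#     start = start_logits[0].argmax().item()
#     end = end_logits[0].argmax().item()
#     answer_token_ids = input_ids[0, start:end + 1]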
| [
"torch.nn.Linear",
"torch.sigmoid",
"torch.nn.Dropout",
"torch.zeros",
"torch.nn.MSELoss",
"torch.arange",
"torch.nn.Softmax",
"torch.nn.Tanh",
"torch.nn.CrossEntropyLoss",
"torch.from_numpy",
"torch.ones",
"torch.ones_like",
"torch.zeros_like",
"torch.matmul",
"torch.nn.Embedding"
] | 1.0.0 | irfanumar1994/pytorch-transformers | f257b96a879e38922eaa377be383be69372e78f1 |
0.4 | import os
import torch
from torch import nn
from torch.nn import functional as F
from torchvision import datasets, transforms
from src.models.base import BaseModel
class MNIST(BaseModel):
def _setup(self):
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return x
def get_criterion(self):
return nn.CrossEntropyLoss()
def get_data_gen(self, batch_size, train=True):
dpath_data = os.path.join(
os.path.dirname(__file__),
'..',
'..',
'data'
)
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(dpath_data, train=train, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True)
return iter(train_loader)
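if __name__ == "__main__":
    # Minimal smoke-test sketch (illustrative, not part of the original module).
    # Assumption: BaseModel is an nn.Module whose constructor takes no required
    # arguments and calls _setup().
    model = MNIST()
    criterion = model.get_criterion()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    data, target = next(model.get_data_gen(batch_size=32))
    optimizer.zero_grad()
    loss = criterion(model(data), target)
    loss.backward()
    optimizer.step()
    print("one-step loss: {:.4f}".format(loss.item()))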
| [
"torch.nn.Linear",
"torch.nn.functional.dropout",
"torch.nn.Conv2d",
"torch.nn.Dropout2d",
"torch.nn.CrossEntropyLoss"
] | 0.4.0 | jalexvig/imitating_optimizer | c0a62869ae678a62df9d13d1007efa0e531c6c3c |
0.4 | #!/usr/bin/env python
import os
from os.path import expanduser
home = expanduser("~")
import sys
import h5py
import argparse
from datetime import datetime
from time import time
import numpy as np
from numpy.random import permutation, seed
from scipy.cluster.vq import kmeans
import glob
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.init import xavier_uniform
from torch.nn.utils import clip_grad_norm
from tensorboardX import SummaryWriter
from sklearn.externals import joblib
from copy import deepcopy
from DataHandler import DataHandler
sys.path.append(os.path.join(home, "code/research_code/Spring_2018/TextModules/"))
from Evaluator import Evaluator
from Logger import get_logger
from model_library import AspectCLF, StudentBoWCLF, SeedCLF, smooth_cross_entropy
all_semeval_domains = ['english_restaurants', 'spanish_restaurants', 'french_restaurants', 'russian_restaurants',
'dutch_restaurants', 'turkish_restaurants']
all_domains = ['bags_and_cases', 'keyboards', 'boots', 'bluetooth', 'tv', 'vacuums']
class Trainer:
def __init__(self, args):
self.args = args
self.comment = '_{}'.format(args.domain)
if self.args.loss in ['SmoothCrossEntropy', "KL"]:
self.args.one_hot = True
else:
self.args.one_hot = False
self.datahandler = DataHandler(self.args)
self.writer = SummaryWriter(log_dir=self.args.logdir)
loggerfile = os.path.join(self.args.logdir, 'log.log')
self.logger = get_logger(logfile=loggerfile)
self.check_gpu()
joblib.dump(self.args, os.path.join(self.args.logdir, 'args.pkl'))
self.evaluator = Evaluator(args)
if args.no_seed_weights:
self.logger.info('NOT using seed weights...')
seed_weights = None
else:
self.logger.info('USING seed weights...')
seed_weights = self.datahandler.seed_w
if args.no_pretrained_emb:
self.logger.info('NOT using pretrained word embeddings...')
pretrained_emb = None
else:
pretrained_emb = self.datahandler.w_emb
if self.datahandler.num_aspects != self.args.num_aspects:
self.logger.info("Automatically changing num_aspects from {} to {}".format(self.args.num_aspects, self.datahandler.num_aspects))
self.args.num_aspects = self.datahandler.num_aspects
if args.model_type == 'embedding_based':
self.logger.info('Model: Embeddings based Classifier')
# prev model is loaded just to gather previous predictions and regularize the new model to
# provide similar predictions.
if args.memory_reg > 0:
self.prev_model = AspectCLF(vocab_size=self.datahandler.vocab_size, pretrained_emb=pretrained_emb, emb_size=self.datahandler.emb_size,
seed_encodings=None, seed_weights=seed_weights, num_aspects=self.args.num_aspects,
num_seeds=args.num_seeds, fix_a_emb=False, fix_w_emb=args.fix_w_emb, attention=args.attention,
deep_clf=args.deep_aspect_clf, enable_gpu=args.enable_gpu, cuda_device=args.cuda_device,
emb_dropout=args.emb_dropout, batch_norm= args.batch_norm, use_bert=args.use_bert,
bert_model=args.bert_model)
self.model = AspectCLF(vocab_size=self.datahandler.vocab_size, pretrained_emb=pretrained_emb, emb_size=self.datahandler.emb_size,
seed_encodings=None, seed_weights=seed_weights, num_aspects=self.args.num_aspects,
num_seeds=args.num_seeds, fix_a_emb=False, fix_w_emb=args.fix_w_emb, attention=args.attention,
deep_clf=args.deep_aspect_clf, enable_gpu=args.enable_gpu, cuda_device=args.cuda_device,
emb_dropout=args.emb_dropout, batch_norm= args.batch_norm, use_bert=args.use_bert,
bert_model=args.bert_model)
elif args.model_type == 'bow_based':
self.logger.info('Model: BoW Classifier')
self.model = StudentBoWCLF(self.datahandler.id2word, self.datahandler.aspects_ids)
else:
raise(BaseException('unknown model type: {}'.format(args.model_type)))
self.model = self.cuda(self.model)
self.teacher = SeedCLF(self.datahandler.id2word, self.datahandler.aspects_ids, seed_weights,
verbose=0, general_ind=self.datahandler.general_ind,
hard_pred=args.hard_teacher_pred)
self.optimizer = self.get_optimizer(args)
if args.scheduler_gamma > 0:
ms=args.bootstrap_epoch
self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[ms, ms+1, ms+2, ms+3], gamma=args.scheduler_gamma)
self.loss_fn = self.get_loss_fn(args)
self.logger.info('Saving log at {}'.format(loggerfile))
self.logger.debug('enable_gpu={}'.format(args.enable_gpu))
self.epoch = -1
self.results = []
self.metric = self.args.target_metric
self.best_score = -1.0
self.best_test_score = -1.0
self.epoch_results = {}
if args.memory_reg > 0:
self.memory_loss = self.get_memory_loss_fn(args)
self.prev_model = self.cuda(self.prev_model)
self.student_proba_train = None
self.student_proba_dev = None
self.student_proba_test = None
self.labels_dev = None
self.labels_test = None
self.teacher_proba_train = None
self.teacher_pred_dev = None
self.teacher_pred_test = None
self.disagreement = -1
def check_gpu(self):
if self.args.enable_gpu:
torch.cuda.manual_seed(self.args.seed)
if self.args.enable_gpu and not torch.cuda.is_available():
raise(BaseException('CUDA is not supported in this machine. Please rerun by setting enable_gpu=False'))
if torch.cuda.device_count() > 1:
self.logger.info("Tip: You could use {} GPUs in this machine!".format(torch.cuda.device_count()))
def get_optimizer(self, args):
if args.optimizer == 'Adam':
optimizer = torch.optim.Adam(self.model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
elif args.optimizer == 'Adadelta':
optimizer = torch.optim.Adadelta(self.model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
elif args.optimizer == 'SGD':
optimizer = torch.optim.SGD(self.model.parameters(), lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum)
else:
raise(NotImplementedError('unknown optimizer: {}'.format(args.optimizer)))
return optimizer
def get_loss_fn(self, args):
if args.loss == 'CrossEntropy':
loss_fn = nn.CrossEntropyLoss()
elif args.loss == 'NLL':
loss_fn = nn.NLLLoss()
elif args.loss == 'SmoothCrossEntropy':
loss_fn = smooth_cross_entropy
elif args.loss == 'KL':
loss_fn = nn.KLDivLoss()
else:
raise(NotImplementedError('unknown loss function: {}'.format(args.loss)))
return loss_fn
def get_memory_loss_fn(self, args):
if args.memory_loss == 'CrossEntropy':
loss_fn = nn.CrossEntropyLoss()
elif args.memory_loss == 'NLL':
loss_fn = nn.NLLLoss()
elif args.memory_loss == 'SmoothCrossEntropy':
loss_fn = smooth_cross_entropy
elif args.memory_loss == 'KL':
loss_fn = nn.KLDivLoss()
else:
raise(NotImplementedError('unknown loss function: {}'.format(args.loss)))
return loss_fn
def cuda(self, x):
if self.args.enable_gpu:
return x.cuda(self.args.cuda_device)
else:
return x
def train(self):
self.model.train()
if self.args.memory_reg > 0:
self.prev_model.eval()
all_losses = []
all_memory_losses = []
all_preds = self.cuda(torch.Tensor())
all_labels = []
self.teacher_scores_train = []
self.teacher_seed_word_pred_train = []
if args.scheduler_gamma > 0:
self.scheduler.step()
self.logger.info("Optimizing with lr={}".format(self.optimizer.state_dict()['param_groups'][0]['lr']))
if self.teacher.conf_mat is None:
self.logger.info("TEACHER does NOT use confusion matrices")
else:
self.logger.info("TEACHER uses confusion matrices")
for batch in self.datahandler.get_train_batches():
self.optimizer.zero_grad()
i = batch['ind']
            if (args.deep_aspect_clf in ['CNN', 'charCNN']) and i > batch['total'] - 20:
                # Ignore the last (really big) segments when the clf is a CNN to avoid OOM errors.
break
pred = self.model(batch)
# I use different ids for the teacher, because if SWD=1, then the seed words are dropped from batch['ids'].
teacher_scores, teacher_seed_word_pred = map(list, zip(*[self.teacher.predict_verbose(seg) for seg in batch['teacher_ids'].tolist()]))
if self.args.loss not in ["SmoothCrossEntropy", "KL"]:
label = np.argmax(teacher_scores, axis=1)
all_labels.extend(list(label))
label = self.cuda(Variable(torch.LongTensor(label)))
else:
# Convert the ground-truth aspect scores into probabilities summing to 1.
label = teacher_scores
all_labels.extend([np.argmax(l) for l in label])
label = self.cuda(Variable(torch.Tensor(label)))
label = F.softmax(label, dim=1)
loss = self.loss_fn(pred, label)
all_losses.append(loss.data.cpu().numpy())
if args.memory_reg == 0.0:
loss.backward()
else:
# Regularize the model to avoid forgetting the previous weights / predictions.
prev_pred = F.softmax(self.prev_model(batch), dim=1)
memory_loss = self.memory_loss(pred, prev_pred)
all_memory_losses.append(memory_loss.data.cpu().numpy())
total_loss = (1 - args.memory_reg) * loss + args.memory_reg * memory_loss
loss += memory_loss
total_loss.backward()
self.optimizer.step()
all_preds = torch.cat((all_preds, pred.data), dim=0)
self.teacher_scores_train.extend(teacher_scores)
self.teacher_seed_word_pred_train.extend(teacher_seed_word_pred)
if (self.args.report_every != -1) and (i % self.args.report_every == 0) and (i > 0):
avg_loss = np.mean(all_losses[-self.args.report_every:])
avg_memory_loss = np.mean(all_memory_losses[-self.args.report_every:])
if args.memory_reg == 0:
self.logger.debug('[{}][{}:{}/{}]\tLoss: {:f}'.format(self.args.domain, self.epoch, i, batch['total'], avg_loss))
else:
self.logger.debug('[{}][{}:{}/{}]\tLoss: {:.5f}\tMemory Loss: {:.5f}'.format(self.args.domain, self.epoch, i, batch['total'], avg_loss, avg_memory_loss))
all_proba = all_preds.cpu().numpy()
self.student_proba_train = all_proba
max_prob, all_preds = all_preds.max(dim=1)
all_preds = all_preds.cpu().numpy()
avg_loss = np.mean(all_losses)
res = self.evaluator.evaluate_group(all_preds, all_labels, all_proba, gt_classes=range(self.args.num_aspects),verbose=False)
res['loss'] = avg_loss
self.epoch_results['train'] = res
self.writer.add_histogram('train_loss{}'.format(self.comment), np.array(all_losses), self.epoch, bins=100)
# save disagreement
s_pred_hard = np.argmax(self.student_proba_train, axis=1)
t_pred_hard = np.argmax(self.teacher_scores_train, axis=1)
self.disagreement = ((s_pred_hard != t_pred_hard).sum()) / float(s_pred_hard.shape[0])
self.epoch_results['hard_disagreement'] = self.disagreement
def update_teacher(self):
# Use Maximum Likelihood Estimation to update the seed word confusion matrices.
assert self.student_proba_train is not None, "Student proba is None."
assert self.teacher_scores_train is not None, "Teacher scores is None."
s_pred_hard = np.argmax(self.student_proba_train, axis=1)
s_pred_soft = F.softmax(torch.Tensor(self.student_proba_train), dim=1).numpy()
t_pred_hard = np.argmax(self.teacher_scores_train, axis=1)
seed_word_occurences = np.array(self.teacher_seed_word_pred_train)
teacher_answers = seed_word_occurences.sum(axis=1) > 0
self.disagreement = ((s_pred_hard[teacher_answers] != t_pred_hard[teacher_answers]).sum()) / float(teacher_answers.sum())
self.epoch_results['train_disagreement'] = self.disagreement
K = self.args.num_aspects
N = s_pred_hard.shape[0]
# Initialize a zero confusion matrix for each seed word.
conf_mat = {wid: np.zeros(K) for wid in self.teacher.seed_list}
# Maximum Likelihood Estimation for the class priors
self.q = np.array([np.sum(s_pred_hard == i) for i in range(K)]) / float(N)
self.logger.info('Estimated class priors: {}'.format(",".join(["{:.2f}".format(x) for x in self.q])))
# Maximum Likelihood Estimation for each confusion matrix
for wid_i, wid in enumerate(self.teacher.seed_list):
# keep the segments where this seed word has been activated
relevant_ind = (seed_word_occurences[:, wid_i] > 0)
pred_aspect = self.teacher.seed_dict[wid][0]
if args.teacher_type == 'v1':
# Precision-based updates
if args.soft_updates == False:
conf_mat[wid] = np.array([np.sum(s_pred_hard[relevant_ind]==i) / float(np.sum(relevant_ind)) for i in range(K)])
else:
conf_mat[wid] = np.array([s_pred_soft[relevant_ind][:, i].sum() for i in range(K)])
conf_mat[wid] = conf_mat[wid] / float(conf_mat[wid].sum())
elif args.teacher_type == 'v2':
# Dawid-Skene model where each seed word is applied when it occurs in the segment
# We allow positive mass to other aspects.
conf_mat[wid][:] = self.args.pos_mass / float(K - 1)
conf_mat[wid][pred_aspect] = 1 - self.args.pos_mass
student_sum = s_pred_soft[relevant_ind].sum(axis=0) # adding student probabilities for all classes for all relevant samples
conf_mat[wid] *= student_sum
conf_mat[wid] /= conf_mat[wid].sum()
else:
raise(BaseException('{} not implemented'.format(args.teacher_type)))
# GRADIENT EM
prev_param = np.zeros(K)
prev_param[pred_aspect] = 1
conf_mat[wid] = self.args.teacher_memory * prev_param + (1 - self.args.teacher_memory) * conf_mat[wid] # (self.conf_mat[wid] + prev_param) / 2.0
self.logger.info("Teacher answers on the {}% ({}/{}) of the training set".format(100 * teacher_answers.sum() / teacher_answers.shape[0], teacher_answers.sum(), teacher_answers.shape[0]))
self.logger.info("Student-Teacher disagreement: {}/{} ({:.2f}%)".format((s_pred_hard[teacher_answers] != t_pred_hard[teacher_answers]).sum(), teacher_answers.sum(),100*self.disagreement))
self.logger.info("Avg of seed word occurences in training set: {:.2f}".format(np.average(seed_word_occurences.sum(axis=0))))
self.conf_mat = conf_mat
joblib.dump(self.conf_mat, self.args.logdir + 'conf_mat_{}.pkl'.format(self.epoch))
joblib.dump(self.q, self.args.logdir + 'prior_{}.pkl'.format(self.epoch))
return
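    # Worked example of the v1 (precision-based) update, for intuition only: suppose a
    # seed word for aspect 0 fires on 4 training segments and the student's hard
    # predictions on those segments are [0, 0, 1, 0] with K = 3 aspects; then
    # conf_mat[wid] = [0.75, 0.25, 0.0], i.e. the seed word keeps most of its mass on
    # aspect 0 but leaks a quarter of it to aspect 1 (before the teacher_memory blend).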
def validate(self):
self.model.eval()
all_losses = []
all_preds = self.cuda(torch.Tensor())
all_labels = []
for batch in self.datahandler.get_eval_batches():
i = batch['ind']
pred = self.model(batch)
label = batch['label']
# import pdb; pdb.set_trace()
if self.args.loss not in ["SmoothCrossEntropy", "KL"]:
all_labels.extend(list(label))
label = self.cuda(Variable(torch.LongTensor(label)))
else:
# Convert the ground-truth label into a one-hot label and treat is as a prob distribution
all_labels.extend(list(label))
one_hot = np.zeros((len(label), self.args.num_aspects))
one_hot[np.arange(len(label)), label] = 1
label = self.cuda(Variable(torch.Tensor(one_hot)))
loss = self.loss_fn(pred, label)
all_losses.append(loss.data.cpu().numpy())
all_preds = torch.cat((all_preds, pred.data), dim=0)
all_proba = all_preds.cpu().numpy()
max_prob, all_preds = all_preds.max(dim=1)
all_preds = all_preds.cpu().numpy()
avg_loss = np.mean(all_losses)
res = self.evaluator.evaluate_group(all_preds, all_labels, all_proba, gt_classes=range(self.args.num_aspects), verbose=False)
res['loss'] = avg_loss
if res[self.metric] >= self.best_score:
# Save the best validation model
self.best_score = res[self.metric]
torch.save(self.model.state_dict(), os.path.join(self.args.logdir, 'best_valid_model.pt'))
self.epoch_results['valid'] = res
self.writer.add_histogram('valid_loss{}'.format(self.comment), np.array(all_losses), self.epoch, bins=100)
self.flattened_valid_result_dict = self.evaluator.flattened_result_dict
def validate_test(self):
# Giannis: also validate on the test set
self.model.eval()
all_losses = []
all_preds = self.cuda(torch.Tensor())
all_labels = []
for batch in self.datahandler.get_test_batches():
i = batch['ind']
pred = self.model(batch)
label = batch['label']
# import pdb; pdb.set_trace()
if self.args.loss not in ["SmoothCrossEntropy", "KL"]:
all_labels.extend(list(label))
label = self.cuda(Variable(torch.LongTensor(label)))
else:
# Convert the ground-truth label into a one-hot label and treat is as a prob distribution
all_labels.extend(list(label))
one_hot = np.zeros((len(label), self.args.num_aspects))
one_hot[np.arange(len(label)), label] = 1
label = self.cuda(Variable(torch.Tensor(one_hot)))
loss = self.loss_fn(pred, label)
all_losses.append(loss.data.cpu().numpy())
all_preds = torch.cat((all_preds, pred.data), dim=0)
all_proba = all_preds.cpu().numpy()
max_prob, all_preds = all_preds.max(dim=1)
all_preds = all_preds.cpu().numpy()
avg_loss = np.mean(all_losses)
res = self.evaluator.evaluate_group(all_preds, all_labels, all_proba, gt_classes=range(self.args.num_aspects), verbose=False)
res['loss'] = avg_loss
if res[self.metric] >= self.best_test_score:
# Save the best test model
self.best_test_score = res[self.metric]
torch.save(self.model.state_dict(), os.path.join(self.args.logdir, 'best_test_model.pt'))
self.epoch_results['test'] = res
self.writer.add_histogram('test_loss{}'.format(self.comment), np.array(all_losses), self.epoch, bins=100)
self.flattened_test_result_dict = self.evaluator.flattened_result_dict
def test(self, savename='results.pkl'):
self.model.eval()
all_preds = self.cuda(torch.Tensor())
all_labels = []
teacher_scores_test = []
for batch in self.datahandler.get_test_batches():
i = batch['ind']
pred = self.model(batch)
teacher_scores, teacher_seed_word_pred = map(list, zip(*[self.teacher.predict_verbose(seg) for seg in batch['ids'].tolist()]))
label = batch['label']
all_preds = torch.cat((all_preds, pred.data), dim=0)
teacher_scores_test.extend(teacher_scores)
all_labels.extend(list(label))
all_proba = all_preds.cpu().numpy()
max_prob, all_preds = all_preds.max(dim=1)
all_preds = all_preds.cpu().numpy()
res = self.evaluator.evaluate_group(all_preds, all_labels, all_proba, gt_classes=range(self.args.num_aspects),verbose=False)
self.epoch_results['test'] = res
teacher_scores_test = np.array(teacher_scores_test)
teacher_preds = np.argmax(teacher_scores_test, axis=1)
teacher_res = self.evaluator.evaluate_group(teacher_preds, all_labels, teacher_scores_test, gt_classes=range(self.args.num_aspects), verbose=False)
self.epoch_results['teacher_test'] = teacher_res
self.logger.info('Test {}:\t STUDENT={:.3}\t TEACHER={:.3}'.format(self.metric, res[self.metric], teacher_res[self.metric]))
self.logger.info('Train disagreement: {}%'.format(100*self.disagreement))
self.logger.info('STUDENT confusion Matrix:\n{}'.format(res['conf_mat']))
self.logger.info('TEACHER confusion Matrix:\n{}'.format(teacher_res['conf_mat']))
joblib.dump(res, os.path.join(self.args.logdir, savename))
def start_epoch(self):
        # Do the necessary setup at the beginning of each epoch
self.epoch_results = {}
return
def end_epoch(self):
        # Do the necessary bookkeeping at the end of each epoch
self.writer.add_scalars('loss{}'.format(self.comment), {
'train_loss': self.epoch_results['train']['loss'],
'valid_loss': self.epoch_results['valid']['loss']}, self.epoch)
score = self.epoch_results['valid'][self.metric]
test_score = self.epoch_results['test'][self.metric]
self.logger.info('{}: {:.3}'.format(self.metric, score))
self.logger.info('{} (test): {:.3}'.format(self.metric, test_score))
self.writer.add_scalars(self.metric, {self.args.domain: score}, self.epoch)
        self.writer.add_scalars('test_' + self.metric, {self.args.domain: test_score}, self.epoch)
res_flattened = self.flattened_test_result_dict
res_flattened['avg_prec'] = np.average(self.epoch_results['valid']['prec'])
res_flattened['avg_rec'] = np.average(self.epoch_results['valid']['rec'])
important_list = ['acc', 'avg_prec', 'avg_rec', 'macro_average_f1', 'micro_average_f1']
self.writer.add_scalars('average_test_results{}'.format(self.comment), {x: res_flattened[x] for x in important_list}, self.epoch)
self.writer.add_scalars('test_results{}'.format(self.comment), {x:res_flattened[x] for x in res_flattened if not 'conf' in x}, self.epoch)
self.writer.add_scalars('test_conf_matrix{}'.format(self.comment), {x: res_flattened[x] for x in res_flattened if 'conf' in x}, self.epoch)
self.results.append(self.epoch_results)
joblib.dump(self.results, os.path.join(self.args.logdir, 'epoch_results.pkl')) # saving intermediate results
return
def close(self):
self.writer.close()
torch.save(self.model.state_dict(), os.path.join(self.args.logdir, 'last_model.pt'))
joblib.dump(self.results, os.path.join(self.args.logdir, 'results.pkl'))
self.logger.info("Process ended in {:.3f} s".format(self.total_time))
self.logger.info("Results stored at {}".format(self.args.logdir))
def process(self):
self.total_time = 0
self.test()
for epoch in range(self.args.num_epochs):
if epoch == 0:
# Do not regularize the model in the first epochs until we start bootstrapping.
mem_reg = self.args.memory_reg
self.args.memory_reg = 0
# Use CrossEntropyLoss with hard targets for the first epochs.
target_loss_fn = self.args.loss
elif epoch == self.args.bootstrap_epoch + 1:
                # When we're done with the burn-in epochs, we restore the right co-training parameters.
if mem_reg > 0:
self.logger.info("Adding prev_model regularization with mem_reg={}".format(mem_reg))
self.args.memory_reg = mem_reg
self.prev_model.load_state_dict(deepcopy(self.model.state_dict()))
self.logger.info("Switching to loss={}".format(target_loss_fn))
self.args.loss = target_loss_fn
self.loss_fn = self.get_loss_fn(self.args)
t0 = time()
self.epoch = epoch
self.start_epoch()
self.train()
if epoch >= self.args.bootstrap_epoch:
self.update_teacher()
if not args.fix_teacher:
self.teacher.conf_mat = self.conf_mat
self.teacher.prior = self.q
self.validate()
self.validate_test()
epoch_time = time() - t0
self.total_time += epoch_time
self.logger.info("Epoch {} Done in {} s.".format(self.epoch, epoch_time))
self.epoch_results['time'] = epoch_time
self.test()
self.end_epoch()
self.test()
self.close()
def run_cotrain(args, domain):
print("Running {}".format(domain))
args.domain = domain
# Define output paths
args.logdir += '/' + domain + '/'
if not os.path.exists(args.logdir):
os.mkdir(args.logdir)
args.pretrained_model += '/' + domain + '/'
args.student_folder = args.logdir + \
'student' + \
'_{}'.format(args.loss) + \
'_lr{}'.format(args.lr) + \
'_memloss{}'.format(args.memory_loss) + \
'_memreg{}'.format(args.memory_reg)
args.teacher_folder = args.logdir + \
'teacher' + \
"_{}".format(args.teacher_type) + \
"_memory{}".format(args.teacher_memory)
if not os.path.exists(args.student_folder):
os.mkdir(args.student_folder)
if not os.path.exists(args.teacher_folder):
os.mkdir(args.teacher_folder)
trainer = Trainer(args)
trainer.process()
return
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('domain', help="Domain name (without extension)", type=str, default='pairs')
# Trainer Specific
parser.add_argument('--logdir', help="log directory for tensorboard", type=str, default='../experiments/')
parser.add_argument('--debug', help="Enable debug mode", action='store_true')
parser.add_argument('--num_epochs', help="Number of epochs (default: 25)", type=int, default=5)
parser.add_argument('--loss', help="Loss Function (CrossEntropy / NLL)", type=str, default='CrossEntropy')
parser.add_argument('--optimizer', help="Optimizer (Adam / Adadelta)", type=str, default='Adam')
parser.add_argument('--lr', help="Learning rate (default: 0.0001)", type=float, default=0.00005)
parser.add_argument('--weight_decay', help="Weight Decay", type=float, default=0.0)
parser.add_argument('--momentum', help="Momentum (used for optimizer=SGD)", type=float, default=0.9)
parser.add_argument('--report_every', help="Report every x number of batches", type=int, default=50)
parser.add_argument('--cuda_device', help="CUDA Device ID", type=int, default=0)
parser.add_argument('--batch_size', help="Batch Size", type=int, default=1024)
parser.add_argument('--target_metric', help="Target Metric to report", type=str, default='micro_average_f1')
parser.add_argument('--version', help="Run # (0..4)", type=int, default=0)
parser.add_argument('--memory_loss', help="Loss Function for the memory regularization term", type=str, default='SmoothCrossEntropy')
parser.add_argument('--memory_reg', help="Memory regularization (not forget the previous model)", type=float, default=0.0)
parser.add_argument('--teacher_memory', help="Teacher memory (not forget the initial teacher model)", type=float, default=0.0)
parser.add_argument('--scheduler_gamma', help="Scheduler's multiplier of lr in each epoch", type=float, default=0.1)
parser.add_argument('--bootstrap_epoch', help="Epoch at which we start the teacher updates", type=int, default=0)
parser.add_argument('--disable_gpu', help="Disable GPU", action='store_true')
# Domain Specific
parser.add_argument('--test_data', help="hdf5 file of test segments", type=str, default='')
parser.add_argument('--min_len', help="Minimum number of non-stop-words in segment (default: 2)", type=int, default=2)
parser.add_argument('--num_aspects', help="Number of aspects (default: 9)", type=int, default=9)
parser.add_argument('--aspect_seeds', help='file that contains aspect seed words (overrides number of aspects)', type=str, default='')
parser.add_argument('-q', '--quiet', help="No information to stdout", action='store_true')
parser.add_argument('--num_seeds', help="Number of seed words to use (default: 30)", type=int, default=30)
parser.add_argument('--no_seed_weights', help="Forcing the *unweighted* avg of seed word embeddings", action='store_true')
parser.add_argument('--batch_norm', help="Batch normalization on segment encodings", action='store_true')
parser.add_argument('--emb_dropout', help="Dropout at the segment embedding layer", type=float, default=0.0)
parser.add_argument('--swd', help="Seed Word Dropout (default=0.0 i.e., never drop the seed word)", type=float, default=0.0)
parser.add_argument('--no_pretrained_emb', help="Do NOT use pre-trained word embeddings", action='store_true')
parser.add_argument('--use_bert', help="Use BERT (base uncased) for segment embedding", action='store_true')
parser.add_argument('--bert_model', help="Type of BERT model: base/large", type=str, default='base')
parser.add_argument('--simple_aspects', help="Use fine/coarse grained aspects (-1: original A#B label, 0: first part, 1: second part of A#B label", type=int, default=-1)
# Model Specific
parser.add_argument('--pretrained_model', help="Pre-trained model", type=str, default='')
parser.add_argument('--attention', help="Use word attention", action='store_true')
parser.add_argument('--fix_w_emb', help="Fix word embeddings", action='store_true')
parser.add_argument('--fix_a_emb', help="Fix aspect embeddings", action='store_true')
parser.add_argument('--model_type', help="Model type (embedding_based vs bow_based)", type=str, default='embedding_based')
parser.add_argument('--deep_aspect_clf', help="Use a deep CLF on top of word embeddings", type=str, default='NO')
parser.add_argument('--teacher_type', help="Teacher Type (v1..3)", type=str, default='v1')
parser.add_argument('--pos_mass', help="Probability mass to cut from the given aspect and distribute to the remaining aspects", type=float, default=0.2)
parser.add_argument('--soft_updates', help="Soft (instead of hard) teacher (precision-based) updates (only for v1)", action='store_true')
parser.add_argument('--hard_teacher_pred', help="Hard aspect predictions per seed word (only the most probable aspect)", action='store_true')
parser.add_argument('--fix_teacher', help="Fix teacher throughout training (instead of updating)", action='store_true')
args = parser.parse_args()
args.enable_gpu = not args.disable_gpu
seeds = [20, 7, 1993, 42, 127]
args.seed = seeds[args.version]
torch.cuda.manual_seed(args.seed)
seed(args.seed)
args.num_epochs += args.bootstrap_epoch
if args.logdir == '../experiments/':
args.logdir += datetime.now().strftime('%b%d_%H-%M-%S') + '_'
if args.debug:
args.logdir = './debug'
if os.path.exists(args.logdir):
os.system('rm -rf {}'.format(args.logdir))
else:
args.logdir = args.logdir + \
"COTRAINING" + \
"_att{}".format(args.attention) + \
"_fixw{}".format(args.fix_w_emb) + \
"_fixa{}".format(args.fix_a_emb) + \
"_{}".format(args.loss) + \
"_lr{}".format(args.lr) + \
"_dropout{}".format(args.emb_dropout) + \
'_memloss{}'.format(args.memory_loss) + \
'_memreg{}'.format(args.memory_reg) + \
"_teacher{}".format(args.teacher_type) + \
"_tmem{}".format(args.teacher_memory) + \
'_schedgamma{}'.format(args.scheduler_gamma) + \
"_bepoch{}".format(args.bootstrap_epoch)
if not os.path.exists(args.logdir):
os.mkdir(args.logdir)
original_logdir = args.logdir
args.logdir += '/v{}'.format(args.version)
if not os.path.exists(args.logdir):
os.mkdir(args.logdir)
args.pretrained_model += '/v{}'.format(args.version)
print('\t\tEXPERIMENT with domain={}\nargs: {}\nlogdir: {}'.format(args.domain, args, args.logdir))
run_cotrain(args, args.domain)
| [
"torch.nn.NLLLoss",
"torch.cuda.manual_seed",
"torch.cat",
"torch.optim.lr_scheduler.MultiStepLR",
"torch.cuda.device_count",
"torch.cuda.is_available",
"torch.LongTensor",
"torch.nn.KLDivLoss",
"torch.nn.functional.softmax",
"torch.Tensor",
"torch.nn.CrossEntropyLoss"
] | 0.4.1 | gkaramanolakis/ISWD | 41452f447284491cf8ade8e09f3bc4e314ec64f7 |
1.6 | import numpy as np
import os
import shutil
import sys
from torch.utils.tensorboard import SummaryWriter
import torch
def model_input(data, device):
datum = data.data[0:1]
if isinstance(datum, np.ndarray):
return torch.from_numpy(datum).float().to(device)
else:
return datum.float().to(device)
def get_script():
py_script = os.path.basename(sys.argv[0])
return os.path.splitext(py_script)[0]
def get_specified_params(hparams):
keys = [k.split("=")[0][2:] for k in sys.argv[1:]]
specified = {k: hparams[k] for k in keys}
return specified
def make_hparam_str(hparams, exclude):
return ",".join([f"{key}_{value}"
for key, value in sorted(hparams.items())
if key not in exclude])
class Logger(object):
def __init__(self, logdir):
if logdir is None:
self.writer = None
else:
if os.path.exists(logdir) and os.path.isdir(logdir):
shutil.rmtree(logdir)
self.writer = SummaryWriter(log_dir=logdir)
def log_model(self, model, input_to_model):
if self.writer is None:
return
self.writer.add_graph(model, input_to_model)
def log_epoch(self, epoch, train_loss, train_acc, test_loss, test_acc, epsilon=None):
if self.writer is None:
return
self.writer.add_scalar("Loss/train", train_loss, epoch)
self.writer.add_scalar("Loss/test", test_loss, epoch)
self.writer.add_scalar("Accuracy/train", train_acc, epoch)
self.writer.add_scalar("Accuracy/test", test_acc, epoch)
if epsilon is not None:
self.writer.add_scalar("Acc@Eps/train", train_acc, 100*epsilon)
self.writer.add_scalar("Acc@Eps/test", test_acc, 100*epsilon)
def log_scalar(self, tag, scalar_value, global_step):
if self.writer is None or scalar_value is None:
return
self.writer.add_scalar(tag, scalar_value, global_step)
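if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module); the log
    # directory below is an arbitrary example path.
    logger = Logger(logdir="runs/example")
    for epoch in range(3):
        logger.log_epoch(epoch,
                         train_loss=1.0 / (epoch + 1), train_acc=0.5 + 0.1 * epoch,
                         test_loss=1.2 / (epoch + 1), test_acc=0.45 + 0.1 * epoch)
    logger.log_scalar("lr", 0.001, global_step=0)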
| [
"torch.utils.tensorboard.SummaryWriter",
"torch.from_numpy"
] | 1.6.0 | skat00sh/Handcrafted-DP | d1f8bc004adc240d5c424a10bdcc30fc266c8218 |
1.9 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
import pytest
import torch
from pyro.distributions import (
Delta,
NegativeBinomial,
Normal,
Poisson,
ZeroInflatedDistribution,
ZeroInflatedNegativeBinomial,
ZeroInflatedPoisson,
)
from pyro.distributions.util import broadcast_shape
from tests.common import assert_close
@pytest.mark.parametrize("gate_shape", [(), (2,), (3, 1), (3, 2)])
@pytest.mark.parametrize("base_shape", [(), (2,), (3, 1), (3, 2)])
def test_zid_shape(gate_shape, base_shape):
gate = torch.rand(gate_shape)
base_dist = Normal(torch.randn(base_shape), torch.randn(base_shape).exp())
d = ZeroInflatedDistribution(base_dist, gate=gate)
assert d.batch_shape == broadcast_shape(gate_shape, base_shape)
assert d.support == base_dist.support
d2 = d.expand([4, 3, 2])
assert d2.batch_shape == (4, 3, 2)
@pytest.mark.parametrize("rate", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])
def test_zip_0_gate(rate):
# if gate is 0 ZIP is Poisson
zip1 = ZeroInflatedPoisson(torch.tensor(rate), gate=torch.zeros(1))
zip2 = ZeroInflatedPoisson(torch.tensor(rate), gate_logits=torch.tensor(-99.9))
pois = Poisson(torch.tensor(rate))
s = pois.sample((20,))
zip1_prob = zip1.log_prob(s)
zip2_prob = zip2.log_prob(s)
pois_prob = pois.log_prob(s)
assert_close(zip1_prob, pois_prob)
assert_close(zip2_prob, pois_prob)
@pytest.mark.parametrize("rate", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])
def test_zip_1_gate(rate):
# if gate is 1 ZIP is Delta(0)
zip1 = ZeroInflatedPoisson(torch.tensor(rate), gate=torch.ones(1))
zip2 = ZeroInflatedPoisson(torch.tensor(rate), gate_logits=torch.tensor(math.inf))
delta = Delta(torch.zeros(1))
s = torch.tensor([0.0, 1.0])
zip1_prob = zip1.log_prob(s)
zip2_prob = zip2.log_prob(s)
delta_prob = delta.log_prob(s)
assert_close(zip1_prob, delta_prob)
assert_close(zip2_prob, delta_prob)
@pytest.mark.parametrize("gate", [0.0, 0.25, 0.5, 0.75, 1.0])
@pytest.mark.parametrize("rate", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])
def test_zip_mean_variance(gate, rate):
num_samples = 1000000
zip_ = ZeroInflatedPoisson(torch.tensor(rate), gate=torch.tensor(gate))
s = zip_.sample((num_samples,))
expected_mean = zip_.mean
estimated_mean = s.mean()
expected_std = zip_.stddev
estimated_std = s.std()
assert_close(expected_mean, estimated_mean, atol=1e-02)
assert_close(expected_std, estimated_std, atol=1e-02)
@pytest.mark.parametrize("total_count", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])
@pytest.mark.parametrize("probs", [0.1, 0.5, 0.9])
def test_zinb_0_gate(total_count, probs):
# if gate is 0 ZINB is NegativeBinomial
zinb1 = ZeroInflatedNegativeBinomial(
total_count=torch.tensor(total_count),
gate=torch.zeros(1),
probs=torch.tensor(probs),
)
zinb2 = ZeroInflatedNegativeBinomial(
total_count=torch.tensor(total_count),
gate_logits=torch.tensor(-99.9),
probs=torch.tensor(probs),
)
neg_bin = NegativeBinomial(torch.tensor(total_count), probs=torch.tensor(probs))
s = neg_bin.sample((20,))
zinb1_prob = zinb1.log_prob(s)
zinb2_prob = zinb2.log_prob(s)
neg_bin_prob = neg_bin.log_prob(s)
assert_close(zinb1_prob, neg_bin_prob)
assert_close(zinb2_prob, neg_bin_prob)
@pytest.mark.parametrize("total_count", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])
@pytest.mark.parametrize("probs", [0.1, 0.5, 0.9])
def test_zinb_1_gate(total_count, probs):
# if gate is 1 ZINB is Delta(0)
zinb1 = ZeroInflatedNegativeBinomial(
total_count=torch.tensor(total_count),
gate=torch.ones(1),
probs=torch.tensor(probs),
)
zinb2 = ZeroInflatedNegativeBinomial(
total_count=torch.tensor(total_count),
gate_logits=torch.tensor(math.inf),
probs=torch.tensor(probs),
)
delta = Delta(torch.zeros(1))
s = torch.tensor([0.0, 1.0])
zinb1_prob = zinb1.log_prob(s)
zinb2_prob = zinb2.log_prob(s)
delta_prob = delta.log_prob(s)
assert_close(zinb1_prob, delta_prob)
assert_close(zinb2_prob, delta_prob)
@pytest.mark.parametrize("gate", [0.0, 0.25, 0.5, 0.75, 1.0])
@pytest.mark.parametrize("total_count", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])
@pytest.mark.parametrize("logits", [-0.5, 0.5, -0.9, 1.9])
def test_zinb_mean_variance(gate, total_count, logits):
num_samples = 1000000
zinb_ = ZeroInflatedNegativeBinomial(
total_count=torch.tensor(total_count),
gate=torch.tensor(gate),
logits=torch.tensor(logits),
)
s = zinb_.sample((num_samples,))
expected_mean = zinb_.mean
estimated_mean = s.mean()
expected_std = zinb_.stddev
estimated_std = s.std()
assert_close(expected_mean, estimated_mean, atol=1e-01)
assert_close(expected_std, estimated_std, atol=1e-1)
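# Reference note (not part of the original test file): for ZeroInflatedPoisson the
# analytic moments compared above are mean = (1 - gate) * rate and
# variance = (1 - gate) * rate * (1 + gate * rate), which is what `zip_.mean` and
# `zip_.stddev` should evaluate to.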
| [
"torch.zeros",
"torch.rand",
"torch.ones",
"torch.tensor",
"torch.randn"
] | 1.9.0 | tianjuchen/pyro | d5b0545c4f992d435692080db6969314a2c32f05 |
1.9 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import torch
from torch.distributions import constraints
from torch.nn import Parameter
import pyro
import pyro.distributions as dist
from pyro.contrib.gp.parameterized import Parameterized
from pyro.nn.module import PyroParam, PyroSample
from tests.common import assert_equal
def test_parameterized():
class Linear(Parameterized):
def __init__(self):
super().__init__()
self._pyro_name = "Linear"
self.a = PyroParam(torch.tensor(1.0), constraints.positive)
self.b = PyroSample(dist.Normal(0, 1))
self.c = PyroSample(dist.Normal(0, 1))
self.d = PyroSample(dist.Normal(0, 4).expand([1]).to_event())
self.e = PyroSample(dist.LogNormal(0, 1))
self.f = PyroSample(dist.MultivariateNormal(torch.zeros(2), torch.eye(2)))
self.g = PyroSample(dist.Exponential(1))
def forward(self, x):
return (
self.a * x
+ self.b
+ self.c
+ self.d
+ self.e
+ self.f
+ self.g
+ self.e
)
linear = Linear()
linear.autoguide("c", dist.Normal)
linear.autoguide("d", dist.MultivariateNormal)
linear.autoguide("e", dist.Normal)
assert set(dict(linear.named_parameters()).keys()) == {
"a_unconstrained",
"b_map",
"c_loc",
"c_scale_unconstrained",
"d_loc",
"d_scale_tril_unconstrained",
"e_loc",
"e_scale_unconstrained",
"f_map",
"g_map_unconstrained",
}
def model(x):
linear.mode = "model"
return linear(x)
def guide(x):
linear.mode = "guide"
return linear(x)
model_trace = pyro.poutine.trace(model).get_trace(torch.tensor(5.0))
guide_trace = pyro.poutine.trace(guide).get_trace(torch.tensor(5.0))
for p in ["b", "c", "d"]:
assert "Linear.{}".format(p) in model_trace.nodes
assert "Linear.{}".format(p) in guide_trace.nodes
assert isinstance(guide_trace.nodes["Linear.b"]["fn"], dist.Delta)
c_dist = guide_trace.nodes["Linear.c"]["fn"]
assert isinstance(getattr(c_dist, "base_dist", c_dist), dist.Normal)
d_dist = guide_trace.nodes["Linear.d"]["fn"]
assert isinstance(getattr(d_dist, "base_dist", d_dist), dist.MultivariateNormal)
def test_nested_parameterized():
class Linear(Parameterized):
def __init__(self, a):
super().__init__()
self.a = Parameter(a)
def forward(self, x):
return self.a * x
class Quadratic(Parameterized):
def __init__(self, linear1, linear2, a):
super().__init__()
self._pyro_name = "Quadratic"
self.linear1 = linear1
self.linear2 = linear2
self.a = Parameter(a)
def forward(self, x):
return self.linear1(x) * x + self.linear2(self.a)
linear1 = Linear(torch.tensor(1.0))
linear1.a = PyroSample(dist.Normal(0, 1))
linear2 = Linear(torch.tensor(1.0))
linear2.a = PyroSample(dist.Normal(0, 1))
q = Quadratic(linear1, linear2, torch.tensor(2.0))
q.a = PyroSample(dist.Cauchy(0, 1))
def model(x):
q.set_mode("model")
return q(x)
trace = pyro.poutine.trace(model).get_trace(torch.tensor(5.0))
assert "Quadratic.a" in trace.nodes
assert "Quadratic.linear1.a" in trace.nodes
assert "Quadratic.linear2.a" in trace.nodes
def test_inference():
class Linear(Parameterized):
def __init__(self, a):
super().__init__()
self.a = Parameter(a)
def forward(self, x):
return self.a * x
target_a = torch.tensor(2.0)
x_train = torch.rand(100)
y_train = target_a * x_train + torch.rand(100) * 0.001
linear = Linear(torch.tensor(1.0))
linear.a = PyroSample(dist.Normal(0, 10))
linear.autoguide("a", dist.Normal)
def model(x, y):
linear.set_mode("model")
mu = linear(x)
with pyro.plate("plate"):
return pyro.sample("y", dist.Normal(mu, 0.1), obs=y)
def guide(x, y):
linear.set_mode("guide")
linear._load_pyro_samples()
loss_fn = pyro.infer.Trace_ELBO().differentiable_loss
optimizer = torch.optim.Adam(linear.parameters(), lr=0.5)
def closure():
optimizer.zero_grad()
loss = loss_fn(model, guide, x_train, y_train)
loss.backward()
return loss
for i in range(200):
optimizer.step(closure)
linear.mode = "guide"
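    # In guide mode, reading linear.a draws from the fitted Normal guide (a_loc, a_scale),
    # so after training the sample should concentrate near target_a.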
assert_equal(linear.a, target_a, prec=0.05)
| [
"torch.zeros",
"torch.rand",
"torch.nn.Parameter",
"torch.eye",
"torch.tensor"
] | 1.9.0 | tianjuchen/pyro | d5b0545c4f992d435692080db6969314a2c32f05 |
1.9 | from __future__ import print_function, absolute_import
import torch
import torch.nn as nn
from torch.nn.modules.loss import _Loss
from super_gradients.training.utils import convert_to_tensor
class RSquaredLoss(_Loss):
def forward(self, output, target):
        # FIXME - THIS NEEDS TO BE CHANGED SUCH THAT THIS CLASS INHERITS FROM _Loss (TAKE A LOOK AT YoLoV3DetectionLoss)
"""Computes the R-squared for the output and target values
:param output: Tensor / Numpy / List
The prediction
:param target: Tensor / Numpy / List
            The corresponding labels
"""
# Convert to tensor
output = convert_to_tensor(output)
target = convert_to_tensor(target)
criterion_mse = nn.MSELoss()
return 1 - criterion_mse(output, target).item() / torch.var(target).item()
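if __name__ == "__main__":
    # Illustrative sanity check (not part of the original module): a perfect prediction
    # yields R^2 = 1.0, while a poor prediction drives the score well below 1 (here negative).
    target = torch.tensor([1.0, 2.0, 3.0, 4.0])
    print(RSquaredLoss()(target.clone(), target))            # 1.0
    print(RSquaredLoss()(torch.zeros_like(target), target))  # negative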
| [
"torch.var",
"torch.nn.MSELoss"
] | 1.9.0 | Deci-AI/super-gradients | bfed440ecaf485af183570bf965eb5b74cb9f832 |
1.9 | import torch.nn as nn
import torch
import torch.nn.functional as F
from torch.distributions import Bernoulli
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class SELayer(nn.Module):
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel),
nn.Sigmoid()
)
def forward(self, x):
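        # Squeeze: global average pooling reduces each channel to a scalar; excitation:
        # the two-layer bottleneck MLP with a sigmoid produces per-channel weights in
        # (0, 1) that rescale the input feature map.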
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y
class DropBlock(nn.Module):
def __init__(self, block_size):
super(DropBlock, self).__init__()
self.block_size = block_size
# self.gamma = gamma
# self.bernouli = Bernoulli(gamma)
def forward(self, x, gamma):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# shape: (bsize, channels, height, width)
if self.training:
batch_size, channels, height, width = x.shape
bernoulli = Bernoulli(gamma)
mask = bernoulli.sample(
(batch_size, channels, height - (self.block_size - 1), width - (self.block_size - 1))).to(device)
block_mask = self._compute_block_mask(mask)
countM = block_mask.size()[0] * block_mask.size()[1] * block_mask.size()[2] * block_mask.size()[3]
count_ones = block_mask.sum()
return block_mask * x * (countM / count_ones)
else:
return x
def _compute_block_mask(self, mask):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
left_padding = int((self.block_size - 1) / 2)
right_padding = int(self.block_size / 2)
batch_size, channels, height, width = mask.shape
# print ("mask", mask[0][0])
non_zero_idxs = mask.nonzero()
nr_blocks = non_zero_idxs.shape[0]
offsets = torch.stack(
[
torch.arange(self.block_size).view(-1, 1).expand(self.block_size, self.block_size).reshape(-1),
# - left_padding,
torch.arange(self.block_size).repeat(self.block_size), # - left_padding
]
).t().to(device)
offsets = torch.cat((torch.zeros(self.block_size ** 2, 2).to(device).long(), offsets.long()), 1)
if nr_blocks > 0:
non_zero_idxs = non_zero_idxs.repeat(self.block_size ** 2, 1)
offsets = offsets.repeat(nr_blocks, 1).view(-1, 4)
offsets = offsets.long()
block_idxs = non_zero_idxs + offsets
# block_idxs += left_padding
padded_mask = F.pad(mask, (left_padding, right_padding, left_padding, right_padding))
padded_mask[block_idxs[:, 0], block_idxs[:, 1], block_idxs[:, 2], block_idxs[:, 3]] = 1.
else:
padded_mask = F.pad(mask, (left_padding, right_padding, left_padding, right_padding))
block_mask = 1 - padded_mask # [:height, :width]
return block_mask
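# Note (illustrative, not part of the original file): DropBlock takes the drop
# probability `gamma` as a forward argument rather than storing it; BasicBlock.forward
# below derives gamma from drop_rate, block_size and the current feature-map size so
# that the expected fraction of dropped activations follows the schedule.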
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, drop_rate=0.0, drop_block=False,
block_size=1, use_se=False):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.LeakyReLU(0.1)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv3x3(planes, planes)
self.bn3 = nn.BatchNorm2d(planes)
self.maxpool = nn.MaxPool2d(stride)
self.downsample = downsample
self.stride = stride
self.drop_rate = drop_rate
self.num_batches_tracked = 0
self.drop_block = drop_block
self.block_size = block_size
self.DropBlock = DropBlock(block_size=self.block_size)
self.use_se = use_se
if self.use_se:
self.se = SELayer(planes, 4)
def forward(self, x):
self.num_batches_tracked += 1
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.use_se:
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
out = self.maxpool(out)
if self.drop_rate > 0:
            if self.drop_block:
feat_size = out.size()[2]
keep_rate = max(1.0 - self.drop_rate / (20 * 2000) * (self.num_batches_tracked), 1.0 - self.drop_rate)
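                # gamma rescales the target drop rate into a per-position Bernoulli
                # probability: each sampled centre expands into a block_size x block_size
                # square, so dividing by block_size^2 and scaling by
                # feat_size^2 / (feat_size - block_size + 1)^2 keeps the expected fraction
                # of dropped activations close to (1 - keep_rate) (DropBlock, Ghiasi et al., 2018).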
gamma = (1 - keep_rate) / self.block_size ** 2 * feat_size ** 2 / (feat_size - self.block_size + 1) ** 2
out = self.DropBlock(out, gamma=gamma)
else:
out = F.dropout(out, p=self.drop_rate, training=self.training, inplace=True)
return out
class ResNet(nn.Module):
def __init__(self, block, n_blocks, keep_prob=1.0, avg_pool=False, drop_rate=0.0,
dropblock_size=5, num_classes=-1, use_se=False):
super(ResNet, self).__init__()
self.inplanes = 3
self.use_se = use_se
self.layer1 = self._make_layer(block, n_blocks[0], 64,
stride=2, drop_rate=drop_rate)
self.layer2 = self._make_layer(block, n_blocks[1], 160,
stride=2, drop_rate=drop_rate)
self.layer3 = self._make_layer(block, n_blocks[2], 320,
stride=2, drop_rate=drop_rate, drop_block=True, block_size=dropblock_size)
self.layer4 = self._make_layer(block, n_blocks[3], 640,
stride=2, drop_rate=drop_rate, drop_block=True, block_size=dropblock_size)
if avg_pool:
# self.avgpool = nn.AvgPool2d(5, stride=1)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.keep_prob = keep_prob
self.keep_avg_pool = avg_pool
self.dropout = nn.Dropout(p=1 - self.keep_prob, inplace=False)
self.drop_rate = drop_rate
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
self.num_classes = num_classes
if self.num_classes > 0:
self.classifier = nn.Linear(640, self.num_classes)
def _make_layer(self, block, n_block, planes, stride=1, drop_rate=0.0, drop_block=False, block_size=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
if n_block == 1:
layer = block(self.inplanes, planes, stride, downsample, drop_rate, drop_block, block_size, self.use_se)
else:
            layer = block(self.inplanes, planes, stride, downsample, drop_rate, use_se=self.use_se)
layers.append(layer)
self.inplanes = planes * block.expansion
for i in range(1, n_block):
if i == n_block - 1:
layer = block(self.inplanes, planes, drop_rate=drop_rate, drop_block=drop_block,
block_size=block_size, use_se=self.use_se)
else:
layer = block(self.inplanes, planes, drop_rate=drop_rate, use_se=self.use_se)
layers.append(layer)
return nn.Sequential(*layers)
def forward(self, x, is_feat=False):
x = self.layer1(x)
f0 = x
x = self.layer2(x)
f1 = x
x = self.layer3(x)
f2 = x
x = self.layer4(x)
f3 = x
if self.keep_avg_pool:
x = self.avgpool(x)
x = x.view(x.size(0), -1)
feat = x
if self.num_classes > 0:
x = self.classifier(x)
if is_feat:
return [f0, f1, f2, f3, feat], x
else:
return x
def get_embedding(self, x):
[f0, f1, f2, f3, feat], x = self.forward(x, is_feat=True)
return feat
def resnet12(keep_prob=1.0, avg_pool=False, **kwargs):
"""Constructs a ResNet-12 model.
"""
model = ResNet(BasicBlock, [1, 1, 1, 1], keep_prob=keep_prob, avg_pool=avg_pool, **kwargs)
return model
def resnet18(keep_prob=1.0, avg_pool=False, **kwargs):
"""Constructs a ResNet-18 model.
"""
model = ResNet(BasicBlock, [1, 1, 2, 2], keep_prob=keep_prob, avg_pool=avg_pool, **kwargs)
return model
def resnet24(keep_prob=1.0, avg_pool=False, **kwargs):
"""Constructs a ResNet-24 model.
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], keep_prob=keep_prob, avg_pool=avg_pool, **kwargs)
return model
def resnet50(keep_prob=1.0, avg_pool=False, **kwargs):
"""Constructs a ResNet-50 model.
indeed, only (3 + 4 + 6 + 3) * 3 + 1 = 49 layers
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], keep_prob=keep_prob, avg_pool=avg_pool, **kwargs)
return model
def resnet101(keep_prob=1.0, avg_pool=False, **kwargs):
"""Constructs a ResNet-101 model.
indeed, only (3 + 4 + 23 + 3) * 3 + 1 = 100 layers
"""
model = ResNet(BasicBlock, [3, 4, 23, 3], keep_prob=keep_prob, avg_pool=avg_pool, **kwargs)
return model
def seresnet12(keep_prob=1.0, avg_pool=False, **kwargs):
"""Constructs a ResNet-12 model.
"""
model = ResNet(BasicBlock, [1, 1, 1, 1], keep_prob=keep_prob, avg_pool=avg_pool, use_se=True, **kwargs)
return model
def seresnet18(keep_prob=1.0, avg_pool=False, **kwargs):
"""Constructs a ResNet-18 model.
"""
model = ResNet(BasicBlock, [1, 1, 2, 2], keep_prob=keep_prob, avg_pool=avg_pool, use_se=True, **kwargs)
return model
def seresnet24(keep_prob=1.0, avg_pool=False, **kwargs):
"""Constructs a ResNet-24 model.
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], keep_prob=keep_prob, avg_pool=avg_pool, use_se=True, **kwargs)
return model
def seresnet50(keep_prob=1.0, avg_pool=False, **kwargs):
"""Constructs a ResNet-50 model.
indeed, only (3 + 4 + 6 + 3) * 3 + 1 = 49 layers
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], keep_prob=keep_prob, avg_pool=avg_pool, use_se=True, **kwargs)
return model
def seresnet101(keep_prob=1.0, avg_pool=False, **kwargs):
"""Constructs a ResNet-101 model.
indeed, only (3 + 4 + 23 + 3) * 3 + 1 = 100 layers
"""
model = ResNet(BasicBlock, [3, 4, 23, 3], keep_prob=keep_prob, avg_pool=avg_pool, use_se=True, **kwargs)
return model
if __name__ == '__main__':
from types import SimpleNamespace
# import argparse
#
# parser = argparse.ArgumentParser('argument for training')
# parser.add_argument('--model', type=str, choices=['resnet12', 'resnet18', 'resnet24', 'resnet50', 'resnet101',
# 'seresnet12', 'seresnet18', 'seresnet24', 'seresnet50',
# 'seresnet101'])
# args = parser.parse_args()
args = SimpleNamespace(model='resnet12')
args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_dict = {
'resnet12': resnet12,
'resnet18': resnet18,
'resnet24': resnet24,
'resnet50': resnet50,
'resnet101': resnet101,
'seresnet12': seresnet12,
'seresnet18': seresnet18,
'seresnet24': seresnet24,
'seresnet50': seresnet50,
'seresnet101': seresnet101,
}
model = model_dict[args.model](avg_pool=True, drop_rate=0.1, dropblock_size=5, num_classes=64).to(args.device)
data = torch.randn(2, 3, 84, 84)
model = model.to(args.device)
data = data.to(args.device)
feat, logit = model(data, is_feat=True)
print(feat[-1].shape)
print(logit.shape)
print("DONE")
| [
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.nn.LeakyReLU",
"torch.nn.init.kaiming_normal_",
"torch.distributions.Bernoulli",
"torch.cuda.is_available",
"torch.nn.functional.pad",
"torch.nn.MaxPool2d",
"torch.nn.init.constant_",
"torch.zeros",
"torch.nn.Sequential",
"torch.nn.functional.dropout",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.Dropout",
"torch.arange",
"torch.nn.Sigmoid",
"torch.nn.AdaptiveAvgPool2d",
"torch.randn"
] | 1.9.1 | brando90/Does-MAML-Only-Work-via-Feature-Re-use-A-Data-Set-Centric-Perspective | 45c4fabf35d6d8d19e49092e84e8ac9fa55aee8d |
1.9 | import torch
import torch.nn as nn
from collections import OrderedDict
# import torchvision.transforms as transforms
# from torch.models.custom_layers import Flatten
class Flatten(nn.Module):
def forward(self, input):
batch_size = input.size(0)
out = input.view(batch_size,-1)
return out # (batch_size, *size)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f'{torch.cuda.is_available()}')
print(f'Device = {device}')
assert(torch.cuda.is_available())
data = torch.randn(2, 3, 84, 84).to(device)
out_features = 64
model = nn.Sequential(OrderedDict([
('features', nn.Sequential(OrderedDict([('flatten', Flatten())]))),
('cls', torch.nn.Linear(in_features=84 * 84 * 3, out_features=out_features, bias=True))
]))
model = nn.Sequential(OrderedDict([('model', model)])).to(device)
out = model(data)
print(out.sum())
print('Success! Your code works with gpu')
| [
"torch.nn.Linear",
"torch.cuda.is_available",
"torch.randn"
] | 1.9.1 | brando90/Does-MAML-Only-Work-via-Feature-Re-use-A-Data-Set-Centric-Perspective | 45c4fabf35d6d8d19e49092e84e8ac9fa55aee8d |
1.9 | """
Union of data sets for SL training.
"""
from typing import Union
import torchvision
from torch import Tensor
from torch.utils.data import Dataset
from pathlib import Path
import torch
from task2vec import Task2Vec
from models import get_model
import datasets
import task_similarity
def get_datasets(root: Union[str, Path], dataset_names: list[str]) -> list[torchvision.datasets]:
import datasets
root: Path = Path(root).expanduser() if isinstance(root, str) else root.expanduser()
data_sets: list[torchvision.datasets] = [datasets.__dict__[name](root=root)[0] for name in dataset_names]
return data_sets
class UnionDatasets(Dataset):
"""
todo:
- bisect into the right data set
- make sure we are using the right split
"""
def __init__(self, root: Union[str, Path], dataset_names: list[str], split: str):
root: Path = Path(root).expanduser() if isinstance(root, str) else root.expanduser()
# - set fields
self.root: Path = root
self.dataset_names: list[str] = dataset_names
        self.split = split
# - get data sets
        self.data_sets: list[torchvision.datasets] = get_datasets(root, dataset_names)
def __len__(self):
        total_number_of_data_examples: int = sum([len(dataset) for dataset in self.data_sets])
        return total_number_of_data_examples
def __getitem__(self, idx: int):
pass
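        # A minimal sketch of the intended indexing (an assumption, not the author's code):
        # map the global index into the right child dataset via cumulative lengths, e.g.
        #   import bisect, itertools
        #   cum = list(itertools.accumulate(len(d) for d in self.data_sets))
        #   ds_idx = bisect.bisect_right(cum, idx)
        #   local_idx = idx - (cum[ds_idx - 1] if ds_idx > 0 else 0)
        #   return self.data_sets[ds_idx][local_idx]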
# - tests
def go_through_hdml1_test():
# - get data set list
# dataset_names = ('stl10', 'mnist', 'cifar10', 'cifar100', 'letters', 'kmnist')
# dataset_names = ('mnist',)
dataset_names = ('stl10', 'letters', 'kmnist')
root: Path = Path('~/data').expanduser()
print(f'{root=}')
dataset_list: list[torchvision.datasets] = [datasets.__dict__[name](root=root)[0] for name in dataset_names]
print(f'{dataset_list=}')
device = torch.device(f"cuda:{0}" if torch.cuda.is_available() else "cpu")
print(f'{device=}')
# - get union data loader
    union_datasets: UnionDatasets = UnionDatasets(root, dataset_names, split='train')  # split='train' is an arbitrary choice to satisfy the constructor
# - go through the union data loader
if __name__ == '__main__':
go_through_hdml1_test()
print('Done!\n\a')
| [
"torch.cuda.is_available"
] | 1.9.1 | brando90/Does-MAML-Only-Work-via-Feature-Re-use-A-Data-Set-Centric-Perspective | 45c4fabf35d6d8d19e49092e84e8ac9fa55aee8d |
1.7 | import os
import torch
from tensorboardX import SummaryWriter
use_cuda = torch.cuda.is_available()
default_checkpoint = {
"epoch": 0,
"train_losses": [],
"train_symbol_accuracy": [],
"train_sentence_accuracy": [],
"train_wer": [],
"train_score": [],
"validation_losses": [],
"validation_symbol_accuracy": [],
"validation_sentence_accuracy": [],
"validation_wer": [],
"validation_score": [],
"lr": [],
"grad_norm": [],
"model": {},
"configs":{},
"token_to_id":{},
"id_to_token":{},
}
def save_checkpoint(checkpoint, dir="./checkpoints", prefix=""):
""" Saving check point
Args:
checkpoint(dict) : Checkpoint to save
dir(str) : Path to save the checkpoint
prefix(str) : Path of location of dir
"""
# Padded to 4 digits because of lexical sorting of numbers.
# e.g. 0009.pth
filename = "{num:0>4}.pth".format(num=checkpoint["epoch"])
if not os.path.exists(os.path.join(prefix, dir)):
os.makedirs(os.path.join(prefix, dir))
torch.save(checkpoint, os.path.join(prefix, dir, filename))
def load_checkpoint(path, cuda=use_cuda):
""" Load check point
Args:
path(str) : Path checkpoint located
cuda : Whether use cuda or not [Default: use_cuda]
Returns
Loaded checkpoints
"""
if cuda:
return torch.load(path)
else:
# Load GPU model on CPU
return torch.load(path, map_location=lambda storage, loc: storage)
def init_tensorboard(name="", base_dir="./tensorboard"):
"""Init tensorboard
Args:
name(str) : name of tensorboard
        base_dir(str): path of tensorboard
"""
    return SummaryWriter(os.path.join(base_dir, name))
def write_tensorboard(
writer,
epoch,
grad_norm,
train_loss,
train_symbol_accuracy,
train_sentence_accuracy,
train_wer,
train_score,
validation_loss,
validation_symbol_accuracy,
validation_sentence_accuracy,
validation_wer,
validation_score,
model,
):
writer.add_scalar("train_loss", train_loss, epoch)
writer.add_scalar("train_symbol_accuracy", train_symbol_accuracy, epoch)
writer.add_scalar("train_sentence_accuracy",train_sentence_accuracy,epoch)
writer.add_scalar("train_wer", train_wer, epoch)
writer.add_scalar("train_score", train_score, epoch)
writer.add_scalar("validation_loss", validation_loss, epoch)
writer.add_scalar("validation_symbol_accuracy", validation_symbol_accuracy, epoch)
writer.add_scalar("validation_sentence_accuracy",validation_sentence_accuracy,epoch)
writer.add_scalar("validation_wer",validation_wer,epoch)
writer.add_scalar("validation_score", validation_score, epoch)
writer.add_scalar("grad_norm", grad_norm, epoch)
for name, param in model.encoder.named_parameters():
writer.add_histogram(
"encoder/{}".format(name), param.detach().cpu().numpy(), epoch
)
if param.grad is not None:
writer.add_histogram(
"encoder/{}/grad".format(name), param.grad.detach().cpu().numpy(), epoch
)
for name, param in model.decoder.named_parameters():
writer.add_histogram(
"decoder/{}".format(name), param.detach().cpu().numpy(), epoch
)
if param.grad is not None:
writer.add_histogram(
"decoder/{}/grad".format(name), param.grad.detach().cpu().numpy(), epoch
)
| [
"torch.cuda.is_available",
"torch.load"
] | 1.7.1 | bsm8734/formula-image-latex-recognition | 86d5070e8f907571a47967d64facaee246d92a35 |
1.3 | """!
@brief Running an experiment with the improved version of SuDoRmRf on
universal source separation with multiple sources.
@author Efthymios Tzinis {[email protected]}
@copyright University of Illinois at Urbana-Champaign
"""
import os
import sys
current_dir = os.path.dirname(os.path.abspath('__file__'))
root_dir = os.path.abspath(os.path.join(current_dir, '../../../'))
sys.path.append(root_dir)
from __config__ import API_KEY
from comet_ml import Experiment, OfflineExperiment
import torch
from torch.nn import functional as F
from tqdm import tqdm
from pprint import pprint
import sudo_rm_rf.dnn.experiments.utils.improved_cmd_args_parser_v2 as parser
import sudo_rm_rf.dnn.experiments.utils.mixture_consistency \
as mixture_consistency
import sudo_rm_rf.dnn.experiments.utils.dataset_setup as dataset_setup
import sudo_rm_rf.dnn.losses.sisdr as sisdr_lib
import sudo_rm_rf.dnn.losses.snr as snr_lib
import sudo_rm_rf.dnn.losses.norm as norm_lib
import sudo_rm_rf.dnn.models.improved_sudormrf as improved_sudormrf
import sudo_rm_rf.dnn.models.groupcomm_sudormrf_v2 as sudormrf_gc_v2
import sudo_rm_rf.dnn.models.causal_improved_sudormrf_v3 as \
causal_improved_sudormrf
import sudo_rm_rf.dnn.models.sudormrf as initial_sudormrf
import sudo_rm_rf.dnn.utils.cometml_loss_report as cometml_report
import sudo_rm_rf.dnn.utils.cometml_log_audio as cometml_audio_logger
import sudo_rm_rf.dnn.utils.log_audio as offline_audio_logger
# torch.backends.cudnn.enabled = False
args = parser.get_args()
hparams = vars(args)
generators = dataset_setup.setup(hparams)
# Hardcode n_sources for all the experiments with musdb
assert hparams['n_channels'] == 1, 'Only mono source separation is supported for now'
audio_loggers = dict(
[(n_src,
cometml_audio_logger.AudioLogger(fs=hparams["fs"],
bs=1,
n_sources=n_src))
for n_src in range(1, hparams['max_num_sources'] + 1)])
# offline_savedir = os.path.join('/home/thymios/offline_exps',
# hparams["project_name"],
# '_'.join(hparams['cometml_tags']))
# if not os.path.exists(offline_savedir):
# os.makedirs(offline_savedir)
# audio_logger = offline_audio_logger.AudioLogger(dirpath=offline_savedir,
# fs=hparams["fs"], bs=hparams["batch_size"], n_sources=4)
# Hardcode the test generator for each one of the number of sources
for n_src in range(hparams['min_num_sources'], hparams['max_num_sources']+1):
for split_name in ['val', 'test']:
loader = dataset_setup.create_loader_for_simple_dataset(
dataset_name='FUSS',
separation_task=hparams['separation_task'],
data_split=split_name, sample_rate=hparams['fs'],
n_channels=hparams['n_channels'], min_or_max=hparams['min_or_max'],
zero_pad=hparams['zero_pad_audio'],
timelegth=hparams['audio_timelength'],
normalize_audio=hparams['normalize_audio'],
n_samples=0, min_num_sources=n_src, max_num_sources=n_src)
gen_name = '{}_{}_srcs'.format(split_name, n_src)
generators[gen_name] = loader.get_generator(
batch_size=hparams['batch_size'], num_workers=hparams['n_jobs'])
# experiment = OfflineExperiment(API_KEY, offline_directory=offline_savedir)
experiment = Experiment(API_KEY, project_name=hparams['project_name'])
experiment.log_parameters(hparams)
experiment_name = '_'.join(hparams['cometml_tags'])
for tag in hparams['cometml_tags']:
experiment.add_tag(tag)
if hparams['experiment_name'] is not None:
experiment.set_name(hparams['experiment_name'])
else:
experiment.set_name(experiment_name)
os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
[cad for cad in hparams['cuda_available_devices']])
back_loss_tr_loss_name, back_loss_tr_loss = (
'tr_back_loss_SNR',
# norm_lib.L1(return_individual_results=False)
# norm_lib.PermInvariantL1(n_sources=hparams["n_sources"],
# weighted_norm=True)
# 'tr_back_loss_SISDRi',
snr_lib.PermInvariantSNRwithZeroRefs(
n_sources=hparams["max_num_sources"],
zero_mean=False,
backward_loss=True,
inactivity_threshold=-40.)
)
val_losses = {}
all_losses = []
for val_set in [x for x in generators if not x == 'train']:
if generators[val_set] is None:
continue
n_actual_sources = int(val_set.split('_')[1])
if n_actual_sources == 1:
single_source = False
improvement = False
metric_name = 'SISDR'
n_estimated_sources = 1
else:
single_source = False
improvement = True
n_estimated_sources = hparams['max_num_sources']
metric_name = 'SISDRi'
val_losses[val_set] = {}
all_losses.append(val_set + '_{}'.format(metric_name))
val_losses[val_set][val_set + '_{}'.format(metric_name)] = \
sisdr_lib.StabilizedPermInvSISDRMetric(
zero_mean=True,
single_source=single_source,
n_estimated_sources=n_estimated_sources,
n_actual_sources=n_actual_sources,
backward_loss=False,
improvement=improvement,
return_individual_results=True)
all_losses.append(back_loss_tr_loss_name)
if hparams['model_type'] == 'relu':
model = improved_sudormrf.SuDORMRF(out_channels=hparams['out_channels'],
in_channels=hparams['in_channels'],
num_blocks=hparams['num_blocks'],
upsampling_depth=hparams['upsampling_depth'],
enc_kernel_size=hparams['enc_kernel_size'],
enc_num_basis=hparams['enc_num_basis'],
num_sources=hparams['max_num_sources'])
elif hparams['model_type'] == 'causal':
model = causal_improved_sudormrf.CausalSuDORMRF(
in_audio_channels=1,
out_channels=hparams['out_channels'],
in_channels=hparams['in_channels'],
num_blocks=hparams['num_blocks'],
upsampling_depth=hparams['upsampling_depth'],
enc_kernel_size=hparams['enc_kernel_size'],
enc_num_basis=hparams['enc_num_basis'],
num_sources=hparams['max_num_sources'])
elif hparams['model_type'] == 'softmax':
model = initial_sudormrf.SuDORMRF(out_channels=hparams['out_channels'],
in_channels=hparams['in_channels'],
num_blocks=hparams['num_blocks'],
upsampling_depth=hparams['upsampling_depth'],
enc_kernel_size=hparams['enc_kernel_size'],
enc_num_basis=hparams['enc_num_basis'],
num_sources=hparams['max_num_sources'])
elif hparams['model_type'] == 'groupcomm_v2':
model = sudormrf_gc_v2.GroupCommSudoRmRf(
in_audio_channels=hparams['n_channels'],
out_channels=hparams['out_channels'],
in_channels=hparams['in_channels'],
num_blocks=hparams['num_blocks'],
upsampling_depth=hparams['upsampling_depth'],
enc_kernel_size=hparams['enc_kernel_size'],
enc_num_basis=hparams['enc_num_basis'],
num_sources=hparams['max_num_sources'],
group_size=16)
else:
raise ValueError('Invalid model: {}.'.format(hparams['model_type']))
numparams = 0
for f in model.parameters():
if f.requires_grad:
numparams += f.numel()
experiment.log_parameter('Parameters', numparams)
print('Trainable Parameters: {}'.format(numparams))
model = torch.nn.DataParallel(model).cuda()
opt = torch.optim.Adam(model.parameters(), lr=hparams['learning_rate'])
# lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
# optimizer=opt, mode='max', factor=1. / hparams['divide_lr_by'],
# patience=hparams['patience'], verbose=True)
def normalize_tensor_wav(wav_tensor, eps=1e-8, std=None):
mean = wav_tensor.mean(-1, keepdim=True)
if std is None:
std = wav_tensor.std(-1, keepdim=True)
return (wav_tensor - mean) / (std + eps)
def online_augment(clean_sources):
# clean_sources: (batch, n_sources, time)
    # Online mixing over samples of the batch. (This might produce mixtures
    # containing sounds of the same type, but that is highly improbable.)
# Keep the exact same SNR distribution with the initial mixtures.
n_sources = clean_sources.shape[1]
batch_size = clean_sources.shape[0]
initial_biases = torch.mean(clean_sources, dim=-1, keepdim=True)
initial_energies = torch.std(clean_sources, dim=-1, keepdim=True)
augmented_wavs_l = []
for i in range(n_sources):
augmented_wavs_l.append(clean_sources[torch.randperm(batch_size), i])
augmented_wavs = torch.stack(augmented_wavs_l, 1)
# augmented_wavs = normalize_tensor_wav(augmented_wavs)
# augmented_wavs = (augmented_wavs * initial_energies) + initial_biases
augmented_wavs = augmented_wavs[:, torch.randperm(n_sources)]
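    # Random per-source gains drawn uniformly from [0.5, 1.5) rescale each source,
    # varying the relative energies (and hence SNRs) of the synthetic mixtures.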
augmented_wavs *= (torch.rand(batch_size, n_sources).unsqueeze(-1) + 0.5)
return augmented_wavs
tr_step = 0
val_step = 0
prev_epoch_val_loss = 0.
for i in range(hparams['n_epochs']):
res_dic = {}
for loss_name in all_losses:
res_dic[loss_name] = {'mean': 0., 'std': 0., 'median': 0., 'acc': []}
print("FUSS Sudo-RM-RF: {} - {} || Epoch: {}/{}".format(
experiment.get_key(), experiment.get_tags(), i+1, hparams['n_epochs']))
model.train()
sum_loss = 0.
train_tqdm_gen = tqdm(generators['train'], desc='Training')
for cnt, data in enumerate(train_tqdm_gen):
opt.zero_grad()
# data shape: (batch, n_sources, time_samples)
clean_wavs = online_augment(data)
clean_wavs = clean_wavs.cuda()
input_mixture = torch.sum(clean_wavs, -2, keepdim=True)
# input_mixture = normalize_tensor_wav(input_mixture)
input_mix_std = input_mixture.std(-1, keepdim=True)
input_mix_mean = input_mixture.mean(-1, keepdim=True)
input_mixture = (input_mixture - input_mix_mean) / (
input_mix_std + 1e-9)
# input_mix_std = input_mixture.std(-1, keepdim=True)
# input_mix_mean = input_mixture.mean(-1, keepdim=True)
# input_mixture = (input_mixture - input_mix_mean) / (input_mix_std + 1e-9)
# clean_wavs = normalize_tensor_wav(clean_wavs, std=input_mix_std)
rec_sources_wavs = model(input_mixture)
# rec_sources_wavs = (rec_sources_wavs * input_mix_std) + input_mix_mean
rec_sources_wavs = mixture_consistency.apply(rec_sources_wavs,
input_mixture)
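        # Mixture-consistency projection: redistribute the residual between the input mixture
        # and the sum of the estimates so the separated sources add back up to the mixture.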
# l = back_loss_tr_loss(normalize_tensor_wav(rec_sources_wavs),
# normalize_tensor_wav(clean_wavs))
l = back_loss_tr_loss(rec_sources_wavs,
clean_wavs)
l.backward()
if hparams['clip_grad_norm'] > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(),
hparams['clip_grad_norm'])
opt.step()
sum_loss += l.detach().item()
train_tqdm_gen.set_description(
"Training, Running Avg Loss: {}".format(sum_loss / (cnt + 1)))
if hparams['patience'] > 0:
if tr_step % hparams['patience'] == 0:
new_lr = (hparams['learning_rate']
/ (hparams['divide_lr_by'] ** (tr_step // hparams['patience'])))
print('Reducing Learning rate to: {}'.format(new_lr))
for param_group in opt.param_groups:
param_group['lr'] = new_lr
tr_step += 1
for val_set in [x for x in generators if not x == 'train']:
if generators[val_set] is not None:
n_actual_sources = int(val_set.split('_')[1])
model.eval()
n_songs_written = 10
with torch.no_grad():
for data in tqdm(generators[val_set],
desc='Validation on {}'.format(val_set)):
clean_wavs = data.cuda()
input_mixture = torch.sum(clean_wavs, -2, keepdim=True)
# input_mixture = normalize_tensor_wav(input_mixture)
input_mix_std = input_mixture.std(-1, keepdim=True)
input_mix_mean = input_mixture.mean(-1, keepdim=True)
input_mixture = (input_mixture - input_mix_mean) / (
input_mix_std + 1e-9)
rec_sources_wavs = model(input_mixture)
# rec_sources_wavs = (rec_sources_wavs * input_mix_std) + input_mix_mean
rec_sources_wavs = mixture_consistency.apply(
rec_sources_wavs,
input_mixture)
for loss_name, loss_func in val_losses[val_set].items():
# l, best_perm = loss_func(
# normalize_tensor_wav(rec_sources_wavs),
# normalize_tensor_wav(clean_wavs),
# return_best_permutation=True)
l, best_perm = loss_func(
rec_sources_wavs,
clean_wavs,
return_best_permutation=True)
res_dic[loss_name]['acc'] += l.tolist()
audio_loggers[n_actual_sources].log_batch(
rec_sources_wavs[:, best_perm.long().cuda()][0, 0].unsqueeze(0),
clean_wavs[0].unsqueeze(0),
input_mixture[0].unsqueeze(0),
experiment, step=val_step, tag=val_set)
val_step += 1
res_dic = cometml_report.report_losses_mean_and_std(res_dic,
experiment,
tr_step,
val_step)
for loss_name in res_dic:
res_dic[loss_name]['acc'] = []
pprint(res_dic)
| [
"torch.rand",
"torch.stack",
"torch.no_grad",
"torch.std",
"torch.randperm",
"torch.mean",
"torch.nn.DataParallel",
"torch.sum"
] | 1.3.0 | ishine/sudo_rm_rf | ec3fae1e2c9d85710f933a600f3ab93f92468dee |
1.6 | """This script is used to measure output dispersion score of synthetic datasets
"""
import os
import sys
import numpy as np
import torch
import random
import tqdm
import time
from pathlib import Path
from os.path import join
from model.model import EncoderDecoder
sys.path.append(join(os.path.dirname(os.path.abspath(__file__)), "../"))
from dataset.toy_dataset.toydataset import ToyDataset
from auxiliary.my_utils import plant_seeds
from auxiliary.metric_parser import parser
from model.pseudo_network import Generator
from eval.metric import ChamferDistanceL2, compute_ptcloud_dismatrix_batch, cluster_eval
from eval.eval_utils import get_logger, CountFrequency, dic_to_array, mean_std
import auxiliary.ChamferDistancePytorch.chamfer3D.dist_chamfer_3D as dist_chamfer_3D
opt = parser()
###Mkdir and logger
opt.device = torch.device("cuda")
res_path = join(opt.dir_name, opt.res_folder)
Path(res_path).mkdir(parents=True, exist_ok=True)
proc_logger = get_logger("process", res_path, "process.log")
res_logger = get_logger("results", res_path, "score.log")
opt.logger = proc_logger
print(opt.trained_exp_dir)
nviews_dic = {"train":opt.nviews_train, "test":opt.nviews_test}
num_seed = max(len(opt.seed_list), 1)
score_collect = {}
eval_label_list = set()
for seed_idx in range(num_seed):
if opt.seed_list:
opt.seed = opt.seed_list[seed_idx]
score_collect.update({str(opt.seed):{}})
plant_seeds(opt.seed)
##Loading Data and Network
if opt.split == 'pred':
eval_loss = ChamferDistanceL2().to(opt.device)
distChamfer = dist_chamfer_3D.chamfer_3DDist()
if opt.network=='atlasnet':
network = EncoderDecoder(opt)
opt.logger.info(f"Reloading Network Weights from {opt.reload_model_path}...")
network.load_state_dict(torch.load(opt.reload_model_path)['model_state_dict'])
network.to(opt.device)
if opt.split == "train":
dataset = ToyDataset(data_base_dir=opt.data_base_dir,
json_file=opt.train_json_file,
num_points=opt.number_points,
train=True,
normalization=opt.normalization,
logger=opt.logger)
elif opt.split == "test" or opt.split == "pred":
dataset = ToyDataset(data_base_dir=opt.data_base_dir,
json_file=opt.test_json_file,
num_points=opt.number_points,
train=False,
normalization=opt.normalization,
logger=opt.logger)
else:
raise NotImplementedError()
loader = torch.utils.data.DataLoader(dataset,
batch_size=opt.pred_batch_size,
shuffle=False, num_workers=8)
if opt.rsample == 1:
sample_num = len(dataset)
opt.nsample = len(dataset)
else:
if opt.rsample != -1:
opt.nsample = int(opt.rsample * len(dataset))
subset_index = random.sample(range(len(dataset)), opt.nsample)
dataset = torch.utils.data.Subset(dataset, subset_index)
sample_num = len(subset_index)
data = None
pred_loss = 0.0
with torch.set_grad_enabled(False):
for batch in tqdm.tqdm(loader, desc=f"loading {opt.split} {opt.type} data"):
if opt.split == 'pred':
input_img = batch['image'].to(opt.device)
pred_points = network(input_img, train=False)
pred_points = pred_points.transpose(2, 3).contiguous()
B = pred_points.shape[0]
pred_points = pred_points.view(B, -1, 3)
gt_points = batch['points'].to(opt.device)
assert gt_points.shape[0] == B, f'gt {gt_points.shape[0]}, while pred {B}'
if data is None:
data = pred_points
else:
data = torch.cat((data, pred_points), dim=0)
pred_loss += eval_loss(gt_points, pred_points).item()
dist1, dist2, idx1, idx2 = distChamfer(gt_points, pred_points)
opt.type = 'points'
pred_loss /= len(loader)
proc_logger.info(f"Pred Chamfer Loss: {pred_loss:4f}")
start_time = time.time()
if opt.type == 'points':
data = data.to(opt.device)
metric = ChamferDistanceL2().to(opt.device)
distance_matrix = compute_ptcloud_dismatrix_batch(data, data, metric,
opt.pred_batch_size, opt.device, proc_logger)
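        # Pairwise Chamfer-L2 distances between every pair of predicted point clouds; the
        # clustering-based dispersion score below is computed on this N x N distance matrix.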
else:
raise NotImplementedError()
elasp_time = (time.time() - start_time) / 60
distance_matrix = distance_matrix.cpu().numpy()
score_collect[str(opt.seed)].update({"dm": distance_matrix})
score_collect[str(opt.seed)].update({"pred_chamfer": pred_loss})
n_evals = len(opt.perf_pc_list)
for index in range(n_evals):
c_method, e_method, n_cluster, perf_pc = opt.c_method[index], opt.e_method[index], opt.cluster_k[index], opt.perf_pc_list[index]
score, part_label = cluster_eval(c_method=c_method, e_method=e_method, distance_matrix=distance_matrix,
seed=opt.seed, n_cluster=n_cluster, pc=perf_pc)
label_stat_verbose = ""
freq = CountFrequency(part_label)
for key, value in freq.items():
label_stat_verbose += "% d :% d | "%(key, value)
proc_logger.info(f"{opt.type} mode: {opt.mode}, split: {opt.split} " +
f"nviews: train {opt.nviews_train}, test {opt.nviews_test}, sample num:{sample_num} " +
f"seed{opt.seed}, metric{opt.metric} perf{perf_pc}% " +
f"samp{distance_matrix.shape[0]}, Pred Chamfer: {pred_loss:.4f}, score: {score:.4f} DM" +
f"{distance_matrix.shape[0]}, compute time {elasp_time:2f} min")
eval_label = f"{c_method}_{e_method}_k{n_cluster}p{perf_pc}"
score_collect[str(opt.seed)].update({eval_label: {}})
eval_label_list.add(eval_label)
score_collect[str(opt.seed)][eval_label].update({"score": score})
score_collect[str(opt.seed)][eval_label].update({"label": np.array(part_label)}) # cluster label
score_collect[str(opt.seed)][eval_label].update({"perf_percent": perf_pc})
score_collect[str(opt.seed)][eval_label].update({"label_stats": dic_to_array(freq)})
eval_label_list = list(eval_label_list)
eval_label_list.sort()
ss_list = {}
for eval_label in eval_label_list:
ss_list.update({eval_label:[]})
pred_list = []
for seed in score_collect:
pred_list.append(score_collect[seed]['pred_chamfer'])
for eval_label in eval_label_list:
ss_list[eval_label].append(score_collect[seed][eval_label]["score"])
for eval_label in eval_label_list:
avg_score_lst = [score/sample_num for score in ss_list[eval_label]]
ss_mean, ss_std = mean_std(ss_list[eval_label])
avg_ss_mean, avg_ss_std = mean_std(avg_score_lst)
score_collect.update({f'{eval_label}': np.array([ss_mean, ss_std])})
score_collect.update({f'avg_{eval_label}': np.array([avg_ss_mean, avg_ss_std])})
pred_loss_mean, pred_loss_std = mean_std(pred_list)
score_collect.update({'split': opt.split})
score_collect.update({'type': opt.type})
score_collect.update({'mode': opt.mode})
score_collect.update({'sample_num': sample_num})
score_collect.update({'chamfer_stats': np.array([pred_loss_mean, pred_loss_std])})
score_collect.update({'trainnv': np.array([opt.nviews_train])})
score_collect.update({'testnv': np.array([opt.nviews_test])})
for eval_label in eval_label_list:
ss_mean, ss_std = score_collect[f'{eval_label}'][0], score_collect[f'{eval_label}'][1]
avg_ss_mean, avg_ss_std = score_collect[f'avg_{eval_label}'][0], score_collect[f'avg_{eval_label}'][1]
res_logger.info(f"{opt.network} {opt.type} mode: {opt.mode}, split: {opt.split}, " +
f"nviews: train {opt.nviews_train}, test {opt.nviews_test}, sample num: {sample_num} " +
f"seed_list {opt.seed_list}, metric {opt.metric} perf: {perf_pc} % {opt.metric} {opt.trained_exp_dir} {eval_label} " +
f"Sum of Score: (mean: {ss_mean:.4f}|std: {ss_std:.4f}) "+
f"Average Score: (mean: {avg_ss_mean:.4f}|std: {avg_ss_std:.4f}) "+
f"Pred Chamfer: (mean:{pred_loss_mean:.4f}|std: {pred_loss_std:.4f}) " +
f"DM compute time {elasp_time:.2f} min")
np.savez_compressed(os.path.join(res_path,
f"{opt.network}_{opt.mode}_{opt.split}_{opt.type}_{sample_num}_{opt.trained_exp_dir.split('/')[-1]}.npz"), **score_collect)
res_logger.info(f"###############END OF {opt.type} {opt.network} {opt.trained_exp_dir} PIPELINE#################")
| [
"torch.device",
"torch.cat",
"torch.utils.data.DataLoader",
"torch.load",
"torch.utils.data.Subset",
"torch.set_grad_enabled"
] | 1.6.0 | nsfzyzz/dispersion-score | ac0c633fe3af091e83d2d198809d98545a0a311a |
0.4 | from torch import nn
from torch.nn import init
import torch.nn.functional as F
from core.config import cfg
from modeling.generate_anchors import generate_anchors
from modeling.generate_proposals import GenerateProposalsOp
from modeling.generate_proposal_labels import GenerateProposalLabelsOp
import modeling.FPN as FPN
import utils.net as net_utils
from model.utils.loss import focal_loss
# ---------------------------------------------------------------------------- #
# RPN and Faster R-CNN outputs and losses
# ---------------------------------------------------------------------------- #
def generic_rpn_outputs(dim_in, spatial_scale_in):
"""Add RPN outputs (objectness classification and bounding box regression)
to an RPN model. Abstracts away the use of FPN.
"""
if cfg.FPN.FPN_ON:
# Delegate to the FPN module
return FPN.fpn_rpn_outputs(dim_in, spatial_scale_in)
else:
# Not using FPN, add RPN to a single scale
return single_scale_rpn_outputs(dim_in, spatial_scale_in)
def generic_rpn_losses(*inputs, **kwargs):
"""Add RPN losses. Abstracts away the use of FPN."""
if cfg.FPN.FPN_ON:
return FPN.fpn_rpn_losses(*inputs, **kwargs)
else:
return single_scale_rpn_losses(*inputs, **kwargs)
class single_scale_rpn_outputs(nn.Module):
"""Add RPN outputs to a single scale model (i.e., no FPN)."""
def __init__(self, dim_in, spatial_scale):
super().__init__()
self.dim_in = dim_in
self.dim_out = dim_in if cfg.RPN.OUT_DIM_AS_IN_DIM else cfg.RPN.OUT_DIM
anchors = generate_anchors(
stride=1. / spatial_scale,
sizes=cfg.RPN.SIZES,
aspect_ratios=cfg.RPN.ASPECT_RATIOS)
num_anchors = anchors.shape[0]
# RPN hidden representation
self.RPN_conv = nn.Conv2d(self.dim_in, self.dim_out, 3, 1, 1)
# Proposal classification scores
self.n_score_out = num_anchors * 2 if cfg.RPN.CLS_ACTIVATION == 'softmax' \
else num_anchors
self.RPN_cls_score = nn.Conv2d(self.dim_out, self.n_score_out, 1, 1, 0)
# Proposal bbox regression deltas
self.RPN_bbox_pred = nn.Conv2d(self.dim_out, num_anchors * 4, 1, 1, 0)
self.RPN_GenerateProposals = GenerateProposalsOp(anchors, spatial_scale)
self.RPN_GenerateProposalLabels = GenerateProposalLabelsOp()
self._init_weights()
def _init_weights(self):
init.normal_(self.RPN_conv.weight, std=0.01)
init.constant_(self.RPN_conv.bias, 0)
init.normal_(self.RPN_cls_score.weight, std=0.01)
init.constant_(self.RPN_cls_score.bias, 0)
init.normal_(self.RPN_bbox_pred.weight, std=0.01)
init.constant_(self.RPN_bbox_pred.bias, 0)
def detectron_weight_mapping(self):
detectron_weight_mapping = {
'RPN_conv.weight': 'conv_rpn_w',
'RPN_conv.bias': 'conv_rpn_b',
'RPN_cls_score.weight': 'rpn_cls_logits_w',
'RPN_cls_score.bias': 'rpn_cls_logits_b',
'RPN_bbox_pred.weight': 'rpn_bbox_pred_w',
'RPN_bbox_pred.bias': 'rpn_bbox_pred_b'
}
orphan_in_detectron = []
return detectron_weight_mapping, orphan_in_detectron
def forward(self, x, im_info, roidb=None):
"""
x: feature maps from the backbone network. (Variable)
im_info: (CPU Variable)
roidb: (list of ndarray)
"""
rpn_conv = F.relu(self.RPN_conv(x), inplace=True)
rpn_cls_logits = self.RPN_cls_score(rpn_conv)
rpn_bbox_pred = self.RPN_bbox_pred(rpn_conv)
return_dict = {
'rpn_cls_logits': rpn_cls_logits, 'rpn_bbox_pred': rpn_bbox_pred}
if not self.training or cfg.MODEL.FASTER_RCNN:
# Proposals are needed during:
# 1) inference (== not model.train) for RPN only and Faster R-CNN
# OR
# 2) training for Faster R-CNN
# Otherwise (== training for RPN only), proposals are not needed
if cfg.RPN.CLS_ACTIVATION == 'softmax':
B, C, H, W = rpn_cls_logits.size()
rpn_cls_prob = F.softmax(
rpn_cls_logits.view(B, 2, C // 2, H, W), dim=1)
rpn_cls_prob = rpn_cls_prob[:, 1].squeeze(dim=1)
else:
rpn_cls_prob = F.sigmoid(rpn_cls_logits)
rpn_rois, rpn_rois_prob = self.RPN_GenerateProposals(
rpn_cls_prob, rpn_bbox_pred, im_info)
return_dict['rpn_rois'] = rpn_rois
return_dict['rpn_roi_probs'] = rpn_rois_prob
if cfg.MODEL.FASTER_RCNN :
if self.training:
# Add op that generates training labels for in-network RPN proposals
blobs_out = self.RPN_GenerateProposalLabels(rpn_rois, roidb, im_info)
return_dict.update(blobs_out)
else:
# Alias rois to rpn_rois for inference
return_dict['rois'] = return_dict['rpn_rois']
return return_dict
def single_scale_rpn_losses(
rpn_cls_logits, rpn_bbox_pred,
rpn_labels_int32_wide, rpn_bbox_targets_wide,
rpn_bbox_inside_weights_wide, rpn_bbox_outside_weights_wide):
"""Add losses for a single scale RPN model (i.e., no FPN)."""
h, w = rpn_cls_logits.shape[2:]
rpn_labels_int32 = rpn_labels_int32_wide[:, :, :h, :w] # -1 means ignore
h, w = rpn_bbox_pred.shape[2:]
rpn_bbox_targets = rpn_bbox_targets_wide[:, :, :h, :w]
rpn_bbox_inside_weights = rpn_bbox_inside_weights_wide[:, :, :h, :w]
rpn_bbox_outside_weights = rpn_bbox_outside_weights_wide[:, :, :h, :w]
#fg_num = (rpn_labels_int32_wide==1).data.sum()
#bg_num = (rpn_labels_int32_wide==0).data.sum()
#print("RCNN training fg/bg: %d/%d"%(fg_num, bg_num))
if cfg.RPN.CLS_ACTIVATION == 'softmax':
B, C, H, W = rpn_cls_logits.size()
rpn_cls_logits = rpn_cls_logits.view(
B, 2, C // 2, H, W).permute(0, 2, 3, 4, 1).contiguous().view(-1, 2)
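        # Reshape logits from (B, 2*A, H, W) into one row of 2 class scores per anchor position,
        # matching the flattened labels so cross-entropy (with ignore_index=-1) applies per anchor.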
rpn_labels_int32 = rpn_labels_int32.contiguous().view(-1).long()
# the loss is averaged over non-ignored targets
if cfg.TRAIN.FOCAL_LOSS:
loss_rpn_cls = focal_loss(rpn_cls_logits, rpn_labels_int32, softmax=False, size_average=False)
else:
loss_rpn_cls = F.cross_entropy(
rpn_cls_logits, rpn_labels_int32, ignore_index=-1)
else:
weight = (rpn_labels_int32 >= 0).float()
if cfg.TRAIN.FOCAL_LOSS:
loss_rpn_cls = focal_loss(
rpn_cls_logits.view(-1, 1), rpn_labels_int32.contiguous().view(-1, 1).float(), weight.view(-1, 1).float(), softmax=True, size_average=False)
else:
loss_rpn_cls = F.binary_cross_entropy_with_logits(
rpn_cls_logits, rpn_labels_int32.float(), weight, size_average=False)
loss_rpn_cls /= weight.sum()
loss_rpn_bbox = net_utils.smooth_l1_loss(
rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights,
beta=1/9)
return loss_rpn_cls, loss_rpn_bbox
| [
"torch.nn.functional.sigmoid",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.functional.cross_entropy",
"torch.nn.init.normal_"
] | 0.4.1 | xixiobba/MVP-Net | 07bf00390080670b5d9a643b99f633419322a1ec |
1.0 | import time
import numpy as np
import torch
from torch.autograd import Variable
from torch.utils.data.sampler import SubsetRandomSampler
from archived.s3.get_object import get_object
from archived.s3 import clear_bucket
from archived.sync import reduce_epoch, delete_expired_merged_epoch
from archived.old_model.SVM import SVM
from data_loader.libsvm_dataset import DenseDatasetWithLines
# lambda setting
# file_bucket = "s3-libsvm"
# tmp_bucket = "tmp-grads"
# merged_bucket = "merged-params"
local_dir = "/tmp"
# algorithm setting
num_features = 30
num_classes = 2
learning_rate = 0.01
batch_size = 300
num_epochs = 10
num_admm_epochs = 30
validation_ratio = .2
shuffle_dataset = True
random_seed = 42
ep_abs=1e-4
ep_rel=1e-2
def initialize_z_and_u(shape):
z = np.random.rand(shape[0], shape[1]).astype(np.float)
u = np.random.rand(shape[0], shape[1]).astype(np.float)
return z, u
def update_z_u(w, z, u, rho, n, lam_0):
z_new = w + u
z_tem = abs(z_new) - lam_0 / float(n * rho)
z_new = np.sign(z_new) * z_tem * (z_tem > 0)
s = z_new - z
r = w - np.ones(w.shape[0] * w.shape[1]).astype(np.float).reshape(w.shape) * z_new
u_new = u + r
    return z_new, u_new, r, s
def update_z(w, u, rho, n, lam_0):
z_new = w + u
z_tem = abs(z_new) - lam_0 / float(n * rho)
z_new = np.sign(z_new) * z_tem * (z_tem > 0)
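    # This is soft-thresholding, S_{lam_0 / (n * rho)}(w + u): the proximal operator of the
    # L1 penalty and the standard ADMM z-update for the lasso/consensus formulation.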
return z_new
def check_stop(ep_abs, ep_rel, r, s, n, p, w, z, u, rho):
e_pri = (n*p)**(0.5) * ep_abs + ep_rel * (max(np.sum(w**2),np.sum(n*z**2)))**(0.5)
e_dual = (p)**(0.5) * ep_abs + ep_rel * rho * (np.sum(u**2))**(0.5)/(n)**(0.5)
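    # e_pri / e_dual are the combined absolute + relative tolerances for the primal residual
    # r = w - z and the dual residual s (Boyd et al., ADMM stopping criterion).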
print("r^2 = {}, s^2 = {}, e_pri = {}, e_dual = {}".
format(np.sum(r**2), e_pri, np.sum(s**2), e_dual))
stop = (np.sum(r**2) <= e_pri**2) & (np.sum(s**2) <= e_dual**2)
return(stop)
def handler(event, context):
start_time = time.time()
bucket = event['bucket_name']
worker_index = event['rank']
num_workers = event['num_workers']
key = event['file']
tmp_bucket = event['tmp_bucket']
merged_bucket = event['merged_bucket']
num_epochs = event['num_epochs']
num_admm_epochs = event['num_admm_epochs']
learning_rate = event['learning_rate']
lam = event['lambda']
rho = event['rho']
batch_size = event['batch_size']
print('bucket = {}'.format(bucket))
print("file = {}".format(key))
print('number of workers = {}'.format(num_workers))
print('worker index = {}'.format(worker_index))
print('tmp bucket = {}'.format(tmp_bucket))
print('merge bucket = {}'.format(merged_bucket))
print('num epochs = {}'.format(num_epochs))
print('num admm epochs = {}'.format(num_admm_epochs))
print('learning rate = {}'.format(learning_rate))
print("lambda = {}".format(lam))
print("rho = {}".format(rho))
print("batch_size = {}".format(batch_size))
# read file from s3
file = get_object(bucket, key).read().decode('utf-8').split("\n")
print("read data cost {} s".format(time.time() - start_time))
# file_path = "../../dataset/agaricus_127d_train.libsvm"
# file = open(file_path).readlines()
parse_start = time.time()
dataset = DenseDatasetWithLines(file, num_features)
print("parse data cost {} s".format(time.time() - parse_start))
preprocess_start = time.time()
# Creating data indices for training and validation splits:
dataset_size = len(dataset)
indices = list(range(dataset_size))
split = int(np.floor(validation_ratio * dataset_size))
if shuffle_dataset:
np.random.seed(random_seed)
np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]
# Creating PT data samplers and loaders:
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(val_indices)
train_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
sampler=train_sampler)
validation_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
sampler=valid_sampler)
print("preprocess data cost {} s, dataset size = {}"
.format(time.time() - preprocess_start, dataset_size))
model = SVM(num_features, num_classes).float()
print("size of w = {}".format(model.linear.weight.data.size()))
z, u = initialize_z_and_u(model.linear.weight.data.size())
print("size of z = {}".format(z.shape))
print("size of u = {}".format(u.shape))
# Loss and Optimizer
# Softmax is internally computed.
# Set parameters to be updated.
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Training the Model
train_start = time.time()
stop = False
for admm_epoch in range(num_admm_epochs):
print("ADMM Epoch >>> {}".format(admm_epoch))
for epoch in range(num_epochs):
epoch_start = time.time()
epoch_loss = 0
for batch_index, (items, labels) in enumerate(train_loader):
# print("------worker {} epoch {} batch {}------".format(worker_index, epoch, batch_index))
batch_start = time.time()
items = Variable(items.view(-1, num_features))
labels = Variable(labels)
# Forward + Backward + Optimize
optimizer.zero_grad()
outputs = model(items)
classify_loss = torch.mean(torch.clamp(1 - outputs.t() * labels.float(), min=0)) # hinge loss
epoch_loss += classify_loss
u_z = torch.from_numpy(u).float() - torch.from_numpy(z).float()
loss = classify_loss
for name, param in model.named_parameters():
if name.split('.')[-1] == "weight":
loss += rho / 2.0 * torch.norm(param + u_z, p=2)
#loss = classify_loss + rho / 2.0 * torch.norm(torch.sum(model.linear.weight, u_z))
optimizer.zero_grad()
loss.backward(retain_graph=True)
optimizer.step()
train_time = time.time() - epoch_start
# Test the Model
test_start = time.time()
correct = 0
total = 0
test_loss = 0
for items, labels in validation_loader:
items = Variable(items.view(-1, num_features))
labels = Variable(labels)
outputs = model(items)
test_loss += torch.mean(torch.clamp(1 - outputs.t() * labels.float(), min=0))
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum()
test_time = time.time() - test_start
print('Epoch: [%d/%d], Step: [%d/%d], Time: %.4f, Loss: %.4f, epoch cost %.4f, '
'train cost %.4f s, test cost %.4f s: '
'accuracy of the model on the %d test samples: %d %%, test loss = %f'
% (epoch + 1, num_epochs, batch_index + 1, len(train_indices) / batch_size,
time.time() - train_start, epoch_loss.data, time.time() - epoch_start,
train_time, test_time, len(val_indices), 100 * correct / total, test_loss / total))
w = model.linear.weight.data.numpy()
w_shape = w.shape
b = model.linear.bias.data.numpy()
b_shape = b.shape
u_shape = u.shape
w_and_b = np.concatenate((w.flatten(), b.flatten()))
u_w_b = np.concatenate((u.flatten(), w_and_b.flatten()))
cal_time = time.time() - epoch_start
print("Epoch {} calculation cost = {} s".format(epoch, cal_time))
sync_start = time.time()
postfix = "{}".format(admm_epoch)
u_w_b_merge = reduce_epoch(u_w_b, tmp_bucket, merged_bucket, num_workers, worker_index, postfix)
u_mean = u_w_b_merge[:u_shape[0] * u_shape[1]].reshape(u_shape) / float(num_workers)
w_mean = u_w_b_merge[u_shape[0]*u_shape[1] : u_shape[0]*u_shape[1]+w_shape[0]*w_shape[1]].reshape(w_shape) / float(num_workers)
b_mean = u_w_b_merge[u_shape[0]*u_shape[1]+w_shape[0]*w_shape[1]:].reshape(b_shape[0]) / float(num_workers)
#model.linear.weight.data = torch.from_numpy(w)
model.linear.bias.data = torch.from_numpy(b_mean).float()
sync_time = time.time() - sync_start
print("Epoch {} synchronization cost {} s".format(epoch, sync_time))
if worker_index == 0:
delete_expired_merged_epoch(merged_bucket, admm_epoch)
#z, u, r, s = update_z_u(w, z, u, rho, num_workers, lam)
#stop = check_stop(ep_abs, ep_rel, r, s, dataset_size, num_features, w, z, u, rho)
#print("stop = {}".format(stop))
#z = num_workers * rho / (2 * lam + num_workers * rho) * (w + u_mean)
z = update_z(w_mean, u_mean, rho, num_workers, lam)
#print(z)
u = u + model.linear.weight.data.numpy() - z
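        # Scaled dual update: u accumulates the running constraint violation w - z.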
#print(u)
# Test the Model
correct = 0
total = 0
test_loss = 0
for items, labels in validation_loader:
items = Variable(items.view(-1, num_features))
labels = Variable(labels)
outputs = model(items)
test_loss += torch.mean(torch.clamp(1 - outputs.t() * labels.float(), min=0))
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum()
print('Epoch: %d, time = %.4f, accuracy of the model on the %d test samples: %d %%, loss = %f'
% (epoch, time.time() - train_start, len(val_indices), 100 * correct / total, test_loss / total))
if worker_index == 0:
clear_bucket(merged_bucket)
clear_bucket(tmp_bucket)
end_time = time.time()
print("Elapsed time = {} s".format(end_time - start_time))
| [
"torch.max",
"torch.autograd.Variable",
"torch.norm",
"torch.from_numpy",
"torch.utils.data.DataLoader",
"torch.utils.data.sampler.SubsetRandomSampler"
] | 1.0.1 | DS3Lab/LambdaML | 0afca7819e08632ba116fec8e102084e4040a47a |
1.7 | from dataset.dataset import Im2LatexDataset
import os
import sys
import argparse
import logging
import yaml
import numpy as np
import torch
from torchtext.data import metrics
from munch import Munch
from tqdm.auto import tqdm
import wandb
from Levenshtein import distance
from models import get_model, Model
from utils import *
def detokenize(tokens, tokenizer):
toks = [tokenizer.convert_ids_to_tokens(tok) for tok in tokens]
for b in range(len(toks)):
for i in reversed(range(len(toks[b]))):
if toks[b][i] is None:
toks[b][i] = ''
toks[b][i] = toks[b][i].replace('Ġ', ' ').strip()
if toks[b][i] in (['[BOS]', '[EOS]', '[PAD]']):
del toks[b][i]
return toks
@torch.no_grad()
def evaluate(model: Model, dataset: Im2LatexDataset, args: Munch, num_batches: int = None, name: str = 'test'):
"""evaluates the model. Returns bleu score on the dataset
Args:
model (torch.nn.Module): the model
dataset (Im2LatexDataset): test dataset
args (Munch): arguments
num_batches (int): How many batches to evaluate on. Defaults to None (all batches).
name (str, optional): name of the test e.g. val or test for wandb. Defaults to 'test'.
Returns:
bleu_score: BLEU score of validation set.
"""
assert len(dataset) > 0
device = args.device
log = {}
bleus, edit_dists = [], []
bleu_score, edit_distance = 0, 1
pbar = tqdm(enumerate(iter(dataset)), total=len(dataset))
for i, (seq, im) in pbar:
if seq is None or im is None:
continue
tgt_seq, tgt_mask = seq['input_ids'].to(device), seq['attention_mask'].bool().to(device)
encoded = model.encoder(im.to(device))
#loss = decoder(tgt_seq, mask=tgt_mask, context=encoded)
dec = model.decoder.generate(torch.LongTensor([args.bos_token]*len(encoded))[:, None].to(device), args.max_seq_len,
eos_token=args.pad_token, context=encoded, temperature=args.get('temperature', .2))
pred = detokenize(dec, dataset.tokenizer)
truth = detokenize(seq['input_ids'], dataset.tokenizer)
bleus.append(metrics.bleu_score(pred, [alternatives(x) for x in truth]))
for predi, truthi in zip(token2str(dec, dataset.tokenizer), token2str(seq['input_ids'], dataset.tokenizer)):
ts = post_process(truthi)
if len(ts) > 0:
edit_dists.append(distance(post_process(predi), ts)/len(ts))
pbar.set_description('BLEU: %.3f, ED: %.2e' % (np.mean(bleus), np.mean(edit_dists)))
if num_batches is not None and i >= num_batches:
break
if len(bleus) > 0:
bleu_score = np.mean(bleus)
log[name+'/bleu'] = bleu_score
if len(edit_dists) > 0:
edit_distance = np.mean(edit_dists)
log[name+'/edit_distance'] = edit_distance
if args.wandb:
# samples
pred = token2str(dec, dataset.tokenizer)
truth = token2str(seq['input_ids'], dataset.tokenizer)
table = wandb.Table(columns=["Truth", "Prediction"])
for k in range(min([len(pred), args.test_samples])):
table.add_data(post_process(truth[k]), post_process(pred[k]))
log[name+'/examples'] = table
wandb.log(log)
else:
print('\n%s\n%s' % (truth, pred))
print('BLEU: %.2f' % bleu_score)
return bleu_score, edit_distance
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Test model')
parser.add_argument('--config', default='settings/config.yaml', help='path to yaml config file', type=argparse.FileType('r'))
parser.add_argument('-c', '--checkpoint', default='checkpoints/weights.pth', type=str, help='path to model checkpoint')
parser.add_argument('-d', '--data', default='dataset/data/val.pkl', type=str, help='Path to Dataset pkl file')
parser.add_argument('--no-cuda', action='store_true', help='Use CPU')
parser.add_argument('-b', '--batchsize', type=int, default=10, help='Batch size')
parser.add_argument('--debug', action='store_true', help='DEBUG')
    parser.add_argument('-t', '--temperature', type=float, default=.333, help='sampling temperature')
parser.add_argument('-n', '--num-batches', type=int, default=None, help='how many batches to evaluate on. Defaults to None (all)')
parsed_args = parser.parse_args()
with parsed_args.config as f:
params = yaml.load(f, Loader=yaml.FullLoader)
args = parse_args(Munch(params))
args.testbatchsize = parsed_args.batchsize
args.wandb = False
args.temperature = parsed_args.temperature
logging.getLogger().setLevel(logging.DEBUG if parsed_args.debug else logging.WARNING)
seed_everything(args.seed if 'seed' in args else 42)
model = get_model(args)
if parsed_args.checkpoint is not None:
model.load_state_dict(torch.load(parsed_args.checkpoint, args.device))
dataset = Im2LatexDataset().load(parsed_args.data)
valargs = args.copy()
valargs.update(batchsize=args.testbatchsize, keep_smaller_batches=True, test=True)
dataset.update(**valargs)
evaluate(model, dataset, args, num_batches=parsed_args.num_batches)
| [
"torch.no_grad",
"torch.load"
] | 1.7.1 | dc2016bte0006/Latex_OCR | 2e919617da8f2f7f3445ed8d1953a5664c1aaba7 |
1.8 | import numpy as np
import torch
import os
import random
class EarlyStopMonitor(object):
def __init__(self, max_round=3, higher_better=True, tolerance=1e-3):
self.max_round = max_round
self.num_round = 0
self.epoch_count = 0
self.best_epoch = 0
self.last_best = None
self.higher_better = higher_better
self.tolerance = tolerance
def early_stop_check(self, curr_val):
if not self.higher_better:
curr_val *= -1
if self.last_best is None:
self.last_best = curr_val
elif (curr_val - self.last_best) / np.abs(self.last_best) > self.tolerance:
self.last_best = curr_val
self.num_round = 0
self.best_epoch = self.epoch_count
else:
self.num_round += 1
self.epoch_count += 1
return self.num_round >= self.max_round
class RandEdgeSampler(object):
def __init__(self, src_list, dst_list):
src_list = np.concatenate(src_list)
dst_list = np.concatenate(dst_list)
self.src_list = np.unique(src_list)
self.dst_list = np.unique(dst_list)
def sample(self, size):
src_index = np.random.randint(0, len(self.src_list), size)
dst_index = np.random.randint(0, len(self.dst_list), size)
return self.src_list[src_index], self.dst_list[dst_index]
def set_random_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
def process_sampling_numbers(num_neighbors, num_layers):
num_neighbors = [int(n) for n in num_neighbors]
if len(num_neighbors) == 1:
num_neighbors = num_neighbors * num_layers
else:
num_layers = len(num_neighbors)
return num_neighbors, num_layers
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed"
] | 1.8.0 | HSE-DynGraph-Research-team/DynGraphModelling | 890326f4bd7991ef88a7a79cd2c8a77541621423 |
0.3 | # encoding:utf-8
'''
@Author: catnlp
@Email: [email protected]
@Time: 2018/5/2 15:02
'''
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
START_TAG = -2
STOP_TAG = -1
def log_sum_exp(vec, m_size):
_, idx = torch.max(vec, 1)
max_score = torch.gather(vec, 1, idx.view(-1, 1, m_size)).view(-1, 1, m_size)
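    # Standard log-sum-exp stabilization: log sum_i exp(v_i) = m + log sum_i exp(v_i - m),
    # with m the per-column maximum, so the exponentials cannot overflow.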
return max_score.view(-1, m_size) + torch.log(torch.sum(torch.exp(vec - max_score.expand_as(vec)), 1)).view(-1, m_size)
class CRF(nn.Module):
def __init__(self, tagset_size, gpu):
super(CRF, self).__init__()
print('---build batched CRF---')
self.tagset_size = tagset_size
self.gpu = gpu
init_transitions = torch.zeros(self.tagset_size+2, self.tagset_size+2)
init_transitions[:, START_TAG] = -1000.0
init_transitions[STOP_TAG, :] = -1000.0
if gpu:
init_transitions = init_transitions.cuda()
self.transitions = nn.Parameter(init_transitions)
def _calculate_PZ(self, feats, mask):
batch_size = feats.size(0)
seq_len = feats.size(1)
tag_size = feats.size(2)
assert(tag_size == self.tagset_size+2)
mask = mask.transpose(1, 0).contiguous()
ins_num = seq_len * batch_size
feats = feats.transpose(1, 0).contiguous().view(ins_num, 1, tag_size).expand(ins_num, tag_size, tag_size)
scores = feats + self.transitions.view(1, tag_size, tag_size).expand(ins_num, tag_size, tag_size)
scores = scores.view(seq_len, batch_size, tag_size, tag_size)
seq_iter = enumerate(scores)
_, inivalues = seq_iter.__next__()
partition = inivalues[:, START_TAG, :].clone().view(batch_size, tag_size, 1)
for idx, cur_values in seq_iter:
cur_values = cur_values + partition.contiguous().view(batch_size, tag_size, 1).expand(batch_size, tag_size, tag_size)
cur_partition = log_sum_exp(cur_values, tag_size)
mask_idx = mask[idx, :].view(batch_size, 1).expand(batch_size, tag_size)
masked_cur_partition = cur_partition.masked_select(mask_idx)
mask_idx = mask_idx.contiguous().view(batch_size, tag_size, 1)
partition.masked_scatter_(mask_idx, masked_cur_partition)
cur_values = self.transitions.view(1, tag_size, tag_size).expand(batch_size, tag_size, tag_size) + partition.contiguous().view(batch_size, tag_size, 1).expand(batch_size, tag_size, tag_size)
cur_partition = log_sum_exp(cur_values, tag_size)
final_partition = cur_partition[:, STOP_TAG]
return final_partition.sum(), scores
def viterbi_decode(self, feats, mask):
batch_size = feats.size(0)
seq_len = feats.size(1)
tag_size = feats.size(2)
assert(tag_size == self.tagset_size+2)
length_mask = torch.sum(mask, dim=1).view(batch_size, 1).long()
mask = mask.transpose(1, 0).contiguous()
ins_num = seq_len * batch_size
feats = feats.transpose(1, 0).contiguous().view(ins_num, 1, tag_size).expand(ins_num, tag_size, tag_size)
scores = feats + self.transitions.view(1, tag_size, tag_size).expand(ins_num, tag_size, tag_size)
scores = scores.view(seq_len, batch_size, tag_size, tag_size)
seq_iter = enumerate(scores)
back_points = list()
partition_history = list()
mask = (1 - mask.long()).byte()
_, inivalues = seq_iter.__next__()
partition = inivalues[:, START_TAG, :].clone().view(batch_size, tag_size)
partition_history.append(partition)
for idx, cur_values in seq_iter:
cur_values = cur_values + partition.contiguous().view(batch_size, tag_size, 1).expand(batch_size, tag_size, tag_size)
partition, cur_bp = torch.max(cur_values, 1)
partition_history.append(partition)
cur_bp.masked_fill_(mask[idx].view(batch_size, 1).expand(batch_size, tag_size), 0)
back_points.append(cur_bp)
partition_history = torch.cat(partition_history, 0)
partition_history = partition_history.view(seq_len, batch_size, -1).transpose(1, 0).contiguous()
last_position = length_mask.view(batch_size, 1, 1).expand(batch_size, 1, tag_size) - 1
last_partition = torch.gather(partition_history, 1, last_position).view(batch_size, tag_size, 1)
last_values = last_partition.expand(batch_size, tag_size, tag_size) + self.transitions.view(1, tag_size, tag_size).expand(batch_size, tag_size, tag_size)
_, last_bp = torch.max(last_values, 1)
pad_zero = autograd.Variable(torch.zeros(batch_size, tag_size)).long()
if self.gpu:
pad_zero = pad_zero.cuda()
back_points.append(pad_zero)
back_points = torch.cat(back_points).view(seq_len, batch_size, tag_size)
pointer = last_bp[:, STOP_TAG]
insert_last = pointer.contiguous().view(batch_size, 1, 1).expand(batch_size, 1, tag_size)
back_points = back_points.transpose(1, 0).contiguous()
back_points.scatter_(1, last_position, insert_last)
back_points = back_points.transpose(1, 0).contiguous()
decode_idx = autograd.Variable(torch.LongTensor(seq_len, batch_size))
if self.gpu:
decode_idx = decode_idx.cuda()
decode_idx[-1] = pointer.data
for idx in range(len(back_points)-2, -1, -1):
pointer = torch.gather(back_points[idx], 1, pointer.contiguous().view(batch_size, 1))
decode_idx[idx] = pointer.data
path_score = None
decode_idx = decode_idx.transpose(1, 0)
return path_score, decode_idx
def forward(self, feats, mask):
path_score, best_path = self.viterbi_decode(feats, mask)
return path_score, best_path
def _score_sentence(self, scores, tags, mask):
batch_size = scores.size(1)
seq_len = scores.size(0)
tag_size = scores.size(2)
new_tags = autograd.Variable(torch.LongTensor(batch_size, seq_len))
if self.gpu:
new_tags = new_tags.cuda()
for idx in range(seq_len):
if idx == 0:
new_tags[:, 0] = (tag_size - 2) * tag_size + tags[:, 0]
else:
new_tags[:, idx] = tags[:, idx-1] * tag_size + tags[:, idx]
end_transition = self.transitions[:, STOP_TAG].contiguous().view(1, tag_size).expand(batch_size, tag_size)
length_mask = torch.sum(mask, dim=1).view(batch_size, 1).long()
end_ids = torch.gather(tags, 1, length_mask-1)
end_energy = torch.gather(end_transition, 1, end_ids)
new_tags = new_tags.transpose(1, 0).contiguous().view(seq_len, batch_size, 1)
tg_energy = torch.gather(scores.view(seq_len, batch_size, -1), 2, new_tags).view(seq_len, batch_size)
tg_energy = tg_energy.masked_select(mask.transpose(1, 0))
gold_score = tg_energy.sum() + end_energy.sum()
return gold_score
def neg_log_likelihood_loss(self, feats, tags, mask):
forward_score, scores = self._calculate_PZ(feats, mask)
gold_score = self._score_sentence(scores, tags, mask)
return forward_score - gold_score
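# --- Hedged usage sketch (editor's note, not part of the original repo) ---
# The batched CRF expects emission scores of shape (batch, seq_len, tagset_size + 2),
# where the two extra columns hold the START_TAG / STOP_TAG scores used by this module.
# A typical training / decoding round looks roughly like:
#
#     crf = CRF(tagset_size=5, gpu=False)
#     loss = crf.neg_log_likelihood_loss(feats, tags, mask)   # feats: (B, T, 7)
#     loss.backward()
#     _, best_paths = crf.viterbi_decode(feats, mask)         # best_paths: (B, T)
#
# with `tags` a (B, T) LongTensor of gold labels and `mask` a (B, T) ByteTensor
# marking the real (non-padded) positions.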
| [
"torch.zeros",
"torch.cat",
"torch.gather",
"torch.max",
"torch.nn.Parameter",
"torch.LongTensor",
"torch.sum"
] | 0.3.1 | catnlp/metaLSTM | 08b3086ebc558b936898022dd7eea7d726e6d491 |
1.9 | import torch
from sklearn.preprocessing import LabelEncoder
from torch.utils.data import Dataset, DataLoader
class JobsDataset(Dataset):
def __init__(self, X, y, tokenizer, max_len=512):
self.len = len(X)
self.data = X
self.y = y
self.tokenizer = tokenizer
self.max_len = max_len
self._label_encode()
def _label_encode(self):
self.label_encoder = LabelEncoder()
self.y = self.label_encoder.fit_transform(self.y)
def __getitem__(self, index):
title = str(self.data.title[index])
title = " ".join(title.split())
description = str(self.data.description[index])
description = " ".join(description.split())
inputs = self.tokenizer.encode_plus(
text=title,
text_pair=description,
add_special_tokens=True,
max_length=self.max_len,
padding='max_length',
return_token_type_ids=True,
truncation=True
)
ids = inputs['input_ids']
mask = inputs['attention_mask']
return {
'ids': torch.tensor(ids, dtype=torch.long),
'mask': torch.tensor(mask, dtype=torch.long),
'targets': torch.tensor(self.y[index], dtype=torch.long)
}
def __len__(self):
return self.len
def get_data_loader(X_train, X_valid, y_train, y_valid, tokenizer, batch_size=16, num_workers=1):
training_set = JobsDataset(X_train, y_train, tokenizer, max_len=512)
validation_set = JobsDataset(X_valid, y_valid, tokenizer, max_len=512)
train_params = {'batch_size': batch_size,
'shuffle': True,
'num_workers': num_workers
}
test_params = {'batch_size': batch_size,
'shuffle': True,
'num_workers': num_workers
}
training_loader = DataLoader(training_set, **train_params)
validation_loader = DataLoader(validation_set, **test_params)
return training_loader, validation_loader
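# --- Hedged usage sketch (editor's note, not part of the original repo) ---
# X_train / X_valid are expected to be pandas DataFrames with `title` and `description`
# columns and a default RangeIndex (reset_index(drop=True) after any split, since
# __getitem__ indexes self.data.title[index] by label). The tokenizer is any HuggingFace
# tokenizer exposing encode_plus; the model name below is only an illustration.
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     train_loader, valid_loader = get_data_loader(
#         X_train, X_valid, y_train, y_valid, tokenizer, batch_size=16)
#     batch = next(iter(train_loader))
#     batch["ids"].shape, batch["mask"].shape, batch["targets"].shape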
| [
"torch.tensor",
"torch.utils.data.DataLoader"
] | 1.9.0 | kantharajucn/job_seniority_prediction | cad9147ffddab1c5ead878c2f9d9e48199dc0da9 |
0.4 | import copy
from components.episode_buffer import EpisodeBatch
from modules.mixers.vdn import VDNMixer
from modules.mixers.qmix import QMixer
import torch as th
import numpy as np
from torch.optim import RMSprop
# learning for 6h_vs_8z scenario
class QLearner_6h_vs_8z:
def __init__(self, mac, scheme, logger, args):
self.args = args
self.mac = mac
self.logger = logger
self.regularization_const = self.args.normalization_const
self.params = list(mac.parameters())
self.last_target_update_episode = 0
self.mixer = None
if args.mixer is not None:
if args.mixer == "vdn":
self.mixer = VDNMixer()
elif args.mixer == "qmix":
self.mixer = QMixer(args)
else:
raise ValueError("Mixer {} not recognised.".format(args.mixer))
self.params += list(self.mixer.parameters())
self.target_mixer = copy.deepcopy(self.mixer)
self.params += list(self.mac.env_blender.parameters())
self.optimiser = RMSprop(params=self.params, lr=args.lr, alpha=args.optim_alpha, eps=args.optim_eps)
self.target_mac = copy.deepcopy(mac)
self.log_stats_t = -self.args.learner_log_interval - 1
def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
# Get the relevant quantities
rewards = batch["reward"][:, :-1]
actions = batch["actions"][:, :-1]
terminated = batch["terminated"][:, :-1].float()
mask = batch["filled"][:, :-1].float()
mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
avail_actions = batch["avail_actions"]
# Calculate estimated Q-Values
mac_out = []
difference_out = []
difference_out1 = []
self.mac.init_hidden(batch.batch_size)
for t in range(batch.max_seq_length):
agent_local_outputs, hidden_states = self.mac.forward(batch, t=t)
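# NOTE (editor's addition): the hard-coded 32 and 14 below are presumably the training
# batch size and the per-agent number of actions (6 basic actions + 8 attackable enemies)
# for the 6h_vs_8z map; hidden_states is indexed per agent before being passed through
# the shared env_blender that produces the exchanged messages.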
dummy0 = self.mac.env_blender(hidden_states[:,0,:].view(32,-1))
dummy1 = self.mac.env_blender(hidden_states[:,1,:].view(32,-1))
dummy2 = self.mac.env_blender(hidden_states[:,2,:].view(32,-1))
dummy3 = self.mac.env_blender(hidden_states[:,3,:].view(32,-1))
dummy4 = self.mac.env_blender(hidden_states[:,4,:].view(32,-1))
dummy5 = self.mac.env_blender(hidden_states[:,5,:].view(32,-1))
agent0 = (dummy1 + dummy2 + dummy3 + dummy4 + dummy5)/5.0
agent1 = (dummy0 + dummy2 + dummy3 + dummy4 + dummy5)/5.0
agent2 = (dummy0 + dummy1 + dummy3 + dummy4 + dummy5)/5.0
agent3 = (dummy0 + dummy1 + dummy2 + dummy4 + dummy5)/5.0
agent4 = (dummy0 + dummy1 + dummy2 + dummy3 + dummy5)/5.0
agent5 = (dummy0 + dummy1 + dummy2 + dummy3 + dummy4)/5.0
agent_global_outputs =th.cat((agent0.view((32,1,14)),agent1.view((32,1,14)),agent2.view((32,1,14)),agent3.view((32,1,14)),agent4.view((32,1,14)),agent5.view((32,1,14))),1)
agent_outs = agent_local_outputs + agent_global_outputs
difference = agent_global_outputs
mac_out.append(agent_outs)
difference_out.append(difference)
mac_out = th.stack(mac_out, dim=1) # Concat over time
difference_out = th.stack(difference_out, dim=1) # Concat over time
difference_out = th.std(difference_out,dim = 3).sum()
# Pick the Q-Values for the actions taken by each agent
chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(3) # Remove the last dim
avg_difference = (difference_out.sum())/((agent_outs.shape[0]*agent_outs.shape[1]*agent_outs.shape[2]*batch.max_seq_length))
# Calculate the Q-Values necessary for the target
target_mac_out = []
self.target_mac.init_hidden(batch.batch_size)
for t in range(batch.max_seq_length):
target_agent_local_outputs, target_hidden_states = self.target_mac.forward(batch, t=t)
dummy0 = self.mac.env_blender(target_hidden_states[:,0,:].view(32,-1))
dummy1 = self.mac.env_blender(target_hidden_states[:,1,:].view(32,-1))
dummy2 = self.mac.env_blender(target_hidden_states[:,2,:].view(32,-1))
dummy3 = self.mac.env_blender(target_hidden_states[:,3,:].view(32,-1))
dummy4 = self.mac.env_blender(target_hidden_states[:,4,:].view(32,-1))
dummy5 = self.mac.env_blender(target_hidden_states[:,5,:].view(32,-1))
target_agent0 = (dummy1 + dummy2 + dummy3 + dummy4 + dummy5)/5.0
target_agent1 = (dummy0 + dummy2 + dummy3 + dummy4 + dummy5)/5.0
target_agent2 = (dummy0 + dummy1 + dummy3 + dummy4 + dummy5)/5.0
target_agent3 = (dummy0 + dummy1 + dummy2 + dummy4 + dummy5)/5.0
target_agent4 = (dummy0 + dummy1 + dummy2 + dummy3 + dummy5)/5.0
target_agent5 = (dummy0 + dummy1 + dummy2 + dummy3 + dummy4)/5.0
target_agent_global_outputs = th.cat((target_agent0.view((32,1,14)),target_agent1.view((32,1,14)),target_agent2.view((32,1,14)),target_agent3.view((32,1,14)),target_agent4.view((32,1,14)),target_agent5.view((32,1,14))),1)
target_agent_outs = target_agent_local_outputs + target_agent_global_outputs
target_mac_out.append(target_agent_outs)
# We don't need the first timesteps Q-Value estimate for calculating targets
target_mac_out = th.stack(target_mac_out[1:], dim=1) # Concat across time
# Mask out unavailable actions
target_mac_out[avail_actions[:, 1:] == 0] = -9999999
# Max over target Q-Values
if self.args.double_q:
# Get actions that maximise live Q (for double q-learning)
mac_out[avail_actions == 0] = -9999999
cur_max_actions = mac_out[:, 1:].max(dim=3, keepdim=True)[1]
target_max_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)
else:
target_max_qvals = target_mac_out.max(dim=3)[0]
# Mix
if self.mixer is not None:
chosen_action_qvals = self.mixer(chosen_action_qvals, batch["state"][:, :-1])
target_max_qvals = self.target_mixer(target_max_qvals, batch["state"][:, 1:])
# Calculate 1-step Q-Learning targets
targets = rewards + self.args.gamma * (1 - terminated) * target_max_qvals
# Td-error
td_error = (chosen_action_qvals - targets.detach())
mask = mask.expand_as(td_error)
# 0-out the targets that came from padded data
masked_td_error = td_error * mask
# Normal L2 loss, take mean over actual data
loss = (masked_td_error ** 2).sum() / mask.sum() + self.args.normalization_const * avg_difference
# Optimise
self.optimiser.zero_grad()
loss.backward()
grad_norm = th.nn.utils.clip_grad_norm_(self.params, self.args.grad_norm_clip)
self.optimiser.step()
if (episode_num - self.last_target_update_episode) / self.args.target_update_interval >= 1.0:
self._update_targets()
self.last_target_update_episode = episode_num
if t_env - self.log_stats_t >= self.args.learner_log_interval:
self.logger.log_stat("loss", loss.item(), t_env)
self.logger.log_stat("grad_norm", grad_norm, t_env)
mask_elems = mask.sum().item()
self.logger.log_stat("td_error_abs", (masked_td_error.abs().sum().item()/mask_elems), t_env)
self.logger.log_stat("q_taken_mean", (chosen_action_qvals * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)
self.logger.log_stat("target_mean", (targets * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)
self.log_stats_t = t_env
def _update_targets(self):
self.target_mac.load_state(self.mac)
if self.mixer is not None:
self.target_mixer.load_state_dict(self.mixer.state_dict())
self.logger.console_logger.info("Updated target network")
def cuda(self):
self.mac.cuda()
self.target_mac.cuda()
if self.mixer is not None:
self.mixer.cuda()
self.target_mixer.cuda()
def save_models(self, path):
self.mac.save_models(path)
if self.mixer is not None:
th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))
def load_models(self, path):
self.mac.load_models(path)
# Not quite right but I don't want to save target networks
self.target_mac.load_models(path)
if self.mixer is not None:
self.mixer.load_state_dict(th.load("{}/mixer.th".format(path), map_location=lambda storage, loc: storage))
self.optimiser.load_state_dict(th.load("{}/opt.th".format(path), map_location=lambda storage, loc: storage))
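# --- Hedged sketch of the update implemented in train() (editor's note) ---
# With Q_live the online network (mac) and Q_target the target network (target_mac),
# the double-Q target and loss amount to:
#
#     a*   = argmax_a Q_live(s_{t+1}, a)                        # cur_max_actions
#     y_t  = r_t + gamma * (1 - done_t) * Q_target(s_{t+1}, a*)
#     loss = sum over valid steps of (Q_live(s_t, a_t) - y_t)^2 / mask.sum()
#            + normalization_const * avg_difference
#
# where avg_difference penalises the standard deviation (across actions) of the
# inter-agent "global" messages produced by env_blender.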
| [
"torch.stack",
"torch.optim.RMSprop",
"torch.gather",
"torch.nn.utils.clip_grad_norm_",
"torch.std"
] | 0.4.1 | jyericlin/VBC | cc34169e4f4ece500ad8c33ab69378f0a700a73e |
1.4 | # Copyright (c) 2020 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CarlaEnvironment suite.
To use this, there are two ways:
1. Run the code within docker image horizonrobotics/alf:0.0.3-carla
Both `Docker <https://docs.docker.com/engine/install/ubuntu/>`_ and
`Nvidia-Docker2 <https://github.com/NVIDIA/nvidia-docker>`_ need to be installed.
2. Install carla:
.. code-block:: bash
wget https://carla-releases.s3.eu-west-3.amazonaws.com/Linux/CARLA_0.9.9.tar.gz
mkdir carla
tar zxf CARLA_0.9.9.tar.gz -C carla
cd carla/Import
wget https://carla-releases.s3.eu-west-3.amazonaws.com/Linux/AdditionalMaps_0.9.9.tar.gz
cd ..
./ImportAssets.sh
easy_install PythonAPI/carla/dist/carla-0.9.9-py3.7-linux-x86_64.egg
Make sure you are using python3.7
"""
from collections import OrderedDict
from absl import logging
import gin
import math
import numpy as np
import os
import random
import subprocess
import sys
import time
import torch
try:
import carla
except ImportError:
carla = None
import alf
import alf.data_structures as ds
from alf.utils import common
from .suite_socialbot import _get_unused_port
from .alf_environment import AlfEnvironment
from .carla_sensors import (CameraSensor, CollisionSensor, GnssSensor,
IMUSensor, LaneInvasionSensor, NavigationSensor,
RadarSensor, World, MINIMUM_RENDER_WIDTH,
MINIMUM_RENDER_HEIGHT)
def is_available():
return carla is not None
def geo_distance(loc1, loc2):
"""
Args:
loc1 (np.array): [latitude, longitude, altitude]. The unit for altitude
is meters.
loc2 (np.array):
Returns:
float: distance in meters
"""
earth_radius = 6371 * 1000
d2r = math.pi / 180
d = loc1 - loc2
dlat = d[0] * d2r
dlon = d[1] * d2r
lat1 = loc1[0] * d2r
lat2 = loc2[0] * d2r
a = np.sin(
0.5 * dlat)**2 + np.sin(0.5 * dlon)**2 * np.cos(lat1) * np.cos(lat2)
c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
c = earth_radius * c
return np.sqrt(c * c + d[2] * d[2])
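# Hedged usage note (editor's addition): geo_distance takes [latitude, longitude,
# altitude] arrays (degrees, degrees, meters) and returns meters, e.g.
#     geo_distance(np.array([48.85, 2.35, 35.0]), np.array([48.86, 2.34, 40.0]))
# combines the haversine ground distance with the altitude difference.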
def _calculate_relative_position(self_transform, location):
"""
Args:
self_transform (carla.Transform): transform of self actor
location (np.ndarray): shape is [3] or [N, 3]
Returns:
np.ndarray: shape is same as location
"""
trans = self_transform
self_loc = trans.location
yaw = math.radians(trans.rotation.yaw)
self_loc = np.array([self_loc.x, self_loc.y, self_loc.z])
cos, sin = np.cos(yaw), np.sin(yaw)
rot = np.array([[cos, -sin, 0.], [sin, cos, 0.], [0., 0., 1.]])
return np.matmul(location - self_loc, rot).astype(np.float32)
def _calculate_relative_velocity(self_transform, velocity):
"""
Args:
self_transform (carla.Transform): transform of self actor
velocity (np.ndarray): shape is [3] or [N, 3]
Returns:
np.ndarray: shape is same as location
"""
trans = self_transform
yaw = math.radians(trans.rotation.yaw)
cos, sin = np.cos(yaw), np.sin(yaw)
rot = np.array([[cos, -sin, 0.], [sin, cos, 0.], [0., 0., 1.]])
return np.matmul(velocity, rot).astype(np.float32)
def _to_numpy_loc(loc: carla.Location):
return np.array([loc.x, loc.y, loc.z])
@gin.configurable(blacklist=['actor', 'alf_world'])
class Player(object):
"""Player is a vehicle with some sensors.
An episode terminates if it reaches one of the following situations:
1. the vehicle arrives at the goal.
2. the time exceeds ``route_length / min_speed``.
3. it gets stuck because of a collision.
At each step, the reward is given based on the following components:
1. Arriving at the goal: ``success_reward``
2. Moving in the navigation direction: the number of meters moved
This moving reward can be either dense or sparse depending on the argument
``sparse_reward``.
3. Negative reward caused by collision: ``-min(max_collision_penalty, max(episode_reward, 0))``
Currently, the player has these sensors: ``CollisionSensor``, ``GnssSensor``,
``IMUSensor``, ``CameraSensor``, ``LaneInvasionSensor`` , ``RadarSensor``,
``NavigationSensor``. See the documentation of these classes for the definition of
the data generated by these sensors.
"""
# over all reward
REWARD_OVERALL = 0
# distance in meter for moving along route
# If using sparse reward (`sparse_reward` is True), this reward is only given
# about every `sparse_reward_interval` meters
# If not using sparse reward, this reward is given every steps.
REWARD_DISTANCE = 1
# 0/1 valued indicating whether there is collision
REWARD_COLLISION = 2
# 0/1 valued indicating reaching goal
REWARD_SUCCESS = 3
# dimension of the reward vector
REWARD_DIMENSION = 4
def __init__(self,
actor,
alf_world,
success_reward=100.,
success_distance_thresh=5.0,
max_collision_penalty=100.,
max_stuck_at_collision_seconds=5.0,
stuck_at_collision_distance=1.0,
sparse_reward=False,
sparse_reward_interval=10.,
allow_negative_distance_reward=True,
min_speed=5.,
with_gnss_sensor=True,
with_imu_sensor=True,
with_camera_sensor=True,
with_radar_sensor=True):
"""
Args:
actor (carla.Actor): the carla actor object
alf_world (World): the world containing the player
success_reward (float): the reward for arriving the goal location.
success_distance_thresh (float): success is achieved if the current
location is within such a distance of the goal
max_collision_penalty (float): the maximum penalty (i.e. negative reward)
for collision. We don't want the collision penalty to be too large
if the player cannot even get enough positive moving reward. So the
penalty is capped at ``max(0., episode_reward)``. Note that this
reward is only given once at the first step of contiguous collisions.
max_stuck_at_collision_seconds (float): the episode will end and is
considered as a failure if the car is stuck at the collision for
that many seconds.
stuck_at_collision_distance (float): the car is considered as being
stuck at the collision if it is within such distance of the first
collision location.
sparse_reward (bool): If False, the distance reward is given at every
step based on how much it moves along the navigation route. If
True, the distance reward is only given after moving about ``sparse_reward_interval`` meters.
sparse_reward_interval (float): the sparse reward is given after
approximately every such distance along the route has been driven.
allow_negative_distance_reward (bool): whether to allow negative distance
reward. If True, the agent will receive positive reward for moving
ahead along the route, and negative reward for moving back along
the route. If False, the agent still receives positive reward for
moving ahead along the route, but will not receive negative reward
for moving back along the route. Instead, the negative distance
will be accumulated into the future distance reward. This may ease
the learning if the right behavior is to temporarily go back along
the route in order to, for example, avoid an obstacle.
min_speed (float): unit is m/s. Failure if initial_distance / min_speed
seconds have passed.
with_gnss_sensor (bool): whether to use ``GnssSensor``.
with_imu_sensor (bool): whether to use ``IMUSensor``.
with_camera_sensor (bool): whether to use ``CameraSensor``.
with_radar_sensor (bool): whether to use ``RadarSensor``.
"""
self._actor = actor
self._alf_world = alf_world
self._observation_sensors = {}
self._collision_sensor = CollisionSensor(actor)
self._observation_sensors['collision'] = self._collision_sensor
if with_gnss_sensor:
self._gnss_sensor = GnssSensor(actor)
self._observation_sensors['gnss'] = self._gnss_sensor
else:
self._gnss_sensor = None
if with_imu_sensor:
self._imu_sensor = IMUSensor(actor)
self._observation_sensors['imu'] = self._imu_sensor
else:
self._imu_sensor = None
if with_camera_sensor:
self._camera_sensor = CameraSensor(actor)
self._observation_sensors['camera'] = self._camera_sensor
else:
self._camera_sensor = None
self._lane_invasion_sensor = LaneInvasionSensor(actor)
if with_radar_sensor:
self._radar_sensor = RadarSensor(actor)
self._observation_sensors['radar'] = self._radar_sensor
else:
self._radar_sensor = None
self._navigation = NavigationSensor(actor, alf_world)
self._observation_sensors['navigation'] = self._navigation
self._success_reward = success_reward
self._success_distance_thresh = success_distance_thresh
self._min_speed = min_speed
self._delta_seconds = actor.get_world().get_settings(
).fixed_delta_seconds
self._max_collision_penalty = max_collision_penalty
self._max_stuck_at_collision_frames = max_stuck_at_collision_seconds / self._delta_seconds
self._stuck_at_collision_distance = stuck_at_collision_distance
self._sparse_reward = sparse_reward
self._sparse_reward_index_interval = int(
max(1, sparse_reward_interval // self._alf_world.route_resolution))
self._allow_negative_distance_reward = allow_negative_distance_reward
self._observation_spec = dict()
self._observation_desc = dict()
for sensor_name, sensor in self._observation_sensors.items():
self._observation_spec[sensor_name] = sensor.observation_spec()
self._observation_desc[sensor_name] = sensor.observation_desc()
self._observation_spec['goal'] = alf.TensorSpec([3])
self._observation_spec['velocity'] = alf.TensorSpec([3])
# UE4 uses a left-handed (not right-handed) coordinate system:
# https://forums.unrealengine.com/development-discussion/c-gameplay-programming/103787-ue4-coordinate-system-not-right-handed
self._observation_desc['goal'] = (
"Target location relative to the vehicle coordinate system in "
"meters. X axis: front, Y axis: right, Z axis: up. Only the "
"rotation around Z axis is taken into account when calculating the "
"vehicle's coordinate system.")
self._observation_desc['navigation'] = (
'Relative positions of the future waypoints in the route')
self._observation_desc[
'velocity'] = "3D Velocity relative to self coordinate in m/s"
self._info_spec = OrderedDict(
success=alf.TensorSpec(()), collision=alf.TensorSpec(()))
self._control = carla.VehicleControl()
self.reset()
# for rendering
self._surface = None
self._font = None
self._clock = None
def reset(self):
"""Reset the player location and goal.
Use ``carla.Client.apply_batch_sync()`` to actually reset.
Returns:
list[carla.command]:
"""
wp = random.choice(self._alf_world.get_waypoints())
goal_loc = wp.transform.location
self._goal_location = np.array([goal_loc.x, goal_loc.y, goal_loc.z],
dtype=np.float32)
forbidden_locations = []
for v in self._alf_world.get_actors():
if v.id == self._actor.id:
continue
forbidden_locations.append(
self._alf_world.get_actor_location(v.id))
# find a waypoint far enough from other vehicles
ok = False
i = 0
while not ok and i < 100:
wp = random.choice(self._alf_world.get_waypoints())
loc = wp.transform.location
ok = True
for other_loc in forbidden_locations:
if loc.distance(other_loc) < 10.:
ok = False
break
i += 1
assert ok, "Fail to find new position"
# loc.z + 0.3 to avoid Z-collision, see Carla documentation for
# carla.Map.get_spawn_points(). The value used by carla is slightly
# smaller: 0.27530714869499207
loc = carla.Location(loc.x, loc.y, loc.z + 0.3)
commands = [
carla.command.ApplyTransform(
self._actor, carla.Transform(loc, wp.transform.rotation)),
carla.command.ApplyVelocity(self._actor, carla.Vector3D()),
carla.command.ApplyAngularVelocity(self._actor, carla.Vector3D())
]
self._max_frame = None
self._done = False
self._prev_location = loc
self._prev_action = np.zeros(
self.action_spec().shape, dtype=np.float32)
self._alf_world.update_actor_location(self._actor.id, loc)
self._route_length = self._navigation.set_destination(goal_loc)
self._prev_collision = False # whether there is collision in the previous frame
self._collision = False # whether there is a collision in the current frame
self._collision_loc = None # the location of the car when it starts to have a collision
# The intermediate goal for sparse reward
self._intermediate_goal_index = min(self._sparse_reward_index_interval,
self._navigation.num_waypoints - 1)
# The location of the car when the intermediate goal is set
self._intermediate_start = _to_numpy_loc(loc)
self._episode_reward = 0.
self._unrecorded_distance_reward = 0.
self._is_first_step = True
return commands
def destroy(self):
"""Get the commands for destroying the player.
Use carla.Client.apply_batch_sync() to actually destroy the sensor.
Returns:
list[carla.command]:
"""
commands = []
for sensor in self._observation_sensors.values():
commands.extend(sensor.destroy())
commands.extend(self._lane_invasion_sensor.destroy())
commands.append(carla.command.DestroyActor(self._actor))
if self._surface is not None:
import pygame
pygame.quit()
return commands
def observation_spec(self):
"""Get the observation spec.
Returns:
nested TensorSpec:
"""
return self._observation_spec
def observation_desc(self):
"""Get the description about the observation.
Returns:
nested str: each str corresponds to one TensorSpec from
``observation_spec()``.
"""
return self._observation_desc
def action_spec(self):
"""Get the action spec.
The action is a 4-D vector of [throttle, steer, brake, reverse], where
throttle is in [-1.0, 1.0] (negative value is same as zero), steer is in
[-1.0, 1.0], brake is in [-1.0, 1.0] (negative value is same as zero),
and reverse is interpreted as a boolean value with values greater than
0.5 corresponding to True.
Returns:
nested BoundedTensorSpec:
"""
return alf.BoundedTensorSpec([4],
minimum=[-1., -1., -1., 0.],
maximum=[1., 1., 1., 1.])
def info_spec(self):
"""Get the info spec."""
return self._info_spec
def action_desc(self):
"""Get the description about the action.
Returns:
nested str: each str corresponds to one TensorSpec from
``action_spec()``.
"""
return (
"4-D vector of [throttle, steer, brake, reverse], where "
"throttle is in [-1.0, 1.0] (negative value is same as zero), "
"steer is in [-1.0, 1.0], brake is in [-1.0, 1.0] (negative value "
"is same as zero), and reverse is interpreted as a boolean value "
"with values greater than 0.5 corrsponding to True.")
def reward_spec(self):
"""Get the reward spec."""
return alf.TensorSpec([Player.REWARD_DIMENSION])
def _get_goal(self):
return _calculate_relative_position(self._actor.get_transform(),
self._goal_location)
def get_current_time_step(self, current_frame):
"""Get the current time step for the player.
Args:
current_frame (int): current simulation frame no.
Returns:
TimeStep: all elements are ``np.ndarray`` or ``np.number``.
"""
obs = dict()
for sensor_name, sensor in self._observation_sensors.items():
obs[sensor_name] = sensor.get_current_observation(current_frame)
obs['goal'] = self._get_goal()
self._alf_world.update_actor_location(self._actor.id,
self._actor.get_location())
v = self._actor.get_velocity()
obs['velocity'] = _calculate_relative_velocity(
self._actor.get_transform(), _to_numpy_loc(v))
self._current_distance = np.linalg.norm(obs['goal'])
prev_loc = _to_numpy_loc(self._prev_location)
curr_loc = _to_numpy_loc(self._actor.get_location())
reward_vector = np.zeros(Player.REWARD_DIMENSION, np.float32)
reward = 0.
discount = 1.0
info = OrderedDict(success=np.float32(0.0), collision=np.float32(0.0))
# When the previous episode ends because of getting stuck in a collision with
# another vehicle, it may get an additional collision event in the new frame
# because the relocation of the car may happen after the simulation of the
# moving. So we ignore the collision at the first step.
self._collision = not np.all(
obs['collision'] == 0) and not self._is_first_step
if self._collision and not self._prev_collision:
# We only report the first collision event among contiguous collision
# events.
info['collision'] = np.float32(1.0)
logging.info("actor=%d frame=%d COLLISION" % (self._actor.id,
current_frame))
self._collision_loc = curr_loc
self._collision_frame = current_frame
# We don't want the collision penalty to be too large if the player
# cannot even get enough positive moving reward. So we cap the penalty
# at ``max(0., self._episode_reward)``
reward -= min(self._max_collision_penalty,
max(0., self._episode_reward))
reward_vector[Player.REWARD_COLLISION] = 1.
if self._max_frame is None:
step_type = ds.StepType.FIRST
max_frames = math.ceil(
self._route_length / self._min_speed / self._delta_seconds)
self._max_frame = current_frame + max_frames
elif (self._current_distance < self._success_distance_thresh
and self._actor.get_velocity() == carla.Location(0., 0., 0.)):
# TODO: include waypoint orientation as success criteria
step_type = ds.StepType.LAST
reward += self._success_reward
reward_vector[Player.REWARD_SUCCESS] = 1.
discount = 0.0
info['success'] = np.float32(1.0)
logging.info(
"actor=%d frame=%d SUCCESS" % (self._actor.id, current_frame))
elif current_frame >= self._max_frame:
logging.info("actor=%d frame=%d FAILURE: out of time" %
(self._actor.id, current_frame))
step_type = ds.StepType.LAST
elif (self._collision_loc is not None
and current_frame - self._collision_frame >
self._max_stuck_at_collision_frames
and np.linalg.norm(curr_loc - self._collision_loc) <
self._stuck_at_collision_distance):
logging.info("actor=%d frame=%d FAILURE: stuck at collision" %
(self._actor.id, current_frame))
step_type = ds.StepType.LAST
else:
step_type = ds.StepType.MID
if self._sparse_reward:
current_index = self._navigation.get_next_waypoint_index()
if step_type == ds.StepType.LAST and info['success'] == 1.0:
# Since the episode is finished, we need to incorporate the final
# progress towards the goal as reward to encourage stopping near the goal.
distance_reward = (
np.linalg.norm(self._intermediate_start -
self._goal_location) -
np.linalg.norm(curr_loc - self._goal_location))
elif self._intermediate_goal_index < current_index:
# This means that the car has passed the intermediate goal.
# And we give it a reward which is equal to the distance it
# travels.
intermediate_goal = self._navigation.get_waypoint(
self._intermediate_goal_index)
distance_reward = np.linalg.norm(intermediate_goal -
self._intermediate_start)
self._intermediate_start = intermediate_goal
self._intermediate_goal_index = min(
self._intermediate_goal_index +
self._sparse_reward_index_interval,
self._navigation.num_waypoints - 1)
else:
goal0 = obs['navigation'][2] # This is about 10m ahead
distance_reward = (np.linalg.norm(prev_loc - goal0) -
np.linalg.norm(curr_loc - goal0))
reward_vector[Player.REWARD_DISTANCE] = distance_reward
if not self._allow_negative_distance_reward:
distance_reward += self._unrecorded_distance_reward
if distance_reward < 0:
self._unrecorded_distance_reward = distance_reward
distance_reward = 0
else:
self._unrecorded_distance_reward = 0
reward += distance_reward
obs['navigation'] = _calculate_relative_position(
self._actor.get_transform(), obs['navigation'])
self._done = step_type == ds.StepType.LAST
self._episode_reward += reward
reward_vector[Player.REWARD_OVERALL] = reward
self._current_time_step = ds.TimeStep(
step_type=step_type,
reward=reward_vector,
discount=np.float32(discount),
observation=obs,
prev_action=self._prev_action,
env_info=info)
return self._current_time_step
def act(self, action):
"""Generate the carla command for taking the given action.
Use ``carla.Client.apply_batch_sync()`` to actually destroy the sensor.
Args:
action (nested np.ndarray):
Returns:
list[carla.command]:
"""
self._prev_collision = self._collision
self._prev_location = self._actor.get_location()
self._is_first_step = False
if self._done:
return self.reset()
self._control.throttle = max(float(action[0]), 0.0)
self._control.steer = float(action[1])
self._control.brake = max(float(action[2]), 0.0)
self._control.reverse = bool(action[3] > 0.5)
self._prev_action = action
return [carla.command.ApplyVehicleControl(self._actor, self._control)]
def render(self, mode):
"""Render the simulation.
Args:
mode (str): one of ['rgb_array', 'human']
Returns:
one of the following:
- None: if mode is 'human'
- np.ndarray: the image of shape [height, width, channeles] if
mode is 'rgb_array'
"""
import pygame
if self._surface is None:
pygame.init()
pygame.font.init()
self._clock = pygame.time.Clock()
if self._camera_sensor:
height, width = self._camera_sensor.observation_spec(
).shape[1:3]
height = max(height, MINIMUM_RENDER_HEIGHT)
width = max(width, MINIMUM_RENDER_WIDTH)
else:
height = MINIMUM_RENDER_HEIGHT
width = MINIMUM_RENDER_WIDTH
if mode == 'human':
self._surface = pygame.display.set_mode(
(width, height), pygame.HWSURFACE | pygame.DOUBLEBUF)
else:
self._surface = pygame.Surface((width, height))
if mode == 'human':
self._clock.tick_busy_loop(1000)
if self._camera_sensor:
self._camera_sensor.render(self._surface)
obs = self._current_time_step.observation
np_precision = np.get_printoptions()['precision']
np.set_printoptions(precision=1)
info_text = [
'FPS: %6.2f' % self._clock.get_fps(),
'GPS: (%7.4f, %8.4f, %5.2f)' % tuple(obs['gnss'].tolist()),
'Goal: (%7.1f, %8.1f, %5.1f)' % tuple(obs['goal'].tolist()),
'Ahead: (%7.1f, %8.1f, %5.1f)' % tuple(
obs['navigation'][2].tolist()),
'Distance: %7.2f' % np.linalg.norm(obs['goal']),
'Velocity: (%4.1f, %4.1f, %4.1f) km/h' % tuple(
(3.6 * obs['velocity']).tolist()),
'Acceleration: (%4.1f, %4.1f, %4.1f)' % tuple(
obs['imu'][0:3].tolist()),
'Compass: %5.1f' % math.degrees(float(obs['imu'][6])),
'Throttle: %4.2f' % self._control.throttle,
'Brake: %4.2f' % self._control.brake,
'Steer: %4.2f' % self._control.steer,
'Reverse: %4s' % self._control.reverse,
'Reward: (%s)' % self._current_time_step.reward,
]
np.set_printoptions(precision=np_precision)
self._draw_text(info_text)
if mode == 'human':
pygame.display.flip()
elif mode == 'rgb_array':
# (x, y, c) => (y, x, c)
return np.transpose(
pygame.surfarray.array3d(self._surface), (1, 0, 2))
else:
raise ValueError("Unsupported render mode: %s" % mode)
def _draw_text(self, texts):
import os
import pygame
if self._font is None:
font_name = 'courier' if os.name == 'nt' else 'mono'
fonts = [x for x in pygame.font.get_fonts() if font_name in x]
default_font = 'ubuntumono'
mono = default_font if default_font in fonts else fonts[0]
mono = pygame.font.match_font(mono)
self._font = pygame.font.Font(mono, 12 if os.name == 'nt' else 14)
info_surface = pygame.Surface((240, 240))
info_surface.set_alpha(100)
self._surface.blit(info_surface, (0, 0))
v_offset = 4
for item in texts:
surface = self._font.render(item, True, (255, 255, 255))
self._surface.blit(surface, (8, v_offset))
v_offset += 18
def _exec(command):
stream = os.popen(command)
ret = stream.read()
stream.close()
return ret
gin.constant('CarlaEnvironment.REWARD_DIMENSION', Player.REWARD_DIMENSION)
@gin.configurable
class CarlaServer(object):
"""CarlaServer for doing the simulation."""
def __init__(self,
rpc_port=2000,
streaming_port=2001,
docker_image="horizonrobotics/alf:0.0.3-carla",
quality_level="Low",
carla_root="/home/carla",
use_opengl=True):
"""
Args:
rpc_port (int): port for RPC
streaming_port (int): port for data streaming
docker_image (str): If provided, will use the docker image to start
the Carla server. Some valid images are "carlasim/carla:0.9.9"
and "horionrobotics/alf:0.0.3-carla"
quality_level (str): one of ['Low', 'Epic']. See the explanation at
`<https://carla.readthedocs.io/en/latest/adv_rendering_options/#graphics-quality>`_
carla_root (str): directory where CarlaUE4.sh is located. The default
value is correct for using docker image. If not using docker
image, make sure you provide the correct path. This is the directory
where you unzipped the file you downloaded from
`<https://github.com/carla-simulator/carla/releases/tag/0.9.9>`_.
use_opengl (bool): the default graphics engine of Carla is Vulkan,
which is supposed to be better than OpenGL. However, Vulkan is not
always available. It may not be installed or the nvidia driver does
not support vulkan.
"""
assert quality_level in ['Low', 'Epic'], "Unknown quality level"
use_docker = (not alf.utils.common.is_inside_docker_container()
and docker_image)
opengl = "-opengl" if use_opengl else ""
if use_docker:
dev = os.environ.get('CUDA_VISIBLE_DEVICES')
if not dev:
dev = 'all'
command = ("docker run -d "
"-p {rpc_port}:{rpc_port} "
"-p {streaming_port}:{streaming_port} "
"-u carla "
"--rm --gpus device=" + dev + " " + docker_image +
" {carla_root}/CarlaUE4.sh "
"--carla-rpc-port={rpc_port} "
"--carla-streaming-port={streaming_port} "
"--quality-level={quality_level} {opengl}")
else:
assert os.path.exists(carla_root + "/CarlaUE4.sh"), (
"%s/CarlaUE4.sh "
"does not exist. Please provide correct value for `carla_root`"
% carla_root)
# We do not use CarlaUE4.sh here in order to get the actual Carla
# server process so that we can kill it.
command = (
"{carla_root}/CarlaUE4/Binaries/Linux/CarlaUE4-Linux-Shipping "
"CarlaUE4 " # perhaps most system does not have vulkan support, so we use opengl
"-carla-rpc-port={rpc_port} "
"-carla-streaming-port={streaming_port} "
"-quality-level={quality_level} {opengl}")
command = command.format(
rpc_port=rpc_port,
streaming_port=streaming_port,
quality_level=quality_level,
carla_root=carla_root,
opengl=opengl)
logging.info("Starting Carla server: %s" % command)
self._container_id = None
self._process = None
if use_docker:
self._container_id = _exec(command)
assert self._container_id, "Fail to start container"
logging.info("Starting carla in container %s" % self._container_id)
else:
new_env = os.environ.copy()
new_env['SDL_VIDEODRIVER'] = 'offscreen'
self._process = subprocess.Popen(
command.split(),
stdout=sys.stdout,
stderr=sys.stderr,
env=new_env)
def stop(self):
"""Stop the carla server."""
if self._container_id:
command = "docker kill %s" % self._container_id
logging.info("Stopping Carla server: %s" % command)
_exec(command)
self._container_id = None
if self._process:
self._process.kill()
self._process.communicate()
self._process = None
def __del__(self):
self.stop()
@gin.configurable
class CarlaEnvironment(AlfEnvironment):
"""Carla simulation environment.
In order to use it, you need to either download a valid docker image or
a Carla package.
"""
def __init__(self,
batch_size,
map_name,
vehicle_filter='vehicle.*',
walker_filter='walker.pedestrian.*',
num_other_vehicles=0,
num_walkers=0,
percentage_walkers_running=0.1,
percentage_walkers_crossing=0.1,
global_distance_to_leading_vehicle=2.0,
use_hybrid_physics_mode=True,
safe=True,
step_time=0.05):
"""
Args:
batch_size (int): the number of learning vehicles.
map_name (str): the name of the map (e.g. "Town01")
vehicle_filter (str): the filter for getting vehicle blueprints.
walker_filter (str): the filter for getting walker blueprints.
num_other_vehicles (int): the number of autopilot vehicles
num_walkers (int): the number of walkers
global_distance_to_leading_vehicle (float): the autopiloted vehicles
will try to keep such distance from other vehicles.
percentage_walkers_running (float): percent of running walkers
percentage_walkers_crossing (float): percent of walkers walking
across the road.
use_hybrid_physics_mode (bool): If true, the autopiloted vehicle will
not use physics for simulation if it is far from other vehicles.
safe (bool): avoid spawning vehicles prone to accidents.
step_time (float): how many seconds each simulation step represents.
"""
super().__init__()
with _get_unused_port(2000, n=2) as (rpc_port, streaming_port):
self._server = CarlaServer(rpc_port, streaming_port)
self._batch_size = batch_size
self._num_other_vehicles = num_other_vehicles
self._num_walkers = num_walkers
self._percentage_walkers_running = percentage_walkers_running
self._percentage_walkers_crossing = percentage_walkers_crossing
self._world = None
try:
for i in range(20):
try:
logging.info(
"Waiting for server to start. Try %d" % (i + 1))
self._client = carla.Client("localhost", rpc_port)
self._world = self._client.load_world(map_name)
break
except RuntimeError:
continue
finally:
if self._world is None:
self._server.stop()
assert self._world is not None, "Fail to start server."
logging.info("Server started.")
self._traffic_manager = None
if self._num_other_vehicles + self._num_walkers > 0:
with _get_unused_port(8000, n=1) as tm_port:
self._traffic_manager = self._client.get_trafficmanager(
tm_port)
self._traffic_manager.set_hybrid_physics_mode(
use_hybrid_physics_mode)
self._traffic_manager.set_global_distance_to_leading_vehicle(
global_distance_to_leading_vehicle)
self._client.set_timeout(20)
self._alf_world = World(self._world)
self._safe = safe
self._vehicle_filter = vehicle_filter
self._walker_filter = walker_filter
settings = self._world.get_settings()
settings.synchronous_mode = True
settings.fixed_delta_seconds = step_time
self._world.apply_settings(settings)
self._map_name = map_name
self._spawn_vehicles()
self._spawn_walkers()
self._observation_spec = self._players[0].observation_spec()
self._action_spec = self._players[0].action_spec()
self._env_info_spec = self._players[0].info_spec()
self._reward_spec = self._players[0].reward_spec()
# metadata property is required by video recording
self.metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 1 / step_time
}
def _spawn_vehicles(self):
blueprints = self._world.get_blueprint_library().filter(
self._vehicle_filter)
assert len(
blueprints) > 0, "Cannot find vehicle '%s'" % self._vehicle_filter
if self._safe:
blueprints = [
x for x in blueprints
if int(x.get_attribute('number_of_wheels')) == 4
]
blueprints = [x for x in blueprints if not x.id.endswith('isetta')]
blueprints = [
x for x in blueprints if not x.id.endswith('carlacola')
]
blueprints = [
x for x in blueprints if not x.id.endswith('cybertruck')
]
blueprints = [x for x in blueprints if not x.id.endswith('t2')]
assert len(
blueprints
) > 0, "Cannot find safe vehicle '%s'" % self._vehicle_filter
spawn_points = self._world.get_map().get_spawn_points()
number_of_spawn_points = len(spawn_points)
num_vehicles = self._batch_size + self._num_other_vehicles
if num_vehicles <= number_of_spawn_points:
random.shuffle(spawn_points)
else:
raise ValueError(
"requested %d vehicles, but could only find %d spawn points" %
(num_vehicles, number_of_spawn_points))
commands = []
for i, transform in enumerate(spawn_points[:num_vehicles]):
blueprint = random.choice(blueprints)
if blueprint.has_attribute('color'):
color = random.choice(
blueprint.get_attribute('color').recommended_values)
blueprint.set_attribute('color', color)
if blueprint.has_attribute('driver_id'):
driver_id = random.choice(
blueprint.get_attribute('driver_id').recommended_values)
blueprint.set_attribute('driver_id', driver_id)
if i < self._batch_size:
blueprint.set_attribute('role_name', 'hero')
else:
blueprint.set_attribute('role_name', 'autopilot')
command = carla.command.SpawnActor(blueprint, transform)
if i >= self._batch_size:
# managed by traffic manager
command = command.then(
carla.command.SetAutopilot(
carla.command.FutureActor, True,
self._traffic_manager.get_port()))
commands.append(command)
self._players = []
self._other_vehicles = []
responses = self._client.apply_batch_sync(commands, True)
for i, response in enumerate(responses):
if response.error:
logging.error(response.error)
continue
vehicle = self._world.get_actor(response.actor_id)
if i < self._batch_size:
self._players.append(Player(vehicle, self._alf_world))
else:
self._other_vehicles.append(vehicle)
self._alf_world.add_actor(vehicle)
self._alf_world.update_actor_location(vehicle.id,
spawn_points[i].location)
assert len(self._players) + len(
self._other_vehicles) == num_vehicles, (
"Fail to create %s vehicles" % num_vehicles)
def _spawn_walkers(self):
walker_blueprints = self._world.get_blueprint_library().filter(
self._walker_filter)
# 1. take all the random locations to spawn
spawn_points = []
for _ in range(self._num_walkers):
spawn_point = carla.Transform()
loc = self._world.get_random_location_from_navigation()
if loc is not None:
spawn_point.location = loc
spawn_points.append(spawn_point)
# 2. we spawn the walker object
commands = []
walker_speeds = []
for spawn_point in spawn_points:
walker_bp = random.choice(walker_blueprints)
# set as not invincible
if walker_bp.has_attribute('is_invincible'):
walker_bp.set_attribute('is_invincible', 'false')
# set the max speed
if walker_bp.has_attribute('speed'):
if (random.random() > self._percentage_walkers_running):
# walking
walker_speeds.append(
walker_bp.get_attribute('speed').recommended_values[1])
else:
# running
walker_speeds.append(
walker_bp.get_attribute('speed').recommended_values[2])
else:
logging.info("Walker has no speed")
walker_speeds.append(0.0)
commands.append(carla.command.SpawnActor(walker_bp, spawn_point))
responses = self._client.apply_batch_sync(commands, True)
walker_speeds2 = []
self._walkers = []
for response, walker_speed, spawn_point in zip(
responses, walker_speeds, spawn_points):
if response.error:
logging.error(
"%s: %s" % (response.error, spawn_point.location))
continue
walker = self._world.get_actor(response.actor_id)
self._walkers.append({"walker": walker})
walker_speeds2.append(walker_speed)
walker_speeds = walker_speeds2
# 3. we spawn the walker controller
commands = []
walker_controller_bp = self._world.get_blueprint_library().find(
'controller.ai.walker')
for walker in self._walkers:
commands.append(
carla.command.SpawnActor(walker_controller_bp,
carla.Transform(),
walker["walker"].id))
responses = self._client.apply_batch_sync(commands, True)
for response, walker in zip(responses, self._walkers):
if response.error:
logging.error(response.error)
continue
walker["controller"] = self._world.get_actor(response.actor_id)
# wait for a tick to ensure client receives the last transform of the walkers we have just created
self._world.tick()
# 4. initialize each controller and set a target to walk to (list is [controller, actor, controller, actor ...])
# set how many pedestrians can cross the road
self._world.set_pedestrians_cross_factor(
self._percentage_walkers_crossing)
for walker, walker_speed in zip(self._walkers, walker_speeds):
# start walker
walker['controller'].start()
# set walk to random point
location = self._world.get_random_location_from_navigation()
walker['controller'].go_to_location(location)
# max speed
walker['controller'].set_max_speed(float(walker_speed))
self._alf_world.add_actor(walker['walker'])
self._alf_world.update_actor_location(walker['walker'].id,
location)
def _clear(self):
if self._world is None:
return
if self._players:
commands = []
for player in self._players:
commands.extend(player.destroy())
for response in self._client.apply_batch_sync(commands, True):
if response.error:
logging.error(response.error)
self._players.clear()
commands = []
for vehicle in self._other_vehicles:
commands.append(carla.command.DestroyActor(vehicle))
for walker in self._walkers:
walker['controller'].stop()
commands.append(carla.command.DestroyActor(walker['controller']))
commands.append(carla.command.DestroyActor(walker['walker']))
if commands:
for response in self._client.apply_batch_sync(commands, True):
if response.error:
logging.error(response.error)
self._other_vehicles.clear()
self._walkers.clear()
@property
def batched(self):
return True
@property
def batch_size(self):
return self._batch_size
def env_info_spec(self):
return self._env_info_spec
def observation_spec(self):
return self._observation_spec
def observation_desc(self):
return self._players[0].observation_desc()
def action_spec(self):
return self._action_spec
def action_desc(self):
return self._players[0].action_desc()
def reward_spec(self):
return self._reward_spec
def close(self):
self._clear()
self._server.stop()
def __del__(self):
self.close()
@property
def players(self):
"""Get all the players in the environment.
Returns:
list[Player]:
"""
return self._players
def render(self, mode):
return self._players[0].render(mode)
def _step(self, action):
action = alf.nest.map_structure(lambda x: x.cpu().numpy(), action)
commands = []
for player, act in zip(self._players, action):
commands.extend(player.act(act))
for response in self._client.apply_batch_sync(commands):
if response.error:
logging.error(response.error)
self._current_frame = self._world.tick()
for vehicle in self._other_vehicles:
self._alf_world.update_actor_location(vehicle.id,
vehicle.get_location())
for walker in self._walkers:
actor = walker['walker']
self._alf_world.update_actor_location(actor.id,
actor.get_location())
return self._get_current_time_step()
def _get_current_time_step(self):
time_step = [
player.get_current_time_step(self._current_frame)
for player in self._players
]
time_step = alf.nest.map_structure(lambda *a: np.stack(a), *time_step)
time_step = alf.nest.map_structure(torch.as_tensor, time_step)
common.check_numerics(time_step)
return time_step._replace(env_id=torch.arange(self._batch_size))
def _reset(self):
commands = []
for player in self._players:
commands.extend(player.reset())
for response in self._client.apply_batch_sync(commands):
if response.error:
logging.error(response.error)
self._current_frame = self._world.tick()
return self._get_current_time_step()
@gin.configurable(whitelist=['wrappers'])
def load(map_name, batch_size, wrappers=[]):
"""Load CarlaEnvironment
Args:
map_name (str): name of the map. Currently available maps are:
'Town01, Town02', 'Town03', 'Town04', 'Town05', 'Town06', 'Town07',
and 'Town10HD'
batch_size (int): the number of vehicles in the simulation.
wrappers (list[AlfEnvironmentBaseWrapper]): environment wrappers
Returns:
CarlaEnvironment
"""
env = CarlaEnvironment(batch_size, map_name)
for wrapper in wrappers:
env = wrapper(env)
return env
load.batched = True
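# --- Hedged usage sketch (editor's note, not part of the original module) ---
# Driving the environment requires a working CARLA 0.9.9 install (the CarlaServer
# above is started automatically), so the snippet is illustration only:
#
#     env = load(map_name="Town01", batch_size=2)
#     time_step = env.reset()                   # assuming the usual AlfEnvironment
#     action = torch.zeros(env.batch_size, 4)   # reset()/step() wrappers that call
#     time_step = env.step(action)              # the _reset()/_step() defined above
#     env.close()
#
# Each action row is [throttle, steer, brake, reverse], as described in action_desc().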
| [
"torch.arange"
] | 1.4.0 | zhuboli/alf | b357565638c9336ebd88cecb9766a17d72d5d0c3 |
1.0 | # based on implementation: https://github.com/usuyama/pytorch-unet/blob/master/pytorch_unet.py
from ..registry import BACKBONES
import torch
import torch.nn as nn
def double_conv(in_channels, out_channels):
return nn.Sequential(
nn.Conv3d(in_channels, out_channels, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv3d(out_channels, out_channels, 3, padding=1),
nn.ReLU(inplace=True)
)
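# --- Hedged shape note (editor's addition) ---
# double_conv keeps the spatial size (3x3x3 convolutions with padding=1) and only changes
# the channel count, e.g. an input of shape (N, 3, D, H, W) becomes (N, 16, D, H, W) for
# double_conv(3, 16). The UNet3D below downsamples three times with MaxPool3d(2), so D, H
# and W should be divisible by 8 for the skip connections to line up after upsampling.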
@BACKBONES.register_module
class UNet3D(nn.Module):
def __init__(self):
super(UNet3D, self).__init__()
self.dconv_down1 = double_conv(3, 16)
self.dconv_down2 = double_conv(16, 32)
self.dconv_down3 = double_conv(32, 64)
self.dconv_down4 = double_conv(64, 128)
self.maxpool = nn.MaxPool3d(2)
# self.upsample = nn.functional.interpolate(scale_factor=2, mode='trilinear', align_corners=True)
self.dconv_up3 = double_conv(64 + 128, 64)
self.dconv_up2 = double_conv(32 + 64, 32)
self.dconv_up1 = double_conv(32 + 16, 16)
# self.conv_last = nn.Conv2d(64, n_class, 1)
def init_weights(self, pretrained=None):
pass
def forward(self, x):
conv1 = self.dconv_down1(x)
x = self.maxpool(conv1)
conv2 = self.dconv_down2(x)
x = self.maxpool(conv2)
conv3 = self.dconv_down3(x)
x = self.maxpool(conv3)
x = self.dconv_down4(x)
x = nn.functional.interpolate(x, scale_factor=2, mode='trilinear', align_corners=True)
x = torch.cat([x, conv3], dim=1)
x = self.dconv_up3(x)
x = nn.functional.interpolate(x, scale_factor=2, mode='trilinear', align_corners=True)
x = torch.cat([x, conv2], dim=1)
x = self.dconv_up2(x)
x = nn.functional.interpolate(x, scale_factor=2, mode='trilinear', align_corners=True)
x = torch.cat([x, conv1], dim=1)
x = self.dconv_up1(x)
return x
| [
"torch.cat",
"torch.nn.MaxPool3d",
"torch.nn.functional.interpolate",
"torch.nn.ReLU",
"torch.nn.Conv3d"
] | 1.0.1 | arthur801031/3d-multi-resolution-rcnn | 8e5454a72f8daa174bf3eabfa5964152f04ab287 |
1.0 | import torch
from .transforms import bbox2delta, bbox2delta3d
from ..utils import multi_apply
def bbox_target(pos_bboxes_list,
neg_bboxes_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg,
reg_classes=1,
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
concat=True):
labels, label_weights, bbox_targets, bbox_weights = multi_apply(
bbox_target_single,
pos_bboxes_list,
neg_bboxes_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg=cfg,
reg_classes=reg_classes,
target_means=target_means,
target_stds=target_stds)
if concat:
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
bbox_targets = torch.cat(bbox_targets, 0)
bbox_weights = torch.cat(bbox_weights, 0)
return labels, label_weights, bbox_targets, bbox_weights
def bbox_target_3d(pos_bboxes_list,
neg_bboxes_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg,
reg_classes=1,
target_means=[.0, .0, .0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
concat=True):
labels, label_weights, bbox_targets, bbox_weights = multi_apply(
bbox_target_single_3d,
pos_bboxes_list,
neg_bboxes_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg=cfg,
reg_classes=reg_classes,
target_means=target_means,
target_stds=target_stds)
if concat:
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
bbox_targets = torch.cat(bbox_targets, 0)
bbox_weights = torch.cat(bbox_weights, 0)
return labels, label_weights, bbox_targets, bbox_weights
def bbox_target_3d_parcel(pos_bboxes_list,
neg_bboxes_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
pos_gt_bregions_list,
cfg,
reg_classes=1,
target_means=[.0, .0, .0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
concat=True):
labels, label_weights, bbox_targets, bbox_weights, bregions, bregion_weights = multi_apply(
bbox_target_single_3d_parcel,
pos_bboxes_list,
neg_bboxes_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
pos_gt_bregions_list,
cfg=cfg,
reg_classes=reg_classes,
target_means=target_means,
target_stds=target_stds)
if concat:
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
bbox_targets = torch.cat(bbox_targets, 0)
bbox_weights = torch.cat(bbox_weights, 0)
bregions = torch.cat(bregions, 0)
bregion_weights = torch.cat(bregion_weights, 0)
return labels, label_weights, bbox_targets, bbox_weights, bregions, bregion_weights
def bbox_target_single(pos_bboxes,
neg_bboxes,
pos_gt_bboxes,
pos_gt_labels,
cfg,
reg_classes=1,
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]):
num_pos = pos_bboxes.size(0)
num_neg = neg_bboxes.size(0)
num_samples = num_pos + num_neg
labels = pos_bboxes.new_zeros(num_samples, dtype=torch.long)
label_weights = pos_bboxes.new_zeros(num_samples)
bbox_targets = pos_bboxes.new_zeros(num_samples, 4)
bbox_weights = pos_bboxes.new_zeros(num_samples, 4)
if num_pos > 0:
labels[:num_pos] = pos_gt_labels
pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
label_weights[:num_pos] = pos_weight
pos_bbox_targets = bbox2delta(pos_bboxes, pos_gt_bboxes, target_means,
target_stds)
bbox_targets[:num_pos, :] = pos_bbox_targets
bbox_weights[:num_pos, :] = 1
if num_neg > 0:
label_weights[-num_neg:] = 1.0
return labels, label_weights, bbox_targets, bbox_weights
def bbox_target_single_3d(pos_bboxes,
neg_bboxes,
pos_gt_bboxes,
pos_gt_labels,
cfg,
reg_classes=1,
target_means=[.0, .0, .0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]):
num_pos = pos_bboxes.size(0)
num_neg = neg_bboxes.size(0)
num_samples = num_pos + num_neg
labels = pos_bboxes.new_zeros(num_samples, dtype=torch.long)
label_weights = pos_bboxes.new_zeros(num_samples)
bbox_targets = pos_bboxes.new_zeros(num_samples, 6)
bbox_weights = pos_bboxes.new_zeros(num_samples, 6)
if num_pos > 0:
labels[:num_pos] = pos_gt_labels
pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
label_weights[:num_pos] = pos_weight
pos_bbox_targets = bbox2delta3d(pos_bboxes, pos_gt_bboxes, target_means,
target_stds)
bbox_targets[:num_pos, :] = pos_bbox_targets
bbox_weights[:num_pos, :] = 1
if num_neg > 0:
label_weights[-num_neg:] = 1.0
# if torch.isnan(bbox_targets).any().item() == 1:
# breakpoint()
return labels, label_weights, bbox_targets, bbox_weights
def bbox_target_single_3d_parcel(pos_bboxes,
neg_bboxes,
pos_gt_bboxes,
pos_gt_labels,
pos_gt_bregions,
cfg,
reg_classes=1,
target_means=[.0, .0, .0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]):
num_pos = pos_bboxes.size(0)
num_neg = neg_bboxes.size(0)
num_samples = num_pos + num_neg
labels = pos_bboxes.new_zeros(num_samples, dtype=torch.long)
bregions = pos_bboxes.new_zeros(num_samples, dtype=torch.long)
label_weights = pos_bboxes.new_zeros(num_samples)
bregion_weights = pos_bboxes.new_zeros(num_samples)
bbox_targets = pos_bboxes.new_zeros(num_samples, 6)
bbox_weights = pos_bboxes.new_zeros(num_samples, 6)
if num_pos > 0:
labels[:num_pos] = pos_gt_labels
bregions[:num_pos] = pos_gt_bregions
pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
label_weights[:num_pos] = pos_weight
bregion_weights[:num_pos] = pos_weight
pos_bbox_targets = bbox2delta3d(pos_bboxes, pos_gt_bboxes, target_means,
target_stds)
bbox_targets[:num_pos, :] = pos_bbox_targets
bbox_weights[:num_pos, :] = 1
if num_neg > 0:
label_weights[-num_neg:] = 1.0
bregion_weights[-num_neg:] = 1.0
# if torch.isnan(bbox_targets).any().item() == 1:
# breakpoint()
return labels, label_weights, bbox_targets, bbox_weights, bregions, bregion_weights
def expand_target(bbox_targets, bbox_weights, labels, num_classes):
# breakpoint()  # debugging leftover; left disabled
bbox_targets_expand = bbox_targets.new_zeros((bbox_targets.size(0),
4 * num_classes))
bbox_weights_expand = bbox_weights.new_zeros((bbox_weights.size(0),
4 * num_classes))
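    # scatter each positive sample's 4 regression targets into the slot of its assigned class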
for i in torch.nonzero(labels > 0).squeeze(-1):
start, end = labels[i] * 4, (labels[i] + 1) * 4
bbox_targets_expand[i, start:end] = bbox_targets[i, :]
bbox_weights_expand[i, start:end] = bbox_weights[i, :]
return bbox_targets_expand, bbox_weights_expand
| [
"torch.nonzero",
"torch.cat"
] | 1.0.1 | arthur801031/3d-multi-resolution-rcnn | 8e5454a72f8daa174bf3eabfa5964152f04ab287 |
1.7 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.sparse as sparse
from beta_rec.models.torch_engine import ModelEngine
class NGCF(torch.nn.Module):
"""Model initialisation, embedding generation and prediction of NGCF."""
def __init__(self, config, norm_adj):
"""Initialize NGCF Class."""
super(NGCF, self).__init__()
self.config = config
self.n_users = config["n_users"]
self.n_items = config["n_items"]
self.emb_dim = config["emb_dim"]
self.layer_size = config["layer_size"]
self.norm_adj = norm_adj
self.n_layers = len(self.layer_size)
self.dropout = nn.ModuleList()
self.GC_weights = nn.ModuleList()
self.Bi_weights = nn.ModuleList()
self.dropout_list = list(config["mess_dropout"])
self.layer_size = [self.emb_dim] + self.layer_size
# Create GNN layers
for i in range(self.n_layers):
self.GC_weights.append(
nn.Linear(self.layer_size[i], self.layer_size[i + 1])
)
self.Bi_weights.append(
nn.Linear(self.layer_size[i], self.layer_size[i + 1])
)
self.dropout.append(nn.Dropout(self.dropout_list[i]))
self.user_embedding = nn.Embedding(self.n_users, self.emb_dim)
self.item_embedding = nn.Embedding(self.n_items, self.emb_dim)
self.init_emb()
    def init_emb(self):
        """Initialize users and items' embeddings."""
# Initialize users and items' embeddings
nn.init.xavier_uniform_(self.user_embedding.weight)
nn.init.xavier_uniform_(self.item_embedding.weight)
def forward(self, norm_adj):
"""Perform GNN function on users and item embeddings.
Args:
norm_adj (torch sparse tensor): the norm adjacent matrix of the user-item interaction matrix.
Returns:
u_g_embeddings (tensor): processed user embeddings.
i_g_embeddings (tensor): processed item embeddings.
"""
ego_embeddings = torch.cat(
(self.user_embedding.weight, self.item_embedding.weight), dim=0
)
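        # ego_embeddings stacks user and item embeddings: shape [n_users + n_items, emb_dim]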
all_embeddings = [ego_embeddings]
norm_adj = norm_adj.to(self.device)
for i in range(self.n_layers):
side_embeddings = sparse.mm(norm_adj, ego_embeddings)
sum_embeddings = F.leaky_relu(self.GC_weights[i](side_embeddings))
bi_embeddings = torch.mul(ego_embeddings, side_embeddings)
bi_embeddings = F.leaky_relu(self.Bi_weights[i](bi_embeddings))
ego_embeddings = sum_embeddings + bi_embeddings
ego_embeddings = self.dropout[i](ego_embeddings)
norm_embeddings = F.normalize(ego_embeddings, p=2, dim=1)
all_embeddings += [norm_embeddings]
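        # concatenate the input embeddings and every propagation layer's normalized output along the feature dimension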
all_embeddings = torch.cat(all_embeddings, dim=1)
u_g_embeddings, i_g_embeddings = torch.split(
all_embeddings, [self.n_users, self.n_items], dim=0
)
return u_g_embeddings, i_g_embeddings
def predict(self, users, items):
"""Predict result with the model.
Args:
users (int, or list of int): user id.
items (int, or list of int): item id.
Return:
            scores (tensor): dot-product scores of the user-item pairs.
"""
users_t = torch.tensor(users, dtype=torch.int64, device=self.device)
items_t = torch.tensor(items, dtype=torch.int64, device=self.device)
with torch.no_grad():
ua_embeddings, ia_embeddings = self.forward(self.norm_adj)
u_g_embeddings = ua_embeddings[users_t]
i_g_embeddings = ia_embeddings[items_t]
scores = torch.mul(u_g_embeddings, i_g_embeddings).sum(dim=1)
return scores
class NGCFEngine(ModelEngine):
"""NGCFEngine Class."""
# A class includes train an epoch and train a batch of NGCF
def __init__(self, config):
"""Initialize NGCFEngine Class."""
self.config = config
self.regs = config["model"]["regs"] # reg is the regularisation
self.decay = self.regs[0]
self.batch_size = config["model"]["batch_size"]
self.norm_adj = config["model"]["norm_adj"]
self.model = NGCF(config["model"], self.norm_adj)
super(NGCFEngine, self).__init__(config)
self.model.to(self.device)
def train_single_batch(self, batch_data):
"""Train the model in a single batch.
Args:
batch_data (list): batch users, positive items and negative items.
Return:
loss (float): batch loss.
"""
assert hasattr(self, "model"), "Please specify the exact model !"
self.optimizer.zero_grad()
norm_adj = self.norm_adj
ua_embeddings, ia_embeddings = self.model.forward(norm_adj)
batch_users, pos_items, neg_items = batch_data
u_g_embeddings = ua_embeddings[batch_users]
pos_i_g_embeddings = ia_embeddings[pos_items]
neg_i_g_embeddings = ia_embeddings[neg_items]
batch_mf_loss, batch_emb_loss, batch_reg_loss = self.bpr_loss(
u_g_embeddings, pos_i_g_embeddings, neg_i_g_embeddings
)
batch_loss = batch_mf_loss + batch_emb_loss + batch_reg_loss
batch_loss.backward()
self.optimizer.step()
loss = batch_loss.item()
return loss, batch_reg_loss
def train_an_epoch(self, train_loader, epoch_id):
"""Train the model in one epoch.
Args:
epoch_id (int): the number of epoch.
train_loader (function): user, pos_items and neg_items generator.
"""
assert hasattr(self, "model"), "Please specify the exact model !"
self.model.train()
total_loss = 0.0
regularizer = 0.0
for batch_data in train_loader:
loss, reg = self.train_single_batch(batch_data)
total_loss += loss
regularizer += reg
print(f"[Training Epoch {epoch_id}], Loss {loss}, Regularizer {regularizer}")
self.writer.add_scalar("model/loss", total_loss, epoch_id)
self.writer.add_scalar("model/regularizer", regularizer, epoch_id)
def bpr_loss(self, users, pos_items, neg_items):
"""Bayesian Personalised Ranking (BPR) pairwise loss function.
        Note that pos_items and neg_items must have the same shape.
        Args:
            users (tensor): embeddings of the batch users.
            pos_items (tensor): embeddings of the corresponding positive items.
            neg_items (tensor): embeddings of the sampled negative items.
Returns:
loss.
"""
# Calculate BPR loss
pos_scores = torch.sum(torch.mul(users, pos_items), dim=1)
neg_scores = torch.sum(torch.mul(users, neg_items), dim=1)
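        # L2 regularization over the embeddings used in this batch, scaled below by self.decay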
regularizer = (
1.0 / 2 * (users ** 2).sum()
+ 1.0 / 2 * (pos_items ** 2).sum()
+ 1.0 / 2 * (neg_items ** 2).sum()
)
regularizer = regularizer / self.batch_size
maxi = F.logsigmoid(pos_scores - neg_scores)
mf_loss = -torch.mean(maxi)
emb_loss = self.decay * regularizer
reg_loss = 0.0
return mf_loss, emb_loss, reg_loss
| [
"torch.nn.Linear",
"torch.nn.functional.normalize",
"torch.cat",
"torch.mul",
"torch.nn.Dropout",
"torch.nn.ModuleList",
"torch.split",
"torch.nn.functional.logsigmoid",
"torch.nn.init.xavier_uniform_",
"torch.no_grad",
"torch.tensor",
"torch.mean",
"torch.nn.Embedding",
"torch.sparse.mm"
] | 1.7.1 | mengzaiqiao/TVBR | cdac86a753c41f8f3c55a025be8d88dd305325f5 |
1.5 | import math
from typing import Optional, Tuple
from overrides import overrides
import torch
import torch.nn.functional as F
from transformers import XLNetConfig
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp.nn.util import batched_index_select
@TokenEmbedder.register("pretrained_transformer")
class PretrainedTransformerEmbedder(TokenEmbedder):
"""
Uses a pretrained model from `transformers` as a `TokenEmbedder`.
Registered as a `TokenEmbedder` with name "pretrained_transformer".
# Parameters
model_name : `str`
The name of the `transformers` model to use. Should be the same as the corresponding
`PretrainedTransformerIndexer`.
max_length : `int`, optional (default = `None`)
If positive, folds input token IDs into multiple segments of this length, pass them
through the transformer model independently, and concatenate the final representations.
Should be set to the same value as the `max_length` option on the
`PretrainedTransformerIndexer`.
sub_module: `str`, optional (default = `None`)
The name of a submodule of the transformer to be used as the embedder. Some transformers naturally act
as embedders such as BERT. However, other models consist of encoder and decoder, in which case we just
want to use the encoder.
train_parameters: `bool`, optional (default = `True`)
If this is `True`, the transformer weights get updated during training.
"""
def __init__(
self,
model_name: str,
*,
max_length: int = None,
sub_module: str = None,
train_parameters: bool = True,
override_weights_file: Optional[str] = None,
override_weights_strip_prefix: Optional[str] = None
) -> None:
super().__init__()
from allennlp.common import cached_transformers
self.transformer_model = cached_transformers.get(
model_name, True, override_weights_file, override_weights_strip_prefix
)
self.config = self.transformer_model.config
if sub_module:
assert hasattr(self.transformer_model, sub_module)
self.transformer_model = getattr(self.transformer_model, sub_module)
self._max_length = max_length
# I'm not sure if this works for all models; open an issue on github if you find a case
# where it doesn't work.
self.output_dim = self.config.hidden_size
tokenizer = PretrainedTransformerTokenizer(model_name)
self._num_added_start_tokens = len(tokenizer.single_sequence_start_tokens)
self._num_added_end_tokens = len(tokenizer.single_sequence_end_tokens)
self._num_added_tokens = self._num_added_start_tokens + self._num_added_end_tokens
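        # number of sequence-level special tokens (e.g. [CLS]/[SEP]) the tokenizer adds to a single sequence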
if not train_parameters:
for param in self.transformer_model.parameters():
param.requires_grad = False
@overrides
def get_output_dim(self):
return self.output_dim
def _number_of_token_type_embeddings(self):
if isinstance(self.config, XLNetConfig):
return 3 # XLNet has 3 type ids
elif hasattr(self.config, "type_vocab_size"):
return self.config.type_vocab_size
else:
return 0
@overrides
def forward(
self,
token_ids: torch.LongTensor,
mask: torch.BoolTensor,
type_ids: Optional[torch.LongTensor] = None,
segment_concat_mask: Optional[torch.BoolTensor] = None,
) -> torch.Tensor: # type: ignore
"""
# Parameters
token_ids: `torch.LongTensor`
Shape: `[batch_size, num_wordpieces if max_length is None else num_segment_concat_wordpieces]`.
num_segment_concat_wordpieces is num_wordpieces plus special tokens inserted in the
middle, e.g. the length of: "[CLS] A B C [SEP] [CLS] D E F [SEP]" (see indexer logic).
mask: `torch.BoolTensor`
Shape: [batch_size, num_wordpieces].
type_ids: `Optional[torch.LongTensor]`
Shape: `[batch_size, num_wordpieces if max_length is None else num_segment_concat_wordpieces]`.
segment_concat_mask: `Optional[torch.BoolTensor]`
Shape: `[batch_size, num_segment_concat_wordpieces]`.
# Returns
`torch.Tensor`
Shape: `[batch_size, num_wordpieces, embedding_size]`.
"""
# Some of the huggingface transformers don't support type ids at all and crash when you supply
# them. For others, you can supply a tensor of zeros, and if you don't, they act as if you did.
# There is no practical difference to the caller, so here we pretend that one case is the same
# as another case.
if type_ids is not None:
max_type_id = type_ids.max()
if max_type_id == 0:
type_ids = None
else:
if max_type_id >= self._number_of_token_type_embeddings():
raise ValueError("Found type ids too large for the chosen transformer model.")
assert token_ids.shape == type_ids.shape
fold_long_sequences = self._max_length is not None and token_ids.size(1) > self._max_length
if fold_long_sequences:
batch_size, num_segment_concat_wordpieces = token_ids.size()
token_ids, segment_concat_mask, type_ids = self._fold_long_sequences(
token_ids, segment_concat_mask, type_ids
)
transformer_mask = segment_concat_mask if self._max_length is not None else mask
# Shape: [batch_size, num_wordpieces, embedding_size],
# or if self._max_length is not None:
# [batch_size * num_segments, self._max_length, embedding_size]
# We call this with kwargs because some of the huggingface models don't have the
# token_type_ids parameter and fail even when it's given as None.
# Also, as of transformers v2.5.1, they are taking FloatTensor masks.
parameters = {"input_ids": token_ids, "attention_mask": transformer_mask.float()}
if type_ids is not None:
parameters["token_type_ids"] = type_ids
embeddings = self.transformer_model(**parameters)[0]
if fold_long_sequences:
embeddings = self._unfold_long_sequences(
embeddings, segment_concat_mask, batch_size, num_segment_concat_wordpieces
)
return embeddings
def _fold_long_sequences(
self,
token_ids: torch.LongTensor,
mask: torch.BoolTensor,
type_ids: Optional[torch.LongTensor] = None,
) -> Tuple[torch.LongTensor, torch.LongTensor, Optional[torch.LongTensor]]:
"""
We fold 1D sequences (for each element in batch), returned by `PretrainedTransformerIndexer`
that are in reality multiple segments concatenated together, to 2D tensors, e.g.
[ [CLS] A B C [SEP] [CLS] D E [SEP] ]
-> [ [ [CLS] A B C [SEP] ], [ [CLS] D E [SEP] [PAD] ] ]
The [PAD] positions can be found in the returned `mask`.
# Parameters
token_ids: `torch.LongTensor`
Shape: `[batch_size, num_segment_concat_wordpieces]`.
num_segment_concat_wordpieces is num_wordpieces plus special tokens inserted in the
middle, i.e. the length of: "[CLS] A B C [SEP] [CLS] D E F [SEP]" (see indexer logic).
mask: `torch.BoolTensor`
Shape: `[batch_size, num_segment_concat_wordpieces]`.
The mask for the concatenated segments of wordpieces. The same as `segment_concat_mask`
in `forward()`.
type_ids: `Optional[torch.LongTensor]`
Shape: [batch_size, num_segment_concat_wordpieces].
# Returns:
token_ids: `torch.LongTensor`
Shape: [batch_size * num_segments, self._max_length].
mask: `torch.BoolTensor`
Shape: [batch_size * num_segments, self._max_length].
"""
num_segment_concat_wordpieces = token_ids.size(1)
num_segments = math.ceil(num_segment_concat_wordpieces / self._max_length)
padded_length = num_segments * self._max_length
length_to_pad = padded_length - num_segment_concat_wordpieces
def fold(tensor): # Shape: [batch_size, num_segment_concat_wordpieces]
# Shape: [batch_size, num_segments * self._max_length]
tensor = F.pad(tensor, [0, length_to_pad], value=0)
# Shape: [batch_size * num_segments, self._max_length]
return tensor.reshape(-1, self._max_length)
return fold(token_ids), fold(mask), fold(type_ids) if type_ids is not None else None
def _unfold_long_sequences(
self,
embeddings: torch.FloatTensor,
mask: torch.BoolTensor,
batch_size: int,
num_segment_concat_wordpieces: int,
) -> torch.FloatTensor:
"""
We take 2D segments of a long sequence and flatten them out to get the whole sequence
        representation while removing unnecessary special tokens.
[ [ [CLS]_emb A_emb B_emb C_emb [SEP]_emb ], [ [CLS]_emb D_emb E_emb [SEP]_emb [PAD]_emb ] ]
-> [ [CLS]_emb A_emb B_emb C_emb D_emb E_emb [SEP]_emb ]
We truncate the start and end tokens for all segments, recombine the segments,
and manually add back the start and end tokens.
# Parameters
embeddings: `torch.FloatTensor`
Shape: [batch_size * num_segments, self._max_length, embedding_size].
mask: `torch.BoolTensor`
Shape: [batch_size * num_segments, self._max_length].
The mask for the concatenated segments of wordpieces. The same as `segment_concat_mask`
in `forward()`.
batch_size: `int`
num_segment_concat_wordpieces: `int`
The length of the original "[ [CLS] A B C [SEP] [CLS] D E F [SEP] ]", i.e.
the original `token_ids.size(1)`.
# Returns:
embeddings: `torch.FloatTensor`
Shape: [batch_size, self._num_wordpieces, embedding_size].
"""
def lengths_to_mask(lengths, max_len, device):
return torch.arange(max_len, device=device).expand(
lengths.size(0), max_len
) < lengths.unsqueeze(1)
device = embeddings.device
num_segments = int(embeddings.size(0) / batch_size)
embedding_size = embeddings.size(2)
# We want to remove all segment-level special tokens but maintain sequence-level ones
num_wordpieces = num_segment_concat_wordpieces - (num_segments - 1) * self._num_added_tokens
embeddings = embeddings.reshape(batch_size, num_segments * self._max_length, embedding_size)
mask = mask.reshape(batch_size, num_segments * self._max_length)
# We assume that all 1s in the mask precede all 0s, and add an assert for that.
# Open an issue on GitHub if this breaks for you.
# Shape: (batch_size,)
seq_lengths = mask.sum(-1)
if not (lengths_to_mask(seq_lengths, mask.size(1), device) == mask).all():
raise ValueError(
"Long sequence splitting only supports masks with all 1s preceding all 0s."
)
# Shape: (batch_size, self._num_added_end_tokens); this is a broadcast op
end_token_indices = (
seq_lengths.unsqueeze(-1) - torch.arange(self._num_added_end_tokens, device=device) - 1
)
# Shape: (batch_size, self._num_added_start_tokens, embedding_size)
start_token_embeddings = embeddings[:, : self._num_added_start_tokens, :]
# Shape: (batch_size, self._num_added_end_tokens, embedding_size)
end_token_embeddings = batched_index_select(embeddings, end_token_indices)
embeddings = embeddings.reshape(batch_size, num_segments, self._max_length, embedding_size)
embeddings = embeddings[
:, :, self._num_added_start_tokens : -self._num_added_end_tokens, :
] # truncate segment-level start/end tokens
embeddings = embeddings.reshape(batch_size, -1, embedding_size) # flatten
# Now try to put end token embeddings back which is a little tricky.
        # The number of segments each sequence spans, excluding padding. Mimicking a ceiling operation.
# Shape: (batch_size,)
num_effective_segments = (seq_lengths + self._max_length - 1) / self._max_length
# The number of indices that end tokens should shift back.
num_removed_non_end_tokens = (
num_effective_segments * self._num_added_tokens - self._num_added_end_tokens
)
# Shape: (batch_size, self._num_added_end_tokens)
end_token_indices -= num_removed_non_end_tokens.unsqueeze(-1)
assert (end_token_indices >= self._num_added_start_tokens).all()
# Add space for end embeddings
embeddings = torch.cat([embeddings, torch.zeros_like(end_token_embeddings)], 1)
# Add end token embeddings back
embeddings.scatter_(
1, end_token_indices.unsqueeze(-1).expand_as(end_token_embeddings), end_token_embeddings
)
# Now put back start tokens. We can do this before putting back end tokens, but then
# we need to change `num_removed_non_end_tokens` a little.
embeddings = torch.cat([start_token_embeddings, embeddings], 1)
# Truncate to original length
embeddings = embeddings[:, :num_wordpieces, :]
return embeddings
| [
"torch.zeros_like",
"torch.nn.functional.pad",
"torch.cat",
"torch.arange"
] | 1.5.0 | Khushbu0610/allennlp | 60deece9fca2da6b66bfcde44484384bdefa3fe7 |
1.8 | import torch
import torch.nn as nn
from tcr_embedding.models.architectures.transformer import TransformerEncoder, TransformerDecoder
from tcr_embedding.models.architectures.mlp import MLP
from tcr_embedding.models.architectures.mlp_scRNA import build_mlp_encoder, build_mlp_decoder
from tcr_embedding.models.vae_base_model import VAEBaseModel
def none_model(hyperparams, hdim, xdim):
pass
class SeparateModelTorch(nn.Module):
def __init__(self, tcr_params, rna_params, joint_params):
super(SeparateModelTorch, self).__init__()
hdim = joint_params['hdim']
num_conditional_labels = joint_params['num_conditional_labels']
cond_dim = joint_params['cond_dim']
cond_input = joint_params['cond_input']
zdim = joint_params['zdim']
shared_hidden = joint_params['shared_hidden']
activation = joint_params['activation']
dropout = joint_params['dropout']
batch_norm = joint_params['batch_norm']
num_seq_labels = tcr_params['num_seq_labels']
self.use_rna = rna_params is not None
num_modalities = 1 if rna_params is None else 2
self.alpha_encoder = TransformerEncoder(tcr_params, hdim//2, num_seq_labels)
self.alpha_decoder = TransformerDecoder(tcr_params, hdim*num_modalities, num_seq_labels)
self.beta_encoder = TransformerEncoder(tcr_params, hdim//2, num_seq_labels)
self.beta_decoder = TransformerDecoder(tcr_params, hdim*num_modalities, num_seq_labels)
if not self.use_rna:
xdim = None
self.gene_encoder = none_model(rna_params, xdim, hdim)
self.gene_decoder = none_model(rna_params, xdim, hdim)
else:
xdim = rna_params['xdim']
self.gene_encoder = build_mlp_encoder(rna_params, xdim, hdim)
self.gene_decoder = build_mlp_decoder(rna_params, xdim, hdim*num_modalities)
# used for NB loss
self.theta = torch.nn.Parameter(torch.randn(xdim))
if cond_dim > 0:
self.cond_emb = torch.nn.Embedding(num_conditional_labels, cond_dim)
self.cond_input = cond_input
cond_input_dim = cond_dim if cond_input else 0
self.shared_encoder = MLP(hdim*num_modalities+cond_input_dim, zdim*2, shared_hidden, activation, 'linear',
dropout, batch_norm, regularize_last_layer=False)
self.shared_decoder = MLP(zdim+cond_dim, hdim*num_modalities, shared_hidden[::-1], activation, activation,
dropout, batch_norm, regularize_last_layer=True)
def forward(self, rna, tcr, tcr_len, conditional=None):
"""
Forward pass of autoencoder
:param rna: torch.Tensor shape=[batch_size, num_genes]
:param tcr: torch.Tensor shape=[batch_size, seq_len, num_seq_labels]
:param tcr_len: torch.LongTensor shape=[batch_size] indicating how long the real unpadded length is
:param conditional: torch.Tensor shape=[batch_size, n_cond] one-hot-encoded conditional covariates
:return: scRNA_pred, tcr_seq_pred
"""
alpha_seq = tcr[:, :tcr.shape[1]//2]
alpha_len = tcr_len[:, 0]
beta_seq = tcr[:, tcr.shape[1]//2:]
beta_len = tcr_len[:, 1]
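        # the alpha and beta chains are stored concatenated along the sequence dimension and split in half above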
h_beta = self.beta_encoder(beta_seq, beta_len) # shape=[batch_size, hdim//2]
h_alpha = self.alpha_encoder(alpha_seq, alpha_len) # shape=[batch_size, hdim//2]
if conditional is not None: # more efficient than doing two concatenations
cond_emb_vec = self.cond_emb(conditional)
if not self.use_rna:
if conditional is not None and self.cond_input: # more efficient than doing two concatenations
joint_feature = torch.cat([h_alpha, h_beta, cond_emb_vec], dim=-1) # shape=[batch_size, hdim+cond_dim]
else:
joint_feature = torch.cat([h_alpha, h_beta], dim=-1)
else:
h_rna = self.gene_encoder(rna) # shape=[batch_size, hdim]
if conditional is not None and self.cond_input:
joint_feature = torch.cat([h_rna, h_alpha, h_beta, cond_emb_vec], dim=-1)
else:
joint_feature = torch.cat([h_rna, h_alpha, h_beta], dim=-1)
z_ = self.shared_encoder(joint_feature) # shape=[batch_size, zdim*2]
mu, logvar = z_[:, :z_.shape[1]//2], z_[:, z_.shape[1]//2:] # mu.shape = logvar.shape = [batch_size, zdim]
z = self.reparameterize(mu, logvar) # shape=[batch_size, zdim]
if conditional is not None:
z_input = torch.cat([z, cond_emb_vec], dim=-1) # shape=[batch_size, zdim+cond_dim]
else:
z_input = z
joint_dec_feature = self.shared_decoder(z_input) # shape=[batch_size, hdim*2]
if not self.use_rna:
rna_pred = None
else:
rna_pred = self.gene_decoder(joint_dec_feature) # shape=[batch_size, num_genes]
alpha_seq_pred = self.alpha_decoder(joint_dec_feature, alpha_seq)
beta_seq_pred = self.beta_decoder(joint_dec_feature, beta_seq)
tcr_pred = torch.cat([alpha_seq_pred, beta_seq_pred], dim=1) # cat along sequence dim
return z, mu, logvar, rna_pred, tcr_pred
def reparameterize(self, mu, log_var):
"""
https://debuggercafe.com/getting-started-with-variational-autoencoder-using-pytorch/
:param mu: mean from the encoder's latent space
:param log_var: log variance from the encoder's latent space
"""
std = torch.exp(0.5 * log_var) # standard deviation
eps = torch.randn_like(std) # `randn_like` as we need the same size
z = mu + (eps * std) # sampling as if coming from the input space
return z
def predict_transcriptome(self, z_shared, conditional=None):
"""
        Predict the transcriptome connected to a shared latent space
:param z_shared: torch.tensor, shared latent representation
:param conditional:
:return: torch.tensor, transcriptome profile
"""
if conditional is not None: # more efficient than doing two concatenations
cond_emb_vec = self.cond_emb(conditional)
z_shared = torch.cat([z_shared, cond_emb_vec], dim=-1) # shape=[batch_size, zdim+cond_dim]
joint_dec_feature = self.shared_decoder(z_shared)
        if not self.use_rna:  # this class tracks RNA availability via self.use_rna (it has no scRNA_model_arch attribute)
raise ValueError('Trying to predict transcriptome with a model without rna')
else:
transcriptome_pred = self.gene_decoder(joint_dec_feature) # shape=[batch_size, num_genes]
return transcriptome_pred
def get_latent_from_z(self, z):
return z
class SeparateModel(VAEBaseModel):
def __init__(self,
adata,
params_architecture,
balanced_sampling='clonotype',
metadata=None,
conditional=None,
optimization_mode_params=None,
label_key=None,
device=None
):
super(SeparateModel, self).__init__(adata, params_architecture, balanced_sampling, metadata,
conditional, optimization_mode_params, label_key, device)
self.model_type = 'separate'
self.params_tcr['max_tcr_length'] = adata.obsm['alpha_seq'].shape[1]
self.params_tcr['num_seq_labels'] = len(self.aa_to_id)
if self.params_rna is not None:
self.params_rna['xdim'] = adata[0].X.shape[1]
num_conditional_labels = 0
cond_dim = 0
if self.conditional is not None:
if self.conditional in adata.obsm:
num_conditional_labels = adata.obsm[self.conditional].shape[1]
else:
num_conditional_labels = len(adata.obs[self.conditional].unique())
if 'c_embedding_dim' not in self.params_joint:
cond_dim = 20
else:
cond_dim = self.params_joint['c_embedding_dim']
self.params_joint['num_conditional_labels'] = num_conditional_labels
self.params_joint['cond_dim'] = cond_dim
self.params_joint['cond_input'] = conditional is not None
self.model = SeparateModelTorch(self.params_tcr, self.params_rna, self.params_joint)
def calculate_loss(self, rna_pred, rna, tcr_pred, tcr):
        # GRU and Transformer decoders do not predict the start token of the alpha and beta chains, hence the -2
if tcr_pred.shape[1] == tcr.shape[1] - 2:
mask = torch.ones_like(tcr).bool()
mask[:, [0, mask.shape[1] // 2]] = False
tcr_loss = self.loss_weights[1] * self.loss_function_tcr(tcr_pred.flatten(end_dim=1), tcr[mask].flatten())
else: # For CNN, as it predicts start token
tcr_loss = self.loss_weights[1] * self.loss_function_tcr(tcr_pred.flatten(end_dim=1), tcr.flatten())
rna_loss = torch.FloatTensor([0]).to(self.device)
if rna_pred is not None:
rna_loss = self.loss_weights[0] * self.loss_function_rna(rna_pred, rna)
return rna_loss, tcr_loss
def calculate_kld_loss(self, mu, logvar, epoch):
kld_loss = self.loss_function_kld(mu, logvar)
kld_loss *= self.loss_weights[2] * self.get_kl_annealing_factor(epoch)
z = mu # make z deterministic by using the mean
return kld_loss, z
| [
"torch.nn.Embedding",
"torch.cat",
"torch.FloatTensor",
"torch.randn_like",
"torch.ones_like",
"torch.exp",
"torch.randn"
] | 1.8.0 | SchubertLab/mvTCR | d815749e24650f69ef68054e0078d490af91b71d |
1.5 | import torch
import pandas as pd
from io import BytesIO
from subprocess import check_output
from . import writing
import time
def memory(device=0):
total_mem = torch.cuda.get_device_properties(f'cuda:{device}').total_memory
writing.max(f'gpu-memory/cache/{device}', torch.cuda.max_memory_cached(device)/total_mem)
torch.cuda.reset_max_memory_cached()
writing.max(f'gpu-memory/alloc/{device}', torch.cuda.max_memory_allocated(device)/total_mem)
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_max_memory_cached()
def dataframe():
"""Use `nvidia-smi --help-query-gpu` to get a list of query params"""
params = {
'device': 'index',
'compute': 'utilization.gpu', 'access': 'utilization.memory',
'memused': 'memory.used', 'memtotal': 'memory.total',
'fan': 'fan.speed', 'power': 'power.draw', 'temp': 'temperature.gpu'}
command = f"""nvidia-smi --format=csv,nounits,noheader --query-gpu={','.join(params.values())}"""
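    # nvidia-smi emits one CSV row per GPU; parse it into a DataFrame indexed by device id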
df = pd.read_csv(BytesIO(check_output(command, shell=True)), header=None)
df.columns = list(params.keys())
df = df.set_index('device')
df = df.apply(pd.to_numeric, errors='coerce')
return df
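# timestamp of the last vitals() query, used to throttle the relatively expensive nvidia-smi call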
_last = -1
def vitals(device=None, throttle=0):
# This is a fairly expensive op, so let's avoid doing it too often
global _last
if time.time() - _last < throttle:
return
_last = time.time()
df = dataframe()
if device is None:
pass
elif isinstance(device, int):
df = df.loc[[device]]
else:
df = df.loc[device]
fields = ['compute', 'access', 'fan', 'power', 'temp']
for (device, field), value in df[fields].stack().iteritems():
writing.mean(f'gpu/{field}/{device}', value)
for device in df.index:
        writing.mean(f'gpu/memory/{device}', 100*df.loc[device, 'memused']/df.loc[device, 'memtotal'])
| [
"torch.cuda.max_memory_cached",
"torch.cuda.get_device_properties",
"torch.cuda.reset_max_memory_allocated",
"torch.cuda.max_memory_allocated",
"torch.cuda.reset_max_memory_cached"
] | 1.5 | JulianKu/megastep | 21ac85510d03f20600d438618a02569c6f1e34e1 |
1.9 | #-*- coding:utf-8 -*-
#'''
# Created on 18-12-27 10:34 AM
#
# @Author: Greg Gao(laygin)
#'''
import os
import xml.etree.ElementTree as ET
import numpy as np
import cv2
from torch.utils.data import Dataset
import torch
from config import IMAGE_MEAN
from ctpn_utils import cal_rpn
def readxml(path):
gtboxes = []
imgfile = ''
xml = ET.parse(path)
for elem in xml.iter():
if 'filename' in elem.tag:
imgfile = elem.text
if 'object' in elem.tag:
for attr in list(elem):
if 'bndbox' in attr.tag:
xmin = int(round(float(attr.find('xmin').text)))
ymin = int(round(float(attr.find('ymin').text)))
xmax = int(round(float(attr.find('xmax').text)))
ymax = int(round(float(attr.find('ymax').text)))
gtboxes.append((xmin, ymin, xmax, ymax))
return np.array(gtboxes), imgfile
# for ctpn text detection
class VOCDataset(Dataset):
def __init__(self,
datadir,
labelsdir):
'''
:param datadir: image's directory
:param labelsdir: annotations' directory
'''
if not os.path.isdir(datadir):
raise Exception('[ERROR] {} is not a directory'.format(datadir))
if not os.path.isdir(labelsdir):
raise Exception('[ERROR] {} is not a directory'.format(labelsdir))
self.datadir = datadir
self.img_names = os.listdir(self.datadir)
self.labelsdir = labelsdir
def __len__(self):
return len(self.img_names)
def __getitem__(self, idx):
img_name = self.img_names[idx]
img_path = os.path.join(self.datadir, img_name)
print(img_path)
xml_path = os.path.join(self.labelsdir, img_name.replace('.jpg', '.xml'))
gtbox, _ = readxml(xml_path)
img = cv2.imread(img_path)
h, w, c = img.shape
        # randomly flip the image horizontally (and mirror the gt boxes)
if np.random.randint(2) == 1:
img = img[:, ::-1, :]
newx1 = w - gtbox[:, 2] - 1
newx2 = w - gtbox[:, 0] - 1
gtbox[:, 0] = newx1
gtbox[:, 2] = newx2
[cls, regr], _ = cal_rpn((h, w), (int(h / 16), int(w / 16)), 16, gtbox)
m_img = img - IMAGE_MEAN
regr = np.hstack([cls.reshape(cls.shape[0], 1), regr])
cls = np.expand_dims(cls, axis=0)
# transform to torch tensor
m_img = torch.from_numpy(m_img.transpose([2, 0, 1])).float()
cls = torch.from_numpy(cls).float()
regr = torch.from_numpy(regr).float()
return m_img, cls, regr
class ICDARDataset(Dataset):
def __init__(self,
datadir,
labelsdir):
'''
:param datadir: image's directory
:param labelsdir: annotations' directory
'''
if not os.path.isdir(datadir):
raise Exception('[ERROR] {} is not a directory'.format(datadir))
if not os.path.isdir(labelsdir):
raise Exception('[ERROR] {} is not a directory'.format(labelsdir))
self.datadir = datadir
self.img_names = os.listdir(self.datadir)
self.labelsdir = labelsdir
def __len__(self):
return len(self.img_names)
def box_transfer(self,coor_lists,rescale_fac = 1.0):
gtboxes = []
for coor_list in coor_lists:
coors_x = [int(coor_list[2*i]) for i in range(4)]
coors_y = [int(coor_list[2*i+1]) for i in range(4)]
xmin = min(coors_x)
xmax = max(coors_x)
ymin = min(coors_y)
ymax = max(coors_y)
if rescale_fac>1.0:
xmin = int(xmin / rescale_fac)
xmax = int(xmax / rescale_fac)
ymin = int(ymin / rescale_fac)
ymax = int(ymax / rescale_fac)
gtboxes.append((xmin, ymin, xmax, ymax))
return np.array(gtboxes)
def box_transfer_v2(self,coor_lists,rescale_fac = 1.0):
gtboxes = []
for coor_list in coor_lists:
coors_x = [int(coor_list[2 * i]) for i in range(4)]
coors_y = [int(coor_list[2 * i + 1]) for i in range(4)]
xmin = min(coors_x)
xmax = max(coors_x)
ymin = min(coors_y)
ymax = max(coors_y)
if rescale_fac > 1.0:
xmin = int(xmin / rescale_fac)
xmax = int(xmax / rescale_fac)
ymin = int(ymin / rescale_fac)
ymax = int(ymax / rescale_fac)
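            # CTPN regresses fixed-width proposals: split each ground-truth box into 16-pixel-wide slices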
prev = xmin
for i in range(xmin // 16 + 1, xmax // 16 + 1):
next = 16*i-0.5
gtboxes.append((prev, ymin, next, ymax))
prev = next
gtboxes.append((prev, ymin, xmax, ymax))
return np.array(gtboxes)
def parse_gtfile(self,gt_path,rescale_fac = 1.0):
coor_lists = list()
with open(gt_path) as f:
content = f.readlines()
for line in content:
coor_list = line.split(',')[:8]
if len(coor_list)==8:
coor_lists.append(coor_list)
return self.box_transfer_v2(coor_lists,rescale_fac)
def draw_boxes(self,img,cls,base_anchors,gt_box):
for i in range(len(cls)):
if cls[i]==1:
pt1 = (int(base_anchors[i][0]),int(base_anchors[i][1]))
pt2 = (int(base_anchors[i][2]),int(base_anchors[i][3]))
img = cv2.rectangle(img,pt1,pt2,(200,100,100))
for i in range(gt_box.shape[0]):
pt1 = (int(gt_box[i][0]),int(gt_box[i][1]))
pt2 = (int(gt_box[i][2]),int(gt_box[i][3]))
img = cv2.rectangle(img, pt1, pt2, (100, 200, 100))
return img
def __getitem__(self, idx):
img_name = self.img_names[idx]
img_path = os.path.join(self.datadir, img_name)
# print(img_path)
img = cv2.imread(img_path)
#####for read error, use default image#####
if img is None:
print(img_path)
with open('error_imgs.txt','a') as f:
f.write('{}\n'.format(img_path))
img_name = 'img_2647.jpg'
img_path = os.path.join(self.datadir, img_name)
img = cv2.imread(img_path)
#####for read error, use default image#####
h, w, c = img.shape
rescale_fac = max(h, w) / 1600
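        # downscale large images so that the longer side is at most 1600 pixels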
if rescale_fac>1.0:
h = int(h/rescale_fac)
w = int(w/rescale_fac)
img = cv2.resize(img,(w,h))
gt_path = os.path.join(self.labelsdir, 'gt_'+img_name.split('.')[0]+'.txt')
gtbox = self.parse_gtfile(gt_path,rescale_fac)
        # randomly flip the image horizontally (and mirror the gt boxes)
if np.random.randint(2) == 1:
img = img[:, ::-1, :]
newx1 = w - gtbox[:, 2] - 1
newx2 = w - gtbox[:, 0] - 1
gtbox[:, 0] = newx1
gtbox[:, 2] = newx2
[cls, regr], base_anchors = cal_rpn((h, w), (int(h / 16), int(w / 16)), 16, gtbox)
# debug_img = self.draw_boxes(img.copy(),cls,base_anchors,gtbox)
# cv2.imwrite('debug/{}'.format(img_name),debug_img)
m_img = img - IMAGE_MEAN
regr = np.hstack([cls.reshape(cls.shape[0], 1), regr])
cls = np.expand_dims(cls, axis=0)
# transform to torch tensor
m_img = torch.from_numpy(m_img.transpose([2, 0, 1])).float()
cls = torch.from_numpy(cls).float()
regr = torch.from_numpy(regr).float()
return m_img, cls, regr
if __name__ == '__main__':
xmin = 15
xmax = 95
for i in range(xmin//16+1,xmax//16+1):
        print(16*i-0.5)
| [
"torch.from_numpy"
] | 1.9.0 | edgardeng/machine-learning-pytorch | 24a060894f5226b5ef20cc311db72f1adc037548 |
1.1 | from distutils.version import LooseVersion
from functools import reduce
from itertools import permutations
from typing import Dict
from typing import Optional
from typing import Tuple
import torch
from torch_complex.tensor import ComplexTensor
from typeguard import check_argument_types
from espnet2.enh.informed_encoder.abs_informed_encoder import AbsInformedEncoder
from espnet2.enh.encoder.abs_encoder import AbsEncoder
from espnet2.enh.encoder.conv_encoder import ConvEncoder
from espnet2.enh.fusion.abs_fusion import AbsFusion
from espnet2.enh.separator.abs_separator import AbsSeparator
from espnet2.enh.decoder.abs_decoder import AbsDecoder
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet.nets.pytorch_backend.transformer.label_smoothing_loss import (
LabelSmoothingLoss, # noqa: H301
)
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask, make_non_pad_mask
from einops import rearrange
import copy
import logging
import pdb
is_torch_1_3_plus = LooseVersion(torch.__version__) >= LooseVersion("1.3.0")
ALL_LOSS_TYPES = (
# mse_loss(predicted_mask, target_label)
"mask_mse",
# mse_loss(enhanced_magnitude_spectrum, target_magnitude_spectrum)
"magnitude",
# mse_loss(enhanced_complex_spectrum, target_complex_spectrum)
"spectrum",
# log_mse_loss(enhanced_complex_spectrum, target_complex_spectrum)
"spectrum_log",
# si_snr(enhanced_waveform, target_waveform)
"si_snr",
)
EPS = torch.finfo(torch.get_default_dtype()).eps
class ESPnetEnhancementInformedModel(AbsESPnetModel):
"""Speech enhancement or separation Frontend model"""
def __init__(
self,
informed_encoder: AbsInformedEncoder,
fusion: AbsFusion,
encoder: AbsEncoder,
separator: AbsSeparator,
decoder: AbsDecoder,
stft_consistency: bool = False,
loss_type: str = "mask_mse",
mask_type: Optional[str] = None,
component_loss: bool = False,
informed_ali_mtl: float = 0.0,
informed_ali_mtl_lastn: int = 2,
informed_ali_mtl_sil_scale: float = 1,
disable_enh_loss: bool = False,
expand_informed: bool = False,
mask_before_fusion: bool = True,
detached_fusion: bool = False,
multi_grained: bool = False,
):
assert check_argument_types()
super().__init__()
self.informed_encoder = informed_encoder
self.encoder = encoder
self.fusion = fusion
self.separator = separator
self.decoder = decoder
self.num_spk = separator.num_spk
self.num_noise_type = getattr(self.separator, "num_noise_type", 1)
self.component_loss = component_loss
self.informed_ali_mtl = informed_ali_mtl
self.informed_ali_mtl_lastn = informed_ali_mtl_lastn
self.informed_ali_mtl_sil_scale = informed_ali_mtl_sil_scale
self.disable_enh_loss = disable_enh_loss
self.expand_informed = expand_informed
self.mask_before_fusion = mask_before_fusion
self.detached_fusion = detached_fusion
self.multi_grained = multi_grained
if loss_type != "si_snr" and isinstance(encoder, ConvEncoder):
raise TypeError(f"{loss_type} is not supported with {type(ConvEncoder)}")
# get mask type for TF-domain models (only used when loss_type="mask_*")
self.mask_type = mask_type.upper() if mask_type else None
# get loss type for model training
self.loss_type = loss_type
# whether to compute the TF-domain loss while enforcing STFT consistency
self.stft_consistency = stft_consistency
if stft_consistency and loss_type in ["mask_mse", "si_snr"]:
raise ValueError(
f"stft_consistency will not work when '{loss_type}' loss is used"
)
assert self.loss_type in ALL_LOSS_TYPES, self.loss_type
# for multi-channel signal
self.ref_channel = getattr(self.separator, "ref_channel", -1)
if self.informed_ali_mtl != 0:
self.criterion_align = LabelSmoothingLoss(
size=self.informed_encoder.input_size,
padding_idx=-1,
smoothing=0,
normalize_length=True,
scales={1:self.informed_ali_mtl_sil_scale},
)
def make_post_from_att(self, informed, informed_lengths, attn):
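        # project the attention weights onto the informed-token vocabulary via a one-hot matrix,
        # yielding a per-frame posterior over informed symbols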
bsz = informed.shape[0]
vocab_size = self.informed_encoder.input_size
M = torch.zeros(bsz, max(informed_lengths), vocab_size)
for b in range(bsz):
M[b,torch.arange(informed_lengths[b]),informed[b]] = 1
M = M.to(attn.device)
post = torch.bmm(attn, M)
return post
def make_ali_from_kaldi_ali(self, kaldi_ali, frame_len, frame_shift, real_lens):
assert (160 / frame_shift) == (160 // frame_shift), f"frame_shift {frame_shift} cannot be divided by 160"
repeats = 160 // frame_shift
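        # Kaldi alignments use a 10 ms (160-sample) frame shift; repeat each label to match the requested frame_shift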
ali_real = make_non_pad_mask(real_lens).type(real_lens.dtype) # sil = 1, padding = -1
# start from half of 15ms (offset the first frame to center), 16 samples per ms
start = round((7.5 * 16 - (frame_len - frame_shift) / 2) / frame_shift)
ali_real[:,start:kaldi_ali.shape[1] * repeats + start] = rearrange(kaldi_ali.unsqueeze(-1).repeat(1,1,repeats), 'b d r -> b (d r)')
ali_real = ali_real.to(kaldi_ali.device)
return ali_real
@staticmethod
def _create_mask_label(mix_spec, ref_spec, mask_type="IAM"):
"""Create mask label.
Args:
mix_spec: ComplexTensor(B, T, F)
ref_spec: List[ComplexTensor(B, T, F), ...]
mask_type: str
Returns:
labels: List[Tensor(B, T, F), ...] or List[ComplexTensor(B, T, F), ...]
"""
# Must be upper case
assert mask_type in [
"IBM",
"IRM",
"IAM",
"PSM",
"NPSM",
"PSM^2",
], f"mask type {mask_type} not supported"
mask_label = []
for r in ref_spec:
mask = None
if mask_type == "IBM":
flags = [abs(r) >= abs(n) for n in ref_spec]
mask = reduce(lambda x, y: x * y, flags)
mask = mask.int()
elif mask_type == "IRM":
# TODO(Wangyou): need to fix this,
                # as noise references are provided separately
mask = abs(r) / (sum(([abs(n) for n in ref_spec])) + EPS)
elif mask_type == "IAM":
mask = abs(r) / (abs(mix_spec) + EPS)
mask = mask.clamp(min=0, max=1)
elif mask_type == "PSM" or mask_type == "NPSM":
phase_r = r / (abs(r) + EPS)
phase_mix = mix_spec / (abs(mix_spec) + EPS)
# cos(a - b) = cos(a)*cos(b) + sin(a)*sin(b)
cos_theta = (
phase_r.real * phase_mix.real + phase_r.imag * phase_mix.imag
)
mask = (abs(r) / (abs(mix_spec) + EPS)) * cos_theta
mask = (
mask.clamp(min=0, max=1)
if mask_type == "NPSM"
else mask.clamp(min=-1, max=1)
)
elif mask_type == "PSM^2":
# This is for training beamforming masks
phase_r = r / (abs(r) + EPS)
phase_mix = mix_spec / (abs(mix_spec) + EPS)
# cos(a - b) = cos(a)*cos(b) + sin(a)*sin(b)
cos_theta = (
phase_r.real * phase_mix.real + phase_r.imag * phase_mix.imag
)
mask = (abs(r).pow(2) / (abs(mix_spec).pow(2) + EPS)) * cos_theta
mask = mask.clamp(min=-1, max=1)
assert mask is not None, f"mask type {mask_type} not supported"
mask_label.append(mask)
return mask_label
def forward(
self,
speech_mix: torch.Tensor,
informed: torch.Tensor,
speech_mix_lengths: torch.Tensor = None,
informed_lengths: torch.Tensor = None,
informed_ali_ref: torch.Tensor = None,
**kwargs,
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
"""Frontend + Encoder + Decoder + Calc loss
Args:
speech_mix: (Batch, samples) or (Batch, samples, channels)
speech_ref: (Batch, num_speaker, samples)
or (Batch, num_speaker, samples, channels)
            speech_mix_lengths: (Batch,), default None for chunk iterator,
because the chunk-iterator does not have the
speech_lengths returned. see in
espnet2/iterators/chunk_iter_factory.py
"""
# clean speech signal of each speaker
speech_ref = [
kwargs["speech_ref{}".format(spk + 1)] for spk in range(self.num_spk)
]
# (Batch, num_speaker, samples) or (Batch, num_speaker, samples, channels)
speech_ref = torch.stack(speech_ref, dim=1)
if "noise_ref1" in kwargs:
# noise signal (optional, required when using
# frontend models with beamformering)
noise_ref = [
kwargs["noise_ref{}".format(n + 1)] for n in range(self.num_noise_type)
]
# (Batch, num_noise_type, samples) or
# (Batch, num_noise_type, samples, channels)
noise_ref = torch.stack(noise_ref, dim=1)
else:
noise_ref = None
# dereverberated (noisy) signal
# (optional, only used for frontend models with WPE)
if "dereverb_ref1" in kwargs:
# noise signal (optional, required when using
# frontend models with beamformering)
dereverb_speech_ref = [
kwargs["dereverb_ref{}".format(n + 1)]
for n in range(self.num_spk)
if "dereverb_ref{}".format(n + 1) in kwargs
]
assert len(dereverb_speech_ref) in (1, self.num_spk), len(
dereverb_speech_ref
)
# (Batch, N, samples) or (Batch, N, samples, channels)
dereverb_speech_ref = torch.stack(dereverb_speech_ref, dim=1)
else:
dereverb_speech_ref = None
batch_size = speech_mix.shape[0]
speech_lengths = (
speech_mix_lengths
if speech_mix_lengths is not None
else torch.ones(batch_size).int().fill_(speech_mix.shape[1])
)
assert speech_lengths.dim() == 1, speech_lengths.shape
# Check that batch_size is unified
assert speech_mix.shape[0] == speech_ref.shape[0] == speech_lengths.shape[0], (
speech_mix.shape,
speech_ref.shape,
speech_lengths.shape,
)
# for data-parallel
speech_ref = speech_ref[:, :, : speech_lengths.max()]
speech_mix = speech_mix[:, : speech_lengths.max()]
loss, speech_pre, others, out_lengths, perm = self._compute_loss(
speech_mix,
informed,
speech_lengths,
informed_lengths,
speech_ref,
dereverb_speech_ref=dereverb_speech_ref,
noise_ref=noise_ref,
informed_ali_ref=informed_ali_ref
)
# raise RuntimeError("out of memory")
# add stats for logging
if self.loss_type != "si_snr":
if self.training:
si_snr = None
else:
speech_pre = [self.decoder(ps, speech_lengths)[0] for ps in speech_pre]
speech_ref = torch.unbind(speech_ref, dim=1)
if speech_ref[0].dim() == 3:
# For si_snr loss, only select one channel as the reference
speech_ref = [sr[..., self.ref_channel] for sr in speech_ref]
# compute si-snr loss
si_snr_loss, perm = self._permutation_loss(
speech_ref, speech_pre, self.si_snr_loss, perm=perm
)
si_snr = -si_snr_loss.detach()
stats = dict(
si_snr=si_snr,
loss=loss.detach(),
)
else:
stats = dict(si_snr=-loss.detach(), loss=loss.detach())
# informed align CE loss
if self.informed_ali_mtl > 0:
assert informed_ali_ref is not None, "informed align ref is not available"
assert isinstance(self.encoder, ConvEncoder), "informed align mtl support only ConvEncoder"
assert (160 / self.encoder.stride) == (160 // self.encoder.stride), f"encoder stride {self.encoder.stride} cannot be divided by 160"
repeats = 160 // self.encoder.stride
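            # Kaldi alignments use a 10 ms frame shift (160 samples at 16 kHz); repeat each label to the encoder frame rate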
speech_flens = (speech_mix_lengths - self.encoder.kernel_size) // self.encoder.stride + 1
informed_ali_ref_re = make_non_pad_mask(speech_flens).type(speech_flens.dtype) * 2 - 1 # sil = 1, padding = -1
# start from half of 15ms (offset the first frame to center), 16 samples per ms
start = round((7.5 * 16 - (self.encoder.kernel_size - self.encoder.stride) / 2) / self.encoder.stride)
informed_ali_ref_re[:,start:informed_ali_ref.shape[1] * repeats + start] = rearrange(informed_ali_ref.unsqueeze(-1).repeat(1,1,repeats), 'b d r -> b (d r)')
informed_ali_ref_re = informed_ali_ref_re.to(informed_ali_ref.device)
loss_align = 0
for idx in range(-1, -1 - self.informed_ali_mtl_lastn, -1):
post = self.make_post_from_att(informed, informed_lengths, self.fusion.encoders[idx].src_attn.attn[:,0,:,:])
loss_align += self.criterion_align(post, informed_ali_ref_re)
loss_align /= self.informed_ali_mtl_lastn
stats["loss_align"] = loss_align.detach()
if self.disable_enh_loss:
loss = loss_align
stats["loss"] = loss.detach()
del stats["si_snr"]
else:
loss += loss_align * self.informed_ali_mtl
stats["loss"] = loss.detach()
# force_gatherable: to-device and to-tensor if scalar for DataParallel
loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
return loss, stats, weight
def _compute_loss(
self,
speech_mix,
informed,
speech_lengths,
informed_lengths,
speech_ref,
dereverb_speech_ref=None,
noise_ref=None,
cal_loss=True,
informed_ali_ref=None
):
"""Compute loss according to self.loss_type.
Args:
speech_mix: (Batch, samples) or (Batch, samples, channels)
            speech_lengths: (Batch,), default None for chunk iterator,
because the chunk-iterator does not have the
speech_lengths returned. see in
espnet2/iterators/chunk_iter_factory.py
speech_ref: (Batch, num_speaker, samples)
or (Batch, num_speaker, samples, channels)
dereverb_speech_ref: (Batch, N, samples)
or (Batch, num_speaker, samples, channels)
noise_ref: (Batch, num_noise_type, samples)
or (Batch, num_speaker, samples, channels)
            cal_loss: whether to calculate enh loss, default is True
Returns:
loss: (torch.Tensor) speech enhancement loss
speech_pre: (List[torch.Tensor] or List[ComplexTensor])
enhanced speech or spectrum(s)
others: (OrderedDict) estimated masks or None
output_lengths: (Batch,)
perm: () best permutation
"""
# pdb.set_trace()
speech_feature_mix, speech_flens = self.encoder(speech_mix, speech_lengths)
if self.expand_informed:
informed = self.make_ali_from_kaldi_ali(informed_ali_ref, self.encoder.kernel_size, self.encoder.stride, speech_flens)
informed_lengths = speech_flens
informed_feature, informed_flens = self.informed_encoder(informed, informed_lengths)
if self.detached_fusion:
assert self.mask_before_fusion, "detached fusion must work together with mask_before_fusion"
detached_speech_feature_mix = speech_feature_mix.detach()
feature_mix, flens, _ = self.fusion(detached_speech_feature_mix, informed_feature, speech_flens, informed_flens)
else:
feature_mix, flens, _ = self.fusion(speech_feature_mix, informed_feature, speech_flens, informed_flens)
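        # when mask_before_fusion is set, the unfused speech features are also passed to the separator
        # (presumably so its estimated masks act on them rather than on the fused features)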
if self.mask_before_fusion:
feature_pre, flens, others = self.separator(feature_mix, flens, speech_feature_mix)
else:
feature_pre, flens, others = self.separator(feature_mix, flens)
if self.loss_type != "si_snr":
spectrum_mix = feature_mix
spectrum_pre = feature_pre
# predict separated speech and masks
if self.stft_consistency:
# pseudo STFT -> time-domain -> STFT (compute loss)
tmp_t_domain = [
self.decoder(sp, speech_lengths)[0] for sp in spectrum_pre
]
spectrum_pre = [
self.encoder(sp, speech_lengths)[0] for sp in tmp_t_domain
]
pass
if spectrum_pre is not None and not isinstance(
spectrum_pre[0], ComplexTensor
):
spectrum_pre = [
ComplexTensor(*torch.unbind(sp, dim=-1)) for sp in spectrum_pre
]
if not cal_loss:
loss, perm = None, None
return loss, spectrum_pre, others, flens, perm
# prepare reference speech and reference spectrum
speech_ref = torch.unbind(speech_ref, dim=1)
# List[ComplexTensor(Batch, T, F)] or List[ComplexTensor(Batch, T, C, F)]
spectrum_ref = [self.encoder(sr, speech_lengths)[0] for sr in speech_ref]
# compute TF masking loss
if self.loss_type == "magnitude":
# compute loss on magnitude spectrum
assert spectrum_pre is not None
magnitude_pre = [abs(ps + 1e-15) for ps in spectrum_pre]
if spectrum_ref[0].dim() > magnitude_pre[0].dim():
# only select one channel as the reference
magnitude_ref = [
abs(sr[..., self.ref_channel, :]) for sr in spectrum_ref
]
else:
magnitude_ref = [abs(sr) for sr in spectrum_ref]
tf_loss, perm = self._permutation_loss(
magnitude_ref, magnitude_pre, self.tf_mse_loss
)
elif self.loss_type.startswith("spectrum"):
# compute loss on complex spectrum
if self.loss_type == "spectrum":
loss_func = self.tf_mse_loss
elif self.loss_type == "spectrum_log":
loss_func = self.tf_log_mse_loss
else:
raise ValueError("Unsupported loss type: %s" % self.loss_type)
assert spectrum_pre is not None
if spectrum_ref[0].dim() > spectrum_pre[0].dim():
# only select one channel as the reference
spectrum_ref = [sr[..., self.ref_channel, :] for sr in spectrum_ref]
tf_loss, perm = self._permutation_loss(
spectrum_ref, spectrum_pre, loss_func
)
elif self.loss_type.startswith("mask"):
if self.loss_type == "mask_mse":
loss_func = self.tf_mse_loss
else:
raise ValueError("Unsupported loss type: %s" % self.loss_type)
assert others is not None
mask_pre_ = [
others["mask_spk{}".format(spk + 1)] for spk in range(self.num_spk)
]
# prepare ideal masks
mask_ref = self._create_mask_label(
spectrum_mix, spectrum_ref, mask_type=self.mask_type
)
# compute TF masking loss
tf_loss, perm = self._permutation_loss(mask_ref, mask_pre_, loss_func)
if "mask_dereverb1" in others:
if dereverb_speech_ref is None:
raise ValueError(
"No dereverberated reference for training!\n"
'Please specify "--use_dereverb_ref true" in run.sh'
)
mask_wpe_pre = [
others["mask_dereverb{}".format(spk + 1)]
for spk in range(self.num_spk)
if "mask_dereverb{}".format(spk + 1) in others
]
assert len(mask_wpe_pre) == dereverb_speech_ref.size(1), (
len(mask_wpe_pre),
dereverb_speech_ref.size(1),
)
dereverb_speech_ref = torch.unbind(dereverb_speech_ref, dim=1)
dereverb_spectrum_ref = [
self.encoder(dr, speech_lengths)[0]
for dr in dereverb_speech_ref
]
dereverb_mask_ref = self._create_mask_label(
spectrum_mix, dereverb_spectrum_ref, mask_type=self.mask_type
)
tf_dereverb_loss, perm_d = self._permutation_loss(
dereverb_mask_ref, mask_wpe_pre, loss_func
)
tf_loss = tf_loss + tf_dereverb_loss
if "mask_noise1" in others:
if noise_ref is None:
raise ValueError(
"No noise reference for training!\n"
'Please specify "--use_noise_ref true" in run.sh'
)
noise_ref = torch.unbind(noise_ref, dim=1)
noise_spectrum_ref = [
self.encoder(nr, speech_lengths)[0] for nr in noise_ref
]
noise_mask_ref = self._create_mask_label(
spectrum_mix, noise_spectrum_ref, mask_type=self.mask_type
)
mask_noise_pre = [
others["mask_noise{}".format(n + 1)]
for n in range(self.num_noise_type)
]
tf_noise_loss, perm_n = self._permutation_loss(
noise_mask_ref, mask_noise_pre, loss_func
)
tf_loss = tf_loss + tf_noise_loss
else:
raise ValueError("Unsupported loss type: %s" % self.loss_type)
loss = tf_loss
return loss, spectrum_pre, others, flens, perm
else:
speech_pre = [self.decoder(ps, speech_lengths)[0] for ps in feature_pre]
if not cal_loss:
loss, perm = None, None
return loss, speech_pre, None, speech_lengths, perm
# speech_pre: list[(batch, sample)]
assert speech_pre[0].dim() == 2, speech_pre[0].dim()
if speech_ref.dim() == 4:
# For si_snr loss of multi-channel input,
# only select one channel as the reference
speech_ref = speech_ref[..., self.ref_channel]
speech_ref = torch.unbind(speech_ref, dim=1)
# compute si-snr loss
si_snr_loss, perm = self._permutation_loss(
speech_ref, speech_pre, self.si_snr_loss_zeromean_multi_grained if (self.multi_grained and self.training) else self.si_snr_loss_zeromean
)
loss = si_snr_loss
return loss, speech_pre, None, speech_lengths, perm
@staticmethod
def tf_mse_loss(ref, inf):
"""time-frequency MSE loss.
Args:
ref: (Batch, T, F) or (Batch, T, C, F)
inf: (Batch, T, F) or (Batch, T, C, F)
Returns:
loss: (Batch,)
"""
assert ref.shape == inf.shape, (ref.shape, inf.shape)
if not is_torch_1_3_plus:
# in case of binary masks
ref = ref.type(inf.dtype)
diff = ref - inf
if isinstance(diff, ComplexTensor):
mseloss = diff.real ** 2 + diff.imag ** 2
else:
mseloss = diff ** 2
if ref.dim() == 3:
mseloss = mseloss.mean(dim=[1, 2])
elif ref.dim() == 4:
mseloss = mseloss.mean(dim=[1, 2, 3])
else:
raise ValueError(
"Invalid input shape: ref={}, inf={}".format(ref.shape, inf.shape)
)
return mseloss
@staticmethod
def tf_log_mse_loss(ref, inf):
"""time-frequency log-MSE loss.
Args:
ref: (Batch, T, F) or (Batch, T, C, F)
inf: (Batch, T, F) or (Batch, T, C, F)
Returns:
loss: (Batch,)
"""
assert ref.shape == inf.shape, (ref.shape, inf.shape)
if not is_torch_1_3_plus:
# in case of binary masks
ref = ref.type(inf.dtype)
diff = ref - inf
if isinstance(diff, ComplexTensor):
log_mse_loss = diff.real ** 2 + diff.imag ** 2
else:
log_mse_loss = diff ** 2
if ref.dim() == 3:
log_mse_loss = torch.log10(log_mse_loss.sum(dim=[1, 2])) * 10
elif ref.dim() == 4:
log_mse_loss = torch.log10(log_mse_loss.sum(dim=[1, 2, 3])) * 10
else:
raise ValueError(
"Invalid input shape: ref={}, inf={}".format(ref.shape, inf.shape)
)
return log_mse_loss
@staticmethod
def tf_l1_loss(ref, inf):
"""time-frequency L1 loss.
Args:
ref: (Batch, T, F) or (Batch, T, C, F)
inf: (Batch, T, F) or (Batch, T, C, F)
Returns:
loss: (Batch,)
"""
assert ref.shape == inf.shape, (ref.shape, inf.shape)
if not is_torch_1_3_plus:
# in case of binary masks
ref = ref.type(inf.dtype)
if isinstance(inf, ComplexTensor):
l1loss = abs(ref - inf + EPS)
else:
l1loss = abs(ref - inf)
if ref.dim() == 3:
l1loss = l1loss.mean(dim=[1, 2])
elif ref.dim() == 4:
l1loss = l1loss.mean(dim=[1, 2, 3])
else:
raise ValueError(
"Invalid input shape: ref={}, inf={}".format(ref.shape, inf.shape)
)
return l1loss
@staticmethod
def si_snr_loss(ref, inf):
"""SI-SNR loss
Args:
ref: (Batch, samples)
inf: (Batch, samples)
Returns:
loss: (Batch,)
"""
ref = ref / torch.norm(ref, p=2, dim=1, keepdim=True)
inf = inf / torch.norm(inf, p=2, dim=1, keepdim=True)
s_target = (ref * inf).sum(dim=1, keepdims=True) * ref
e_noise = inf - s_target
si_snr = 20 * (
torch.log10(torch.norm(s_target, p=2, dim=1).clamp(min=EPS))
- torch.log10(torch.norm(e_noise, p=2, dim=1).clamp(min=EPS))
)
return -si_snr
@staticmethod
def si_snr_loss_zeromean_multi_grained(ref, inf):
"""SI-SNR loss with zero-mean in pre-processing.
Args:
ref: (Batch, samples)
inf: (Batch, samples)
Returns:
loss: (Batch,)
"""
# logging.info("applying multi grained si snr")
assert ref.size() == inf.size()
B, T = ref.size()
base = 1000
pair_wise_si_snr = 0
cnt = 1
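        # accumulate SI-SNR over non-overlapping chunks at multiple scales (chunk size grows by 4x each level)
        # plus the full-length signal, then average over all evaluations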
while True:
if base > T:
break
for start in range(0, T, base):
end = start + base
pair_wise_si_snr += ESPnetEnhancementInformedModel.si_snr_loss_zeromean(ref[:, start:end], inf[:, start:end])
cnt += 1
base *= 4
pair_wise_si_snr += ESPnetEnhancementInformedModel.si_snr_loss_zeromean(ref, inf)
return pair_wise_si_snr / cnt
@staticmethod
def si_snr_loss_zeromean(ref, inf):
"""SI-SNR loss with zero-mean in pre-processing.
Args:
ref: (Batch, samples)
inf: (Batch, samples)
Returns:
loss: (Batch,)
"""
assert ref.size() == inf.size()
B, T = ref.size()
# mask padding position along T
# Step 1. Zero-mean norm
mean_target = torch.sum(ref, dim=1, keepdim=True) / T
mean_estimate = torch.sum(inf, dim=1, keepdim=True) / T
zero_mean_target = ref - mean_target
zero_mean_estimate = inf - mean_estimate
# Step 2. SI-SNR with order
# reshape to use broadcast
s_target = zero_mean_target # [B, T]
s_estimate = zero_mean_estimate # [B, T]
# s_target = <s', s>s / ||s||^2
pair_wise_dot = torch.sum(s_estimate * s_target, dim=1, keepdim=True) # [B, 1]
s_target_energy = torch.sum(s_target ** 2, dim=1, keepdim=True) + EPS # [B, 1]
pair_wise_proj = pair_wise_dot * s_target / s_target_energy # [B, T]
# e_noise = s' - s_target
e_noise = s_estimate - pair_wise_proj # [B, T]
# SI-SNR = 10 * log_10(||s_target||^2 / ||e_noise||^2)
pair_wise_si_snr = torch.sum(pair_wise_proj ** 2, dim=1) / (
torch.sum(e_noise ** 2, dim=1) + EPS
)
# print('pair_si_snr',pair_wise_si_snr[0,:])
pair_wise_si_snr = 10 * torch.log10(pair_wise_si_snr + EPS) # [B]
# print(pair_wise_si_snr)
return -1 * pair_wise_si_snr
@staticmethod
def _permutation_loss(ref, inf, criterion, perm=None):
"""The basic permutation loss function.
Args:
ref (List[torch.Tensor]): [(batch, ...), ...] x n_spk
inf (List[torch.Tensor]): [(batch, ...), ...]
criterion (function): Loss function
perm (torch.Tensor): specified permutation (batch, num_spk)
Returns:
loss (torch.Tensor): minimum loss with the best permutation (batch)
perm (torch.Tensor): permutation for inf (batch, num_spk)
e.g. tensor([[1, 0, 2], [0, 1, 2]])
"""
assert len(ref) == len(inf), (len(ref), len(inf))
num_spk = len(ref)
def pair_loss(permutation):
return sum(
[criterion(ref[s], inf[t]) for s, t in enumerate(permutation)]
) / len(permutation)
if perm is None:
device = ref[0].device
all_permutations = list(permutations(range(num_spk)))
losses = torch.stack([pair_loss(p) for p in all_permutations], dim=1)
loss, perm = torch.min(losses, dim=1)
perm = torch.index_select(
torch.tensor(all_permutations, device=device, dtype=torch.long),
0,
perm,
)
else:
loss = torch.tensor(
[
torch.tensor(
[
criterion(
ref[s][batch].unsqueeze(0), inf[t][batch].unsqueeze(0)
)
for s, t in enumerate(p)
]
).mean()
for batch, p in enumerate(perm)
]
)
return loss.mean(), perm
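    # Illustrative sketch for two speakers (shapes are assumptions): pair_loss is
    # evaluated for the permutations (0, 1) and (1, 0) and the smaller value is kept
    # per batch element.
    #   ref = [torch.randn(8, 16000), torch.randn(8, 16000)]
    #   inf = [torch.randn(8, 16000), torch.randn(8, 16000)]
    #   loss, perm = ESPnetEnhancementInformedModel._permutation_loss(
    #       ref, inf, ESPnetEnhancementInformedModel.si_snr_loss)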
def collect_feats(
self, speech_mix: torch.Tensor, speech_mix_lengths: torch.Tensor, **kwargs
) -> Dict[str, torch.Tensor]:
# for data-parallel
speech_mix = speech_mix[:, : speech_mix_lengths.max()]
feats, feats_lengths = speech_mix, speech_mix_lengths
return {"feats": feats, "feats_lengths": feats_lengths}
| [
"torch.get_default_dtype",
"torch.stack",
"torch.min",
"torch.arange",
"torch.unbind",
"torch.norm",
"torch.bmm",
"torch.log10",
"torch.ones",
"torch.tensor",
"torch.sum"
] | 1.1.0 | IceCreamWW/ESPnet-informed-se | 38471b7749b7df0fadeae500cf8a050ac66872d2 |
1.4 | import torch
import torch.nn as nn
import torch.nn.functional as F
from .BasePIFuNet import BasePIFuNet
from .SurfaceClassifier import SurfaceClassifier
from .DepthNormalizer import DepthNormalizer
from .HGFilters import *
from ..net_util import init_net
class HGPIFuNet(BasePIFuNet):
'''
HG PIFu network uses Hourglass stacks as the image filter.
It does the following:
1. Compute image feature stacks and store it in self.im_feat_list
self.im_feat_list[-1] is the last stack (output stack)
2. Calculate calibration
        3. If training, it indexes into every intermediate stack;
            if testing, it indexes into the last stack only.
4. Classification.
5. During training, error is calculated on all stacks.
'''
def __init__(self,
opt,
projection_mode='orthogonal',
error_term=nn.MSELoss(),
):
super(HGPIFuNet, self).__init__(
projection_mode=projection_mode,
error_term=error_term)
self.name = 'hgpifu'
self.opt = opt
self.num_views = self.opt.num_views
self.image_filter = HGFilter(opt)
self.surface_classifier = SurfaceClassifier(
filter_channels=self.opt.mlp_dim,
num_views=self.opt.num_views,
no_residual=self.opt.no_residual,
last_op=nn.Sigmoid())
self.normalizer = DepthNormalizer(opt)
# This is a list of [B x Feat_i x H x W] features
self.im_feat_list = []
self.tmpx = None
self.normx = None
self.intermediate_preds_list = []
init_net(self)
def filter(self, images):
'''
        Filter the input images and
        store all intermediate features.
:param images: [B, C, H, W] input images
'''
self.im_feat_list, self.tmpx, self.normx = self.image_filter(images)
# If it is not in training, only produce the last im_feat
if not self.training:
self.im_feat_list = [self.im_feat_list[-1]]
def query(self, points, calibs, transforms=None, labels=None):
'''
Given 3D points, query the network predictions for each point.
Image features should be pre-computed before this call.
store all intermediate features.
query() function may behave differently during training/testing.
:param points: [B, 3, N] world space coordinates of points
:param calibs: [B, 3, 4] calibration matrices for each image
:param transforms: Optional [B, 2, 3] image space coordinate transforms
:param labels: Optional [B, Res, N] gt labeling
:return: [B, Res, N] predictions for each point
'''
if labels is not None:
self.labels = labels
xyz = self.projection(points, calibs, transforms)
xy = xyz[:, :2, :]
z = xyz[:, 2:3, :]
in_img = (xy[:, 0] >= -1.0) & (xy[:, 0] <= 1.0) & (xy[:, 1] >= -1.0) & (xy[:, 1] <= 1.0)
z_feat = self.normalizer(z, calibs=calibs)
if self.opt.skip_hourglass:
tmpx_local_feature = self.index(self.tmpx, xy)
self.intermediate_preds_list = []
for im_feat in self.im_feat_list:
# [B, Feat_i + z, N]
im_feat = im_feat.reshape((-1, self.opt.temporalSize, 256, 128, 128)).reshape((-1, self.opt.temporalSize * 256, 128, 128))
point_local_feat_list = [self.index(im_feat, xy), z_feat]
if self.opt.skip_hourglass:
point_local_feat_list.append(tmpx_local_feature)
point_local_feat = torch.cat(point_local_feat_list, 1)
# out of image plane is always set to 0
pred = in_img[:,None].float() * self.surface_classifier(point_local_feat)
self.intermediate_preds_list.append(pred)
self.preds = self.intermediate_preds_list[-1]
def get_im_feat(self):
'''
Get the image filter
:return: [B, C_feat, H, W] image feature after filtering
'''
return self.im_feat_list[-1]
def get_error(self):
'''
Hourglass has its own intermediate supervision scheme
'''
error = 0
for preds in self.intermediate_preds_list:
error += self.error_term(preds, self.labels)
error /= len(self.intermediate_preds_list)
return error
def forward(self, images, points, calibs, transforms=None, labels=None):
# Get image feature
self.filter(images)
# Phase 2: point query
self.query(points=points, calibs=calibs, transforms=transforms, labels=labels)
# get the prediction
res = self.get_preds()
# get the error
error = self.get_error()
return res, error
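# Illustrative end-to-end sketch (shapes and the fields of `opt` are assumptions based
# on the docstrings above, not values taken from the original training script):
#   net = HGPIFuNet(opt)                      # opt provides mlp_dim, num_views, ...
#   images = torch.randn(B, 3, 512, 512)
#   points = torch.randn(B, 3, N)             # world-space query points
#   calibs = torch.randn(B, 3, 4)             # per-image calibration matrices
#   preds, err = net(images, points, calibs, labels=gt)  # gt: [B, Res, N] occupancy
#   # preds: [B, Res, N] per-point predictions, err: averaged intermediate-stack loss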
| [
"torch.cat",
"torch.nn.MSELoss",
"torch.nn.Sigmoid"
] | 1.4.0 | ayush94582/pifu_surreal | f370165481361991146fb80a0757be38a0763961 |
1.10 | from argparse import ArgumentParser
from typing import List
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_lightning.loggers import TensorBoardLogger
from torchmetrics import RetrievalHitRate
from lit_data import LitDataModule
from lit_model import LitModel
from ml100k import ML100KSequence
from utils import bpr_loss
class Caser(nn.Module):
def __init__(self, embedding_dims, num_users, num_items,
L=5, num_hfilters=16, num_vfilters=4,
dropout=0.05, **kwargs):
super().__init__()
self.P = nn.Embedding(num_users, embedding_dims)
self.Q = nn.Embedding(num_items, embedding_dims)
self.num_hfilters = num_hfilters
self.num_vfilters = num_vfilters
# Vertical convolution
self.conv_v = nn.Conv2d(1, num_vfilters, (L, 1))
# Horizontal convolutions
self.conv_h = nn.ModuleList([
nn.Sequential(
nn.Conv2d(1, num_hfilters, (h, embedding_dims)),
nn.ReLU(),
nn.AdaptiveMaxPool2d((1, 1)))
for h in range(1, L+1)])
# Fully-connected layer
self.fc = nn.Sequential(
nn.Dropout(dropout),
nn.Linear(
num_vfilters*embedding_dims + num_hfilters*L,
embedding_dims),
nn.ReLU())
self.Q_out = nn.Embedding(num_items, 2*embedding_dims)
self.b_out = nn.Embedding(num_items, 1)
def forward(self, user_id, seq, item_id):
item_emb = self.Q(seq).unsqueeze(1)
user_emb = self.P(user_id)
v = self.conv_v(item_emb)
h = torch.cat([filt(item_emb) for filt in self.conv_h], axis=-2)
x = self.fc(torch.cat([v.flatten(1), h.flatten(1)], -1))
x = torch.cat([x, user_emb], -1)
logit = (self.Q_out(item_id)*x).sum(-1) + self.b_out(item_id).squeeze()
return logit
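    # Shape walk-through for forward() above (B is an assumed batch size, L the sequence
    # length, d = embedding_dims):
    #   seq (B, L) -> item_emb (B, 1, L, d) after unsqueeze
    #   conv_v -> (B, num_vfilters, 1, d), flattened to (B, num_vfilters * d)
    #   conv_h -> L pooled maps of (B, num_hfilters, 1, 1), concatenated and flattened
    #   fc output concatenated with the user embedding -> x of shape (B, 2 * d),
    #   which is scored against Q_out(item_id) plus the item bias b_out(item_id).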
class LitCaser(pl.LightningModule):
def __init__(self, lr=0.002, hitrate_cutout=10, **kwargs):
super().__init__()
self.save_hyperparameters()
self.model = Caser(**kwargs)
self.lr = lr
self.hitrate = RetrievalHitRate(k=hitrate_cutout)
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), self.lr, weight_decay=1e-5)
def forward(self, user_id, seq, item_id):
return self.model(user_id, seq, item_id)
def training_step(self, batch, batch_idx):
user_id, seq, pos_item, neg_item = batch
pos_logit = self(user_id, seq, pos_item)
neg_logit = self(user_id, seq, neg_item)
loss = bpr_loss(pos_logit, neg_logit)
return loss
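    # bpr_loss is imported from utils; a standard BPR objective (an assumption about
    # that helper, stated only for context) is -log(sigmoid(pos_logit - neg_logit))
    # averaged over the batch, pushing observed items above sampled negatives.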
def validation_step(self, batch, batch_idx):
user_id, seq, item_id, is_pos = batch
logit = self(user_id, seq, item_id)
score = torch.sigmoid(logit).reshape(-1,)
self.hitrate.update(score, is_pos, user_id)
return
def training_epoch_end(self, outputs):
avg_loss = torch.stack([x["loss"] for x in outputs]).mean()
self.logger.experiment.add_scalar(
"train/loss", avg_loss, self.current_epoch)
def validation_epoch_end(self, outputs):
self.logger.experiment.add_scalar(
f"val/hit_rate@{self.hitrate.k}",
self.hitrate.compute(),
self.current_epoch)
self.hitrate.reset()
def main(args):
data = LitDataModule(
ML100KSequence(seq_len=args.seq_len),
batch_size=args.batch_size)
data.setup()
model = LitCaser(
num_users=data.num_users, num_items=data.num_items,
embedding_dims=args.embedding_dims,
seq_len=args.seq_len)
logger = TensorBoardLogger("lightning_logs",
name=f"Caser_{args.embedding_dims}_L{args.seq_len}")
trainer = pl.Trainer.from_argparse_args(args, logger=logger)
trainer.fit(model, data)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--embedding_dims", type=int, default=10)
parser.add_argument("--seq_len", type=int, default=5)
parser.add_argument("--batch_size", type=int, default=1024)
pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
main(args)
| [
"torch.nn.Linear",
"torch.sigmoid",
"torch.cat",
"torch.nn.Dropout",
"torch.stack",
"torch.nn.AdaptiveMaxPool2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.Embedding"
] | 1.10.2 | nntrongnghia/learn-recsys | 43505c2663255d10e900f4cb98553eb5058e0a09 |
1.10 | import os
from copy import deepcopy
from typing import Tuple
import numpy as np
import pandas as pd
from torch.utils.data import random_split
from lit_data import BaseDataset
def read_data_ml100k(data_dir="./ml-100k") -> pd.DataFrame:
names = ['user_id', 'item_id', 'rating', 'timestamp']
    data = pd.read_csv(os.path.join(data_dir, 'u.data'), sep='\t', names=names)
return data
class ML100K(BaseDataset):
def __init__(self, data_dir="./ml-100k", normalize_rating=False):
"""MovieLens 100K for Matrix Factorization
Each sample is a tuple of:
- user_id: int
- item_id: int
- rating: float
Parameters
----------
data_dir : str, optional
Path to dataset directory, by default "./ml-100k"
normalize_rating : bool, optional
If True, rating is normalized to (0..1), by default False
"""
self.normalize_rating = normalize_rating
self.data_dir = data_dir
self.df = read_data_ml100k(data_dir)
# set to zero-based index
self.df.user_id -= 1
self.df.item_id -= 1
if normalize_rating:
self.df.rating /= 5.0
self.num_users = self.df.user_id.unique().shape[0]
self.num_items = self.df.item_id.unique().shape[0]
self.user_id = self.df.user_id.values
self.item_id = self.df.item_id.values
self.rating = self.df.rating.values.astype(np.float32)
self.timestamp = self.df.timestamp
def split(self, train_ratio=0.8):
train_len = int(train_ratio*len(self))
test_len = len(self) - train_len
return random_split(self, [train_len, test_len])
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
return self.user_id[idx], self.item_id[idx], self.rating[idx]
class ML100KRatingMatrix(ML100K):
def __init__(self, data_dir="./ml-100k", user_based=False, normalize_rating=False):
"""MovieLens 100K for AutoRec
Each sample is a row/column of the rating matrix.
The rating matrix has shape (number of items, number of users).
Parameters
----------
data_dir : str, optional
            Path to dataset directory, by default "./ml-100k"
user_based : bool, optional
If True, extract columns of the rating matrix.
else, extract rows.
By default False
normalize_rating : bool, optional
If True, divide rating by 5, by default False
"""
super().__init__(data_dir)
self.normalize_rating = normalize_rating
self.user_based = user_based
self.rating_matrix = np.zeros(
(self.num_items, self.num_users), dtype=np.float32)
self.rating_matrix[[self.item_id, self.user_id]] = self.rating
if normalize_rating:
self.rating_matrix /= 5.0
def __len__(self):
if self.user_based:
return self.num_users
else:
return self.num_items
def __getitem__(self, idx):
if self.user_based:
return self.rating_matrix[:, idx]
else:
return self.rating_matrix[idx]
class ML100KPairWise(ML100K):
def __init__(self, data_dir="./ml-100k",
test_leave_out=1,
test_sample_size: int = None):
"""Pair Wise loader to train NeuMF model.
Samples are slightly different based on train/test mode.
In training mode:
- user_id: int
- item_id: int
Item id that user has interacted with
- neg_item: int
Item id that user hasn't interacted with while training
In testing mode:
- user_id: int
- item_id: int
Random item_id to be ranked by the model
- is_pos: bool
If True, this item is a positive item
that user has interacted with in groundtruth data.
Parameters
----------
data_dir : str, optional
Path to dataset directory, by default "./ml-100k"
test_leave_out : int, optional
Leave out how many items per user for testing
By default 1
test_sample_size : int, optional
It is time-consuming to rank all items for every user during
            evaluation, so we can randomly choose a subset of items to rank.
If None, rank all items.
By default None
"""
super().__init__(data_dir)
self.set_all_item_ids = set(np.unique(self.item_id))
self.test_leave_out = test_leave_out
self.test_sample_size = test_sample_size
# general
self.train = None
self.has_setup = False
# Split Dataframe
self.split_dataframe()
self.build_candidates()
def split_dataframe(self):
"""Split ML100K dataframe with the strategy leave-n-out
with timestamp order.
"""
user_group = self.df.groupby("user_id", sort=False)
train_df = []
test_df = []
for user_id, user_df in user_group:
user_df = user_df.sort_values("timestamp")
train_df.append(user_df[:-self.test_leave_out])
test_df.append(user_df[-self.test_leave_out:])
self.train_df = pd.concat(train_df)
self.test_df = pd.concat(test_df)
def build_candidates(self):
# Train
self.observed_items_per_user_in_train = {
int(user_id): user_df.item_id.values
for user_id, user_df in self.train_df.groupby("user_id", sort=False)
}
self.unobserved_items_per_user_in_train = {
user_id: np.array(
list(self.set_all_item_ids - set(observed_items)))
for user_id, observed_items in self.observed_items_per_user_in_train.items()
}
# Test
self.gt_pos_items_per_user_in_test = {
int(user_id): user_df[-self.test_leave_out:].item_id.values
for user_id, user_df in self.test_df.groupby("user_id", sort=False)
}
def split(self, *args, **kwargs):
# Train split
train_split = deepcopy(self)
train_split.user_id = self.train_df.user_id.values
train_split.item_id = self.train_df.item_id.values
train_split.train = True
train_split.has_setup = True
# Test split
test_split = deepcopy(self)
test_split.user_id = []
test_split.item_id = []
for user_id, items in self.unobserved_items_per_user_in_train.items():
if self.test_sample_size is None:
sample_items = items
elif isinstance(self.test_sample_size, int):
sample_items = np.random.choice(items, self.test_sample_size)
else:
raise TypeError("self.test_sample_size should be int")
sample_items = np.concatenate(
[test_split.gt_pos_items_per_user_in_test[user_id],
sample_items])
sample_items = np.unique(sample_items)
test_split.user_id += [user_id]*len(sample_items)
test_split.item_id.append(sample_items)
test_split.user_id = np.array(test_split.user_id)
test_split.item_id = np.concatenate(test_split.item_id)
test_split.train = False
test_split.has_setup = True
return train_split, test_split
def __len__(self):
return len(self.user_id)
def __getitem__(self, idx):
        assert self.has_setup, "Must run self.split() first"
if self.train:
user_id = self.user_id[idx]
pos_item = self.item_id[idx]
neg_item = np.random.choice(
self.unobserved_items_per_user_in_train[int(user_id)])
return user_id, pos_item, neg_item
else:
user_id = self.user_id[idx]
item_id = self.item_id[idx]
is_pos = item_id in self.gt_pos_items_per_user_in_test[user_id]
return user_id, item_id, is_pos
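    # Illustrative samples (ids are made up): in training mode __getitem__ yields
    #   (user_id, pos_item, neg_item), e.g. (12, 305, 877)
    # and in testing mode it yields
    #   (user_id, item_id, is_pos), e.g. (12, 305, True)
    # matching the tuple layouts documented in the class docstring.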
class ML100KSequence(ML100KPairWise):
def __init__(self, data_dir="./ml-100k",
test_leave_out=1,
test_sample_size=100,
seq_len=5):
"""Sequence data to train Caser model
Similarly to Pair Wise dataset, the sample depends on train/test mode.
In training mode:
- user_id: int
- seq: List[int]
Sequence of last N item ids that user has interacted with.
- target_item: int
Target item id that user will interact with after the sequence
- neg_item: int
                Item id that the user has not interacted with during training
In testing mode:
- user_id: int
- seq: List[int]
Sequence of last N item ids that user has interacted with.
- item_id: int
Random item_id to be ranked by the model
- is_pos: bool
If True, this item is a positive item
that user has interacted with in groundtruth data.
Parameters
----------
data_dir : str, optional
Path to dataset directory, by default "./ml-100k"
test_leave_out : int, optional
Leave out how many items per user for testing
By default 1
test_sample_size : int, optional
It is time-consuming to rank all items for every user during
            evaluation, so we can randomly choose a subset of items to rank.
If None, rank all items.
By default None
seq_len : int, optional
Length of sequence of item ids, by default 5
"""
self.seq_len = seq_len
super().__init__(data_dir, test_leave_out, test_sample_size)
self.getitem_df = None
def split_dataframe(self):
user_group = self.df.groupby("user_id", sort=False)
train_df = []
test_df = []
for user_id, user_df in user_group:
user_df = user_df.sort_values("timestamp")
train_df.append(user_df[:-self.test_leave_out])
test_df.append(user_df[-(self.test_leave_out+self.seq_len):])
self.train_df = pd.concat(train_df)
self.test_df = pd.concat(test_df)
def split(self, *args, **kwargs):
# Train
train_split = deepcopy(self)
df = []
for _, user_df in self.train_df.groupby("user_id", sort=False):
user_df = user_df.sort_values("timestamp").reset_index()
user_id = user_df.user_id[:-self.seq_len].values
target = user_df.item_id[self.seq_len:].values
seq = [
user_df.item_id[i:i+self.seq_len].values
for i in range(len(user_df) - self.seq_len)]
df.append(
pd.DataFrame({
"user_id": user_id,
"seq": seq,
"target_item": target}))
train_split.getitem_df = pd.concat(df).reset_index()
train_split.train = True
# Test
test_split = deepcopy(self)
df = []
for uid, user_df in self.test_df.groupby("user_id", sort=False):
user_df = user_df.sort_values("timestamp").reset_index()
user_id = user_df.user_id[:-self.seq_len].values
seq = [
user_df.item_id[i:i+self.seq_len].values
for i in range(len(user_df) - self.seq_len)]
target_per_seq = user_df.item_id[self.seq_len:].values
unobserved_item_id = np.concatenate([
np.random.choice(
self.unobserved_items_per_user_in_train[uid],
self.test_sample_size,
replace=self.test_sample_size > self.unobserved_items_per_user_in_train[uid].shape[0]),
])
item_id_per_seq = [
np.unique(np.append(unobserved_item_id, target))
for target in target_per_seq
]
user_id = np.concatenate([
np.repeat(u, len(item_id))
for u, item_id in zip(user_id, item_id_per_seq)
])
seq = np.concatenate([
np.repeat(s.reshape(1, -1), len(item_id), 0)
for s, item_id in zip(seq, item_id_per_seq)
])
item_id = np.concatenate(item_id_per_seq)
is_pos = np.isin(item_id, target_per_seq)
df.append(
pd.DataFrame({
"user_id": user_id,
"seq": list(seq),
"item_id": item_id,
"is_pos": is_pos}))
test_split.getitem_df = pd.concat(df).reset_index()
test_split.train = False
return train_split, test_split
def __len__(self):
assert self.getitem_df is not None
return len(self.getitem_df)
def __getitem__(self, idx):
assert self.getitem_df is not None
row = self.getitem_df.iloc[idx]
if self.train:
neg_item = np.random.choice(
self.unobserved_items_per_user_in_train[int(row.user_id)])
return row.user_id, row.seq, row.target_item, neg_item
else:
return row.user_id, row.seq, row.item_id, row.is_pos
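    # Illustrative samples (ids are made up): with seq_len=5, training mode yields
    #   (user_id, seq, target_item, neg_item), e.g. (12, [3, 18, 305, 7, 42], 99, 877)
    # while testing mode yields
    #   (user_id, seq, item_id, is_pos), e.g. (12, [3, 18, 305, 7, 42], 99, True)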
| [
"torch.utils.data.random_split"
] | 1.10.2 | nntrongnghia/learn-recsys | 43505c2663255d10e900f4cb98553eb5058e0a09 |
1.4 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Implementation of ResNeXt (https://arxiv.org/pdf/1611.05431.pdf)
"""
import copy
import math
from typing import Any, Dict, List, Optional, Tuple, Union
import torch.nn as nn
from classy_vision.generic.util import is_pos_int
from . import register_model
from .classy_model import ClassyModel
from .squeeze_and_excitation_layer import SqueezeAndExcitationLayer
# global setting for in-place ReLU:
INPLACE = True
def conv3x3(in_planes, out_planes, stride=1, groups=1):
"""helper function for constructing 3x3 grouped convolution"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
groups=groups,
bias=False,
)
def conv1x1(in_planes, out_planes, stride=1):
"""helper function for constructing 1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class GenericLayer(nn.Module):
"""
Parent class for 2-layer (BasicLayer) and 3-layer (BottleneckLayer)
bottleneck layer class
"""
def __init__(
self,
convolutional_block,
in_planes,
out_planes,
stride=1,
mid_planes_and_cardinality=None,
reduction=4,
final_bn_relu=True,
use_se=False,
se_reduction_ratio=16,
):
# assertions on inputs:
assert is_pos_int(in_planes) and is_pos_int(out_planes)
assert is_pos_int(stride) and is_pos_int(reduction)
# set object fields:
super(GenericLayer, self).__init__()
self.convolutional_block = convolutional_block
self.final_bn_relu = final_bn_relu
# final batchnorm and relu layer:
if final_bn_relu:
self.bn = nn.BatchNorm2d(out_planes)
self.relu = nn.ReLU(inplace=INPLACE)
# define down-sampling layer (if direct residual impossible):
self.downsample = None
if stride != 1 or in_planes != out_planes:
self.downsample = nn.Sequential(
conv1x1(in_planes, out_planes, stride=stride),
nn.BatchNorm2d(out_planes),
)
self.se = (
SqueezeAndExcitationLayer(out_planes, reduction_ratio=se_reduction_ratio)
if use_se
else None
)
def forward(self, x):
# if required, perform downsampling along shortcut connection:
if self.downsample is None:
residual = x
else:
residual = self.downsample(x)
# forward pass through convolutional block:
out = self.convolutional_block(x)
if self.final_bn_relu:
out = self.bn(out)
if self.se is not None:
out = self.se(out)
        # add residual connection, perform relu + batchnorm, and return result:
out += residual
if self.final_bn_relu:
out = self.relu(out)
return out
class BasicLayer(GenericLayer):
"""
ResNeXt layer with `in_planes` input planes and `out_planes`
output planes.
"""
def __init__(
self,
in_planes,
out_planes,
stride=1,
mid_planes_and_cardinality=None,
reduction=4,
final_bn_relu=True,
use_se=False,
se_reduction_ratio=16,
):
# assertions on inputs:
assert is_pos_int(in_planes) and is_pos_int(out_planes)
assert is_pos_int(stride) and is_pos_int(reduction)
# define convolutional block:
convolutional_block = nn.Sequential(
conv3x3(in_planes, out_planes, stride=stride),
nn.BatchNorm2d(out_planes),
nn.ReLU(inplace=INPLACE),
conv3x3(out_planes, out_planes),
)
# call constructor of generic layer:
super().__init__(
convolutional_block,
in_planes,
out_planes,
stride=stride,
reduction=reduction,
final_bn_relu=final_bn_relu,
use_se=use_se,
se_reduction_ratio=se_reduction_ratio,
)
class BottleneckLayer(GenericLayer):
"""
ResNeXt bottleneck layer with `in_planes` input planes, `out_planes`
output planes, and a bottleneck `reduction`.
"""
def __init__(
self,
in_planes,
out_planes,
stride=1,
mid_planes_and_cardinality=None,
reduction=4,
final_bn_relu=True,
use_se=False,
se_reduction_ratio=16,
):
# assertions on inputs:
assert is_pos_int(in_planes) and is_pos_int(out_planes)
assert is_pos_int(stride) and is_pos_int(reduction)
# define convolutional layers:
bottleneck_planes = int(math.ceil(out_planes / reduction))
cardinality = 1
if mid_planes_and_cardinality is not None:
mid_planes, cardinality = mid_planes_and_cardinality
bottleneck_planes = mid_planes * cardinality
convolutional_block = nn.Sequential(
conv1x1(in_planes, bottleneck_planes),
nn.BatchNorm2d(bottleneck_planes),
nn.ReLU(inplace=INPLACE),
conv3x3(
bottleneck_planes, bottleneck_planes, stride=stride, groups=cardinality
),
nn.BatchNorm2d(bottleneck_planes),
nn.ReLU(inplace=INPLACE),
conv1x1(bottleneck_planes, out_planes),
)
# call constructor of generic layer:
super(BottleneckLayer, self).__init__(
convolutional_block,
in_planes,
out_planes,
stride=stride,
reduction=reduction,
final_bn_relu=final_bn_relu,
use_se=use_se,
se_reduction_ratio=se_reduction_ratio,
)
class SmallInputInitialBlock(nn.Module):
"""
ResNeXt initial block for small input with `in_planes` input planes
"""
def __init__(self, init_planes):
super().__init__()
self._module = nn.Sequential(
conv3x3(3, init_planes, stride=1),
nn.BatchNorm2d(init_planes),
nn.ReLU(inplace=INPLACE),
)
def forward(self, x):
return self._module(x)
class InitialBlock(nn.Module):
"""
ResNeXt initial block with `in_planes` input planes
"""
def __init__(self, init_planes):
super().__init__()
self._module = nn.Sequential(
nn.Conv2d(3, init_planes, kernel_size=7, stride=2, padding=3, bias=False),
nn.BatchNorm2d(init_planes),
nn.ReLU(inplace=INPLACE),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
)
def forward(self, x):
return self._module(x)
@register_model("resnext")
class ResNeXt(ClassyModel):
def __init__(
self,
num_blocks,
init_planes: int = 64,
reduction: int = 4,
small_input: bool = False,
zero_init_bn_residuals: bool = False,
base_width_and_cardinality: Optional[Union[Tuple, List]] = None,
basic_layer: bool = False,
final_bn_relu: bool = True,
bn_weight_decay: Optional[bool] = False,
use_se: bool = False,
se_reduction_ratio: int = 16,
):
"""
Implementation of `ResNeXt <https://arxiv.org/pdf/1611.05431.pdf>`_.
Args:
small_input: set to `True` for 32x32 sized image inputs.
final_bn_relu: set to `False` to exclude the final batchnorm and
ReLU layers. These settings are useful when training Siamese
networks.
use_se: Enable squeeze and excitation
se_reduction_ratio: The reduction ratio to apply in the excitation
stage. Only used if `use_se` is `True`.
"""
super().__init__()
# assertions on inputs:
assert type(num_blocks) == list
assert all(is_pos_int(n) for n in num_blocks)
assert is_pos_int(init_planes) and is_pos_int(reduction)
assert type(small_input) == bool
assert type(bn_weight_decay) == bool
assert (
type(zero_init_bn_residuals) == bool
), "zero_init_bn_residuals must be a boolean, set to true if gamma of last\
BN of residual block should be initialized to 0.0, false for 1.0"
assert base_width_and_cardinality is None or (
isinstance(base_width_and_cardinality, (tuple, list))
and len(base_width_and_cardinality) == 2
and is_pos_int(base_width_and_cardinality[0])
and is_pos_int(base_width_and_cardinality[1])
)
assert isinstance(use_se, bool), "use_se has to be a boolean"
# Chooses whether to apply weight decay to batch norm
# parameters. This improves results in some situations,
# e.g. ResNeXt models trained / evaluated using the Imagenet
# dataset, but can cause worse performance in other scenarios
self.bn_weight_decay = bn_weight_decay
# initial convolutional block:
self.num_blocks = num_blocks
self.small_input = small_input
self._make_initial_block(small_input, init_planes, basic_layer)
# compute number of planes at each spatial resolution:
out_planes = [init_planes * 2 ** i * reduction for i in range(len(num_blocks))]
in_planes = [init_planes] + out_planes[:-1]
# create subnetworks for each spatial resolution:
blocks = []
for idx in range(len(out_planes)):
mid_planes_and_cardinality = None
if base_width_and_cardinality is not None:
w, c = base_width_and_cardinality
mid_planes_and_cardinality = (w * 2 ** idx, c)
new_block = self._make_resolution_block(
in_planes[idx],
out_planes[idx],
idx,
num_blocks[idx], # num layers
stride=1 if idx == 0 else 2,
mid_planes_and_cardinality=mid_planes_and_cardinality,
reduction=reduction,
final_bn_relu=final_bn_relu or (idx != (len(out_planes) - 1)),
use_se=use_se,
se_reduction_ratio=se_reduction_ratio,
)
blocks.append(nn.Sequential(*new_block))
self.blocks = nn.Sequential(*blocks)
self.out_planes = out_planes[-1]
self._num_classes = out_planes
# initialize weights:
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Init BatchNorm gamma to 0.0 for last BN layer, it gets 0.2-0.3% higher
# final val top1 for larger batch sizes.
if zero_init_bn_residuals:
for m in self.modules():
if isinstance(m, GenericLayer):
if hasattr(m, "bn"):
nn.init.constant_(m.bn.weight, 0)
def _make_initial_block(self, small_input, init_planes, basic_layer):
if small_input:
self.initial_block = SmallInputInitialBlock(init_planes)
self.layer_type = BasicLayer
else:
self.initial_block = InitialBlock(init_planes)
self.layer_type = BasicLayer if basic_layer else BottleneckLayer
# helper function that creates ResNet blocks at single spatial resolution:
def _make_resolution_block(
self,
in_planes,
out_planes,
resolution_idx,
num_blocks,
stride=1,
mid_planes_and_cardinality=None,
reduction=4,
final_bn_relu=True,
use_se=False,
se_reduction_ratio=16,
):
# add the desired number of residual blocks:
blocks = []
for idx in range(num_blocks):
blocks.append(
self.build_attachable_block(
"block{}-{}".format(resolution_idx, idx),
self.layer_type(
in_planes if idx == 0 else out_planes,
out_planes,
stride=stride if idx == 0 else 1, # only first block has stride
mid_planes_and_cardinality=mid_planes_and_cardinality,
reduction=reduction,
final_bn_relu=final_bn_relu or (idx != (num_blocks - 1)),
use_se=use_se,
se_reduction_ratio=se_reduction_ratio,
),
)
)
return blocks
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "ResNeXt":
"""Instantiates a ResNeXt from a configuration.
Args:
config: A configuration for a ResNeXt.
See :func:`__init__` for parameters expected in the config.
Returns:
A ResNeXt instance.
"""
assert "num_blocks" in config
config = {
"num_blocks": config["num_blocks"],
"init_planes": config.get("init_planes", 64),
"reduction": config.get("reduction", 4),
"base_width_and_cardinality": config.get("base_width_and_cardinality"),
"small_input": config.get("small_input", False),
"basic_layer": config.get("basic_layer", False),
"final_bn_relu": config.get("final_bn_relu", True),
"zero_init_bn_residuals": config.get("zero_init_bn_residuals", False),
"bn_weight_decay": config.get("bn_weight_decay", False),
"use_se": config.get("use_se", False),
"se_reduction_ratio": config.get("se_reduction_ratio", 16),
}
return cls(**config)
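    # Illustrative config sketch for from_config (the values are assumptions roughly
    # matching a ResNeXt-50 32x4d; only "num_blocks" is mandatory):
    #   config = {
    #       "num_blocks": [3, 4, 6, 3],
    #       "base_width_and_cardinality": [4, 32],
    #       "zero_init_bn_residuals": True,
    #   }
    #   model = ResNeXt.from_config(config)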
# forward pass in residual network:
def forward(self, x):
# initial convolutional block:
out = self.initial_block(x)
# evaluate all residual blocks:
# TODO: (kaizh) T43794289 exit early if there is no block that has heads
self.blocks(out)
# By default the classification layer is implemented as one head on top
# of the last block. The head is automatically computed right after the
# last block.
head_outputs = self.execute_heads()
if len(head_outputs) == 0:
raise Exception("Expecting at least one head that generates output")
elif len(head_outputs) == 1:
return list(head_outputs.values())[0]
else:
return head_outputs
def get_optimizer_params(self):
return super().get_optimizer_params(bn_weight_decay=self.bn_weight_decay)
@property
def input_shape(self):
if self.small_input:
return (3, 32, 32)
else:
return (3, 224, 224)
@property
def output_shape(self):
return (1, self._num_classes)
@property
def model_depth(self):
return sum(self.num_blocks)
class _ResNeXt(ResNeXt):
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "ResNeXt":
config = copy.deepcopy(config)
config.pop("name")
if "heads" in config:
config.pop("heads")
return cls(**config)
@register_model("resnet18")
class ResNet18(_ResNeXt):
def __init__(self, **kwargs):
super().__init__(
num_blocks=[2, 2, 2, 2],
basic_layer=True,
zero_init_bn_residuals=True,
**kwargs,
)
@register_model("resnet34")
class ResNet34(ResNeXt):
def __init__(self, **kwargs):
super().__init__(
num_blocks=[3, 4, 6, 3],
basic_layer=True,
zero_init_bn_residuals=True,
**kwargs,
)
@register_model("resnet50")
class ResNet50(_ResNeXt):
def __init__(self, **kwargs):
super().__init__(
num_blocks=[3, 4, 6, 3],
basic_layer=False,
zero_init_bn_residuals=True,
**kwargs,
)
@register_model("resnet101")
class ResNet101(_ResNeXt):
def __init__(self, **kwargs):
super().__init__(
num_blocks=[3, 4, 23, 3],
basic_layer=False,
zero_init_bn_residuals=True,
**kwargs,
)
@register_model("resnet152")
class ResNet152(_ResNeXt):
def __init__(self, **kwargs):
super().__init__(
num_blocks=[3, 8, 36, 3],
basic_layer=False,
zero_init_bn_residuals=True,
**kwargs,
)
# Note, the ResNeXt models all have weight decay enabled for the batch
# norm parameters. We have found empirically that this gives better
# results when training on ImageNet (~0.5pp of top-1 acc) and brings
# our results on track with reported ImageNet results...but for
# training on other datasets, we have observed losses in accuracy (for
# example, the dataset used in https://arxiv.org/abs/1805.00932).
@register_model("resnext50_32x4d")
class ResNeXt50(_ResNeXt):
def __init__(self, **kwargs):
super().__init__(
num_blocks=[3, 4, 6, 3],
basic_layer=False,
zero_init_bn_residuals=True,
base_width_and_cardinality=(4, 32),
bn_weight_decay=True,
**kwargs,
)
@register_model("resnext101_32x4d")
class ResNeXt101(_ResNeXt):
def __init__(self, **kwargs):
super().__init__(
num_blocks=[3, 4, 23, 3],
basic_layer=False,
zero_init_bn_residuals=True,
base_width_and_cardinality=(4, 32),
bn_weight_decay=True,
**kwargs,
)
@register_model("resnext152_32x4d")
class ResNeXt152(_ResNeXt):
def __init__(self, **kwargs):
super().__init__(
num_blocks=[3, 8, 36, 3],
basic_layer=False,
zero_init_bn_residuals=True,
base_width_and_cardinality=(4, 32),
bn_weight_decay=True,
**kwargs,
)
| [
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.nn.BatchNorm2d",
"torch.nn.init.kaiming_normal_",
"torch.nn.ReLU",
"torch.nn.Conv2d"
] | 1.4 | dendisuhubdy/ClassyVision | c7f8de4615181b5a14dd5ec44fa72bebb790e886 |
1.7 | from load_data import Data
import numpy as np
import torch
import time
from collections import defaultdict
from model import *
from torch.optim.lr_scheduler import ExponentialLR
import argparse
from tqdm import tqdm
import os
from prettytable import PrettyTable
class Experiment:
def __init__(self, learning_rate=0.0005, ent_vec_dim=200, rel_vec_dim=200,
num_iterations=500, batch_size=128, decay_rate=0., cuda=False,
input_dropout=0.3, hidden_dropout1=0.4, hidden_dropout2=0.5,
label_smoothing=0., outfile='tucker.model', valid_steps=1, loss_type='BCE', do_batch_norm=1,
dataset='', model='Rotat3', l3_reg = 0.0, load_from = ''):
self.dataset = dataset
self.learning_rate = learning_rate
self.ent_vec_dim = ent_vec_dim
self.rel_vec_dim = rel_vec_dim
self.num_iterations = num_iterations
self.batch_size = batch_size
self.decay_rate = decay_rate
self.label_smoothing = label_smoothing
self.cuda = cuda
self.outfile = outfile
self.valid_steps = valid_steps
self.model = model
self.l3_reg = l3_reg
self.loss_type = loss_type
self.load_from = load_from
if do_batch_norm == 1:
do_batch_norm = True
else:
do_batch_norm = False
self.kwargs = {"input_dropout": input_dropout, "hidden_dropout1": hidden_dropout1,
"hidden_dropout2": hidden_dropout2, "model": model, "loss_type": loss_type,
"do_batch_norm": do_batch_norm, "l3_reg": l3_reg}
def get_data_idxs(self, data):
'''
Returns triples in their idx form,
e.g.: (head_entity,relation,tail_entity) gets converted to (1,1,2)
'''
data_idxs = [(self.entity_idxs[data[i][0]], self.relation_idxs[data[i][1]], \
self.entity_idxs[data[i][2]]) for i in range(len(data))]
return data_idxs
def get_er_vocab(self, data):
"""
        e.g. data = [[1,2,3], [1,2,3], [1,4,3]]
        gives er_vocab = defaultdict(<class 'list'>, {(1, 2): [3, 3], (1, 4): [3]})
        Returns er_vocab mapping (h, r) -> [t, ...]
"""
er_vocab = defaultdict(list)
for triple in data:
er_vocab[(triple[0], triple[1])].append(triple[2])
return er_vocab
def get_batch(self, er_vocab, er_vocab_pairs, idx):
'''
Returns
1. batch: er_vocab_pairs(size:batch_size)
2. targets: batch_size*num_entities tensor with target label for each er_vocab pair
'''
batch = er_vocab_pairs[idx:idx+self.batch_size]
targets = torch.zeros([len(batch), len(d.entities)], dtype=torch.float32)
if self.cuda:
targets = targets.cuda()
for idx, pair in enumerate(batch):
targets[idx, er_vocab[pair]] = 1.
return np.array(batch), targets
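    # Sketch of the multi-label targets built above (ids are made up): with
    # batch = [(1, 2), (1, 4)] and er_vocab = {(1, 2): [3, 7], (1, 4): [3]},
    # targets is a (2, num_entities) tensor with ones at columns 3 and 7 in row 0
    # and a one at column 3 in row 1; every other entry is zero.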
def evaluate(self, model, data):
model.eval()
hits = []
ranks = []
for i in range(10):
hits.append([])
test_data_idxs = self.get_data_idxs(data)
er_vocab = self.get_er_vocab(self.get_data_idxs(d.data))
print("Number of data points: %d" % len(test_data_idxs))
for i in tqdm(range(0, len(test_data_idxs), self.batch_size)):
data_batch, _ = self.get_batch(er_vocab, test_data_idxs, i)
e1_idx = torch.tensor(data_batch[:,0])
r_idx = torch.tensor(data_batch[:,1])
e2_idx = torch.tensor(data_batch[:,2])
if self.cuda:
e1_idx = e1_idx.cuda()
r_idx = r_idx.cuda()
e2_idx = e2_idx.cuda()
predictions = model.forward(e1_idx, r_idx)
            # commenting out the following lines would give RAW (unfiltered) evaluation;
            # as written, other known positives are filtered out before ranking
for j in range(data_batch.shape[0]):
filt = er_vocab[(data_batch[j][0], data_batch[j][1])]
target_value = predictions[j,e2_idx[j]].item()
predictions[j, filt] = 0.0
predictions[j, e2_idx[j]] = target_value
sort_values, sort_idxs = torch.sort(predictions, dim=1, descending=True)
sort_idxs = sort_idxs.cpu().numpy()
for j in range(data_batch.shape[0]):
rank = np.where(sort_idxs[j]==e2_idx[j].item())[0][0]
ranks.append(rank+1)
for hits_level in range(10):
if rank <= hits_level:
hits[hits_level].append(1.0)
else:
hits[hits_level].append(0.0)
hitat10 = np.mean(hits[9])
hitat3 = np.mean(hits[2])
hitat1 = np.mean(hits[0])
meanrank = np.mean(ranks)
mrr = np.mean(1./np.array(ranks))
pretty_tbl = PrettyTable()
pretty_tbl.field_names = ["Metric", "Result"]
pretty_tbl.add_row(['Hits@10', hitat10])
pretty_tbl.add_row(['Hits@3', hitat3])
pretty_tbl.add_row(['Hits@1', hitat1])
pretty_tbl.add_row(['MeanRank', meanrank])
pretty_tbl.add_row(['MeanReciprocalRank', mrr])
print(pretty_tbl)
return [mrr, meanrank, hitat10, hitat3, hitat1]
def write_embedding_files(self, model):
model.eval()
model_folder = f"../kg_embeddings/{self.model}/{self.dataset}"
data_folder = "../data/%s/" % self.dataset
embedding_type = self.model
if(not os.path.exists(model_folder)):
os.makedirs(model_folder)
R_numpy = model.R.weight.data.cpu().numpy()
E_numpy = model.E.weight.data.cpu().numpy()
bn_list = []
for bn in [model.bn0, model.bn1, model.bn2]:
bn_weight = bn.weight.data.cpu().numpy()
bn_bias = bn.bias.data.cpu().numpy()
bn_running_mean = bn.running_mean.data.cpu().numpy()
bn_running_var = bn.running_var.data.cpu().numpy()
bn_numpy = {}
bn_numpy['weight'] = bn_weight
bn_numpy['bias'] = bn_bias
bn_numpy['running_mean'] = bn_running_mean
bn_numpy['running_var'] = bn_running_var
bn_list.append(bn_numpy)
if embedding_type == 'TuckER':
W_numpy = model.W.detach().cpu().numpy()
np.save(model_folder +'/E.npy', E_numpy)
np.save(model_folder +'/R.npy', R_numpy)
for i, bn in enumerate(bn_list):
np.save(model_folder + '/bn' + str(i) + '.npy', bn)
if embedding_type == 'TuckER':
np.save(model_folder +'/W.npy', W_numpy)
f = open(data_folder + '/entities.dict', 'r')
f2 = open(model_folder + '/entities.dict', 'w')
ents = {}
idx2ent = {}
for line in f:
line = line.rstrip().split('\t')
name = line[0]
id = int(line[1])
ents[name] = id
idx2ent[id] = name
f2.write(str(id) + '\t' + name + '\n')
f.close()
f2.close()
f = open(data_folder + '/relations.dict', 'r')
f2 = open(model_folder + '/relations.dict', 'w')
rels = {}
idx2rel = {}
for line in f:
line = line.strip().split('\t')
name = line[0]
id = int(line[1])
rels[name] = id
idx2rel[id] = name
f2.write(str(id) + '\t' + name + '\n')
f.close()
f2.close()
def train_and_eval(self, d):
torch.set_num_threads(2)
best_valid = [0, 0, 0, 0, 0]
best_test = [0, 0, 0, 0, 0]
self.entity_idxs = {d.entities[i]:i for i in range(len(d.entities))}
self.relation_idxs = {d.relations[i]:i for i in range(len(d.relations))}
f = open('../data/' + self.dataset +'/entities.dict', 'w')
for key, value in self.entity_idxs.items():
f.write(key + '\t' + str(value) +'\n')
f.close()
f = open('../data/' + self.dataset + '/relations.dict', 'w')
for key, value in self.relation_idxs.items():
f.write(key + '\t' + str(value) +'\n')
f.close()
train_data_idxs = self.get_data_idxs(d.train_data)
pretty_tbl = PrettyTable()
pretty_tbl.field_names = ["ARTIFACT", "SAMPLES"]
pretty_tbl.add_row(['#TrainingSamples', len(train_data_idxs)])
pretty_tbl.add_row(['#Entities', len(self.entity_idxs)])
pretty_tbl.add_row(['#Relations', len(self.relation_idxs)])
print(pretty_tbl)
model = KGE(d, self.ent_vec_dim, self.rel_vec_dim, **self.kwargs)
model.init()
if self.load_from != '':
fname = self.load_from
checkpoint = torch.load(fname)
model.load_state_dict(checkpoint)
if self.cuda:
model.cuda()
opt = torch.optim.Adam(model.parameters(), lr=self.learning_rate)
if self.decay_rate:
scheduler = ExponentialLR(opt, self.decay_rate)
er_vocab = self.get_er_vocab(train_data_idxs)
        er_vocab_pairs = list(er_vocab.keys())
print("Starting training...")
for it in range(1, self.num_iterations+1):
print(f"Iteration: {it}/{self.num_iterations}")
start_train = time.time()
model.train()
losses = []
np.random.shuffle(er_vocab_pairs)
for j in tqdm(range(0, len(er_vocab_pairs), self.batch_size)):
data_batch, targets = self.get_batch(er_vocab, er_vocab_pairs, j)
opt.zero_grad()
e1_idx = torch.tensor(data_batch[:,0])
r_idx = torch.tensor(data_batch[:,1])
if self.cuda:
e1_idx = e1_idx.cuda()
r_idx = r_idx.cuda()
predictions = model.forward(e1_idx, r_idx)
if self.label_smoothing:
targets = ((1.0-self.label_smoothing)*targets) + (1.0/targets.size(1))
loss = model.loss(predictions, targets)
loss.backward()
opt.step()
losses.append(loss.item())
if self.decay_rate:
scheduler.step()
if it%100 == 0:
print('Epoch', it, ' Epoch time', time.time()-start_train, ' Loss:', np.mean(losses))
model.eval()
with torch.no_grad():
if it % self.valid_steps == 0:
start_test = time.time()
print("Validation:")
valid = self.evaluate(model, d.valid_data)
print("Test:")
test = self.evaluate(model, d.test_data)
valid_mrr = valid[0]
test_mrr = test[0]
if valid_mrr >= best_valid[0]:
best_valid = valid
best_test = test
print('Validation MRR increased.')
print('Saving model...')
self.write_embedding_files(model)
print('Model saved!')
pretty_tbl = PrettyTable()
pretty_tbl.field_names = ["ARTIFACT", "VALUE"]
pretty_tbl.add_row(['Best valid', best_valid])
pretty_tbl.add_row(['Best test', best_test])
pretty_tbl.add_row(['Dataset', self.dataset])
pretty_tbl.add_row(['Model', self.model])
print(pretty_tbl)
print(f'Training-time: {round(time.time()-start_test,2)}')
pretty_tbl = PrettyTable()
pretty_tbl.field_names = ["Parameter", "Value"]
pretty_tbl.add_row(['Learning rate', self.learning_rate])
pretty_tbl.add_row(['Decay', self.decay_rate])
pretty_tbl.add_row(['Dim', self.ent_vec_dim])
pretty_tbl.add_row(['Input drop', self.kwargs["input_dropout"]])
pretty_tbl.add_row(['Hidden drop 2', self.kwargs["hidden_dropout2"]])
pretty_tbl.add_row(['Label Smoothing', self.label_smoothing])
pretty_tbl.add_row(['Batch size', self.batch_size])
pretty_tbl.add_row(['Loss type', self.loss_type])
pretty_tbl.add_row(['L3 reg', self.l3_reg])
print(pretty_tbl)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", type=str, default="FB15k-237", nargs="?",
help="Which dataset to use: FB15k, FB15k-237, WN18 or WN18RR.")
parser.add_argument("--num_iterations", type=int, default=500, nargs="?",
help="Number of iterations.")
parser.add_argument("--batch_size", type=int, default=128, nargs="?",
help="Batch size.")
parser.add_argument("--lr", type=float, default=0.0005, nargs="?",
help="Learning rate.")
parser.add_argument("--model", type=str, default='Rotat3', nargs="?",
help="Model.")
parser.add_argument("--dr", type=float, default=1.0, nargs="?",
help="Decay rate.")
parser.add_argument("--edim", type=int, default=200, nargs="?",
help="Entity embedding dimensionality.")
parser.add_argument("--rdim", type=int, default=200, nargs="?",
help="Relation embedding dimensionality.")
parser.add_argument("--cuda", type=bool, default=True, nargs="?",
help="Whether to use cuda (GPU) or not (CPU).")
parser.add_argument("--input_dropout", type=float, default=0.3, nargs="?",
help="Input layer dropout.")
parser.add_argument("--hidden_dropout1", type=float, default=0.4, nargs="?",
help="Dropout after the first hidden layer.")
parser.add_argument("--hidden_dropout2", type=float, default=0.5, nargs="?",
help="Dropout after the second hidden layer.")
parser.add_argument("--label_smoothing", type=float, default=0.1, nargs="?",
help="Amount of label smoothing.")
parser.add_argument("--outfile", type=str, default='tucker.model', nargs="?",
help="File to save")
parser.add_argument("--valid_steps", type=int, default=1, nargs="?",
help="Epochs before u validate")
parser.add_argument("--loss_type", type=str, default='BCE', nargs="?",
help="Loss type")
parser.add_argument("--do_batch_norm", type=int, default=1, nargs="?",
help="Do batch norm or not (0, 1)")
parser.add_argument("--l3_reg", type=float, default=0.0, nargs="?",
help="l3 reg hyperparameter")
parser.add_argument("--load_from", type=str, default='', nargs="?",
help="load from state dict")
args = parser.parse_args()
dataset = args.dataset
data_dir = f"../data/{dataset}/"
torch.backends.cudnn.deterministic = True
seed = 20
np.random.seed(seed)
torch.manual_seed(seed)
    if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
experiment = Experiment(num_iterations=args.num_iterations, batch_size=args.batch_size, learning_rate=args.lr,
decay_rate=args.dr, ent_vec_dim=args.edim, rel_vec_dim=args.rdim, cuda=args.cuda,
input_dropout=args.input_dropout, hidden_dropout1=args.hidden_dropout1,
hidden_dropout2=args.hidden_dropout2, label_smoothing=args.label_smoothing, outfile=args.outfile,
valid_steps=args.valid_steps, loss_type=args.loss_type, do_batch_norm=args.do_batch_norm,
dataset=args.dataset, model=args.model, l3_reg=args.l3_reg, load_from=args.load_from)
d=Data(data_dir=data_dir, reverse=True)
experiment.train_and_eval(d)
| [
"torch.cuda.manual_seed_all",
"torch.no_grad",
"torch.optim.lr_scheduler.ExponentialLR",
"torch.manual_seed",
"torch.tensor",
"torch.load",
"torch.sort",
"torch.set_num_threads"
] | 1.7.1 | jishnujayakumar/MLRC2020-EmbedKGQA | ee99b8c83e6278b2dd6f16e0ae910c80b28da251 |
1.7 | import torch
import torch.nn as nn
import torch.nn.utils
import torch.nn.functional as F
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
from torch.nn.init import xavier_normal_
from transformers import *
import random
from helpers import *
class RelationExtractor(nn.Module):
def __init__(self, embedding_dim, relation_dim, num_entities, pretrained_embeddings, device, entdrop, reldrop, scoredrop, l3_reg, model, que_embedding_model, ls, do_batch_norm, freeze=True):
super(RelationExtractor, self).__init__()
self.device = device
self.model = model
self.freeze = freeze
self.label_smoothing = ls
self.l3_reg = l3_reg
self.do_batch_norm = do_batch_norm
if not self.do_batch_norm:
print('Not doing batch norm')
        self.pre_trained_model_name = get_pretrained_model_name(que_embedding_model)
        # keep the plain model name around; self.que_embedding_model is overwritten
        # below with the actual transformer module
        self.que_embedding_model_name = que_embedding_model
if que_embedding_model == 'RoBERTa':
self.que_embedding_model = RobertaModel.from_pretrained(self.pre_trained_model_name)
elif que_embedding_model == 'XLNet':
self.que_embedding_model = XLNetModel.from_pretrained(self.pre_trained_model_name)
elif que_embedding_model == 'ALBERT':
self.que_embedding_model = AlbertModel.from_pretrained(self.pre_trained_model_name)
elif que_embedding_model == 'SentenceTransformer':
self.que_embedding_model = AutoModel.from_pretrained(self.pre_trained_model_name)
elif que_embedding_model == 'Longformer':
self.que_embedding_model = LongformerModel.from_pretrained(self.pre_trained_model_name)
else:
            print('Incorrect question embedding model specified:', que_embedding_model)
exit(0)
for param in self.que_embedding_model.parameters():
param.requires_grad = True
if self.model == 'DistMult':
multiplier = 1
self.getScores = self.DistMult
elif self.model == 'SimplE':
multiplier = 2
self.getScores = self.SimplE
elif self.model == 'ComplEx':
multiplier = 2
self.getScores = self.ComplEx
elif self.model == 'TuckER':
# W_torch = torch.from_numpy(np.load(w_matrix))
# self.W = nn.Parameter(
# torch.Tensor(W_torch),
# requires_grad = not self.freeze
# )
self.W = nn.Parameter(torch.tensor(np.random.uniform(-1, 1, (relation_dim, relation_dim, relation_dim)),
dtype=torch.float, device="cuda", requires_grad=True))
multiplier = 1
self.getScores = self.TuckER
elif self.model == 'RESCAL':
self.getScores = self.RESCAL
multiplier = 1
else:
print('Incorrect model specified:', self.model)
exit(0)
print('Model is', self.model)
self.hidden_dim = 768
self.relation_dim = relation_dim * multiplier
if self.model == 'RESCAL':
self.relation_dim = relation_dim * relation_dim
self.num_entities = num_entities
# self.loss = torch.nn.BCELoss(reduction='sum')
self.loss = self.kge_loss
# best: all dropout 0
self.rel_dropout = torch.nn.Dropout(reldrop)
self.ent_dropout = torch.nn.Dropout(entdrop)
self.score_dropout = torch.nn.Dropout(scoredrop)
self.fcnn_dropout = torch.nn.Dropout(0.1)
# self.pretrained_embeddings = pretrained_embeddings
# random.shuffle(pretrained_embeddings)
# print(pretrained_embeddings[0])
print('Frozen:', self.freeze)
self.embedding = nn.Embedding.from_pretrained(torch.stack(pretrained_embeddings, dim=0), freeze=self.freeze)
# self.embedding = nn.Embedding.from_pretrained(torch.FloatTensor(pretrained_embeddings), freeze=self.freeze)
print(self.embedding.weight.shape)
# self.embedding = nn.Embedding(self.num_entities, self.relation_dim)
# self.embedding.weight.requires_grad = False
# xavier_normal_(self.embedding.weight.data)
self.mid1 = 512
self.mid2 = 512
self.mid3 = 512
self.mid4 = 512
# self.lin1 = nn.Linear(self.hidden_dim, self.mid1)
# self.lin2 = nn.Linear(self.mid1, self.mid2)
# self.lin3 = nn.Linear(self.mid2, self.mid3)
# self.lin4 = nn.Linear(self.mid3, self.mid4)
# self.hidden2rel = nn.Linear(self.mid4, self.relation_dim)
self.hidden2rel = nn.Linear(self.hidden_dim, self.relation_dim)
self.hidden2rel_base = nn.Linear(self.mid2, self.relation_dim)
if self.model in ['DistMult', 'TuckER', 'RESCAL', 'SimplE']:
self.bn0 = torch.nn.BatchNorm1d(self.embedding.weight.size(1))
self.bn2 = torch.nn.BatchNorm1d(self.embedding.weight.size(1))
else:
self.bn0 = torch.nn.BatchNorm1d(multiplier)
self.bn2 = torch.nn.BatchNorm1d(multiplier)
self.logsoftmax = torch.nn.LogSoftmax(dim=-1)
self._klloss = torch.nn.KLDivLoss(reduction='sum')
def set_bn_eval(self):
self.bn0.eval()
self.bn2.eval()
def kge_loss(self, scores, targets):
# loss = torch.mean(scores*targets)
return self._klloss(
F.log_softmax(scores, dim=1), F.normalize(targets.float(), p=1, dim=1)
)
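    # kge_loss above turns scores into a log-distribution over entities and the multi-hot
    # targets into a probability distribution (L1-normalized), so the KL divergence acts
    # like a cross-entropy against the answer distribution (up to the constant target entropy).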
def applyNonLinear(self, outputs):
# outputs = self.fcnn_dropout(self.lin1(outputs))
# outputs = F.relu(outputs)
# outputs = self.fcnn_dropout(self.lin2(outputs))
# outputs = F.relu(outputs)
# outputs = self.lin3(outputs)
# outputs = F.relu(outputs)
# outputs = self.lin4(outputs)
# outputs = F.relu(outputs)
outputs = self.hidden2rel(outputs)
# outputs = self.hidden2rel_base(outputs)
return outputs
def TuckER(self, head, relation):
head = self.bn0(head)
head = self.ent_dropout(head)
x = head.view(-1, 1, head.size(1))
W_mat = torch.mm(relation, self.W.view(relation.size(1), -1))
W_mat = W_mat.view(-1, head.size(1), head.size(1))
W_mat = self.rel_dropout(W_mat)
x = torch.bmm(x, W_mat)
x = x.view(-1, head.size(1))
x = self.bn2(x)
x = self.score_dropout(x)
x = torch.mm(x, self.embedding.weight.transpose(1,0))
pred = torch.sigmoid(x)
return pred
def RESCAL(self, head, relation):
head = self.bn0(head)
head = self.ent_dropout(head)
ent_dim = head.size(1)
head = head.view(-1, 1, ent_dim)
relation = relation.view(-1, ent_dim, ent_dim)
relation = self.rel_dropout(relation)
x = torch.bmm(head, relation)
x = x.view(-1, ent_dim)
x = self.bn2(x)
x = self.score_dropout(x)
x = torch.mm(x, self.embedding.weight.transpose(1,0))
pred = torch.sigmoid(x)
return pred
def DistMult(self, head, relation):
head = self.bn0(head)
head = self.ent_dropout(head)
relation = self.rel_dropout(relation)
s = head * relation
s = self.bn2(s)
s = self.score_dropout(s)
ans = torch.mm(s, self.embedding.weight.transpose(1,0))
pred = torch.sigmoid(ans)
return pred
def SimplE(self, head, relation):
head = self.bn0(head)
head = self.ent_dropout(head)
relation = self.rel_dropout(relation)
s = head * relation
s_head, s_tail = torch.chunk(s, 2, dim=1)
s = torch.cat([s_tail, s_head], dim=1)
s = self.bn2(s)
s = self.score_dropout(s)
s = torch.mm(s, self.embedding.weight.transpose(1,0))
s = 0.5 * s
pred = torch.sigmoid(s)
return pred
def ComplEx(self, head, relation):
head = torch.stack(list(torch.chunk(head, 2, dim=1)), dim=1)
if self.do_batch_norm:
head = self.bn0(head)
head = self.ent_dropout(head)
relation = self.rel_dropout(relation)
head = head.permute(1, 0, 2)
re_head = head[0]
im_head = head[1]
re_relation, im_relation = torch.chunk(relation, 2, dim=1)
re_tail, im_tail = torch.chunk(self.embedding.weight, 2, dim =1)
re_score = re_head * re_relation - im_head * im_relation
im_score = re_head * im_relation + im_head * re_relation
score = torch.stack([re_score, im_score], dim=1)
if self.do_batch_norm:
score = self.bn2(score)
score = self.score_dropout(score)
score = score.permute(1, 0, 2)
re_score = score[0]
im_score = score[1]
score = torch.mm(re_score, re_tail.transpose(1,0)) + torch.mm(im_score, im_tail.transpose(1,0))
# pred = torch.sigmoid(score)
pred = score
return pred
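    # The ComplEx score above splits head/relation embeddings into real and imaginary
    # halves, multiplies them as complex numbers, and matches the product against every
    # entity embedding, i.e. score(h, r, t) = Re(<h * r, conj(t)>) computed for all
    # candidate tails at once via the two matrix multiplications.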
def getQuestionEmbedding(self, question_tokenized, attention_mask):
        if self.que_embedding_model_name == "SentenceTransformer":
with torch.no_grad():
model_output = self.que_embedding_model(question_tokenized, attention_mask)
# model_output = model(**encoded_input)
question_embedding = mean_pooling(model_output, attention_mask)
return question_embedding[0]
else:
last_hidden_states = self.que_embedding_model(
question_tokenized,
attention_mask=attention_mask).last_hidden_state
states = last_hidden_states.transpose(1,0)
cls_embedding = states[0]
question_embedding = cls_embedding
question_embedding = torch.mean(last_hidden_states, dim=1)
return question_embedding
def forward(self, question_tokenized, attention_mask, p_head, p_tail):
question_embedding = self.getQuestionEmbedding(question_tokenized, attention_mask)
rel_embedding = self.applyNonLinear(question_embedding)
p_head = self.embedding(p_head)
pred = self.getScores(p_head, rel_embedding)
actual = p_tail
if self.label_smoothing:
actual = ((1.0-self.label_smoothing)*actual) + (1.0/actual.size(1))
loss = self.loss(pred, actual)
if not self.freeze:
if self.l3_reg:
norm = torch.norm(self.embedding.weight, p=3, dim=-1)
loss = loss + self.l3_reg * torch.sum(norm)
return loss
def get_score_ranked(self, head, question_tokenized, attention_mask):
question_embedding = self.getQuestionEmbedding(question_tokenized.unsqueeze(0), attention_mask.unsqueeze(0))
rel_embedding = self.applyNonLinear(question_embedding)
head = self.embedding(head).unsqueeze(0)
scores = self.getScores(head, rel_embedding)
# top2 = torch.topk(scores, k=2, largest=True, sorted=True)
# return top2
return scores
| [
"torch.nn.Linear",
"torch.nn.LogSoftmax",
"torch.nn.Dropout",
"torch.sigmoid",
"torch.cat",
"torch.stack",
"torch.norm",
"torch.no_grad",
"torch.bmm",
"torch.nn.functional.log_softmax",
"torch.nn.BatchNorm1d",
"torch.nn.KLDivLoss",
"torch.mean",
"torch.chunk",
"torch.sum"
] | 1.7.1 | jishnujayakumar/MLRC2020-EmbedKGQA | ee99b8c83e6278b2dd6f16e0ae910c80b28da251 |
1.3 | import torch
from torch import nn
from torch.nn.functional import fold, unfold
from . import norms, activations
from .norms import GlobLN, CumLN
from ..utils import has_arg
class SingleRNN(nn.Module):
""" Module for a RNN block.
Inspired from https://github.com/yluo42/TAC/blob/master/utility/models.py
Licensed under CC BY-NC-SA 3.0 US.
Args:
rnn_type (str): Select from ``'RNN'``, ``'LSTM'``, ``'GRU'``. Can
also be passed in lowercase letters.
input_size (int): Dimension of the input feature. The input should have
shape [batch, seq_len, input_size].
hidden_size (int): Dimension of the hidden state.
n_layers (int, optional): Number of layers used in RNN. Default is 1.
dropout (float, optional): Dropout ratio. Default is 0.
bidirectional (bool, optional): Whether the RNN layers are
bidirectional. Default is ``False``.
"""
def __init__(
self, rnn_type, input_size, hidden_size, n_layers=1, dropout=0, bidirectional=False
):
super(SingleRNN, self).__init__()
assert rnn_type.upper() in ["RNN", "LSTM", "GRU"]
rnn_type = rnn_type.upper()
self.rnn_type = rnn_type
self.input_size = input_size
self.hidden_size = hidden_size
self.rnn = getattr(nn, rnn_type)(
input_size,
hidden_size,
num_layers=n_layers,
dropout=dropout,
batch_first=True,
bidirectional=bool(bidirectional),
)
def forward(self, inp):
""" Input shape [batch, seq, feats] """
self.rnn.flatten_parameters() # Enables faster multi-GPU training.
output = inp
rnn_output, _ = self.rnn(output)
return rnn_output
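# Minimal usage sketch (added for illustration, not part of the original file),
# assuming an input of shape [batch, seq_len, input_size] as described in the
# docstring above.
def _single_rnn_example():
    rnn = SingleRNN("LSTM", input_size=64, hidden_size=128, bidirectional=True)
    inp = torch.randn(4, 100, 64)  # [batch, seq, feats]
    out = rnn(inp)                 # bidirectional -> 2 * hidden_size features
    return out.shape               # torch.Size([4, 100, 256])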
class StackedResidualRNN(nn.Module):
""" Stacked RNN with builtin residual connection.
Only supports forward RNNs.
See StackedResidualBiRNN for bidirectional ones.
Args:
rnn_type (str): Select from ``'RNN'``, ``'LSTM'``, ``'GRU'``. Can
also be passed in lowercase letters.
n_units (int): Number of units in recurrent layers. This will also be
the expected input size.
n_layers (int): Number of recurrent layers.
dropout (float): Dropout value, between 0. and 1. (Default: 0.)
bidirectional (bool): Must be ``False``; bidirectional RNNs are not
supported here, see ``StackedResidualBiRNN`` instead. (Default: False)
"""
def __init__(self, rnn_type, n_units, n_layers=4, dropout=0.0, bidirectional=False):
super(StackedResidualRNN, self).__init__()
self.rnn_type = rnn_type
self.n_units = n_units
self.n_layers = n_layers
self.dropout = dropout
assert bidirectional is False, "Bidirectional not supported yet"
self.bidirectional = bidirectional
self.layers = nn.ModuleList()
for _ in range(n_layers):
self.layers.append(
SingleRNN(
rnn_type, input_size=n_units, hidden_size=n_units, bidirectional=bidirectional
)
)
self.dropout_layer = nn.Dropout(self.dropout)
def forward(self, x):
""" Builtin residual connections + dropout applied before residual.
Input shape : [batch, time_axis, feat_axis]
"""
for rnn in self.layers:
rnn_out = rnn(x)
dropped_out = self.dropout_layer(rnn_out)
x = x + dropped_out
return x
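# Hedged usage sketch (illustration only): each layer's output passes through
# dropout and is added back to its input, so input and output shapes match.
def _stacked_residual_rnn_example():
    net = StackedResidualRNN("GRU", n_units=32, n_layers=2, dropout=0.1)
    x = torch.randn(2, 50, 32)  # [batch, time_axis, feat_axis]
    return net(x).shape         # torch.Size([2, 50, 32])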
class StackedResidualBiRNN(nn.Module):
""" Stacked Bidirectional RNN with builtin residual connection.
Residual connections are applied on both RNN directions.
Only supports bidirectional RNNs.
See StackedResidualRNN for unidirectional ones.
Args:
rnn_type (str): Select from ``'RNN'``, ``'LSTM'``, ``'GRU'``. Can
also be passed in lowercase letters.
n_units (int): Number of units in recurrent layers. This will also be
the expected input size.
n_layers (int): Number of recurrent layers.
dropout (float): Dropout value, between 0. and 1. (Default: 0.)
bidirectional (bool): Must be ``True``; only bidirectional RNNs are
supported here, see ``StackedResidualRNN`` for unidirectional ones. (Default: True)
"""
def __init__(self, rnn_type, n_units, n_layers=4, dropout=0.0, bidirectional=True):
super().__init__()
self.rnn_type = rnn_type
self.n_units = n_units
self.n_layers = n_layers
self.dropout = dropout
assert bidirectional is True, "Only bidirectional is supported for now"
self.bidirectional = bidirectional
# The first layer has as many units as input size
self.first_layer = SingleRNN(
rnn_type, input_size=n_units, hidden_size=n_units, bidirectional=bidirectional
)
# As the first layer outputs 2*n_units, the following layers need
# 2*n_units as input size
self.layers = nn.ModuleList()
for i in range(n_layers - 1):
input_size = 2 * n_units
self.layers.append(
SingleRNN(
rnn_type,
input_size=input_size,
hidden_size=n_units,
bidirectional=bidirectional,
)
)
self.dropout_layer = nn.Dropout(self.dropout)
def forward(self, x):
""" Builtin residual connections + dropout applied before residual.
Input shape : [batch, time_axis, feat_axis]
"""
# First layer
rnn_out = self.first_layer(x)
dropped_out = self.dropout_layer(rnn_out)
x = torch.cat([x, x], dim=-1) + dropped_out
# Rest of the layers
for rnn in self.layers:
rnn_out = rnn(x)
dropped_out = self.dropout_layer(rnn_out)
x = x + dropped_out
return x
class DPRNNBlock(nn.Module):
""" Dual-Path RNN Block as proposed in [1].
Args:
in_chan (int): Number of input channels.
hid_size (int): Number of hidden neurons in the RNNs.
norm_type (str, optional): Type of normalization to use. To choose from
- ``'gLN'``: global Layernorm
- ``'cLN'``: channelwise Layernorm
bidirectional (bool, optional): True for bidirectional Inter-Chunk RNN.
rnn_type (str, optional): Type of RNN used. Choose from ``'RNN'``,
``'LSTM'`` and ``'GRU'``.
num_layers (int, optional): Number of layers used in each RNN.
dropout (float, optional): Dropout ratio. Must be in [0, 1].
References:
[1] "Dual-path RNN: efficient long sequence modeling for
time-domain single-channel speech separation", Yi Luo, Zhuo Chen
and Takuya Yoshioka. https://arxiv.org/abs/1910.06379
"""
def __init__(
self,
in_chan,
hid_size,
norm_type="gLN",
bidirectional=True,
rnn_type="LSTM",
num_layers=1,
dropout=0,
):
super(DPRNNBlock, self).__init__()
# IntraRNN and linear projection layer (always bi-directional)
self.intra_RNN = SingleRNN(
rnn_type, in_chan, hid_size, num_layers, dropout=dropout, bidirectional=True
)
self.intra_linear = nn.Linear(hid_size * 2, in_chan)
self.intra_norm = norms.get(norm_type)(in_chan)
# InterRNN block and linear projection layer (uni or bi-directional)
self.inter_RNN = SingleRNN(
rnn_type, in_chan, hid_size, num_layers, dropout=dropout, bidirectional=bidirectional
)
num_direction = int(bidirectional) + 1
self.inter_linear = nn.Linear(hid_size * num_direction, in_chan)
self.inter_norm = norms.get(norm_type)(in_chan)
def forward(self, x):
""" Input shape : [batch, feats, chunk_size, num_chunks] """
B, N, K, L = x.size()
output = x # for skip connection
# Intra-chunk processing
x = x.transpose(1, -1).reshape(B * L, K, N)
x = self.intra_RNN(x)
x = self.intra_linear(x)
x = x.reshape(B, L, K, N).transpose(1, -1)
x = self.intra_norm(x)
output = output + x
# Inter-chunk processing
x = output.transpose(1, 2).transpose(2, -1).reshape(B * K, L, N)
x = self.inter_RNN(x)
x = self.inter_linear(x)
x = x.reshape(B, K, L, N).transpose(1, -1).transpose(2, -1)
x = self.inter_norm(x)
return output + x
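# Illustrative sketch (not in the original source): a DPRNNBlock preserves the
# [batch, feats, chunk_size, num_chunks] shape while alternating intra-chunk
# and inter-chunk processing with residual connections. Sizes are arbitrary.
def _dprnn_block_example():
    block = DPRNNBlock(in_chan=64, hid_size=128)
    x = torch.randn(2, 64, 100, 20)  # [batch, feats, chunk_size, num_chunks]
    return block(x).shape            # torch.Size([2, 64, 100, 20])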
class DPRNN(nn.Module):
""" Dual-path RNN Network for Single-Channel Source Separation
introduced in [1].
Args:
in_chan (int): Number of input filters.
n_src (int): Number of masks to estimate.
out_chan (int or None): Number of bins in the estimated masks.
Defaults to `in_chan`.
bn_chan (int): Number of channels after the bottleneck.
Defaults to 128.
hid_size (int): Number of neurons in the RNNs cell state.
Defaults to 128.
chunk_size (int): window size of overlap and add processing.
Defaults to 100.
hop_size (int or None): hop size (stride) of overlap and add processing.
Defaults to `chunk_size // 2` (50% overlap).
n_repeats (int): Number of repeats. Defaults to 6.
norm_type (str, optional): Type of normalization to use. To choose from
- ``'gLN'``: global Layernorm
- ``'cLN'``: channelwise Layernorm
mask_act (str, optional): Which non-linear function to generate mask.
bidirectional (bool, optional): True for bidirectional Inter-Chunk RNN
(Intra-Chunk is always bidirectional).
rnn_type (str, optional): Type of RNN used. Choose between ``'RNN'``,
``'LSTM'`` and ``'GRU'``.
num_layers (int, optional): Number of layers in each RNN.
dropout (float, optional): Dropout ratio, must be in [0,1].
References:
[1] "Dual-path RNN: efficient long sequence modeling for
time-domain single-channel speech separation", Yi Luo, Zhuo Chen
and Takuya Yoshioka. https://arxiv.org/abs/1910.06379
"""
def __init__(
self,
in_chan,
n_src,
out_chan=None,
bn_chan=128,
hid_size=128,
chunk_size=100,
hop_size=None,
n_repeats=6,
norm_type="gLN",
mask_act="relu",
bidirectional=True,
rnn_type="LSTM",
num_layers=1,
dropout=0,
):
super(DPRNN, self).__init__()
self.in_chan = in_chan
out_chan = out_chan if out_chan is not None else in_chan
self.out_chan = out_chan
self.bn_chan = bn_chan
self.hid_size = hid_size
self.chunk_size = chunk_size
hop_size = hop_size if hop_size is not None else chunk_size // 2
self.hop_size = hop_size
self.n_repeats = n_repeats
self.n_src = n_src
self.norm_type = norm_type
self.mask_act = mask_act
self.bidirectional = bidirectional
self.rnn_type = rnn_type
self.num_layers = num_layers
self.dropout = dropout
layer_norm = norms.get(norm_type)(in_chan)
bottleneck_conv = nn.Conv1d(in_chan, bn_chan, 1)
self.bottleneck = nn.Sequential(layer_norm, bottleneck_conv)
# Succession of DPRNNBlocks.
net = []
for x in range(self.n_repeats):
net += [
DPRNNBlock(
bn_chan,
hid_size,
norm_type=norm_type,
bidirectional=bidirectional,
rnn_type=rnn_type,
num_layers=num_layers,
dropout=dropout,
)
]
self.net = nn.Sequential(*net)
# Masking in 3D space
net_out_conv = nn.Conv2d(bn_chan, n_src * bn_chan, 1)
self.first_out = nn.Sequential(nn.PReLU(), net_out_conv)
# Gating and masking in 2D space (after fold)
self.net_out = nn.Sequential(nn.Conv1d(bn_chan, bn_chan, 1), nn.Tanh())
self.net_gate = nn.Sequential(nn.Conv1d(bn_chan, bn_chan, 1), nn.Sigmoid())
self.mask_net = nn.Conv1d(bn_chan, out_chan, 1, bias=False)
# Get activation function.
mask_nl_class = activations.get(mask_act)
# For softmax, feed the source dimension.
if has_arg(mask_nl_class, "dim"):
self.output_act = mask_nl_class(dim=1)
else:
self.output_act = mask_nl_class()
def forward(self, mixture_w):
"""
Args:
mixture_w (:class:`torch.Tensor`): Tensor of shape
[batch, n_filters, n_frames]
Returns:
:class:`torch.Tensor`
estimated mask of shape [batch, n_src, n_filters, n_frames]
"""
batch, n_filters, n_frames = mixture_w.size()
output = self.bottleneck(mixture_w) # [batch, bn_chan, n_frames]
output = unfold(
output.unsqueeze(-1),
kernel_size=(self.chunk_size, 1),
padding=(self.chunk_size, 0),
stride=(self.hop_size, 1),
)
n_chunks = output.size(-1)
output = output.reshape(batch, self.bn_chan, self.chunk_size, n_chunks)
# Apply stacked DPRNN Blocks sequentially
output = self.net(output)
# Map to sources with kind of 2D masks
output = self.first_out(output)
output = output.reshape(batch * self.n_src, self.bn_chan, self.chunk_size, n_chunks)
# Overlap and add:
# [batch, out_chan, chunk_size, n_chunks] -> [batch, out_chan, n_frames]
to_unfold = self.bn_chan * self.chunk_size
output = fold(
output.reshape(batch * self.n_src, to_unfold, n_chunks),
(n_frames, 1),
kernel_size=(self.chunk_size, 1),
padding=(self.chunk_size, 0),
stride=(self.hop_size, 1),
)
# Apply gating
output = output.reshape(batch * self.n_src, self.bn_chan, -1)
output = self.net_out(output) * self.net_gate(output)
# Compute mask
score = self.mask_net(output)
est_mask = self.output_act(score)
est_mask = est_mask.view(batch, self.n_src, self.out_chan, n_frames)
return est_mask
def get_config(self):
config = {
"in_chan": self.in_chan,
"out_chan": self.out_chan,
"bn_chan": self.bn_chan,
"hid_size": self.hid_size,
"chunk_size": self.chunk_size,
"hop_size": self.hop_size,
"n_repeats": self.n_repeats,
"n_src": self.n_src,
"norm_type": self.norm_type,
"mask_act": self.mask_act,
"bidirectional": self.bidirectional,
"rnn_type": self.rnn_type,
"num_layers": self.num_layers,
"dropout": self.dropout,
}
return config
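# Minimal usage sketch (added for illustration, not from the original repo):
# DPRNN maps an encoded mixture [batch, n_filters, n_frames] to one mask per
# source. Small sizes are used here to keep the example cheap to run.
def _dprnn_example():
    masker = DPRNN(in_chan=64, n_src=2, bn_chan=32, hid_size=32,
                   chunk_size=20, n_repeats=2)
    mixture_w = torch.randn(2, 64, 200)  # [batch, n_filters, n_frames]
    est_mask = masker(mixture_w)
    return est_mask.shape                # torch.Size([2, 2, 64, 200])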
class LSTMMasker(nn.Module):
""" LSTM mask network introduced in [1], without skip connections.
Args:
in_chan (int): Number of input filters.
n_src (int): Number of masks to estimate.
out_chan (int or None): Number of bins in the estimated masks.
Defaults to `in_chan`.
rnn_type (str, optional): Type of RNN used. Choose between ``'RNN'``,
``'LSTM'`` and ``'GRU'``.
n_layers (int, optional): Number of layers in each RNN.
hid_size (int): Number of neurons in the RNNs cell state.
mask_act (str, optional): Which non-linear function to generate mask.
bidirectional (bool, optional): Whether to use BiLSTM
dropout (float, optional): Dropout ratio, must be in [0,1].
References:
[1]: Yi Luo et al. "Real-time Single-channel Dereverberation and Separation
with Time-domain Audio Separation Network", Interspeech 2018
"""
def __init__(
self,
in_chan,
n_src,
out_chan=None,
rnn_type="lstm",
n_layers=4,
hid_size=512,
dropout=0.3,
mask_act="sigmoid",
bidirectional=True,
):
super().__init__()
self.in_chan = in_chan
self.n_src = n_src
out_chan = out_chan if out_chan is not None else in_chan
self.out_chan = out_chan
self.rnn_type = rnn_type
self.n_layers = n_layers
self.hid_size = hid_size
self.dropout = dropout
self.mask_act = mask_act
self.bidirectional = bidirectional
# Get activation function.
mask_nl_class = activations.get(mask_act)
# For softmax, feed the source dimension.
if has_arg(mask_nl_class, "dim"):
self.output_act = mask_nl_class(dim=1)
else:
self.output_act = mask_nl_class()
# Create TasNet masker
out_size = hid_size * (int(bidirectional) + 1)
if bidirectional:
self.bn_layer = GlobLN(in_chan)
else:
self.bn_layer = CumLN(in_chan)
self.masker = nn.Sequential(
SingleRNN(
"lstm",
in_chan,
hidden_size=hid_size,
n_layers=n_layers,
bidirectional=bidirectional,
dropout=dropout,
),
nn.Linear(out_size, self.n_src * out_chan),
self.output_act,
)
def forward(self, x):
batch_size = x.shape[0]
to_sep = self.bn_layer(x)
est_masks = self.masker(to_sep.transpose(-1, -2)).transpose(-1, -2)
est_masks = est_masks.view(batch_size, self.n_src, self.out_chan, -1)
return est_masks
def get_config(self):
config = {
"in_chan": self.in_chan,
"n_src": self.n_src,
"out_chan": self.out_chan,
"rnn_type": self.rnn_type,
"n_layers": self.n_layers,
"hid_size": self.hid_size,
"dropout": self.dropout,
"mask_act": self.mask_act,
"bidirectional": self.bidirectional,
}
return config
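# Hedged usage sketch (illustration only): the LSTM masker consumes the same
# [batch, n_filters, n_frames] representation and returns n_src masks.
def _lstm_masker_example():
    masker = LSTMMasker(in_chan=64, n_src=2, n_layers=1, hid_size=64, dropout=0.0)
    x = torch.randn(2, 64, 150)
    return masker(x).shape  # torch.Size([2, 2, 64, 150])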
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.Conv1d",
"torch.nn.Sequential",
"torch.nn.Tanh",
"torch.nn.Sigmoid",
"torch.nn.Conv2d",
"torch.nn.PReLU"
] | 1.3.0 | DizzyProtos/asteroid | bb3c374fefe7525c2f6da355834e470d45b45b90 |
1.3 | import torch
import numpy as np
from torch import nn
import torchvision
import torch.nn.functional as F
from pathlib import Path
from asteroid import torch_utils
from asteroid.engine.optimizers import make_optimizer
from asteroid.filterbanks import transforms
# Reference: https://github.com/bill9800/speech_separation/blob/master/model/lib/utils.py
def generate_cRM(Y, S):
"""Generate CRM.
Args:
Y (torch.Tensor): mixed/noisy stft.
S (torch.Tensor): clean stft.
Returns:
M (torch.Tensor): structed cRM.
"""
M = torch.zeros(Y.shape)
epsilon = 1e-8
# real part
M_real = (Y[..., 0] * S[..., 0]) + (Y[..., 1] * S[..., 1])
square_real = (Y[..., 0] ** 2) + (Y[..., 1] ** 2)
M_real = M_real / (square_real + epsilon)
M[..., 0] = M_real
# imaginary part
M_img = (Y[..., 0] * S[..., 1]) - (Y[..., 1] * S[..., 0])
square_img = (Y[..., 0] ** 2) + (Y[..., 1] ** 2)
M_img = M_img / (square_img + epsilon)
M[..., 1] = M_img
return M
def cRM_tanh_compress(M, K=10, C=0.1):
"""CRM tanh compress.
Args:
M (torch.Tensor): crm.
K (torch.Tensor): parameter to control the compression.
C (torch.Tensor): parameter to control the compression.
Returns:
crm (torch.Tensor): compressed crm.
"""
numerator = 1 - torch.exp(-C * M)
numerator[numerator == float("inf")] = 1
numerator[numerator == float("-inf")] = -1
denominator = 1 + torch.exp(-C * M)
denominator[denominator == float("inf")] = 1
denominator[denominator == float("-inf")] = -1
crm = K * (numerator / denominator)
return crm
def cRM_tanh_recover(O, K=10, C=0.1):
"""CRM tanh recover.
Args:
O (torch.Tensor): predicted compressed crm.
K (torch.Tensor): parameter to control the compression.
C (torch.Tensor): parameter to control the compression.
Returns:
M (torch.Tensor): uncompressed crm.
"""
numerator = K - O
denominator = K + O
M = -((1.0 / C) * torch.log((numerator / denominator)))
return M
def fast_cRM(Fclean, Fmix, K=10, C=0.1):
"""Fast CRM.
Args:
Fmix (torch.Tensor): mixed/noisy stft.
Fclean (torch.Tensor): clean stft.
K (torch.Tensor): parameter to control the compression.
C (torch.Tensor): parameter to control the compression.
Returns:
crm (torch.Tensor): compressed crm.
"""
M = generate_cRM(Fmix, Fclean)
crm = cRM_tanh_compress(M, K, C)
return crm
def fast_icRM(Y, crm, K=10, C=0.1):
"""fast iCRM.
Args:
Y (torch.Tensor): mixed/noised stft.
crm (torch.Tensor): DNN output of compressed crm.
K (torch.Tensor): parameter to control the compression.
C (torch.Tensor): parameter to control the compression.
Returns:
S (torch.Tensor): clean stft.
"""
M = cRM_tanh_recover(crm, K, C)
S = torch.zeros(M.shape)
S[:, 0, ...] = (M[:, 0, ...] * Y[:, 0, ...]) - (M[:, 1, ...] * Y[:, 1, ...])
S[:, 1, ...] = (M[:, 0, ...] * Y[:, 1, ...]) + (M[:, 1, ...] * Y[:, 0, ...])
return S
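# Round-trip sketch (added for illustration): build a compressed complex ratio
# mask with fast_cRM and undo the compression with cRM_tanh_recover. The
# tensors and K/C values are arbitrary and only show how the helpers fit
# together; here the last dimension holds the real/imaginary parts.
def _crm_round_trip_example():
    Y = torch.randn(4, 298, 257, 2)  # mixed/noisy stft
    S = torch.randn(4, 298, 257, 2)  # clean stft
    crm = fast_cRM(S, Y, K=10, C=0.1)       # compressed mask, values in (-K, K)
    M = cRM_tanh_recover(crm, K=10, C=0.1)  # uncompressed mask
    return crm.shape, M.shape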
class Audio_Model(nn.Module):
def __init__(self, last_shape=8):
super(Audio_Model, self).__init__()
# Audio model layers , name of layers as per table 1 given in paper.
self.conv1 = nn.Conv2d(
2, 96, kernel_size=(1, 7), padding=self.get_padding((1, 7), (1, 1)), dilation=(1, 1),
)
self.conv2 = nn.Conv2d(
96, 96, kernel_size=(7, 1), padding=self.get_padding((7, 1), (1, 1)), dilation=(1, 1),
)
self.conv3 = nn.Conv2d(
96, 96, kernel_size=(5, 5), padding=self.get_padding((5, 5), (1, 1)), dilation=(1, 1),
)
self.conv4 = nn.Conv2d(
96, 96, kernel_size=(5, 5), padding=self.get_padding((5, 5), (2, 1)), dilation=(2, 1),
)
self.conv5 = nn.Conv2d(
96, 96, kernel_size=(5, 5), padding=self.get_padding((5, 5), (4, 1)), dilation=(4, 1),
)
self.conv6 = nn.Conv2d(
96, 96, kernel_size=(5, 5), padding=self.get_padding((5, 5), (8, 1)), dilation=(8, 1),
)
self.conv7 = nn.Conv2d(
96, 96, kernel_size=(5, 5), padding=self.get_padding((5, 5), (16, 1)), dilation=(16, 1),
)
self.conv8 = nn.Conv2d(
96, 96, kernel_size=(5, 5), padding=self.get_padding((5, 5), (32, 1)), dilation=(32, 1),
)
self.conv9 = nn.Conv2d(
96, 96, kernel_size=(5, 5), padding=self.get_padding((5, 5), (1, 1)), dilation=(1, 1),
)
self.conv10 = nn.Conv2d(
96, 96, kernel_size=(5, 5), padding=self.get_padding((5, 5), (2, 2)), dilation=(2, 2),
)
self.conv11 = nn.Conv2d(
96, 96, kernel_size=(5, 5), padding=self.get_padding((5, 5), (4, 4)), dilation=(4, 4),
)
self.conv12 = nn.Conv2d(
96, 96, kernel_size=(5, 5), padding=self.get_padding((5, 5), (8, 8)), dilation=(8, 8),
)
self.conv13 = nn.Conv2d(
96,
96,
kernel_size=(5, 5),
padding=self.get_padding((5, 5), (16, 16)),
dilation=(16, 16),
)
self.conv14 = nn.Conv2d(
96,
96,
kernel_size=(5, 5),
padding=self.get_padding((5, 5), (32, 32)),
dilation=(32, 32),
)
self.conv15 = nn.Conv2d(
96,
last_shape,
kernel_size=(1, 1),
padding=self.get_padding((1, 1), (1, 1)),
dilation=(1, 1),
)
# Batch normalization layers
self.batch_norm1 = nn.BatchNorm2d(96)
self.batch_norm2 = nn.BatchNorm2d(96)
self.batch_norm3 = nn.BatchNorm2d(96)
self.batch_norm4 = nn.BatchNorm2d(96)
self.batch_norm5 = nn.BatchNorm2d(96)
self.batch_norm6 = nn.BatchNorm2d(96)
self.batch_norm7 = nn.BatchNorm2d(96)
self.batch_norm8 = nn.BatchNorm2d(96)
self.batch_norm9 = nn.BatchNorm2d(96)
self.batch_norm10 = nn.BatchNorm2d(96)
self.batch_norm11 = nn.BatchNorm2d(96)
self.batch_norm12 = nn.BatchNorm2d(96)
self.batch_norm13 = nn.BatchNorm2d(96)
self.batch_norm14 = nn.BatchNorm2d(96)
self.batch_norm15 = nn.BatchNorm2d(last_shape)
def get_padding(self, kernel_size, dilation):
padding = (
((dilation[0]) * (kernel_size[0] - 1)) // 2,
((dilation[1]) * (kernel_size[1] - 1)) // 2,
)
return padding
def forward(self, input_audio):
# input audio will be (2,298,257)
output_layer = F.relu(self.batch_norm1(self.conv1(input_audio)))
output_layer = F.relu(self.batch_norm2(self.conv2(output_layer)))
output_layer = F.relu(self.batch_norm3(self.conv3(output_layer)))
output_layer = F.relu(self.batch_norm4(self.conv4(output_layer)))
output_layer = F.relu(self.batch_norm5(self.conv5(output_layer)))
output_layer = F.relu(self.batch_norm6(self.conv6(output_layer)))
output_layer = F.relu(self.batch_norm7(self.conv7(output_layer)))
output_layer = F.relu(self.batch_norm8(self.conv8(output_layer)))
output_layer = F.relu(self.batch_norm9(self.conv9(output_layer)))
output_layer = F.relu(self.batch_norm10(self.conv10(output_layer)))
output_layer = F.relu(self.batch_norm11(self.conv11(output_layer)))
output_layer = F.relu(self.batch_norm12(self.conv12(output_layer)))
output_layer = F.relu(self.batch_norm13(self.conv13(output_layer)))
output_layer = F.relu(self.batch_norm14(self.conv14(output_layer)))
output_layer = F.relu(self.batch_norm15(self.conv15(output_layer)))
# output_layer will be (N,8,298,257)
# we want it to be (N,8*257,298,1)
batch_size = output_layer.size(0) # N
height = output_layer.size(2) # 298
output_layer = output_layer.transpose(-1, -2).reshape((batch_size, -1, height, 1))
return output_layer
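# Shape sketch (illustration only, not part of the original file): the audio
# stream takes an (N, 2, 298, 257) spectrogram and returns an (N, 8*257, 298, 1)
# feature map, matching the comments in the forward method above.
def _audio_model_example():
    net = Audio_Model(last_shape=8)
    spec = torch.randn(1, 2, 298, 257)
    return net(spec).shape  # torch.Size([1, 2056, 298, 1])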
class Video_Model(nn.Module):
def __init__(self, last_shape=256):
super(Video_Model, self).__init__()
self.conv1 = nn.Conv2d(
512, 256, kernel_size=(7, 1), padding=self.get_padding((7, 1), (1, 1)), dilation=(1, 1),
)
self.conv2 = nn.Conv2d(
256, 256, kernel_size=(5, 1), padding=self.get_padding((5, 1), (1, 1)), dilation=(1, 1),
)
self.conv3 = nn.Conv2d(
256, 256, kernel_size=(5, 1), padding=self.get_padding((5, 1), (2, 1)), dilation=(2, 1),
)
self.conv4 = nn.Conv2d(
256, 256, kernel_size=(5, 1), padding=self.get_padding((5, 1), (4, 1)), dilation=(4, 1),
)
self.conv5 = nn.Conv2d(
256, 256, kernel_size=(5, 1), padding=self.get_padding((5, 1), (8, 1)), dilation=(8, 1),
)
self.conv6 = nn.Conv2d(
256,
256,
kernel_size=(5, 1),
padding=self.get_padding((5, 1), (16, 1)),
dilation=(16, 1),
)
# Batch normalization layers
self.batch_norm1 = nn.BatchNorm2d(256)
self.batch_norm2 = nn.BatchNorm2d(256)
self.batch_norm3 = nn.BatchNorm2d(256)
self.batch_norm4 = nn.BatchNorm2d(256)
self.batch_norm5 = nn.BatchNorm2d(256)
self.batch_norm6 = nn.BatchNorm2d(last_shape)
def get_padding(self, kernel_size, dilation):
padding = (
((dilation[0]) * (kernel_size[0] - 1)) // 2,
((dilation[1]) * (kernel_size[1] - 1)) // 2,
)
return padding
def forward(self, input_video):
# input video is expected as (N,75,512) or (N,1,75,512); the transpose below moves the 512 features to the channel axis
if len(input_video.shape) == 3:
input_video = input_video.unsqueeze(1)
# input_video = torch.transpose(input_video,1,3) # (1,75,512)
# input_video = self.linear_for_512_to_1024(input_video) # (1,75,1024)
input_video = torch.transpose(input_video, 1, 3) # (N,512,75,1)
output_layer = F.relu(self.batch_norm1(self.conv1(input_video)))
output_layer = F.relu(self.batch_norm2(self.conv2(output_layer)))
output_layer = F.relu(self.batch_norm3(self.conv3(output_layer)))
output_layer = F.relu(self.batch_norm4(self.conv4(output_layer)))
output_layer = F.relu(self.batch_norm5(self.conv5(output_layer)))
output_layer = F.relu(self.batch_norm6(self.conv6(output_layer)))
# for upsampling , as mentioned in paper
output_layer = nn.functional.interpolate(output_layer, size=(298, 1), mode="nearest")
return output_layer
# so now , video_output is (N,256,298,1)
# and audio_output is (N,8*257,298,1)
# where N = batch_size
class Audio_Visual_Fusion(nn.Module):
"""Audio Visual Speech Separation model as described in [1].
All default values are the same as paper.
Args:
num_person (int): total number of persons (as i/o).
device (torch.Device): device used to return the final tensor.
audio_last_shape (int): relevant last shape for tensor in audio network.
video_last_shape (int): relevant last shape for tensor in video network.
input_spectrogram_shape (tuple(int)): shape of input spectrogram.
References:
[1]: 'Looking to Listen at the Cocktail Party:
A Speaker-Independent Audio-Visual Model for Speech Separation' Ephrat et. al
https://arxiv.org/abs/1804.03619
"""
def __init__(
self,
num_person=2,
device=None,
audio_last_shape=8,
video_last_shape=256,
input_spectrogram_shape=(298, 257, 2),
):
if isinstance(device, str):
device = torch.device(device)
self.device = device
super(Audio_Visual_Fusion, self).__init__()
self.num_person = num_person
self.input_dim = (
audio_last_shape * input_spectrogram_shape[1] + video_last_shape * self.num_person
)
self.audio_output = Audio_Model(last_shape=audio_last_shape)
self.video_output = Video_Model(last_shape=video_last_shape)
self.lstm = nn.LSTM(
self.input_dim, 400, num_layers=1, bias=True, batch_first=True, bidirectional=True,
)
self.fc1 = nn.Linear(400, 600)
torch.nn.init.xavier_uniform_(self.fc1.weight)
self.fc2 = nn.Linear(600, 600)
torch.nn.init.xavier_uniform_(self.fc2.weight)
self.fc3 = nn.Linear(600, 600)
torch.nn.init.xavier_uniform_(self.fc3.weight)
self.complex_mask_layer = nn.Linear(600, 2 * 257 * self.num_person)
torch.nn.init.xavier_uniform_(self.complex_mask_layer.weight)
self.drop1 = nn.Dropout(0.2)
self.drop2 = nn.Dropout(0.2)
self.drop3 = nn.Dropout(0.2)
self.batch_norm1 = nn.BatchNorm1d(298)
self.batch_norm2 = nn.BatchNorm1d(298)
self.batch_norm3 = nn.BatchNorm1d(298)
def forward(self, input_audio, input_video):
# input_audio will be (N,514,298)
# input_video will be a list of length num_person; each item is a visual embedding laid out so that Video_Model receives 512 channels (see Video_Model.forward)
input_audio = transforms.to_torchaudio(input_audio).transpose(1, 3)
audio_out = self.audio_output(input_audio)
# audio_out will be (N,8*257,298,1)
AVFusion = [audio_out]
for i in range(self.num_person):
video_out = self.video_output(input_video[i])
AVFusion.append(video_out)
mixed_av = torch.cat(AVFusion, dim=1)
mixed_av = mixed_av.squeeze(3) # (N,input_dim,298)
mixed_av = torch.transpose(mixed_av, 1, 2) # (N,298,input_dim)
self.lstm.flatten_parameters()
mixed_av, (h, c) = self.lstm(mixed_av)
mixed_av = mixed_av[..., :400] + mixed_av[..., 400:]
mixed_av = self.batch_norm1((F.relu(self.fc1(mixed_av))))
mixed_av = self.drop1(mixed_av)
mixed_av = self.batch_norm2(F.relu(self.fc2(mixed_av)))
mixed_av = self.drop2(mixed_av)
mixed_av = self.batch_norm3(F.relu(self.fc3(mixed_av))) # (N,298,600)
mixed_av = self.drop3(mixed_av)
complex_mask = torch.sigmoid(self.complex_mask_layer(mixed_av)) # (N,298,2*257*num_person)
batch_size = complex_mask.size(0) # N
complex_mask = complex_mask.view(batch_size, 2, 298, 257, self.num_person)
output_audio = torch.zeros_like(complex_mask, device=self.device)
for i in range(self.num_person):
output_audio[..., i] = fast_icRM(input_audio, complex_mask[..., i])
output_audio = output_audio.permute(0, 4, 1, 3, 2).reshape(
batch_size, self.num_person, 514, 298
)
return output_audio
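# Hedged end-to-end sketch (illustration only): the fusion model takes an
# (N, 514, 298) mixture spectrogram (real/imaginary stacked along the filter
# axis) plus one visual embedding per speaker, and returns an
# (N, num_person, 514, 298) tensor. Shapes are inferred from the code above;
# running this needs the asteroid dependencies imported at the top of the file.
def _audio_visual_fusion_example(num_person=2):
    model = Audio_Visual_Fusion(num_person=num_person, device="cpu")
    audio = torch.randn(1, 514, 298)
    video = [torch.randn(1, 75, 512) for _ in range(num_person)]
    return model(audio, video).shape  # torch.Size([1, 2, 514, 298])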
def make_model_and_optimizer(conf, gpu_ids=[0]):
"""Define model and optimizer.
Args:
conf: Configuration for model and optimizer.
Returns:
model, optimizer
"""
device = torch.device(conf["training"]["device"])
model = Audio_Visual_Fusion(conf["main_args"]["n_src"], device)
model = model.to(device)
device_count = torch.cuda.device_count()
if len(gpu_ids) > 1 and device_count > 1:
if len(gpu_ids) != device_count:
print(f"Using {gpu_ids} GPUs")
else:
print(f"Using all {device_count} GPUs")
model = torch.nn.DataParallel(model, device_ids=gpu_ids)
optimizer = make_optimizer(model.parameters(), **conf["optim"])
return model, optimizer
def load_best_model(train_conf, exp_dir):
"""Load best model.
NOTE: This function is not needed during training. Catalyst has
a `resume` parameter that takes in the logdir location.
Args:
train_conf: Configuration used during training.
exp_dir: Logdir created by Catalyst.
Returns:
model
"""
model, optimizer = make_model_and_optimizer(train_conf)
# Catalyst stores the best model as: logdir/checkpoints/best_full.pth
exp_dir = Path(exp_dir) if isinstance(exp_dir, str) else exp_dir
best_model_path = exp_dir / "checkpoints" / "best_full.pth"
if not best_model_path.is_file():
print(f"No best path in logdir: {exp_dir}. Initializing model...")
return model
checkpoint = torch.load(best_model_path)
model = torch_utils.load_state_dict_in(checkpoint["model_state_dict"], model)
return model
| [
"torch.zeros",
"torch.device",
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.LSTM",
"torch.cat",
"torch.nn.BatchNorm2d",
"torch.nn.functional.interpolate",
"torch.nn.init.xavier_uniform_",
"torch.cuda.device_count",
"torch.nn.BatchNorm1d",
"torch.load",
"torch.zeros_like",
"torch.transpose",
"torch.log",
"torch.exp",
"torch.nn.DataParallel"
] | 1.3.0 | DizzyProtos/asteroid | bb3c374fefe7525c2f6da355834e470d45b45b90 |
1.5 | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import segmentation_models_pytorch as smp
class Generator_with_Refin(nn.Module):
def __init__(self, encoder):
"""Generator initialization
Args:
encoder: an encoder for Unet generator
"""
super(Generator_with_Refin, self).__init__()
# declare Unet generator
self.generator = smp.Unet(
encoder_name=encoder,
classes=1,
activation='identity',
encoder_depth=4,
decoder_channels=[128, 64, 32, 16],
)
# replace the first conv block in the generator so it accepts a 4-channel input tensor
self.generator.encoder.conv1 = nn.Conv2d(4, 64, kernel_size=(6, 6), stride=(2, 2), padding=(2, 2), bias=False)
self.generator.segmentation_head = nn.Identity()
# RGB-shadow mask as output before refinement module
self.SG_head = nn.Conv2d(in_channels=16, out_channels=3, kernel_size=3, stride=1, padding=1)
# refinement module
self.refinement = torch.nn.Sequential()
for i in range(4):
self.refinement.add_module(f'refinement{3*i+1}', nn.BatchNorm2d(16))
self.refinement.add_module(f'refinement{3*i+2}', nn.ReLU())
self.refinement.add_module(f'refinement{3*i+3}', nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, stride=1, padding=1))
# RGB-shadow mask as output after refinement module
self.output1 = nn.Conv2d(in_channels=16, out_channels=3, kernel_size=3, stride=1, padding=1)
def forward(self, x):
"""Forward for generator
Args:
x: torch.FloatTensor or torch.cuda.FloatTensor - input tensor with images and masks
"""
x = self.generator(x)
out1 = self.SG_head(x)
x = self.refinement(x)
x = self.output1(x)
return out1, x
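# Usage sketch (not part of the original file; the encoder name is an
# assumption): the generator takes a 4-channel input, assumed here to be an
# RGB image concatenated with a mask, and returns two 3-channel shadow maps,
# one before and one after the refinement module. Instantiating it may
# download pretrained encoder weights.
def _generator_example():
    gen = Generator_with_Refin('resnet18')
    x = torch.randn(1, 4, 256, 256)
    coarse, refined = gen(x)
    return coarse.shape, refined.shape  # both torch.Size([1, 3, 256, 256])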
class Discriminator(nn.Module):
def __init__(self, input_shape):
"""Discriminator initialization
Args:
input_shape (tuple): shape of input image
"""
super(Discriminator, self).__init__()
self.input_shape = input_shape
in_channels, in_height, in_width = self.input_shape
patch_h, patch_w = int(in_height / 2 ** 4), int(in_width / 2 ** 4)
self.output_shape = (1, patch_h, patch_w)
def discriminator_block(in_filters, out_filters, first_block=False):
layers = []
layers.append(nn.Conv2d(in_filters, out_filters, kernel_size=3, stride=1, padding=1))
if not first_block:
layers.append(nn.BatchNorm2d(out_filters))
layers.append(nn.LeakyReLU(0.2, inplace=True))
layers.append(nn.Conv2d(out_filters, out_filters, kernel_size=4, stride=2, padding=1)) #k=3,p=1
layers.append(nn.BatchNorm2d(out_filters))
layers.append(nn.LeakyReLU(0.2, inplace=True))
return layers
layers = []
in_filters = in_channels
for i, out_filters in enumerate([64, 128, 256, 512]):
layers.extend(discriminator_block(in_filters, out_filters, first_block=(i == 0)))
in_filters = out_filters
layers.append(nn.Conv2d(out_filters, 1, kernel_size=3, stride=1, padding=1))
self.model = nn.Sequential(*layers)
def forward(self, img):
"""Discriminator forward
"""
return self.model(img)
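# Usage sketch (illustration only): the discriminator maps an image to a
# patch map whose spatial size is the input size divided by 16, matching
# self.output_shape.
def _discriminator_example():
    disc = Discriminator(input_shape=(3, 256, 256))
    img = torch.randn(1, 3, 256, 256)
    out = disc(img)
    return disc.output_shape, out.shape  # (1, 16, 16), torch.Size([1, 1, 16, 16])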
| [
"torch.nn.Identity",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.LeakyReLU",
"torch.nn.ReLU",
"torch.nn.Conv2d"
] | 1.5.0 | sgarg18/arshadowgan | 8183f8c06f93c249e48193cdfa41a5e78bcc3d5e |
1.2 | """
This module helps to feed in train and test datasets.
Select the dataset and seed, then output the corresponding data loaders.
"""
from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import numpy as np
from PIL import Image
def feed_dataset(data, data_dict, seedin = 100, random_train = False):
torch.manual_seed(seedin)
if random_train == True:
if(data == 'MNIST'):
train_set = datasets.MNIST('./', train=True, download = True,transform=transforms.Compose([transforms.ToTensor()]))
test_set = datasets.MNIST('../data', train=False, download = True,transform=transforms.Compose([transforms.ToTensor()]))
full_set = torch.utils.data.ConcatDataset([train_set,test_set])
trans = transforms.Compose(transforms = [
transforms.ToTensor(),transforms.Normalize((0.1307,), (0.3081,))])
train_len = 60000
test_len = 10000
trainset_new, testset_new = torch.utils.data.random_split(full_set,[train_len, test_len])
trainset_new.transform = trans
testset_new.transform = trans
train_loader = torch.utils.data.DataLoader(trainset_new, batch_size = 64, shuffle = True)
test_loader = torch.utils.data.DataLoader(testset_new, batch_size = 1000, shuffle = True)
else:
pass
return train_loader, test_loader
else:
if(data == 'CIFAR10'):
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=5),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
#transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_val = transforms.Compose([
transforms.ToTensor(),
#transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(data_dict, train=True, download = True,
transform=transform_train),
batch_size= 1000, shuffle=True) #, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(data_dict, train=False, download = True,
transform=transform_val),
batch_size= 1000, shuffle=True) #, **kwargs)
elif(data == 'MNIST'):
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(data_dict, train=True, download = True,
transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])),
batch_size=64,
shuffle=True)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, download = True,
transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])),
batch_size=1000,
shuffle=True)
elif(data == 'ImageNet'):
pass
return train_loader, test_loader
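# Usage sketch (added for illustration): build MNIST loaders with a fixed seed.
# The second argument is the root directory passed to the torchvision dataset;
# the path below is only a placeholder and the call downloads MNIST if needed.
def _feed_dataset_example():
    train_loader, test_loader = feed_dataset('MNIST', './data', seedin=100)
    images, labels = next(iter(train_loader))
    return images.shape, labels.shape  # torch.Size([64, 1, 28, 28]), torch.Size([64])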
| [
"torch.manual_seed",
"torch.utils.data.ConcatDataset",
"torch.utils.data.random_split",
"torch.utils.data.DataLoader"
] | 1.2.0 | kkmumu/DeepRobust | 0cc1950177ed6604e55274e1a7cd578d54fba5c4 |
1.3 |
from escnn.nn import FieldType, GeometricTensor
from escnn.group import Representation
from escnn.kernels import KernelBasis
from torch_geometric.data import Data
from .rd_point_convolution import _RdPointConv
from typing import Callable, Tuple, Dict, Union, List
import torch
import numpy as np
import math
__all__ = ["R2PointConv"]
class R2PointConv(_RdPointConv):
def __init__(self,
in_type: FieldType,
out_type: FieldType,
groups: int = 1,
bias: bool = True,
sigma: Union[List[float], float] = None,
width: float = None,
n_rings: int = None,
frequencies_cutoff: Union[float, Callable[[float], int]] = None,
rings: List[float] = None,
basis_filter: Callable[[dict], bool] = None,
recompute: bool = False,
):
basis_filter, self._rings, self._sigma, self._maximum_frequency = compute_basis_params(
frequencies_cutoff, rings, sigma, width, n_rings, basis_filter
)
super(R2PointConv, self).__init__(
in_type, out_type,
d=2,
groups=groups,
bias=bias,
basis_filter=basis_filter,
recompute=recompute
)
def _build_kernel_basis(self, in_repr: Representation, out_repr: Representation) -> KernelBasis:
return self.space.build_kernel_basis(in_repr, out_repr,
self._sigma, self._rings,
maximum_frequency=self._maximum_frequency
)
def bandlimiting_filter(frequency_cutoff: Union[float, Callable[[float], float]]) -> Callable[[dict], bool]:
r"""
Returns a method which takes as input the attributes (as a dictionary) of a basis element and returns a boolean
value: whether to preserve that element (true) or not (false)
If the parameter ``frequency_cutoff`` is a scalar value, the maximum frequency allowed at a certain radius is
proportional to the radius itself. In this case, the parameter ``frequency_cutoff`` is the factor controlling this
proportionality relation.
If the parameter ``frequency_cutoff`` is a callable, it needs to take as input a radius (a scalar value) and return
the maximum frequency which can be sampled at that radius.
Args:
frequency_cutoff (float): factor controlling the bandlimiting
Returns:
a function which checks the attributes of individual basis elements and chooses whether to discard them or not
"""
if isinstance(frequency_cutoff, float):
frequency_cutoff = lambda r, fco=frequency_cutoff: r * frequency_cutoff
def bl_filter(attributes: dict) -> bool:
return math.fabs(attributes["irrep:frequency"]) <= frequency_cutoff(attributes["radius"])
return bl_filter
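# Minimal sketch (not from the original file): with a scalar cutoff, a basis
# element is kept only if its absolute frequency does not exceed
# frequency_cutoff * radius. The attribute values below are arbitrary.
def _bandlimiting_filter_example():
    keep = bandlimiting_filter(2.0)
    inside = keep({"irrep:frequency": 3, "radius": 2.0})   # 3 <= 4 -> True
    outside = keep({"irrep:frequency": 5, "radius": 2.0})  # 5 > 4 -> False
    return inside, outside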
def compute_basis_params(
frequencies_cutoff: Union[float, Callable[[float], float]] = None,
rings: List[float] = None,
sigma: List[float] = None,
width: float = None,
n_rings: int = None,
custom_basis_filter: Callable[[dict], bool] = None,
):
assert (width is not None and n_rings is not None) != (rings is not None)
# by default, the number of rings equals half of the filter size
if rings is None:
assert width > 0.
assert n_rings > 0
rings = torch.linspace(0, width, n_rings)
rings = rings.tolist()
if sigma is None:
sigma = [0.6] * (len(rings) - 1) + [0.4]
for i, r in enumerate(rings):
if r == 0.:
sigma[i] = 0.005
elif isinstance(sigma, float):
sigma = [sigma] * len(rings)
if frequencies_cutoff is None:
frequencies_cutoff = 3.
if isinstance(frequencies_cutoff, float):
frequencies_cutoff = lambda r, fco=frequencies_cutoff: fco * r
# check if the object is a callable function
assert callable(frequencies_cutoff)
maximum_frequency = int(max(frequencies_cutoff(r) for r in rings))
fco_filter = bandlimiting_filter(frequencies_cutoff)
if custom_basis_filter is not None:
basis_filter = lambda d, custom_basis_filter=custom_basis_filter, fco_filter=fco_filter: (
custom_basis_filter(d) and fco_filter(d)
)
else:
basis_filter = fco_filter
return basis_filter, rings, sigma, maximum_frequency
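# Illustrative call (added for clarity): when rings are not given explicitly,
# n_rings equally spaced rings are placed in [0, width] and per-ring sigmas
# are filled in automatically, with a small sigma at radius 0.
def _compute_basis_params_example():
    basis_filter, rings, sigma, max_freq = compute_basis_params(
        frequencies_cutoff=2.0, width=3.0, n_rings=4
    )
    # rings == [0.0, 1.0, 2.0, 3.0]; sigma == [0.005, 0.6, 0.6, 0.4]; max_freq == 6
    return rings, sigma, max_freq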
| [
"torch.linspace"
] | 1.3 | QUVA-Lab/escnn | 59ed6b96f61f8616f87b3f25aa2f8abdb6f1a882 |
1.3 | import unittest
from unittest import TestCase
from escnn.nn import *
from escnn.gspaces import *
import torch
import random
class TestGeometricTensor(TestCase):
def test_sum(self):
for N in [2, 4, 7, 16]:
gs = rot2dOnR2(N)
for irr in gs.irreps:
type = FieldType(gs, [irr] * 3)
for i in range(3):
t1 = GeometricTensor(torch.randn(10, type.size, 11, 11), type)
t2 = GeometricTensor(torch.randn(10, type.size, 11, 11), type)
out1 = t1.tensor + t2.tensor
out2 = (t1 + t2).tensor
out3 = (t2 + t1).tensor
self.assertTrue(torch.allclose(out1, out2))
self.assertTrue(torch.allclose(out3, out2))
def test_isum(self):
for N in [2, 4, 7, 16]:
gs = rot2dOnR2(N)
for irr in gs.irreps:
type = FieldType(gs, [irr] * 3)
for i in range(5):
t1 = GeometricTensor(torch.randn(10, type.size, 11, 11), type)
t2 = GeometricTensor(torch.randn(10, type.size, 11, 11), type)
out1 = t1.tensor + t2.tensor
t1 += t2
out2 = t1.tensor
self.assertTrue(torch.allclose(out1, out2))
def test_sub(self):
for N in [2, 4, 7, 16]:
gs = rot2dOnR2(N)
for irr in gs.irreps:
type = FieldType(gs, [irr]*3)
for i in range(3):
t1 = GeometricTensor(torch.randn(10, type.size, 11, 11), type)
t2 = GeometricTensor(torch.randn(10, type.size, 11, 11), type)
out1 = t1.tensor - t2.tensor
out2 = (t1 - t2).tensor
self.assertTrue(torch.allclose(out1, out2))
def test_isub(self):
for N in [2, 4, 7, 16]:
gs = rot2dOnR2(N)
for irr in gs.irreps:
type = FieldType(gs, [irr] * 3)
for i in range(5):
t1 = GeometricTensor(torch.randn(10, type.size, 11, 11), type)
t2 = GeometricTensor(torch.randn(10, type.size, 11, 11), type)
out1 = t1.tensor - t2.tensor
t1 -= t2
out2 = t1.tensor
self.assertTrue(torch.allclose(out1, out2))
def test_mul(self):
for N in [2, 4, 7, 16]:
gs = rot2dOnR2(N)
for irr in gs.irreps:
type = FieldType(gs, [irr] * 3)
for i in range(3):
t1 = GeometricTensor(torch.randn(10, type.size, 11, 11), type)
s = 10*torch.randn(1)
out1 = t1.tensor * s
out2 = (s * t1).tensor
out3 = (t1 * s).tensor
self.assertTrue(torch.allclose(out1, out2))
self.assertTrue(torch.allclose(out3, out2))
def test_imul(self):
for N in [2, 4, 7, 16]:
gs = rot2dOnR2(N)
for irr in gs.irreps:
type = FieldType(gs, [irr] * 3)
for i in range(5):
t1 = GeometricTensor(torch.randn(10, type.size, 11, 11), type)
s = 10*torch.randn(1)
out1 = t1.tensor * s
t1 *= s
out2 = t1.tensor
self.assertTrue(torch.allclose(out1, out2))
def test_slicing(self):
for N in [2, 4, 7, 16]:
gs = flipRot2dOnR2(N)
for irr in gs.irreps:
# with multiple fields
F = 7
type = FieldType(gs, [irr] * F)
for i in range(3):
t = torch.randn(10, type.size, 11, 11)
gt = GeometricTensor(t, type)
# slice all dims except the channels
self.assertTrue(torch.allclose(
t[2:3, :, 2:7, 2:7],
gt[2:3, :, 2:7, 2:7].tensor,
))
# slice only spatial dims
self.assertTrue(torch.allclose(
t[:, :, 2:7, 2:7],
gt[:, :, 2:7, 2:7].tensor,
))
self.assertTrue(torch.allclose(
t[:, :, 2:7, 2:7],
gt[..., 2:7, 2:7].tensor,
))
# slice only 1 spatial
self.assertTrue(torch.allclose(
t[..., 2:7],
gt[..., 2:7].tensor,
))
# slice only batch
self.assertTrue(torch.allclose(
t[2:4],
gt[2:4, ...].tensor,
))
self.assertTrue(torch.allclose(
t[2:4],
gt[2:4].tensor,
))
# different ranges
self.assertTrue(torch.allclose(
t[:, :, 1:9:2, 0:8:3],
gt[..., 1:9:2, 0:8:3].tensor,
))
# no slicing
self.assertTrue(torch.allclose(
t,
gt[:].tensor,
))
self.assertTrue(torch.allclose(
t,
gt[:, :, :, :].tensor,
))
self.assertTrue(torch.allclose(
t,
gt[:, :, :].tensor,
))
self.assertTrue(torch.allclose(
t,
gt[:, :].tensor,
))
self.assertTrue(torch.allclose(
t,
gt[...].tensor,
))
# slice channels with all fields of same type
self.assertTrue(torch.allclose(
t[:, 1 * irr.size:4 * irr.size:],
gt[:, 1:4, ...].tensor,
))
# slice cover all channels
self.assertTrue(torch.allclose(
t,
gt[:, 0:7, ...].tensor,
))
self.assertTrue(torch.allclose(
t,
gt[:, 0:7:1, ...].tensor,
))
# with a larger step
start = 1
end = 6
step = 2
self.assertTrue(torch.allclose(
t[:,
[f * irr.size + i for f in range(start, end, step) for i in range(irr.size)]
],
gt[:, start:end:step, ...].tensor,
))
start = 0
end = 7
step = 3
self.assertTrue(torch.allclose(
t[:,
[f * irr.size + i for f in range(start, end, step) for i in range(irr.size)]
],
gt[:, start:end:step, ...].tensor,
))
# with negative step
start = 6
end = 1
step = -1
self.assertTrue(torch.allclose(
t[:,
[f * irr.size + i for f in range(start, end, step) for i in range(irr.size)]
],
gt[:, start:end:step, ...].tensor,
))
start = 6
end = 1
step = -2
self.assertTrue(torch.allclose(
t[:,
[f * irr.size + i for f in range(start, end, step) for i in range(irr.size)]
],
gt[:, start:end:step, ...].tensor,
))
# 1 single field
start = 1
end = 2
step = 1
self.assertTrue(torch.allclose(
t[:,
[f * irr.size + i for f in range(start, end, step) for i in range(irr.size)]
],
gt[:, start:end:step, ...].tensor,
))
# index only one field
f = 2
self.assertTrue(torch.allclose(
t[:,
[type.fields_start[f] + i for i in range(irr.size)]
],
gt[:, f:f+1, ...].tensor,
))
# single index
f = 2
self.assertTrue(torch.allclose(
t[:,
[type.fields_start[f] + i for i in range(irr.size)]
],
gt[:, f, ...].tensor,
))
self.assertTrue(torch.allclose(
t[:,
[type.fields_start[f] + i for i in range(irr.size)]
],
gt[:, f].tensor,
))
self.assertTrue(torch.allclose(
t[1:2],
gt[1, ...].tensor,
))
self.assertTrue(torch.allclose(
t[..., 3:4],
gt[..., 3].tensor,
))
self.assertTrue(torch.allclose(
t[..., 2:3, 3:4],
gt[..., 2, 3].tensor,
))
self.assertTrue(torch.allclose(
t[3:4, ..., 2:3, 3:4],
gt[3, ..., 2, 3].tensor,
))
self.assertTrue(torch.allclose(
t[1:2, :irr.size],
gt[1, 0, ...].tensor,
))
self.assertTrue(torch.allclose(
t[1:2, :irr.size, 4:5, 2:3],
gt[1, 0, 4, 2].tensor,
))
# raise errors
with self.assertRaises(TypeError):
sliced = gt[2:5, 0:4, 1:7, 1:7, ...]
with self.assertRaises(TypeError):
sliced = gt[[2, 4, 2], 0:4, ...]
with self.assertRaises(TypeError):
sliced = gt[2, 0:4, range(3), range(3)]
# with a single field
F = 1
type = FieldType(gs, [irr] * F)
for i in range(3):
t = torch.randn(10, type.size, 11, 11)
gt = GeometricTensor(t, type)
# slice all dims except the channels
self.assertTrue(torch.allclose(
t[2:3, :, 2:7, 2:7],
gt[2:3, :, 2:7, 2:7].tensor,
))
# slice only spatial dims
self.assertTrue(torch.allclose(
t[:, :, 2:7, 2:7],
gt[:, :, 2:7, 2:7].tensor,
))
self.assertTrue(torch.allclose(
t[:, :, 2:7, 2:7],
gt[..., 2:7, 2:7].tensor,
))
# slice only 1 spatial
self.assertTrue(torch.allclose(
t[..., 2:7],
gt[..., 2:7].tensor,
))
# slice only batch
self.assertTrue(torch.allclose(
t[2:4],
gt[2:4, ...].tensor,
))
self.assertTrue(torch.allclose(
t[2:4],
gt[2:4].tensor,
))
# different ranges
self.assertTrue(torch.allclose(
t[:, :, 1:9:2, 0:8:3],
gt[..., 1:9:2, 0:8:3].tensor,
))
# no slicing
self.assertTrue(torch.allclose(
t,
gt[:].tensor,
))
self.assertTrue(torch.allclose(
t,
gt[:, :, :, :].tensor,
))
self.assertTrue(torch.allclose(
t,
gt[:, :, :].tensor,
))
self.assertTrue(torch.allclose(
t,
gt[:, :].tensor,
))
self.assertTrue(torch.allclose(
t,
gt[...].tensor,
))
# 1 single field
self.assertTrue(torch.allclose(
t,
gt[:, 0:1, ...].tensor,
))
self.assertTrue(torch.allclose(
t,
gt[:, 0, ...].tensor,
))
# negative index
self.assertTrue(torch.allclose(
t,
gt[:, -1, ...].tensor,
))
# with negative step
start = 0
end = -2
step = -1
self.assertTrue(torch.allclose(
t,
gt[:, start:end:step, ...].tensor,
))
for i in range(3):
reprs = list(gs.representations.values())*3
random.shuffle(reprs)
type = FieldType(gs, reprs)
F = len(type)
t = torch.randn(3, type.size, 3, 4)
gt = GeometricTensor(t, type)
# assignment should not be allowed
with self.assertRaises(TypeError):
gt[2, 1:3, ...] = torch.randn(gt[2, 1:3, ...].shape)
# no slicing
self.assertTrue(torch.allclose(
t,
gt[:].tensor,
))
self.assertTrue(torch.allclose(
t,
gt[:, :, :, :].tensor,
))
self.assertTrue(torch.allclose(
t,
gt[:, :, :].tensor,
))
self.assertTrue(torch.allclose(
t,
gt[:, :].tensor,
))
self.assertTrue(torch.allclose(
t,
gt[...].tensor,
))
# slice channels with all fields of different types
self.assertTrue(torch.allclose(
t[:, type.fields_start[1]:type.fields_end[3]:],
gt[:, 1:4, ...].tensor,
))
# slice cover all channels
self.assertTrue(torch.allclose(
t,
gt[:, 0:F, ...].tensor,
))
self.assertTrue(torch.allclose(
t,
gt[:, 0:F:1, ...].tensor,
))
# with a larger step
start = 1
end = 6
step = 2
self.assertTrue(torch.allclose(
t[:,
[type.fields_start[f] + i for f in range(start, end, step) for i in range(type.representations[f].size)]
],
gt[:, start:end:step, ...].tensor,
))
start = 0
end = 7
step = 3
self.assertTrue(torch.allclose(
t[:,
[type.fields_start[f] + i for f in range(start, end, step) for i in range(type.representations[f].size)]
],
gt[:, start:end:step, ...].tensor,
))
# with negative step
start = 6
end = 1
step = -1
self.assertTrue(torch.allclose(
t[:,
[type.fields_start[f] + i for f in range(start, end, step) for i in range(type.representations[f].size)]
],
gt[:, start:end:step, ...].tensor,
))
start = 6
end = 1
step = -2
self.assertTrue(torch.allclose(
t[:,
[type.fields_start[f] + i for f in range(start, end, step) for i in range(type.representations[f].size)]
],
gt[:, start:end:step, ...].tensor,
))
# single index
for f in range(F):
self.assertTrue(torch.allclose(
t[:,
[type.fields_start[f] + i for i in range(type.representations[f].size)]
],
gt[:, f, ...].tensor,
))
self.assertTrue(torch.allclose(
t[:,
[type.fields_start[f] + i for i in range(type.representations[f].size)]
],
gt[:, f].tensor,
))
self.assertTrue(torch.allclose(
t[1:2,
[type.fields_start[f] + i for i in range(type.representations[f].size)]
],
gt[1, f, ...].tensor,
))
self.assertTrue(torch.allclose(
t[
1:2,
[type.fields_start[f] + i for i in range(type.representations[f].size)],
3:4,
4:5
],
gt[1, f, 3, 4].tensor,
))
def test_rmul(self):
for N in [2, 4, 7, 16]:
gs = rot2dOnR2(N)
for irr in gs.irreps:
type = FieldType(gs, [irr] * 3)
for i in range(3):
t1 = GeometricTensor(torch.randn(10, type.size, 11, 11), type)
for _ in range(5):
g = gs.fibergroup.sample()
out1 = g @ t1
out2 = t1.transform_fibers(g)
self.assertTrue(torch.allclose(out1.tensor, out2.tensor))
if __name__ == '__main__':
unittest.main()
| [
"torch.allclose",
"torch.randn"
] | 1.3 | QUVA-Lab/escnn | 59ed6b96f61f8616f87b3f25aa2f8abdb6f1a882 |
1.7 | # -*- coding: utf-8 -*-
"""
Main proposal object that includes normalising flows.
"""
import copy
import datetime
import logging
import os
import matplotlib.pyplot as plt
import numpy as np
import numpy.lib.recfunctions as rfn
import torch
from ..flowmodel import FlowModel, update_config
from ..livepoint import (
live_points_to_array,
numpy_array_to_live_points,
get_dtype,
DEFAULT_FLOAT_DTYPE
)
from ..reparameterisations import (
CombinedReparameterisation,
get_reparameterisation
)
from ..plot import plot_live_points, plot_1d_comparison
from .rejection import RejectionProposal
from ..utils import (
compute_radius,
get_uniform_distribution,
get_multivariate_normal,
detect_edge,
configure_edge_detection,
save_live_points
)
logger = logging.getLogger(__name__)
class FlowProposal(RejectionProposal):
"""
Object that handles training and proposal points
Parameters
----------
model : :obj:`nessai.model.Model`
User defined model.
flow_config : dict, optional
Configuration for training the normalising flow. If None, uses default
settings. Defaults to None.
output : str, optional
Path to output directory.
plot : {True, False, 'all', 'min'}, optional
Controls the plotting level: ``True`` - all plots; ``False`` - no
plots; ``'all'`` - all plots and ``'min'`` - 1d plots and loss.
latent_prior : {'truncated_gaussian', 'gaussian', 'uniform_nsphere', \
'gaussian'}, optional
Prior distribution in the latent space. Defaults to
'truncated_gaussian'.
poolsize : int, optional
Size of the proposal pool. Defaults to 10000.
update_poolsize : bool, optional
If True the poolsize is updated using the current acceptance of the
nested sampler.
max_poolsize_scale : int, optional
Maximum scale for increasing the poolsize. E.g. if this value is 10
and the poolsize is 1000 the maximum number of points in the pool
is 10,000.
drawsize : int, optional
Number of points to simultaneously draw when populating the proposal
Defaults to 10000
check_acceptance : bool, optional
If True the acceptance is computed after populating the pool. This
includes computing the likelihood for every point. Default False.
min_radius : float, optional
Minimum radius used for population. If not specified no minimum is
used.
max_radius : float, optional
If a float then this value is used as an upper limit for the
computed radius when populating the proposal. If unspecified no
upper limit is used.
fixed_radius : float, optional
If specified and the chosen latent distribution is compatible, this
radius will be used to draw new samples instead of the value computed
with the flow.
constant_volume_mode : bool
If True, then a constant volume is used for the latent contour used to
draw new samples. The exact volume can be set using `volume_fraction`
volume_fraction : float
Fraction of the total probability to contain with the latent contour
when using a constant volume.
compute_radius_with_all : bool, optional
If True the radius of the latent contour is computed using the
maximum radius of all the samples used to train the flow.
fuzz : float, optional
Fuzz-factor applied to the radius. If unspecified no fuzz-factor is
applied.
expansion_fraction : float, optional
Similar to ``fuzz`` but, instead of a scaling factor applied to the radius,
this specifies a rescaling of the volume of the n-ball used to draw
samples. This is translated to a value for ``fuzz``.
truncate : bool, optional
Truncate proposals using the probability computed for the worst point.
Not recommended.
rescale_parameters : list or bool, optional
If True live points are rescaled to `rescale_bounds` before training.
If an instance of `list` then must contain names of parameters to
rescale. If False no rescaling is applied.
rescale_bounds : list, optional
Lower and upper bound to use for rescaling. Defaults to [-1, 1]. See
`rescale_parameters`.
update_bounds : bool, optional
If True bounds used for rescaling are updated at the starting of
training. If False prior bounds are used.
boundary_inversion : bool or list, optional
If True boundary inversion is applied to all bounds. If an instance
of `list` of parameter names, then inversion is only applied to these
parameters. If False (default) no inversion is used.
inversion_type : {'split', 'duplicate'}
Type of inversion to use. ``'split'`` keeps the number of samples
the same but mirrors half of them around the bound. ``'duplicate'`` mirrors
all the samples at the bound.
detect_edges : bool, optional
If True, when applying the inversion, the option of no inversion is
allowed.
detect_edges_kwargs : dict, optional
Dictionary of keyword arguments passed to \
:func:`nessai.utils.detect_edge`.
reparameterisations : dict, optional
Dictionary for configure more flexible reparameterisations. This
ignores any of the other settings related to rescaling. For more
details see the documentation.
fallback_reparameterisation : None or str
Name of the reparameterisation to be used for parameters that have not
been specified in the reparameterisations dictionary. If None, the
:py:class:`~nessai.reparameterisations.NullReparameterisation` is used.
Reparameterisation should support multiple parameters.
use_default_reparameterisations : bool, optional
If True then reparameterisations will be used even if
``reparameterisations`` is None. The exact reparameterisations used
will depend on
:py:func:`~nessai.proposal.flowproposal.FlowProposal.add_default_reparameterisations`
which may be overloaded by child classes. If not specified then the
value of the attribute
:py:attr:`~nessai.proposal.flowproposal.FlowProposal.use_default_reparameterisations`
is used.
draw_latent_kwargs : dict, optional
Dictionary of kwargs passed to the function for drawing samples
in the latent space. See the functions in utils for the possible
kwargs.
"""
use_default_reparameterisations = False
"""
Indicates whether reparameterisations will be used by default in this
class. Child classes can change this value to force the default
behaviour to change without changing the keyword arguments.
"""
def __init__(
self,
model,
flow_config=None,
output='./',
poolsize=None,
rescale_parameters=True,
latent_prior='truncated_gaussian',
constant_volume_mode=True,
volume_fraction=0.95,
fuzz=1.0,
plot='min',
fixed_radius=False,
drawsize=None,
check_acceptance=False,
truncate=False,
rescale_bounds=[-1, 1],
expansion_fraction=4.0,
boundary_inversion=False,
inversion_type='split',
update_bounds=True,
min_radius=False,
max_radius=50.0,
max_poolsize_scale=10,
update_poolsize=True,
save_training_data=False,
compute_radius_with_all=False,
draw_latent_kwargs=None,
detect_edges=False,
detect_edges_kwargs=None,
reparameterisations=None,
fallback_reparameterisation=None,
use_default_reparameterisations=None,
**kwargs
):
super(FlowProposal, self).__init__(model)
logger.debug('Initialising FlowProposal')
self._x_dtype = False
self._x_prime_dtype = False
self.flow = None
self._flow_config = None
self.populated = False
self.populating = False # Flag used for resuming during population
self.indices = []
self.training_count = 0
self.populated_count = 0
self.names = []
self.training_data = None
self.save_training_data = save_training_data
self.x = None
self.samples = None
self.rescaled_names = []
self.acceptance = []
self.approx_acceptance = []
self._edges = {}
self._reparameterisation = None
self.use_x_prime_prior = False
self.reparameterisations = reparameterisations
if use_default_reparameterisations is not None:
self.use_default_reparameterisations = \
use_default_reparameterisations
self.fallback_reparameterisation = fallback_reparameterisation
self.output = output
self.configure_population(poolsize, drawsize, update_poolsize,
max_poolsize_scale, fuzz, expansion_fraction,
latent_prior)
self.rescale_parameters = rescale_parameters
self.update_bounds = update_bounds
self.check_acceptance = check_acceptance
self.rescale_bounds = rescale_bounds
self.truncate = truncate
self.boundary_inversion = boundary_inversion
self.inversion_type = inversion_type
self.flow_config = flow_config
self.constant_volume_mode = constant_volume_mode
self.volume_fraction = volume_fraction
self.detect_edges = detect_edges
self.detect_edges_kwargs = \
configure_edge_detection(detect_edges_kwargs, detect_edges)
self.compute_radius_with_all = compute_radius_with_all
self.configure_fixed_radius(fixed_radius)
self.configure_min_max_radius(min_radius, max_radius)
self.configure_plotting(plot)
if draw_latent_kwargs is None:
self.draw_latent_kwargs = {}
else:
self.draw_latent_kwargs = draw_latent_kwargs
self.configure_latent_prior()
self.alt_dist = None
if kwargs:
kwargs.pop('max_threads', None)
logger.warning(
f'Extra kwargs were passed to FlowProposal: {kwargs}')
@property
def poolsize(self):
"""
Return the poolsize based of the base value and the current
value of the scaling
"""
return int(self._poolsize_scale * self._poolsize)
@property
def flow_config(self):
"""Return the configuration for the flow"""
return self._flow_config
@flow_config.setter
def flow_config(self, config):
"""Set configuration (includes checking defaults)"""
self._flow_config = update_config(config)
@property
def dims(self):
"""Return the number of dimensions"""
return len(self.names)
@property
def rescaled_dims(self):
"""Return the number of rescaled dimensions"""
return len(self.rescaled_names)
@property
def x_dtype(self):
"""Return the dtype for the x space"""
if not self._x_dtype:
self._x_dtype = get_dtype(self.names, DEFAULT_FLOAT_DTYPE)
return self._x_dtype
@property
def x_prime_dtype(self):
"""Return the dtype for the x prime space"""
if not self._x_prime_dtype:
self._x_prime_dtype = \
get_dtype(self.rescaled_names, DEFAULT_FLOAT_DTYPE)
return self._x_prime_dtype
@property
def population_dtype(self):
"""
dtype used for populating the proposal, depends on whether the prior
is defined in the x space or x-prime space
"""
if self.use_x_prime_prior:
return self.x_prime_dtype
else:
return self.x_dtype
def configure_population(self, poolsize, drawsize, update_poolsize,
max_poolsize_scale, fuzz, expansion_fraction,
latent_prior):
"""
Configure settings related to population
"""
if poolsize is None:
raise RuntimeError('Must specify a poolsize!')
if drawsize is None:
drawsize = poolsize
self._poolsize = poolsize
self._poolsize_scale = 1.0
self.update_poolsize = update_poolsize
self.max_poolsize_scale = max_poolsize_scale
self.ns_acceptance = 1.
self.drawsize = drawsize
self.fuzz = fuzz
self.expansion_fraction = expansion_fraction
self.latent_prior = latent_prior
def configure_plotting(self, plot):
"""Configure plotting.
Plotting is split into training and pool. Training refers to plots
        produced during training and pool refers to plots produced during
the population stage.
Parameters
----------
plot : {True, False, 'all', 'train', 'pool', 'min', 'minimal'}
Level of plotting. `all`, `train` and `pool` enable corner style
plots. All other values that evaluate to True enable 1d histogram
plots. False disables all plotting.
"""
if plot:
if isinstance(plot, str):
if plot == 'all':
self._plot_pool = 'all'
self._plot_training = 'all'
elif plot == 'train':
self._plot_pool = False
self._plot_training = 'all'
elif plot == 'pool':
self._plot_pool = 'all'
self._plot_training = False
elif plot == 'minimal' or plot == 'min':
self._plot_pool = True
self._plot_training = True
else:
logger.warning(
f'Unknown plot argument: {plot}, setting all false')
self._plot_pool = False
self._plot_training = False
else:
self._plot_pool = True
self._plot_training = True
else:
self._plot_pool = False
self._plot_training = False
def configure_latent_prior(self):
"""Configure the latent prior"""
if self.latent_prior == 'truncated_gaussian':
from ..utils import draw_truncated_gaussian
self.draw_latent_prior = draw_truncated_gaussian
var = self.flow_config['model_config'].get('kwargs', {}).get('var')
if var and 'var' not in self.draw_latent_kwargs:
self.draw_latent_kwargs['var'] = var
elif self.latent_prior == 'gaussian':
logger.warning('Using a gaussian latent prior WITHOUT truncation')
from ..utils import draw_gaussian
self.draw_latent_prior = draw_gaussian
elif self.latent_prior == 'uniform':
from ..utils import draw_uniform
self.draw_latent_prior = draw_uniform
elif self.latent_prior in ['uniform_nsphere', 'uniform_nball']:
from ..utils import draw_nsphere
self.draw_latent_prior = draw_nsphere
else:
raise RuntimeError(
f'Unknown latent prior: {self.latent_prior}, choose from: '
'truncated_gaussian (default), gaussian, '
                'uniform, uniform_nsphere, uniform_nball'
)
def configure_fixed_radius(self, fixed_radius):
"""Configure the fixed radius"""
if fixed_radius:
try:
self.fixed_radius = float(fixed_radius)
except ValueError:
logger.error(
'Fixed radius enabled but could not be converted to a '
'float. Setting fixed_radius=False'
)
self.fixed_radius = False
else:
self.fixed_radius = False
def configure_min_max_radius(self, min_radius, max_radius):
"""
Configure the minimum and maximum radius
"""
if isinstance(min_radius, (int, float)):
self.min_radius = float(min_radius)
else:
raise RuntimeError('Min radius must be an int or float')
if max_radius:
if isinstance(max_radius, (int, float)):
self.max_radius = float(max_radius)
else:
raise RuntimeError('Max radius must be an int or float')
else:
logger.warning('Running without a maximum radius! The proposal '
'process may get stuck if very large radii are '
'returned by the worst point.')
self.max_radius = False
def configure_constant_volume(self):
"""Configure using constant volume latent contour."""
if self.constant_volume_mode:
logger.debug('Configuring constant volume latent contour')
if not self.latent_prior == 'truncated_gaussian':
raise RuntimeError(
"Constant volume requires "
"`latent_prior='truncated_gaussian'`"
)
self.fixed_radius = compute_radius(
self.rescaled_dims, self.volume_fraction
)
self.fuzz = 1.0
if self.max_radius < self.fixed_radius:
logger.warning(
                    'Max radius is less than the radius needed to use a '
'constant volume latent contour. Max radius will be '
'disabled.'
)
self.max_radius = False
if self.min_radius > self.fixed_radius:
logger.warning(
                    'Min radius is greater than the radius needed to use a '
'constant volume latent contour. Min radius will be '
'disabled.'
)
self.min_radius = False
else:
logger.debug(
'Nothing to configure for constant volume latent contour.'
)
def update_flow_config(self):
"""Update the flow configuration dictionary."""
self.flow_config['model_config']['n_inputs'] = self.rescaled_dims
def initialise(self):
"""
Initialise the proposal class.
This includes:
* Setting up the rescaling
* Verifying the rescaling is invertible
* Initialising the FlowModel
"""
if not os.path.exists(self.output):
os.makedirs(self.output, exist_ok=True)
self._x_dtype = False
self._x_prime_dtype = False
self.set_rescaling()
self.verify_rescaling()
if self.expansion_fraction and self.expansion_fraction is not None:
            logger.info('Overwriting fuzz factor with expansion fraction')
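            # Note: scaling the latent radius by (1 + f) ** (1 / d) in d
            # dimensions scales the enclosed volume by a factor of (1 + f).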
self.fuzz = \
(1 + self.expansion_fraction) ** (1 / self.rescaled_dims)
logger.info(f'New fuzz factor: {self.fuzz}')
self.configure_constant_volume()
self.update_flow_config()
self.flow = FlowModel(config=self.flow_config, output=self.output)
self.flow.initialise()
self.populated = False
self.initialised = True
def update_poolsize_scale(self, acceptance):
"""
Update poolsize given the current acceptance.
Parameters
----------
acceptance : float
Current acceptance.
"""
logger.debug(f'Updating poolsize with acceptance: {acceptance:.3f}')
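        # The pool size is scaled inversely with the acceptance (clipped to
        # [1, max_poolsize_scale]) so the expected number of accepted points
        # per pool stays roughly constant.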
if not acceptance:
logger.warning('Acceptance is zero, using maximum scale')
self._poolsize_scale = self.max_poolsize_scale
else:
self._poolsize_scale = 1.0 / acceptance
if self._poolsize_scale > self.max_poolsize_scale:
logger.warning(
'Poolsize scaling is greater than maximum value')
self._poolsize_scale = self.max_poolsize_scale
if self._poolsize_scale < 1.:
self._poolsize_scale = 1.
def set_boundary_inversion(self):
"""
        Set up boundary inversion
"""
if self.boundary_inversion:
if not self.rescale_parameters:
raise RuntimeError('Boundary inversion requires rescaling')
if (isinstance(self.boundary_inversion, list) and
not set(self.boundary_inversion).issubset(self.names)):
raise RuntimeError(
'Boundaries are not in known parameters')
elif isinstance(self.rescale_parameters, list):
if (isinstance(self.boundary_inversion, list) and
not set(self.boundary_inversion).issubset(
self.rescale_parameters)):
raise RuntimeError(
'Boundaries are not in rescaled parameters')
if not isinstance(self.boundary_inversion, list):
if isinstance(self.rescale_parameters, list):
self.boundary_inversion = self.rescale_parameters.copy()
else:
self.boundary_inversion = self.names.copy()
            logger.info('Applying boundary inversion to: '
f'{self.boundary_inversion}')
if self.inversion_type not in ('split', 'duplicate'):
raise RuntimeError(
f'Unknown inversion type: {self.inversion_type}')
self.rescale_bounds = [0, 1]
self.update_bounds = True
self._edges = {n: None for n in self.boundary_inversion}
logger.info(f'Changing bounds to {self.rescale_bounds}')
else:
self.boundary_inversion = []
def add_default_reparameterisations(self):
"""Add any reparameterisations which are assumed by default"""
logger.debug('No default reparameterisations')
def get_reparameterisation(self, name):
"""Get the reparameterisation from the name"""
return get_reparameterisation(name)
def configure_reparameterisations(self, reparameterisations):
"""Configure the reparameterisations.
Parameters
----------
reparameterisations : {dict, None}
Dictionary of reparameterisations. If None, then the defaults
            from :py:func:`get_default_reparameterisations` are used.
"""
if reparameterisations is None:
logger.info(
'No reparameterisations provided, using default '
f'reparameterisations included in {self.__class__.__name__}'
)
_reparameterisations = {}
else:
_reparameterisations = copy.deepcopy(reparameterisations)
logger.info(f'Adding reparameterisations from: {_reparameterisations}')
self._reparameterisation = CombinedReparameterisation()
if not isinstance(_reparameterisations, dict):
raise TypeError('Reparameterisations must be a dictionary, '
f'received {type(_reparameterisations).__name__}')
for k, config in _reparameterisations.items():
if k in self.names:
logger.debug(f'Found parameter {k} in model, '
'assuming it is a parameter')
if isinstance(config, str) or config is None:
rc, default_config = self.get_reparameterisation(config)
default_config['parameters'] = k
elif isinstance(config, dict):
if config.get('reparameterisation', None) is None:
raise RuntimeError(
f'No reparameterisation found for {k}. '
'Check inputs (and their spelling :)). '
f'Current keys: {list(config.keys())}')
rc, default_config = self.get_reparameterisation(
config['reparameterisation'])
config.pop('reparameterisation')
if config.get('parameters', False):
config['parameters'] += [k]
else:
default_config['parameters'] = k
default_config.update(config)
else:
raise TypeError(
f'Unknown config type for: {k}. Expected str or dict, '
f'received instance of {type(config)}.')
else:
logger.debug(f'Assuming {k} is a reparameterisation')
try:
rc, default_config = self.get_reparameterisation(k)
default_config.update(config)
except ValueError:
raise RuntimeError(
f'{k} is not a parameter in the model or a known '
'reparameterisation')
if not default_config.get('parameters', False):
raise RuntimeError('No parameters key in the config! '
'Check reparameterisations, setting logging'
' level to DEBUG can be helpful')
if ('boundary_inversion' in default_config and
default_config['boundary_inversion']):
self.boundary_inversion = True
if isinstance(default_config['parameters'], list):
prior_bounds = {p: self.model.bounds[p]
for p in default_config['parameters']}
else:
prior_bounds = \
{default_config['parameters']:
self.model.bounds[default_config['parameters']]}
logger.info(f'Adding {rc.__name__} with config: {default_config}')
r = rc(prior_bounds=prior_bounds, **default_config)
self._reparameterisation.add_reparameterisations(r)
self.add_default_reparameterisations()
other_params = [n for n in self.names
if n not in self._reparameterisation.parameters]
if other_params:
logger.debug('Getting fallback reparameterisation')
FallbackClass, fallback_kwargs = \
self.get_reparameterisation(self.fallback_reparameterisation)
fallback_kwargs['prior_bounds'] = \
{p: self.model.bounds[p] for p in other_params}
logger.info(
f'Assuming fallback reparameterisation '
f'({FallbackClass.__name__}) for {other_params} with kwargs: '
f'{fallback_kwargs}.'
)
r = FallbackClass(parameters=other_params, **fallback_kwargs)
self._reparameterisation.add_reparameterisations(r)
if any(r._update_bounds for r in self._reparameterisation.values()):
self.update_bounds = True
else:
self.update_bounds = False
if self._reparameterisation.has_prime_prior:
self.use_x_prime_prior = True
self.x_prime_log_prior = self._reparameterisation.x_prime_log_prior
logger.debug('Using x prime prior')
else:
logger.debug('Prime prior is disabled')
if self._reparameterisation.requires_prime_prior:
raise RuntimeError(
'One or more reparameterisations require use of the x '
'prime prior but it cannot be enabled with the current '
'settings.'
)
self.rescale = self._rescale_w_reparameterisation
self.inverse_rescale = \
self._inverse_rescale_w_reparameterisation
self.names = self._reparameterisation.parameters
self.rescaled_names = self._reparameterisation.prime_parameters
self.rescale_parameters = \
[p for p in self._reparameterisation.parameters
if p not in self._reparameterisation.prime_parameters]
def set_rescaling(self):
"""
Set function and parameter names for rescaling
"""
self.names = self.model.names.copy()
self.rescaled_names = self.names.copy()
self.set_boundary_inversion()
if self.model.reparameterisations is not None:
self.configure_reparameterisations(self.model.reparameterisations)
self.reparameterisations = self.model.reparameterisations
elif (
self.reparameterisations is not None
or self.use_default_reparameterisations
):
self.configure_reparameterisations(self.reparameterisations)
elif self.rescale_parameters:
            # If rescale_parameters is a list, these are the parameters to
            # rescale, otherwise all parameters are rescaled
if not isinstance(self.rescale_parameters, list):
self.rescale_parameters = self.names.copy()
for i, rn in enumerate(self.rescaled_names):
if rn in self.rescale_parameters:
self.rescaled_names[i] += '_prime'
self._min = {n: self.model.bounds[n][0] for n in self.model.names}
self._max = {n: self.model.bounds[n][1] for n in self.model.names}
self._rescale_factor = np.ptp(self.rescale_bounds)
self._rescale_shift = self.rescale_bounds[0]
self.rescale = self._rescale_to_bounds
self.inverse_rescale = self._inverse_rescale_to_bounds
logger.info(f'Set to rescale inputs to {self.rescale_bounds}')
if self.update_bounds:
logger.info(
'Rescaling will use min and max of current live points'
)
else:
logger.info('Rescaling will use model bounds')
logger.info(f'x space parameters: {self.names}')
        logger.info(f'parameters to rescale: {self.rescale_parameters}')
logger.info(f'x prime space parameters: {self.rescaled_names}')
def verify_rescaling(self):
"""
Verify the rescaling functions are invertible
"""
logger.info('Verifying rescaling functions')
x = self.model.new_point(N=1000)
for inversion in ['lower', 'upper', False, None]:
self.check_state(x)
logger.debug(f'Testing: {inversion}')
x_prime, log_J = self.rescale(x, test=inversion)
x_out, log_J_inv = self.inverse_rescale(x_prime)
if x.size == x_out.size:
for f in x.dtype.names:
if not np.allclose(x[f], x_out[f]):
raise RuntimeError(
f'Rescaling is not invertible for {f}')
if not np.allclose(log_J, -log_J_inv):
raise RuntimeError('Rescaling Jacobian is not invertible')
else:
# ratio = x_out.size // x.size
for f in x.dtype.names:
if not all([np.any(np.isclose(x[f], xo))
for xo in x_out[f]]):
raise RuntimeError(
'Duplicate samples must map to same input values. '
'Check the rescaling and inverse rescaling '
f'functions for {f}.')
for f in x.dtype.names:
if not np.allclose(x[f], x_out[f][:x.size]):
raise RuntimeError(
f'Rescaling is not invertible for {f}')
if not np.allclose(log_J, -log_J_inv):
raise RuntimeError('Rescaling Jacobian is not invertible')
logger.info('Rescaling functions are invertible')
def _rescale_w_reparameterisation(self, x, compute_radius=False, **kwargs):
x_prime = np.zeros([x.size], dtype=self.x_prime_dtype)
log_J = np.zeros(x_prime.size)
if x.size == 1:
x = np.array([x], dtype=x.dtype)
x, x_prime, log_J = self._reparameterisation.reparameterise(
x, x_prime, log_J, compute_radius=compute_radius, **kwargs)
x_prime['logP'] = x['logP']
x_prime['logL'] = x['logL']
return x_prime, log_J
def _inverse_rescale_w_reparameterisation(self, x_prime, **kwargs):
x = np.zeros([x_prime.size], dtype=self.x_dtype)
log_J = np.zeros(x.size)
x, x_prime, log_J = self._reparameterisation.inverse_reparameterise(
x, x_prime, log_J, **kwargs)
x['logP'] = x_prime['logP']
x['logL'] = x_prime['logL']
return x, log_J
def _rescale_to_bounds(self, x, compute_radius=False, test=None):
"""
Rescale the inputs to specified bounds
"""
x_prime = np.zeros([x.size], dtype=self.x_prime_dtype)
log_J = np.zeros(x_prime.size)
if x.size == 1:
x = np.array([x], dtype=x.dtype)
for n, rn in zip(self.names, self.rescaled_names):
if n not in self.model.names:
continue
if n in self.rescale_parameters:
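                # Linear map from [min, max] to rescale_bounds; the
                # log-Jacobian contribution is the log of the constant slope.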
x_prime[rn] = self._rescale_factor \
* ((x[n] - self._min[n])
/ (self._max[n] - self._min[n])) \
+ self._rescale_shift
log_J += (-np.log(self._max[n] - self._min[n])
+ np.log(self._rescale_factor))
if n in self.boundary_inversion:
if self._edges[n] is None:
logger.debug('Determining edge')
self._edges[n] = detect_edge(
x_prime[rn],
test=test,
**self.detect_edges_kwargs
)
if self._edges[n]:
logger.debug(
f'Apply inversion for {n} to '
f'{self._edges[n]} bound'
)
if self._edges[n] == 'upper':
x_prime[rn] = 1 - x_prime[rn]
if (self.inversion_type == 'duplicate' or
compute_radius):
x_inv = x_prime.copy()
x_inv[rn] *= -1
x_prime = np.concatenate([x_prime, x_inv])
x = np.concatenate([x, x])
log_J = np.concatenate([log_J, log_J])
else:
inv = np.random.choice(x_prime.size,
x_prime.size // 2,
replace=False)
x_prime[rn][inv] *= -1
else:
logger.debug(f'Not using inversion for {n}')
else:
x_prime[rn] = x[n]
x_prime['logP'] = x['logP']
x_prime['logL'] = x['logL']
return x_prime, log_J
def _inverse_rescale_to_bounds(self, x_prime):
"""
Rescale the inputs from the prime space to the physical space
using the bounds specified
"""
x = np.zeros([x_prime.size], dtype=self.x_dtype)
log_J = np.zeros(x_prime.size)
for n, rn in zip(self.names, self.rescaled_names):
if n in self.rescale_parameters:
if n in self.boundary_inversion:
inv = x_prime[rn] < 0.
x_prime[rn][~inv] = x_prime[rn][~inv]
x_prime[rn][inv] = -x_prime[rn][inv]
if self._edges[n] == 'upper':
x_prime[rn] = 1 - x_prime[rn]
x[n] = (self._max[n] - self._min[n]) \
* (x_prime[rn] - self._rescale_shift) \
/ self._rescale_factor + self._min[n]
log_J += (np.log(self._max[n] - self._min[n])
- np.log(self._rescale_factor))
else:
x[n] = x_prime[rn]
x['logP'] = x_prime['logP']
x['logL'] = x_prime['logL']
return x, log_J
def rescale(self, x, compute_radius=False, **kwargs):
"""
Rescale from the physical space to the primed physical space
Parameters
----------
x: array_like
Array of live points to rescale
compute_radius: bool (False)
Used to indicate when rescaling is being used for computing the
radius for population. This is important for rescaling that uses
inversions.
Returns
-------
array
Array of rescaled values
array
Array of log det|J|
"""
log_J = np.zeros(x.size)
return x, log_J
def inverse_rescale(self, x_prime, **kwargs):
"""
Rescale from the primed physical space to the original physical
space.
Parameters
----------
x_prime : array_like
Array of live points to rescale.
Returns
-------
array
Array of rescaled values in the data space.
array
Array of log-Jacobian determinants.
"""
log_J = np.zeros(x_prime.size)
return x_prime, log_J
def check_state(self, x):
"""
        Operations that need to be checked before training. These include
updating the bounds for rescaling and resetting the bounds for
inversion.
Parameters
----------
x: array_like
Array of training live points which can be used to set parameters
"""
if self.boundary_inversion:
logger.debug('Resetting inversion')
if self._reparameterisation is not None:
self._reparameterisation.reset_inversion()
else:
self._edges = {n: None for n in self.boundary_inversion}
if self.update_bounds:
logger.debug('Updating bounds')
if self._reparameterisation is not None:
self._reparameterisation.update_bounds(x)
else:
self._min = {n: np.min(x[n]) for n in self.model.names}
self._max = {n: np.max(x[n]) for n in self.model.names}
def _plot_training_data(self, output):
"""Plot the training data and compare to the results"""
z_training_data, _ = self.forward_pass(self.training_data,
rescale=True)
z_gen = np.random.randn(self.training_data.size, self.dims)
fig = plt.figure()
plt.hist(np.sqrt(np.sum(z_training_data ** 2, axis=1)), 'auto')
plt.xlabel('Radius')
fig.savefig(os.path.join(output, 'radial_dist.png'))
plt.close(fig)
plot_1d_comparison(
z_training_data,
z_gen,
labels=['z_live_points', 'z_generated'],
convert_to_live_points=True,
filename=os.path.join(output, 'z_comparison.png')
)
x_prime_gen, log_prob = self.backward_pass(z_gen, rescale=False)
x_prime_gen['logL'] = log_prob
x_gen, log_J = self.inverse_rescale(x_prime_gen)
x_gen, log_J, x_prime_gen = \
self.check_prior_bounds(x_gen, log_J, x_prime_gen)
x_gen['logL'] += log_J
plot_1d_comparison(
self.training_data,
x_gen,
parameters=self.model.names,
labels=['live points', 'generated'],
filename=os.path.join(output, 'x_comparison.png')
)
if self._plot_training == 'all':
plot_live_points(
self.training_data,
c='logL',
filename=os.path.join(output, 'x_samples.png')
)
plot_live_points(
x_gen,
c='logL',
filename=os.path.join(output, 'x_generated.png')
)
if self.rescale_parameters:
if self._plot_training == 'all':
plot_live_points(
self.training_data_prime,
c='logL',
filename=os.path.join(output, 'x_prime_samples.png')
)
plot_live_points(
x_prime_gen,
c='logL',
filename=os.path.join(output, 'x_prime_generated.png')
)
plot_1d_comparison(
self.training_data_prime,
x_prime_gen,
parameters=self.rescaled_names,
labels=['live points', 'generated'],
filename=os.path.join(output, 'x_prime_comparison.png')
)
def train(self, x, plot=True):
"""
Train the normalising flow given some of the live points.
Parameters
----------
x : structured_array
Array of live points
plot : {True, False, 'all'}
            Enable or disable plots during training. By default the plots
are only one-dimensional histograms, `'all'` includes corner
plots with samples, these are often a few MB in size so
proceed with caution!
"""
if (plot and self._plot_training) or self.save_training_data:
block_output = os.path.join(
self.output, 'training', f'block_{self.training_count}', '')
else:
block_output = self.output
if not os.path.exists(block_output):
os.makedirs(block_output, exist_ok=True)
if self.save_training_data:
save_live_points(
x, os.path.join(block_output, 'training_data.json'))
self.training_data = x.copy()
self.check_state(self.training_data)
x_prime, _ = self.rescale(x)
self.training_data_prime = x_prime.copy()
# Convert to numpy array for training and remove likelihoods and priors
        # Since the names of parameters may have changed, use the rescaled names
x_prime_array = live_points_to_array(x_prime, self.rescaled_names)
self.flow.train(x_prime_array,
output=block_output, plot=self._plot_training and plot)
if self._plot_training and plot:
self._plot_training_data(block_output)
self.populated = False
self.training_count += 1
def reset_model_weights(self, **kwargs):
"""
Reset the flow weights.
Parameters
----------
kwargs :
Keyword arguments passed to
:meth:`nessai.flowmodel.FlowModel.reset_model`.
"""
self.flow.reset_model(**kwargs)
def check_prior_bounds(self, x, *args):
"""
Return only values that are within the prior bounds
Parameters
----------
x: array_like
Array of live points which will compared against prior bounds
*args:
Additional arrays which correspond to the array of live points.
Only those corresponding to points within the prior bounds
are returned
Returns
-------
out: tuple of arrays
Array containing the subset of the original arrays which fall
within the prior bounds
"""
idx = np.array(list(((x[n] >= self.model.bounds[n][0])
& (x[n] <= self.model.bounds[n][1]))
for n in self.model.names)).T.all(1)
out = (a[idx] for a in (x,) + args)
return out
def forward_pass(self, x, rescale=True, compute_radius=True):
"""
Pass a vector of points through the model
Parameters
----------
x : array_like
Live points to map to the latent space
rescale : bool, optional (True)
Apply rescaling function
compute_radius : bool, optional (True)
            Flag passed to rescaling for rescaling specific to radius
computation
Returns
-------
x : array_like
Samples in the latent space
log_prob : array_like
Log probabilities corresponding to each sample (including the
            Jacobian)
"""
log_J = 0
if rescale:
x, log_J_rescale = self.rescale(x, compute_radius=compute_radius)
log_J += log_J_rescale
x = live_points_to_array(x, names=self.rescaled_names)
if x.ndim == 1:
x = x[np.newaxis, :]
z, log_prob = self.flow.forward_and_log_prob(x)
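        # Change of variables: include the log-Jacobian of the rescaling in
        # the returned log-probability.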
return z, log_prob + log_J
def backward_pass(self, z, rescale=True):
"""
A backwards pass from the model (latent -> real)
Parameters
----------
z : array_like
Structured array of points in the latent space
rescale : bool, optional (True)
Apply inverse rescaling function
Returns
-------
x : array_like
            Samples in the data space
log_prob : array_like
Log probabilities corresponding to each sample (including the
Jacobian)
"""
# Compute the log probability
try:
x, log_prob = self.flow.sample_and_log_prob(
z=z, alt_dist=self.alt_dist)
except AssertionError:
return np.array([]), np.array([])
valid = np.isfinite(log_prob)
x, log_prob = x[valid], log_prob[valid]
x = numpy_array_to_live_points(x.astype(DEFAULT_FLOAT_DTYPE),
self.rescaled_names)
        # Apply rescaling if rescale=True
if rescale:
x, log_J = self.inverse_rescale(x)
# Include Jacobian for the rescaling
log_prob -= log_J
x, log_prob = self.check_prior_bounds(x, log_prob)
return x, log_prob
def radius(self, z, log_q=None):
"""
Calculate the radius of a latent point or set of latent points.
        If multiple points are passed, the maximum radius is returned.
Parameters
----------
z : :obj:`np.ndarray`
Array of points in the latent space
log_q : :obj:`np.ndarray`, optional (None)
Array of corresponding probabilities. If specified
            then the probability of the maximum radius is also returned.
Returns
-------
tuple of arrays
            Tuple of arrays with the maximum radius and the corresponding
            log_q if it was specified as an input.
"""
if log_q is not None:
r = np.sqrt(np.sum(z ** 2., axis=-1))
i = np.argmax(r)
return r[i], log_q[i]
else:
return np.nanmax(np.sqrt(np.sum(z ** 2., axis=-1)))
def log_prior(self, x):
"""
Compute the prior probability using the user-defined model
Parameters
----------
x : structured_array
Array of samples
Returns
-------
array_like
Array of log prior probabilities
"""
if self._reparameterisation:
return self.model.log_prior(x) \
+ self._reparameterisation.log_prior(x)
else:
return self.model.log_prior(x)
def x_prime_log_prior(self, x):
"""
Compute the prior in the prime space
Parameters
----------
x : array
Samples in the X-prime space.
"""
raise RuntimeError('Prime prior is not implemented')
def compute_weights(self, x, log_q):
"""
Compute weights for the samples.
        Computes the log weights for rejection sampling such that
        the maximum log weight is zero.
Also sets the fields `logP` and `logL`. Note `logL` is set as the
proposal probability.
Parameters
----------
x : structured_arrays
Array of points
log_q : array_like
Array of log proposal probabilities.
Returns
-------
array_like
Log-weights for rejection sampling.
"""
if self.use_x_prime_prior:
x['logP'] = self.x_prime_log_prior(x)
else:
x['logP'] = self.log_prior(x)
x['logL'] = log_q
log_w = x['logP'] - log_q
log_w -= np.max(log_w)
return log_w
def rejection_sampling(self, z, worst_q=None):
"""
Perform rejection sampling.
Converts samples from the latent space and computes the corresponding
weights. Then returns samples using standard rejection sampling.
Parameters
----------
z : ndarray
Samples from the latent space
worst_q : float, optional
Lower bound on the log-probability computed using the flow that
is used to truncate new samples. Not recommended.
Returns
-------
array_like
Array of accepted latent samples.
array_like
Array of accepted samples in the X space.
"""
x, log_q = self.backward_pass(z, rescale=not self.use_x_prime_prior)
if not x.size:
return np.array([]), x
if self.truncate:
if worst_q is None:
                raise RuntimeError('Cannot use truncation without worst_q')
cut = log_q >= worst_q
x = x[cut]
z = z[cut]
log_q = log_q[cut]
        # Rescaling corresponds to the priors used initially; this is needed
        # for computing the priors
log_w = self.compute_weights(x, log_q)
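        # Standard rejection sampling: accept sample i if log u_i <= log w_i
        # with u_i drawn uniformly on [0, 1].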
log_u = np.log(np.random.rand(x.shape[0]))
indices = np.where(log_w >= log_u)[0]
return z[indices], x[indices]
def convert_to_samples(self, x, plot=True):
"""
Convert the array to samples ready to be used.
        This removes any auxiliary parameters (e.g. auxiliary radial
parameters) and ensures the prior is computed. These samples can
be directly used in the nested sampler.
Parameters
----------
x : array_like
Array of samples
plot : bool, optional
If true a 1d histogram for each parameter of the pool is plotted.
            This includes a comparison to the live points used to train the
current realisation of the flow.
Returns
-------
array
Structured array of samples.
"""
if self.use_x_prime_prior:
if self._plot_pool and plot:
plot_1d_comparison(
self.training_data_prime, x,
labels=['live points', 'pool'],
filename=(f'{self.output}/pool_prime_'
+ f'{self.populated_count}.png'))
x, _ = self.inverse_rescale(x)
x['logP'] = self.model.log_prior(x)
return rfn.repack_fields(x[self.model.names + ['logP', 'logL']])
def populate(self, worst_point, N=10000, plot=True, r=None):
"""
Populate a pool of latent points given the current worst point.
Parameters
----------
worst_point : structured_array
The current worst point used to compute the radius of the contour
in the latent space.
N : int, optional (10000)
The total number of points to populate in the pool
plot : {True, False, 'all'}
            Enable or disable plots during training. By default the plots
are only one-dimensional histograms, `'all'` includes corner
plots with samples, these are often a few MB in size so
proceed with caution!
"""
if not self.initialised:
raise RuntimeError(
'Proposal has not been initialised. '
'Try calling `initialise()` first.'
)
if r is not None:
logger.info(f'Using user inputs for radius {r}')
worst_q = None
elif self.fixed_radius:
r = self.fixed_radius
worst_q = None
else:
logger.debug(f'Populating with worst point: {worst_point}')
if self.compute_radius_with_all:
logger.debug('Using previous live points to compute radius')
worst_point = self.training_data
worst_z, worst_q = self.forward_pass(worst_point,
rescale=True,
compute_radius=True)
r, worst_q = self.radius(worst_z, worst_q)
if self.max_radius and r > self.max_radius:
r = self.max_radius
if self.min_radius and r < self.min_radius:
r = self.min_radius
        logger.info(f'Populating proposal with latent radius: {r:.5}')
self.r = r
self.alt_dist = self.get_alt_distribution()
if not self.indices:
self.x = np.empty(N, dtype=self.population_dtype)
self.x['logP'] = np.nan * np.ones(N)
self.indices = []
z_samples = np.empty([N, self.dims])
proposed = 0
accepted = 0
percent = 0.1
warn = True
while accepted < N:
z = self.draw_latent_prior(self.dims, r=self.r,
N=self.drawsize, fuzz=self.fuzz,
**self.draw_latent_kwargs)
proposed += z.shape[0]
z, x = self.rejection_sampling(z, worst_q)
if not x.size:
continue
if warn and (x.size / self.drawsize < 0.01):
logger.warning(
'Rejection sampling accepted less than 1 percent of '
f'samples! ({x.size / self.drawsize})')
warn = False
n = min(x.size, N - accepted)
self.x[accepted:(accepted+n)] = x[:n]
z_samples[accepted:(accepted+n), ...] = z[:n]
accepted += n
if accepted > percent * N:
logger.info(f'Accepted {accepted} / {N} points, '
f'acceptance: {accepted/proposed:.4}')
percent += 0.1
self.samples = self.convert_to_samples(self.x, plot=plot)
if self._plot_pool and plot:
self.plot_pool(z_samples, self.samples)
logger.debug('Evaluating log-likelihoods')
self.samples['logL'] = \
self.model.batch_evaluate_log_likelihood(self.samples)
if self.check_acceptance:
if worst_q:
self.approx_acceptance.append(self.compute_acceptance(worst_q))
logger.debug(
'Current approximate acceptance '
f'{self.approx_acceptance[-1]}'
)
self.acceptance.append(
self.compute_acceptance(worst_point['logL']))
logger.debug(f'Current acceptance {self.acceptance[-1]}')
self.indices = np.random.permutation(self.samples.size).tolist()
self.population_acceptance = self.x.size / proposed
self.populated_count += 1
self.populated = True
self._checked_population = False
logger.info(f'Proposal populated with {len(self.indices)} samples')
logger.info(
f'Overall proposal acceptance: {self.x.size / proposed:.4}')
def get_alt_distribution(self):
"""
Get a distribution for the latent prior used to draw samples.
"""
if self.latent_prior in ['uniform_nsphere', 'uniform_nball']:
return get_uniform_distribution(self.dims, self.r * self.fuzz,
device=self.flow.device)
elif self.latent_prior == 'truncated_gaussian':
if 'var' in self.draw_latent_kwargs:
return get_multivariate_normal(
self.dims, var=self.draw_latent_kwargs['var'],
device=self.flow.device)
def compute_acceptance(self, logL):
"""
Compute how many of the current pool have log-likelihoods greater
than the specified log-likelihood using the current value in the
`logL` field.
Parameters
----------
        logL : float
Log-likelihood to use as the lower bound
Returns
-------
float :
Acceptance defined on [0, 1]
"""
return (self.samples['logL'] > logL).sum() / self.samples.size
def draw(self, worst_point):
"""
Draw a replacement point. The new point is independent of the worst
point. The worst point is only used during population.
Parameters
----------
worst_point : structured_array
The current worst point used to compute the radius of the contour
in the latent space.
Returns
-------
structured_array
New live point
"""
if not self.populated:
self.populating = True
if self.update_poolsize:
self.update_poolsize_scale(self.ns_acceptance)
st = datetime.datetime.now()
while not self.populated:
self.populate(worst_point, N=self.poolsize)
self.population_time += (datetime.datetime.now() - st)
self.populating = False
# new sample is drawn randomly from proposed points
# popping from right end is faster
index = self.indices.pop()
new_sample = self.samples[index]
if not self.indices:
self.populated = False
logger.debug('Proposal pool is empty')
# make live point and return
return new_sample
def plot_pool(self, z, x):
"""
Plot the pool of points.
Parameters
----------
z : array_like
Latent samples to plot
x : array_like
Corresponding samples to plot in the physical space.
"""
if self._plot_pool == 'all':
plot_live_points(
x,
c='logL',
filename=os.path.join(self.output,
f'pool_{self.populated_count}.png')
)
else:
plot_1d_comparison(
self.training_data,
x,
labels=['live points', 'pool'],
filename=os.path.join(self.output,
f'pool_{self.populated_count}.png')
)
z_tensor = torch.from_numpy(z).to(self.flow.device)
with torch.no_grad():
if self.alt_dist is not None:
log_p = self.alt_dist.log_prob(z_tensor).cpu().numpy()
else:
log_p = self.flow.model.base_distribution_log_prob(
z_tensor).cpu().numpy()
fig, axs = plt.subplots(3, 1, figsize=(3, 9))
axs = axs.ravel()
axs[0].hist(x['logL'], 20, histtype='step', label='log q')
axs[1].hist(x['logL'] - log_p, 20, histtype='step',
label='log J')
axs[2].hist(np.sqrt(np.sum(z ** 2, axis=1)), 20,
histtype='step', label='Latent radius')
axs[0].set_xlabel('Log q')
axs[1].set_xlabel('Log |J|')
axs[2].set_xlabel('r')
plt.tight_layout()
fig.savefig(
os.path.join(self.output,
f'pool_{self.populated_count}_log_q.png')
)
plt.close(fig)
def resume(self, model, flow_config, weights_file=None):
"""
Resume the proposal.
The model and config are not stored so these must be provided.
Parameters
----------
model : :obj:`nessai.model.Model`
User-defined model used.
flow_config : dict
Configuration dictionary for the flow.
        weights_file : str, optional
Weights file to try and load. If not provided the proposal
tries to load the last weights file.
"""
self.model = model
self.flow_config = flow_config
self._reparameterisation = None
if self.mask is not None:
if isinstance(self.mask, list):
m = np.array(self.mask)
self.flow_config['model_config']['kwargs']['mask'] = m
self.initialise()
if weights_file is None:
weights_file = self.weights_file
# Flow might have exited before any weights were saved.
if weights_file is not None:
if os.path.exists(weights_file):
self.flow.reload_weights(weights_file)
else:
logger.warning('Could not reload weights for flow')
if self.update_bounds:
if self.training_data is not None:
self.check_state(self.training_data)
elif self.training_data is None and self.training_count:
raise RuntimeError(
'Could not resume! Missing training data!')
def test_draw(self):
"""
Test the draw method to ensure it returns a sample in the correct
        format and that the log prior is computed.
This method is not used since there are cases where the untrained
flow is very slow to draw a new point.
"""
logger.debug(f'Testing {self.__class__.__name__} draw method')
test_point = self.model.new_point()
self.populate(test_point, N=1, plot=False, r=1.0)
new_point = self.draw(test_point)
if new_point['logP'] != self.model.log_prior(new_point):
raise RuntimeError('Log prior of new point is incorrect!')
self.reset()
logger.debug(f'{self.__class__.__name__} passed draw test')
def reset(self):
"""Reset the proposal"""
self.indices = []
self.samples = None
self.x = None
self.populated = False
self.populated_count = 0
self.population_acceptance = None
self._poolsize_scale = 1.0
self.r = None
self.alt_dist = None
self._checked_population = True
self.acceptance = []
self.approx_acceptance = []
self._edges = {k: None for k in self._edges.keys()}
def __getstate__(self):
state = self.__dict__.copy()
state['initialised'] = False
state['weights_file'] = \
getattr(state.get('flow'), 'weights_file', None)
        # Mask may be generated via permutation, so must be saved
if 'mask' in getattr(state.get('flow'), 'model_config', {}).get(
'kwargs', []):
state['mask'] = state['flow'].model_config['kwargs']['mask']
else:
state['mask'] = None
if state['populated'] and state['indices']:
state['resume_populated'] = True
else:
state['resume_populated'] = False
# user provides model and config for resume
# flow can be reconstructed from resume
del state['_reparameterisation']
del state['model']
del state['_flow_config']
del state['flow']
return state
def __setstate__(self, state):
self.__dict__ = state
| [
"torch.no_grad",
"torch.from_numpy"
] | 1.7.0 | Rodrigo-Tenorio/nessai | 2b4175da61b3a7250d1154a126ad93481836df0d |
1.1 | #!/usr/bin/env python3
import torch
import operator
from .lazy_tensor import LazyTensor
from .non_lazy_tensor import lazify
from ..utils.broadcasting import _matmul_broadcast_shape
from ..utils.memoize import cached
from functools import reduce
def _prod(iterable):
return reduce(operator.mul, iterable, 1)
def _matmul(lazy_tensors, kp_shape, rhs):
output_shape = _matmul_broadcast_shape(kp_shape, rhs.shape)
output_batch_shape = output_shape[:-2]
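    # Compute (A_1 kron ... kron A_k) @ rhs without forming the full Kronecker
    # product: repeatedly reshape the right-hand side and apply each factor
    # (the standard 'vec trick' for Kronecker-structured matrix products).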
res = rhs.contiguous().expand(*output_batch_shape, *rhs.shape[-2:])
num_cols = rhs.size(-1)
for lazy_tensor in lazy_tensors:
res = res.view(*output_batch_shape, lazy_tensor.size(-1), -1)
factor = lazy_tensor._matmul(res)
factor = factor.view(*output_batch_shape, lazy_tensor.size(-2), -1, num_cols).transpose(-3, -2)
res = factor.reshape(*output_batch_shape, -1, num_cols)
return res
def _t_matmul(lazy_tensors, kp_shape, rhs):
kp_t_shape = (*kp_shape[:-2], kp_shape[-1], kp_shape[-2])
output_shape = _matmul_broadcast_shape(kp_t_shape, rhs.shape)
output_batch_shape = torch.Size(output_shape[:-2])
res = rhs.contiguous().expand(*output_batch_shape, *rhs.shape[-2:])
num_cols = rhs.size(-1)
for lazy_tensor in lazy_tensors:
res = res.view(*output_batch_shape, lazy_tensor.size(-2), -1)
factor = lazy_tensor._t_matmul(res)
factor = factor.view(*output_batch_shape, lazy_tensor.size(-1), -1, num_cols).transpose(-3, -2)
res = factor.reshape(*output_batch_shape, -1, num_cols)
return res
class KroneckerProductLazyTensor(LazyTensor):
def __init__(self, *lazy_tensors):
try:
lazy_tensors = tuple(lazify(lazy_tensor) for lazy_tensor in lazy_tensors)
except TypeError:
raise RuntimeError("KroneckerProductLazyTensor is intended to wrap lazy tensors.")
for prev_lazy_tensor, curr_lazy_tensor in zip(lazy_tensors[:-1], lazy_tensors[1:]):
if prev_lazy_tensor.batch_shape != curr_lazy_tensor.batch_shape:
raise RuntimeError(
"KroneckerProductLazyTensor expects lazy tensors with the "
"same batch shapes. Got {}.".format([lv.batch_shape for lv in lazy_tensors])
)
super(KroneckerProductLazyTensor, self).__init__(*lazy_tensors)
self.lazy_tensors = lazy_tensors
def _get_indices(self, row_index, col_index, *batch_indices):
row_factor = self.size(-2)
col_factor = self.size(-1)
res = None
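        # Entry (i, j) of a Kronecker product is the product of each factor's
        # entry at the mixed-radix decomposition of i and j, which is what the
        # div/fmod arithmetic below computes.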
for lazy_tensor in self.lazy_tensors:
sub_row_size = lazy_tensor.size(-2)
sub_col_size = lazy_tensor.size(-1)
row_factor //= sub_row_size
col_factor //= sub_col_size
sub_res = lazy_tensor._get_indices(
row_index.div(row_factor).fmod(sub_row_size),
col_index.div(col_factor).fmod(sub_col_size),
*batch_indices
)
res = sub_res if res is None else (sub_res * res)
return res
def _matmul(self, rhs):
is_vec = rhs.ndimension() == 1
if is_vec:
rhs = rhs.unsqueeze(-1)
res = _matmul(self.lazy_tensors, self.shape, rhs.contiguous())
if is_vec:
res = res.squeeze(-1)
return res
def _t_matmul(self, rhs):
is_vec = rhs.ndimension() == 1
if is_vec:
rhs = rhs.unsqueeze(-1)
res = _t_matmul(self.lazy_tensors, self.shape, rhs.contiguous())
if is_vec:
res = res.squeeze(-1)
return res
def _expand_batch(self, batch_shape):
return self.__class__(*[lazy_tensor._expand_batch(batch_shape) for lazy_tensor in self.lazy_tensors])
@cached(name="size")
def _size(self):
left_size = _prod(lazy_tensor.size(-2) for lazy_tensor in self.lazy_tensors)
right_size = _prod(lazy_tensor.size(-1) for lazy_tensor in self.lazy_tensors)
return torch.Size((*self.lazy_tensors[0].batch_shape, left_size, right_size))
def _transpose_nonbatch(self):
return self.__class__(*(lazy_tensor._transpose_nonbatch() for lazy_tensor in self.lazy_tensors), **self._kwargs)
| [
"torch.Size"
] | 1.1 | idelbrid/gpytorch | 092d523027a844939ba85d7ea8c8c7b7511843d5 |
1.7 | # -*- coding: utf-8 -*-
from typing import List
from collections import Counter
import torch
from ...wrapper import TargetWrapper, Batch
from ...utils import ChunksTagsTranslator
from ...nn.utils import unpad_seqs
from ...nn.modules import CombinedDropout, CRF
from ...nn.init import reinit_layer_
from ...metrics import precision_recall_f1_report
from .base import DecoderMixinBase, SingleDecoderConfigBase, DecoderBase
class SequenceTaggingDecoderMixin(DecoderMixinBase):
@property
def scheme(self):
return self._scheme
@scheme.setter
def scheme(self, scheme: str):
self._scheme = scheme
self.translator = ChunksTagsTranslator(scheme=scheme)
@property
def idx2tag(self):
return self._idx2tag
@idx2tag.setter
def idx2tag(self, idx2tag: List[str]):
self._idx2tag = idx2tag
self.tag2idx = {t: i for i, t in enumerate(self.idx2tag)} if idx2tag is not None else None
@property
def voc_dim(self):
return len(self.tag2idx)
@property
def pad_idx(self):
return self.tag2idx['<pad>']
def exemplify(self, data_entry: dict, training: bool=True):
return {'tags_obj': Tags(data_entry, self, training=training)}
def batchify(self, batch_examples: List[dict]):
return {'tags_objs': [ex['tags_obj'] for ex in batch_examples]}
def retrieve(self, batch: Batch):
return [tags_obj.chunks for tags_obj in batch.tags_objs]
def evaluate(self, y_gold: List[List[tuple]], y_pred: List[List[tuple]]):
"""Micro-F1 for entity recognition.
References
----------
https://www.clips.uantwerpen.be/conll2000/chunking/output.html
"""
scores, ave_scores = precision_recall_f1_report(y_gold, y_pred)
return ave_scores['micro']['f1']
class Tags(TargetWrapper):
"""A wrapper of tags with underlying chunks.
Parameters
----------
data_entry: dict
{'tokens': TokenSequence,
'chunks': List[tuple]}
"""
def __init__(self, data_entry: dict, config: SequenceTaggingDecoderMixin, training: bool=True):
super().__init__(training)
self.chunks = data_entry.get('chunks', None)
if self.chunks is not None:
self.tags = config.translator.chunks2tags(data_entry['chunks'], len(data_entry['tokens']))
self.tag_ids = torch.tensor([config.tag2idx[t] for t in self.tags], dtype=torch.long)
class SequenceTaggingDecoderConfig(SingleDecoderConfigBase, SequenceTaggingDecoderMixin):
def __init__(self, **kwargs):
self.in_drop_rates = kwargs.pop('in_drop_rates', (0.5, 0.0, 0.0))
self.scheme = kwargs.pop('scheme', 'BIOES')
self.idx2tag = kwargs.pop('idx2tag', None)
self.use_crf = kwargs.pop('use_crf', True)
super().__init__(**kwargs)
@property
def name(self):
return self._name_sep.join([self.scheme, self.criterion])
def __repr__(self):
repr_attr_dict = {key: getattr(self, key) for key in ['in_dim', 'in_drop_rates', 'scheme', 'criterion']}
return self._repr_non_config_attrs(repr_attr_dict)
@property
def criterion(self):
if self.use_crf:
return "CRF"
else:
return super().criterion
def instantiate_criterion(self, **kwargs):
if self.criterion.lower().startswith('crf'):
return CRF(tag_dim=self.voc_dim, pad_idx=self.pad_idx, batch_first=True)
else:
return super().instantiate_criterion(**kwargs)
def build_vocab(self, *partitions):
counter = Counter()
for data in partitions:
for data_entry in data:
curr_tags = self.translator.chunks2tags(data_entry['chunks'], len(data_entry['tokens']))
counter.update(curr_tags)
self.idx2tag = ['<pad>'] + list(counter.keys())
def instantiate(self):
return SequenceTaggingDecoder(self)
class SequenceTaggingDecoder(DecoderBase, SequenceTaggingDecoderMixin):
def __init__(self, config: SequenceTaggingDecoderConfig):
super().__init__()
self.scheme = config.scheme
self.idx2tag = config.idx2tag
self.dropout = CombinedDropout(*config.in_drop_rates)
self.hid2logit = torch.nn.Linear(config.in_dim, config.voc_dim)
reinit_layer_(self.hid2logit, 'sigmoid')
self.criterion = config.instantiate_criterion(ignore_index=config.pad_idx, reduction='sum')
def forward(self, batch: Batch, full_hidden: torch.Tensor):
# logits: (batch, step, tag_dim)
logits = self.hid2logit(self.dropout(full_hidden))
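        # With a CRF criterion the whole tag sequence is scored jointly (using
        # padded tag ids and a mask); otherwise a token-level criterion is
        # applied to each unpadded sequence and the losses are summed.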
if isinstance(self.criterion, CRF):
batch_tag_ids = torch.nn.utils.rnn.pad_sequence([tags_obj.tag_ids for tags_obj in batch.tags_objs],
batch_first=True,
padding_value=self.criterion.pad_idx)
losses = self.criterion(logits, batch_tag_ids, mask=batch.mask)
else:
losses = [self.criterion(lg[:slen], tags_obj.tag_ids) for lg, tags_obj, slen in zip(logits, batch.tags_objs, batch.seq_lens.cpu().tolist())]
# `torch.stack`: Concatenates sequence of tensors along a new dimension.
losses = torch.stack(losses, dim=0)
return losses
def decode_tags(self, batch: Batch, full_hidden: torch.Tensor):
# logits: (batch, step, tag_dim)
logits = self.hid2logit(full_hidden)
if isinstance(self.criterion, CRF):
# List of List of predicted-tag-ids
batch_tag_ids = self.criterion.decode(logits, mask=batch.mask)
else:
best_paths = logits.argmax(dim=-1)
batch_tag_ids = unpad_seqs(best_paths, batch.seq_lens)
return [[self.idx2tag[i] for i in tag_ids] for tag_ids in batch_tag_ids]
def decode(self, batch: Batch, full_hidden: torch.Tensor):
batch_tags = self.decode_tags(batch, full_hidden)
return [self.translator.tags2chunks(tags) for tags in batch_tags]
| [
"torch.nn.Linear",
"torch.nn.utils.rnn.pad_sequence",
"torch.tensor",
"torch.stack"
] | 1.7.1 | syuoni/eznlp | 9d1397d8e9630c099295712cbcffa495353a3268 |
1.7 | # -*- coding: utf-8 -*-
from typing import List, Tuple
from collections import Counter
import logging
import torch
from ...wrapper import TargetWrapper, Batch
from ...utils.chunk import detect_nested, filter_clashed_by_priority
from ...nn.modules import CombinedDropout, SoftLabelCrossEntropyLoss
from ...nn.init import reinit_embedding_, reinit_layer_
from ...metrics import precision_recall_f1_report
from ..encoder import EncoderConfig
from .base import DecoderMixinBase, SingleDecoderConfigBase, DecoderBase
logger = logging.getLogger(__name__)
class BoundarySelectionDecoderMixin(DecoderMixinBase):
@property
def idx2label(self):
return self._idx2label
@idx2label.setter
def idx2label(self, idx2label: List[str]):
self._idx2label = idx2label
self.label2idx = {l: i for i, l in enumerate(idx2label)} if idx2label is not None else None
@property
def voc_dim(self):
return len(self.label2idx)
@property
def none_idx(self):
return self.label2idx[self.none_label]
def exemplify(self, data_entry: dict, training: bool=True):
return {'boundaries_obj': Boundaries(data_entry, self, training=training)}
def batchify(self, batch_examples: List[dict]):
return {'boundaries_objs': [ex['boundaries_obj'] for ex in batch_examples]}
def retrieve(self, batch: Batch):
return [boundaries_obj.chunks for boundaries_obj in batch.boundaries_objs]
def evaluate(self, y_gold: List[List[tuple]], y_pred: List[List[tuple]]):
"""Micro-F1 for entity recognition.
References
----------
https://www.clips.uantwerpen.be/conll2000/chunking/output.html
"""
scores, ave_scores = precision_recall_f1_report(y_gold, y_pred)
return ave_scores['micro']['f1']
def _spans_from_surrounding(span: Tuple[int], distance: int, num_tokens: int):
"""Spans from the surrounding area of the given `span`.
"""
for k in range(distance):
for start_offset, end_offset in [(-k, -distance+k),
(-distance+k, k),
(k, distance-k),
(distance-k, -k)]:
start, end = span[0]+start_offset, span[1]+end_offset
if 0 <= start < end <= num_tokens:
yield (start, end)
def _spans_from_upper_triangular(seq_len: int):
"""Spans from the upper triangular area.
"""
for start in range(seq_len):
for end in range(start+1, seq_len+1):
yield (start, end)
class Boundaries(TargetWrapper):
"""A wrapper of boundaries with underlying chunks.
Parameters
----------
data_entry: dict
{'tokens': TokenSequence,
'chunks': List[tuple]}
"""
def __init__(self, data_entry: dict, config: BoundarySelectionDecoderMixin, training: bool=True):
super().__init__(training)
self.chunks = data_entry.get('chunks', None)
num_tokens = len(data_entry['tokens'])
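        # Negative sampling (training only): keep all gold spans, keep each
        # other candidate span with probability neg_sampling_rate, and allow
        # spans near gold boundaries to be kept at the higher
        # hard_neg_sampling_rate.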
if training and config.neg_sampling_rate < 1:
non_mask = (torch.arange(num_tokens) - torch.arange(num_tokens).unsqueeze(-1) >= 0)
pos_non_mask = torch.zeros_like(non_mask)
for label, start, end in self.chunks:
pos_non_mask[start, end-1] = True
neg_sampled = torch.empty_like(non_mask).bernoulli(p=config.neg_sampling_rate)
if config.hard_neg_sampling_rate > config.neg_sampling_rate:
hard_neg_non_mask = torch.zeros_like(non_mask)
for label, start, end in self.chunks:
for dist in range(1, config.hard_neg_sampling_size+1):
for sur_start, sur_end in _spans_from_surrounding((start, end), dist, num_tokens):
hard_neg_non_mask[sur_start, sur_end-1] = True
if config.hard_neg_sampling_rate < 1:
# Solve: 1 - (1 - p_{neg})(1 - p_{comp}) = p_{hard}
# Get: p_{comp} = (p_{hard} - p_{neg}) / (1 - p_{neg})
comp_sampling_rate = (config.hard_neg_sampling_rate - config.neg_sampling_rate) / (1 - config.neg_sampling_rate)
comp_sampled = torch.empty_like(non_mask).bernoulli(p=comp_sampling_rate)
neg_sampled = neg_sampled | (comp_sampled & hard_neg_non_mask)
else:
neg_sampled = neg_sampled | hard_neg_non_mask
self.non_mask = pos_non_mask | (neg_sampled & non_mask)
if self.chunks is not None:
if config.sb_epsilon <= 0 and config.sl_epsilon <= 0:
# Cross entropy loss
self.boundary2label_id = torch.full((num_tokens, num_tokens), config.none_idx, dtype=torch.long)
for label, start, end in self.chunks:
self.boundary2label_id[start, end-1] = config.label2idx[label]
else:
# Soft label loss for either boundary or label smoothing
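                # Boundary smoothing: a fraction sb_epsilon of each gold
                # span's probability mass is redistributed over spans within
                # Manhattan distance sb_size of the gold boundaries.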
self.boundary2label_id = torch.zeros(num_tokens, num_tokens, config.voc_dim, dtype=torch.float)
for label, start, end in self.chunks:
label_id = config.label2idx[label]
self.boundary2label_id[start, end-1, label_id] += (1 - config.sb_epsilon)
for dist in range(1, config.sb_size+1):
eps_per_span = config.sb_epsilon / (config.sb_size * dist * 4)
sur_spans = list(_spans_from_surrounding((start, end), dist, num_tokens))
for sur_start, sur_end in sur_spans:
self.boundary2label_id[sur_start, sur_end-1, label_id] += (eps_per_span*config.sb_adj_factor)
# Absorb the probabilities assigned to illegal positions
self.boundary2label_id[start, end-1, label_id] += eps_per_span * (dist * 4 - len(sur_spans))
# In very rare cases (e.g., ACE 2005), multiple entities may have the same span but different types
overflow_indic = (self.boundary2label_id.sum(dim=-1) > 1)
if overflow_indic.any().item():
self.boundary2label_id[overflow_indic] = torch.nn.functional.normalize(self.boundary2label_id[overflow_indic], p=1, dim=-1)
self.boundary2label_id[:, :, config.none_idx] = 1 - self.boundary2label_id.sum(dim=-1)
if config.sl_epsilon > 0:
# Do not smooth to `<none>` label
pos_indic = (torch.arange(config.voc_dim) != config.none_idx)
self.boundary2label_id[:, :, pos_indic] = (self.boundary2label_id[:, :, pos_indic] * (1-config.sl_epsilon) +
self.boundary2label_id[:, :, pos_indic].sum(dim=-1, keepdim=True)*config.sl_epsilon / (config.voc_dim-1))
class BoundarySelectionDecoderConfig(SingleDecoderConfigBase, BoundarySelectionDecoderMixin):
def __init__(self, **kwargs):
self.use_biaffine = kwargs.pop('use_biaffine', True)
self.affine = kwargs.pop('affine', EncoderConfig(arch='FFN', hid_dim=150, num_layers=1, in_drop_rates=(0.4, 0.0, 0.0), hid_drop_rate=0.2))
self.max_len = kwargs.pop('max_len', None)
self.max_span_size = kwargs.pop('max_span_size', 50)
self.size_emb_dim = kwargs.pop('size_emb_dim', 25)
self.hid_drop_rates = kwargs.pop('hid_drop_rates', (0.2, 0.0, 0.0))
self.neg_sampling_rate = kwargs.pop('neg_sampling_rate', 1.0)
self.hard_neg_sampling_rate = kwargs.pop('hard_neg_sampling_rate', 1.0)
self.hard_neg_sampling_rate = max(self.hard_neg_sampling_rate, self.neg_sampling_rate)
self.hard_neg_sampling_size = kwargs.pop('hard_neg_sampling_size', 5)
self.none_label = kwargs.pop('none_label', '<none>')
self.idx2label = kwargs.pop('idx2label', None)
# Note: non-nested overlapping chunks are never allowed
self.allow_nested = kwargs.pop('allow_nested', None)
# Boundary smoothing epsilon
self.sb_epsilon = kwargs.pop('sb_epsilon', 0.0)
self.sb_size = kwargs.pop('sb_size', 1)
self.sb_adj_factor = kwargs.pop('sb_adj_factor', 1.0)
super().__init__(**kwargs)
@property
def name(self):
return self._name_sep.join([self.affine.arch, self.criterion])
def __repr__(self):
repr_attr_dict = {key: getattr(self, key) for key in ['in_dim', 'hid_drop_rates', 'criterion']}
return self._repr_non_config_attrs(repr_attr_dict)
@property
def in_dim(self):
return self.affine.in_dim
@in_dim.setter
def in_dim(self, dim: int):
self.affine.in_dim = dim
@property
def criterion(self):
if self.sb_epsilon > 0:
return f"SB({self.sb_epsilon:.2f}, {self.sb_size})"
else:
return super().criterion
def instantiate_criterion(self, **kwargs):
if self.criterion.lower().startswith(('sb', 'sl')):
# For boundary/label smoothing, the `Boundaries` object has been accordingly changed;
# hence, do not use `SmoothLabelCrossEntropyLoss`
return SoftLabelCrossEntropyLoss(**kwargs)
else:
return super().instantiate_criterion(**kwargs)
def build_vocab(self, *partitions):
counter = Counter(label for data in partitions for entry in data for label, start, end in entry['chunks'])
self.idx2label = [self.none_label] + list(counter.keys())
self.allow_nested = any(detect_nested(entry['chunks']) for data in partitions for entry in data)
if self.allow_nested:
logger.info("Nested chunks detected, nested chunks are allowed in decoding...")
else:
logger.info("No nested chunks detected, only flat chunks are allowed in decoding...")
self.max_len = max(len(data_entry['tokens']) for data in partitions for data_entry in data)
def instantiate(self):
return BoundarySelectionDecoder(self)
class BoundarySelectionDecoder(DecoderBase, BoundarySelectionDecoderMixin):
def __init__(self, config: BoundarySelectionDecoderConfig):
super().__init__()
self.none_label = config.none_label
self.idx2label = config.idx2label
self.allow_nested = config.allow_nested
if config.use_biaffine:
self.affine_start = config.affine.instantiate()
self.affine_end = config.affine.instantiate()
else:
self.affine = config.affine.instantiate()
if config.size_emb_dim > 0:
self.size_embedding = torch.nn.Embedding(config.max_span_size, config.size_emb_dim)
reinit_embedding_(self.size_embedding)
# Use buffer to accelerate computation
# Note: size_id = size - 1
self.register_buffer('_span_size_ids', torch.arange(config.max_len) - torch.arange(config.max_len).unsqueeze(-1))
# Create `_span_non_mask` before changing values of `_span_size_ids`
self.register_buffer('_span_non_mask', self._span_size_ids >= 0)
self._span_size_ids.masked_fill_(self._span_size_ids < 0, 0)
self._span_size_ids.masked_fill_(self._span_size_ids >= config.max_span_size, config.max_span_size-1)
self.dropout = CombinedDropout(*config.hid_drop_rates)
self.U = torch.nn.Parameter(torch.empty(config.voc_dim, config.affine.out_dim, config.affine.out_dim))
self.W = torch.nn.Parameter(torch.empty(config.voc_dim, config.affine.out_dim*2 + config.size_emb_dim))
self.b = torch.nn.Parameter(torch.empty(config.voc_dim))
torch.nn.init.orthogonal_(self.U.data)
torch.nn.init.orthogonal_(self.W.data)
torch.nn.init.zeros_(self.b.data)
self.criterion = config.instantiate_criterion(reduction='sum')
def _get_span_size_ids(self, seq_len: int):
return self._span_size_ids[:seq_len, :seq_len]
def _get_span_non_mask(self, seq_len: int):
return self._span_non_mask[:seq_len, :seq_len]
def compute_scores(self, batch: Batch, full_hidden: torch.Tensor):
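        # Biaffine span scoring: for every (start, end) pair,
        # score = h_start^T U h_end + W [h_start; h_end; size_emb] + b.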
if hasattr(self, 'affine_start'):
affined_start = self.affine_start(full_hidden, batch.mask)
affined_end = self.affine_end(full_hidden, batch.mask)
else:
affined_start = self.affine(full_hidden, batch.mask)
affined_end = self.affine(full_hidden, batch.mask)
# affined_start: (batch, start_step, affine_dim) -> (batch, 1, start_step, affine_dim)
# affined_end: (batch, end_step, affine_dim) -> (batch, 1, affine_dim, end_step)
# scores1: (batch, 1, start_step, affine_dim) * (voc_dim, affine_dim, affine_dim) * (batch, 1, affine_dim, end_step) -> (batch, voc_dim, start_step, end_step)
scores1 = self.dropout(affined_start).unsqueeze(1).matmul(self.U).matmul(self.dropout(affined_end).permute(0, 2, 1).unsqueeze(1))
# affined_cat: (batch, start_step, end_step, affine_dim*2)
affined_cat = torch.cat([self.dropout(affined_start).unsqueeze(2).expand(-1, -1, affined_end.size(1), -1),
self.dropout(affined_end).unsqueeze(1).expand(-1, affined_start.size(1), -1, -1)], dim=-1)
if hasattr(self, 'size_embedding'):
# size_embedded: (start_step, end_step, emb_dim)
size_embedded = self.size_embedding(self._get_span_size_ids(full_hidden.size(1)))
# affined_cat: (batch, start_step, end_step, affine_dim*2 + emb_dim)
affined_cat = torch.cat([affined_cat, self.dropout(size_embedded).unsqueeze(0).expand(full_hidden.size(0), -1, -1, -1)], dim=-1)
# scores2: (voc_dim, affine_dim*2 + emb_dim) * (batch, start_step, end_step, affine_dim*2 + emb_dim, 1) -> (batch, start_step, end_step, voc_dim, 1)
scores2 = self.W.matmul(affined_cat.unsqueeze(-1))
# scores: (batch, start_step, end_step, voc_dim)
return scores1.permute(0, 2, 3, 1) + scores2.squeeze(-1) + self.b
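    # In effect, compute_scores implements a biaffine span classifier; matching the code above, for
    # start position i, end position j and label v:
    #   score(v, i, j) = h_i^T U_v h_j + W_v [h_i; h_j; size_emb(j - i)] + b_v
    # where h_i / h_j are the (dropout-regularized) affined start/end representations.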
def forward(self, batch: Batch, full_hidden: torch.Tensor):
batch_scores = self.compute_scores(batch, full_hidden)
losses = []
for curr_scores, boundaries_obj, curr_len in zip(batch_scores, batch.boundaries_objs, batch.seq_lens.cpu().tolist()):
curr_non_mask = getattr(boundaries_obj, 'non_mask', self._get_span_non_mask(curr_len))
loss = self.criterion(curr_scores[:curr_len, :curr_len][curr_non_mask], boundaries_obj.boundary2label_id[curr_non_mask])
losses.append(loss)
return torch.stack(losses)
def decode(self, batch: Batch, full_hidden: torch.Tensor):
batch_scores = self.compute_scores(batch, full_hidden)
batch_chunks = []
for curr_scores, curr_len in zip(batch_scores, batch.seq_lens.cpu().tolist()):
curr_non_mask = self._get_span_non_mask(curr_len)
confidences, label_ids = curr_scores[:curr_len, :curr_len][curr_non_mask].softmax(dim=-1).max(dim=-1)
labels = [self.idx2label[i] for i in label_ids.cpu().tolist()]
chunks = [(label, start, end) for label, (start, end) in zip(labels, _spans_from_upper_triangular(curr_len)) if label != self.none_label]
confidences = [conf for label, conf in zip(labels, confidences.cpu().tolist()) if label != self.none_label]
assert len(confidences) == len(chunks)
# Sort chunks from high to low confidences
chunks = [ck for _, ck in sorted(zip(confidences, chunks), reverse=True)]
chunks = filter_clashed_by_priority(chunks, allow_nested=self.allow_nested)
batch_chunks.append(chunks)
return batch_chunks
| [
"torch.zeros",
"torch.nn.functional.normalize",
"torch.stack",
"torch.arange",
"torch.full",
"torch.zeros_like",
"torch.nn.init.orthogonal_",
"torch.nn.init.zeros_",
"torch.empty",
"torch.nn.Embedding",
"torch.empty_like"
] | 1.7.1 | syuoni/eznlp | 9d1397d8e9630c099295712cbcffa495353a3268 |
1.0 | """
This file defines class DecisionModule.
@author: Clemens Rosenbaum :: [email protected]
@created: 6/7/18
"""
import abc
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.distribution import Distribution
from .PolicyStorage import ApproxPolicyStorage, TabularPolicyStorage
from PytorchRouting.RewardFunctions.PerAction.PerActionBaseReward import PerActionBaseReward
class Decision(nn.Module, metaclass=abc.ABCMeta):
"""
Class DecisionModule defines the base class for all decision modules.
"""
def __init__(
self,
num_selections,
in_features,
num_agents=1,
exploration=0.1,
policy_storage_type='approx',
detach=True,
approx_hidden_dims=(),
policy_net=None,
additional_reward_func=PerActionBaseReward(),
set_pg_temp=False,
**kwargs
):
nn.Module.__init__(self)
self._in_features = in_features
self._num_selections = num_selections
self._num_agents = num_agents
self._exploration = exploration
self._detach = detach
self._pol_type = policy_storage_type
self._pol_hidden_dims = approx_hidden_dims
self._policy = self._construct_policy_storage(
self._num_selections, self._pol_type, policy_net, self._pol_hidden_dims)
self.additional_reward_func = additional_reward_func
self._dist_dim = 1
self._set_pg_temp = set_pg_temp
self._pg_temperature = 1.
def set_exploration(self, exploration):
self._exploration = exploration
@abc.abstractmethod
def _forward(self, xs, prior_action):
return torch.zeros(1, 1), [], torch.zeros(1, 1)
@staticmethod
def _eval_stochastic_are_exp(actions, dist):
if len(dist.shape) == 3:
dist = dist[:, :, 0]
return (torch.max(dist, dim=1)[1].view(-1) == actions.view(-1)).byte()
@staticmethod
def _loss(self, is_terminal, state, next_state, action, next_action, reward, cum_return, final_reward):
pass
def _construct_policy_storage(self, out_dim, policy_storage_type, approx_module, approx_hidden_dims, in_dim=None):
in_dim = in_dim or self._in_features
if approx_module is not None:
policy = nn.ModuleList(
[ApproxPolicyStorage(approx=copy.deepcopy(approx_module), detach=self._detach)
for _ in range(self._num_agents)]
)
elif policy_storage_type in ('approx', 0):
policy = nn.ModuleList(
[ApproxPolicyStorage(
in_features=in_dim,
num_selections=out_dim,
hidden_dims=approx_hidden_dims,
detach=self._detach)
for _ in range(self._num_agents)]
)
elif policy_storage_type in ('tabular', 1):
policy = nn.ModuleList(
[TabularPolicyStorage(num_selections=out_dim)
for _ in range(self._num_agents)]
)
else:
raise ValueError(f'Policy storage type {policy_storage_type} not understood.')
return policy
def forward(self, xs, mxs, prior_actions=None, mask=None, update_target=None):
"""
        The forward method of DecisionModule takes a batch of inputs and a list of metainformation objects, and
        appends the decisions made to the metainformation objects.
:param xs:
:param mxs:
:param prior_actions: prior actions that select the agent
:param mask: a torch.ByteTensor that determines if the trajectory is active. if it is not, no action
will be executed
        :param update_target: (only relevant for GumbelSoftmax) if specified, the gradient flow will be included
                              in update_target, which will thus be returned
:return: xs OR update_target, if specified, with potentially an attached backward object
"""
# input checking
assert len(xs) == len(mxs)
batch_size = xs.size(0)
        assert self._num_agents == 1 or prior_actions is not None, \
            'Decision makers with more than one agent have to have prior_actions provided.'
assert mask is None or mask.max() == 1, \
'Please check that a batch being passed in has at least one active (non terminated) trajectory.'
# computing the termination mask and the prior actions if not passed in
mask = torch.ones(batch_size, dtype=torch.uint8, device=xs.device) \
if mask is None else mask
prior_actions = torch.zeros(batch_size, dtype=torch.long, device=xs.device) \
if prior_actions is None or len(prior_actions) == 0 else prior_actions.reshape(-1)
ys = xs.clone() if update_target is None else update_target.clone() # required as in-place ops follow
# initializing the return vars
actions = torch.zeros(batch_size, dtype=torch.long, device=xs.device)
are_exp = torch.zeros(batch_size, dtype=torch.uint8, device=xs.device)
dists = torch.zeros((batch_size, self._num_selections, 5), device=xs.device)
# "clustering" by agent
for i in torch.arange(0, prior_actions.max() + 1, device=xs.device):
if i not in prior_actions:
continue
# computing the mask as the currently computed agent on the active trajectories
m = ((prior_actions == i) * mask)
if not any(m):
continue
# selecting the actions
y, a, e, d = self._forward(xs[m], i)
# merging the results
ys[m], actions[m], are_exp[m], dists[m, :, :d.size(-1)] = \
y, a.view(-1), e.view(-1), d.view(d.size(0), d.size(1), -1)
actions = actions.view(-1) # flattens the actions tensor, but does not produce a scalar
assert len(actions) == len(are_exp) == dists.size(0) == len(mxs)
# amending the metas
for ia, a, e, d, mx in zip(mask, actions, are_exp, dists.split(1, dim=0), mxs):
if ia:
mx.append('actions', a, new_step=True)
mx.append('is_exploratory', e.squeeze())
mx.append('states', d)
mx.append('loss_funcs', self._loss)
mx.append('reward_func', self.additional_reward_func)
self.additional_reward_func.register(d, a)
return ys, mxs, actions
| [
"torch.zeros",
"torch.max",
"torch.nn.Module.__init__",
"torch.ones"
] | 1.0 | cle-ros/RoutingNetworks | 0f1fe1221c67a224a02bca6247d3c4488ede0a04 |
0.4 | import torch
import itertools
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
from torch.autograd import Variable
import numpy as np
from .modules import loss
from util.util import gaussian
class CycleGANSemanticModel(BaseModel):
#def name(self):
# return 'CycleGANModel'
# new, copied from cyclegan model
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
For CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses.
A (source domain), B (target domain).
Generators: G_A: A -> B; G_B: B -> A.
Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A.
Forward cycle loss: lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper)
Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. (2) in the paper)
Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 "Photo generation from paintings" in the paper)
Dropout is not used in the original CycleGAN paper.
"""
parser.set_defaults(no_dropout=False) # default CycleGAN did not use dropout, beniz: we do
if is_train:
parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
parser.add_argument('--lambda_identity', type=float, default=0.5, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')
parser.add_argument('--rec_noise', type=float, default=0.0, help='whether to add noise to reconstruction')
return parser
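        # Hypothetical invocation exercising the options added above (illustrative only):
        #   python train.py ... --lambda_A 10.0 --lambda_B 10.0 --lambda_identity 0.5 --rec_noise 0.01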
def __init__(self, opt):
BaseModel.__init__(self, opt)
# specify the training losses you want to print out. The program will call base_model.get_current_losses
self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A',
'D_B', 'G_B', 'cycle_B', 'idt_B',
'sem_AB', 'sem_BA', 'CLS']
# specify the images you want to save/display. The program will call base_model.get_current_visuals
visual_names_A = ['real_A', 'fake_B', 'rec_A']
visual_names_B = ['real_B', 'fake_A', 'rec_B']
if self.isTrain and self.opt.lambda_identity > 0.0:
visual_names_A.append('idt_B')
visual_names_B.append('idt_A') # beniz: inverted for original
self.visual_names = visual_names_A + visual_names_B
# specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
if self.isTrain:
self.model_names = ['G_A', 'G_B', 'D_A', 'D_B', 'CLS']
else: # during test time, only load Gs
self.model_names = ['G_A', 'G_B']
# load/define networks
        # The naming convention is different from the one used in the paper
# Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
self.netG_A = networks.define_G(opt.input_nc, opt.output_nc,
opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.G_spectral, opt.init_type, opt.init_gain, self.gpu_ids)
self.netG_B = networks.define_G(opt.output_nc, opt.input_nc,
opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.G_spectral, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain:
#use_sigmoid = opt.no_lsgan
self.netD_A = networks.define_D(opt.output_nc, opt.ndf,
opt.netD,
opt.n_layers_D, opt.norm, opt.D_dropout, opt.D_spectral, #use_sigmoid,
opt.init_type, opt.init_gain, self.gpu_ids)
self.netD_B = networks.define_D(opt.input_nc, opt.ndf,
opt.netD,
opt.n_layers_D, opt.norm, opt.D_dropout, opt.D_spectral, #use_sigmoid,
opt.init_type, opt.init_gain, self.gpu_ids)
self.netCLS = networks.define_C(opt.output_nc, opt.ndf,opt.crop_size,
init_type=opt.init_type, init_gain=opt.init_gain,
gpu_ids=self.gpu_ids, nclasses=opt.semantic_nclasses)
if self.isTrain:
if opt.lambda_identity > 0.0: # only works when input and output images have the same number of channels
assert(opt.input_nc == opt.output_nc)
self.fake_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
self.fake_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
# define loss functions
self.criterionGAN = loss.GANLoss(opt.gan_mode).to(self.device)
self.criterionCycle = torch.nn.L1Loss()
self.criterionIdt = torch.nn.L1Loss()
self.criterionCLS = torch.nn.modules.CrossEntropyLoss()
# initialize optimizers
self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),
lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()),
lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_CLS = torch.optim.Adam(self.netCLS.parameters(), lr=1e-3, betas=(opt.beta1, 0.999))
self.optimizers = []
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)
#beniz: not adding optimizers CLS (?)
self.rec_noise = opt.rec_noise
def set_input(self, input):
AtoB = self.opt.direction == 'AtoB'
self.real_A = input['A' if AtoB else 'B'].to(self.device)
self.real_B = input['B' if AtoB else 'A'].to(self.device)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
#print(input['B'])
if 'A_label' in input:# and 'B_label' in input:
#self.input_A_label = input['A_label' if AtoB else 'B_label'].to(self.device)
self.input_A_label = input['A_label'].to(self.device)
#self.input_B_label = input['B_label' if AtoB else 'A_label'].to(self.device) # beniz: unused
            #self.image_paths = input['B_paths'] # Hack!! forcing the labels to correspond to B domain
def forward(self):
self.fake_B = self.netG_A(self.real_A)
if self.rec_noise > 0.0:
self.fake_B_noisy1 = gaussian(self.fake_B, self.rec_noise)
self.rec_A= self.netG_B(self.fake_B_noisy1)
else:
self.rec_A = self.netG_B(self.fake_B)
self.fake_A = self.netG_B(self.real_B)
if self.rec_noise > 0.0:
self.fake_A_noisy1 = gaussian(self.fake_A, self.rec_noise)
self.rec_B = self.netG_A(self.fake_A_noisy1)
else:
self.rec_B = self.netG_A(self.fake_A)
if self.isTrain:
# Forward all four images through classifier
# Keep predictions from fake images only
#print('real_A shape=',self.real_A.shape)
#print('real_A=',self.real_A)
self.pred_real_A = self.netCLS(self.real_A)
_,self.gt_pred_A = self.pred_real_A.max(1)
pred_real_B = self.netCLS(self.real_B)
_,self.gt_pred_B = pred_real_B.max(1)
self.pred_fake_A = self.netCLS(self.fake_A)
self.pred_fake_B = self.netCLS(self.fake_B)
_,self.pfB = self.pred_fake_B.max(1) #beniz: unused ?
def backward_D_basic(self, netD, real, fake):
# Real
pred_real = netD(real)
loss_D_real = self.criterionGAN(pred_real, True)
# Fake
pred_fake = netD(fake.detach())
loss_D_fake = self.criterionGAN(pred_fake, False)
# Combined loss
loss_D = (loss_D_real + loss_D_fake) * 0.5
# backward
loss_D.backward()
return loss_D
def backward_CLS(self):
label_A = self.input_A_label
# forward only real source image through semantic classifier
pred_A = self.netCLS(self.real_A)
self.loss_CLS = self.criterionCLS(pred_A, label_A)
self.loss_CLS.backward()
def backward_D_A(self):
fake_B = self.fake_B_pool.query(self.fake_B)
self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)
def backward_D_B(self):
fake_A = self.fake_A_pool.query(self.fake_A)
self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)
def backward_G(self):
lambda_idt = self.opt.lambda_identity
lambda_A = self.opt.lambda_A
lambda_B = self.opt.lambda_B
# Identity loss
if lambda_idt > 0:
# G_A should be identity if real_B is fed.
self.idt_A = self.netG_A(self.real_B)
self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt
# G_B should be identity if real_A is fed.
self.idt_B = self.netG_B(self.real_A)
self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt
else:
self.loss_idt_A = 0
self.loss_idt_B = 0
# GAN loss D_A(G_A(A))
self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True) # removed the factor 2...
# GAN loss D_B(G_B(B))
self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)
# Forward cycle loss
self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A
# Backward cycle loss
self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B
# combined loss standard cyclegan
self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B
# semantic loss AB
#print('fake_B=',self.pred_fake_B)
#print('input_A_label=',self.input_A_label)
#print(self.pred_fake_B.shape,self.input_A_label.shape)
self.loss_sem_AB = self.criterionCLS(self.pred_fake_B, self.input_A_label)
#self.loss_sem_AB = self.criterionCLS(self.pred_fake_B, self.gt_pred_A)
# semantic loss BA
self.loss_sem_BA = self.criterionCLS(self.pred_fake_A, self.gt_pred_B)
#self.loss_sem_BA = 0
#self.loss_sem_BA = self.criterionCLS(self.pred_fake_A, self.pfB) # beniz
# only use semantic loss when classifier has reasonably low loss
#if True:
if not hasattr(self, 'loss_CLS') or self.loss_CLS.detach().item() > 1.0:
self.loss_sem_AB = 0 * self.loss_sem_AB
self.loss_sem_BA = 0 * self.loss_sem_BA
self.loss_G += self.loss_sem_BA + self.loss_sem_AB
self.loss_G.backward()
def optimize_parameters(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
# forward
self.forward() # compute fake images and reconstruction images.
# G_A and G_B
self.set_requires_grad([self.netD_A, self.netD_B], False) # Ds require no gradients when optimizing Gs
self.set_requires_grad([self.netG_A, self.netG_B], True)
self.optimizer_G.zero_grad() # set G_A and G_B's gradients to zero
self.backward_G() # calculate gradients for G_A and G_B
self.optimizer_G.step() # update G_A and G_B's weights
# D_A and D_B
self.set_requires_grad([self.netD_A, self.netD_B], True)
self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero
self.backward_D_A() # calculate gradients for D_A
        self.backward_D_B()      # calculate gradients for D_B
self.optimizer_D.step() # update D_A and D_B's weights
# CLS
self.set_requires_grad([self.netD_A, self.netD_B], False)
self.set_requires_grad([self.netCLS], True)
self.optimizer_CLS.zero_grad()
self.backward_CLS()
self.optimizer_CLS.step()
| [
"torch.nn.modules.CrossEntropyLoss",
"torch.nn.L1Loss"
] | 0.4.1 | jolibrain/pytorch-CycleGAN-and-pix2pix | 43465d660d445e020067979fa8d592a1b480c869 |
1.8 | from typing import Any, Dict, List, Optional, Tuple, Type, Union
import gym
import numpy as np
import torch as th
from torch.nn import functional as F
from stable_baselines3.common.buffers import ReplayBuffer
from stable_baselines3.common.noise import ActionNoise
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import polyak_update
from stable_baselines3.sac.policies import SACPolicy
class SAC(OffPolicyAlgorithm):
"""
Soft Actor-Critic (SAC)
Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor,
This implementation borrows code from original implementation (https://github.com/haarnoja/sac)
from OpenAI Spinning Up (https://github.com/openai/spinningup), from the softlearning repo
(https://github.com/rail-berkeley/softlearning/)
and from Stable Baselines (https://github.com/hill-a/stable-baselines)
Paper: https://arxiv.org/abs/1801.01290
Introduction to SAC: https://spinningup.openai.com/en/latest/algorithms/sac.html
Note: we use double q target and not value target as discussed
in https://github.com/hill-a/stable-baselines/issues/270
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: learning rate for adam optimizer,
the same learning rate will be used for all networks (Q-Values, Actor and Value function)
it can be a function of the current progress remaining (from 1 to 0)
:param buffer_size: size of the replay buffer
:param learning_starts: how many steps of the model to collect transitions for before learning starts
:param batch_size: Minibatch size for each gradient update
:param tau: the soft update coefficient ("Polyak update", between 0 and 1)
:param gamma: the discount factor
:param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit
like ``(5, "step")`` or ``(2, "episode")``.
:param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)
Set to ``-1`` means to do as many gradient steps as steps done in the environment
during the rollout.
:param action_noise: the action noise type (None by default), this can help
for hard exploration problem. Cf common.noise for the different action noise type.
:param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``).
If ``None``, it will be automatically selected.
:param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation.
:param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
at a cost of more complexity.
See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
:param ent_coef: Entropy regularization coefficient. (Equivalent to
inverse of reward scale in the original SAC paper.) Controlling exploration/exploitation trade-off.
Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value)
:param target_update_interval: update the target network every ``target_network_update_freq``
gradient steps.
:param target_entropy: target entropy when learning ``ent_coef`` (``ent_coef = 'auto'``)
:param use_sde: Whether to use generalized State Dependent Exploration (gSDE)
instead of action noise exploration (default: False)
:param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
Default: -1 (only sample at the beginning of the rollout)
:param use_sde_at_warmup: Whether to use gSDE instead of uniform sampling
during the warm up phase (before learning starts)
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
def __init__(
self,
policy: Union[str, Type[SACPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 3e-4,
buffer_size: int = 1_000_000, # 1e6
learning_starts: int = 100,
batch_size: int = 256,
tau: float = 0.005,
gamma: float = 0.99,
train_freq: Union[int, Tuple[int, str]] = 1,
gradient_steps: int = 1,
action_noise: Optional[ActionNoise] = None,
replay_buffer_class: Optional[ReplayBuffer] = None,
replay_buffer_kwargs: Optional[Dict[str, Any]] = None,
optimize_memory_usage: bool = False,
ent_coef: Union[str, float] = "auto",
target_update_interval: int = 1,
target_entropy: Union[str, float] = "auto",
use_sde: bool = False,
sde_sample_freq: int = -1,
use_sde_at_warmup: bool = False,
tensorboard_log: Optional[str] = None,
create_eval_env: bool = False,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
):
super(SAC, self).__init__(
policy,
env,
SACPolicy,
learning_rate,
buffer_size,
learning_starts,
batch_size,
tau,
gamma,
train_freq,
gradient_steps,
action_noise,
replay_buffer_class=replay_buffer_class,
replay_buffer_kwargs=replay_buffer_kwargs,
policy_kwargs=policy_kwargs,
tensorboard_log=tensorboard_log,
verbose=verbose,
device=device,
create_eval_env=create_eval_env,
seed=seed,
use_sde=use_sde,
sde_sample_freq=sde_sample_freq,
use_sde_at_warmup=use_sde_at_warmup,
optimize_memory_usage=optimize_memory_usage,
supported_action_spaces=(gym.spaces.Box),
)
self.target_entropy = target_entropy
self.log_ent_coef = None # type: Optional[th.Tensor]
# Entropy coefficient / Entropy temperature
# Inverse of the reward scale
self.ent_coef = ent_coef
self.target_update_interval = target_update_interval
self.ent_coef_optimizer = None
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
super(SAC, self)._setup_model()
self._create_aliases()
# Target entropy is used when learning the entropy coefficient
if self.target_entropy == "auto":
# automatically set target entropy if needed
self.target_entropy = -np.prod(self.env.action_space.shape).astype(np.float32)
else:
# Force conversion
# this will also throw an error for unexpected string
self.target_entropy = float(self.target_entropy)
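        # e.g. (hypothetical) a Box action space of shape (6,) gives target_entropy = -6.0,
        # i.e. the -dim(A) heuristic from the SAC papers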
# The entropy coefficient or entropy can be learned automatically
# see Automating Entropy Adjustment for Maximum Entropy RL section
# of https://arxiv.org/abs/1812.05905
if isinstance(self.ent_coef, str) and self.ent_coef.startswith("auto"):
# Default initial value of ent_coef when learned
init_value = 1.0
if "_" in self.ent_coef:
init_value = float(self.ent_coef.split("_")[1])
assert init_value > 0.0, "The initial value of ent_coef must be greater than 0"
# Note: we optimize the log of the entropy coeff which is slightly different from the paper
# as discussed in https://github.com/rail-berkeley/softlearning/issues/37
self.log_ent_coef = th.log(th.ones(1, device=self.device) * init_value).requires_grad_(True)
self.ent_coef_optimizer = th.optim.Adam([self.log_ent_coef], lr=self.lr_schedule(1))
else:
# Force conversion to float
# this will throw an error if a malformed string (different from 'auto')
# is passed
self.ent_coef_tensor = th.tensor(float(self.ent_coef)).to(self.device)
def _create_aliases(self) -> None:
self.actor = self.policy.actor
self.critic = self.policy.critic
self.critic_target = self.policy.critic_target
def train(self, gradient_steps: int, batch_size: int = 64) -> None:
# Switch to train mode (this affects batch norm / dropout)
self.policy.set_training_mode(True)
# Update optimizers learning rate
optimizers = [self.actor.optimizer, self.critic.optimizer]
if self.ent_coef_optimizer is not None:
optimizers += [self.ent_coef_optimizer]
# Update learning rate according to lr schedule
self._update_learning_rate(optimizers)
ent_coef_losses, ent_coefs = [], []
actor_losses, critic_losses = [], []
for gradient_step in range(gradient_steps):
# Sample replay buffer
replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
# We need to sample because `log_std` may have changed between two gradient steps
if self.use_sde:
self.actor.reset_noise()
# Action by the current actor for the sampled state
actions_pi, log_prob = self.actor.action_log_prob(replay_data.observations)
log_prob = log_prob.reshape(-1, 1)
ent_coef_loss = None
if self.ent_coef_optimizer is not None:
# Important: detach the variable from the graph
# so we don't change it with other losses
# see https://github.com/rail-berkeley/softlearning/issues/60
ent_coef = th.exp(self.log_ent_coef.detach())
ent_coef_loss = -(self.log_ent_coef * (log_prob + self.target_entropy).detach()).mean()
ent_coef_losses.append(ent_coef_loss.item())
else:
ent_coef = self.ent_coef_tensor
ent_coefs.append(ent_coef.item())
# Optimize entropy coefficient, also called
# entropy temperature or alpha in the paper
if ent_coef_loss is not None:
self.ent_coef_optimizer.zero_grad()
ent_coef_loss.backward()
self.ent_coef_optimizer.step()
with th.no_grad():
# Select action according to policy
next_actions, next_log_prob = self.actor.action_log_prob(replay_data.next_observations)
# Compute the next Q values: min over all critics targets
next_q_values = th.cat(self.critic_target(replay_data.next_observations, next_actions), dim=1)
next_q_values, _ = th.min(next_q_values, dim=1, keepdim=True)
# add entropy term
next_q_values = next_q_values - ent_coef * next_log_prob.reshape(-1, 1)
# td error + entropy term
target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values
# Get current Q-values estimates for each critic network
# using action from the replay buffer
current_q_values = self.critic(replay_data.observations, replay_data.actions)
# Compute critic loss
critic_loss = 0.5 * sum([F.mse_loss(current_q, target_q_values) for current_q in current_q_values])
critic_losses.append(critic_loss.item())
# Optimize the critic
self.critic.optimizer.zero_grad()
critic_loss.backward()
self.critic.optimizer.step()
# Compute actor loss
# Alternative: actor_loss = th.mean(log_prob - qf1_pi)
# Mean over all critic networks
q_values_pi = th.cat(self.critic.forward(replay_data.observations, actions_pi), dim=1)
min_qf_pi, _ = th.min(q_values_pi, dim=1, keepdim=True)
actor_loss = (ent_coef * log_prob - min_qf_pi).mean()
actor_losses.append(actor_loss.item())
# Optimize the actor
self.actor.optimizer.zero_grad()
actor_loss.backward()
self.actor.optimizer.step()
# Update target networks
if gradient_step % self.target_update_interval == 0:
polyak_update(self.critic.parameters(), self.critic_target.parameters(), self.tau)
self._n_updates += gradient_steps
self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
self.logger.record("train/ent_coef", np.mean(ent_coefs))
self.logger.record("train/actor_loss", np.mean(actor_losses))
self.logger.record("train/critic_loss", np.mean(critic_losses))
if len(ent_coef_losses) > 0:
self.logger.record("train/ent_coef_loss", np.mean(ent_coef_losses))
def learn(
self,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 4,
eval_env: Optional[GymEnv] = None,
eval_freq: int = -1,
n_eval_episodes: int = 5,
tb_log_name: str = "SAC",
eval_log_path: Optional[str] = None,
reset_num_timesteps: bool = True,
render: Optional[bool] = False,
) -> OffPolicyAlgorithm:
return super(SAC, self).learn(
total_timesteps=total_timesteps,
callback=callback,
log_interval=log_interval,
eval_env=eval_env,
eval_freq=eval_freq,
n_eval_episodes=n_eval_episodes,
tb_log_name=tb_log_name,
eval_log_path=eval_log_path,
reset_num_timesteps=reset_num_timesteps,
render=render,
)
def _excluded_save_params(self) -> List[str]:
return super(SAC, self)._excluded_save_params() + ["actor", "critic", "critic_target"]
def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
state_dicts = ["policy", "actor.optimizer", "critic.optimizer"]
if self.ent_coef_optimizer is not None:
saved_pytorch_variables = ["log_ent_coef"]
state_dicts.append("ent_coef_optimizer")
else:
saved_pytorch_variables = ["ent_coef_tensor"]
return state_dicts, saved_pytorch_variables
| [
"torch.min",
"torch.no_grad",
"torch.ones",
"torch.nn.functional.mse_loss"
] | 1.8.1 | shivamvats/stable-baselines3 | d67a3bc800389212f94f274c4cf6036c78923105 |
1.3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import torch
from tests.helpers.testers import BATCH_SIZE, EXTRA_DIM, NUM_BATCHES
Input = namedtuple('InputMultiple', ["indexes", "preds", "target"])
# correct
_input_retrieval_scores = Input(
indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)),
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),
)
_input_retrieval_scores_extra = Input(
indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),
)
_input_retrieval_scores_non_binary_target = Input(
indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)),
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.randint(high=4, size=(NUM_BATCHES, BATCH_SIZE)),
)
# with errors
_input_retrieval_scores_no_target = Input(
indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)),
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.randint(high=1, size=(NUM_BATCHES, BATCH_SIZE)),
)
_input_retrieval_scores_all_target = Input(
indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)),
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.randint(low=1, high=2, size=(NUM_BATCHES, BATCH_SIZE)),
)
_input_retrieval_scores_empty = Input(
indexes=torch.randint(high=10, size=[0]),
preds=torch.rand(0),
target=torch.randint(high=2, size=[0]),
)
_input_retrieval_scores_mismatching_sizes = Input(
indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE - 2)),
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),
)
_input_retrieval_scores_mismatching_sizes_func = Input(
indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)),
preds=torch.rand(NUM_BATCHES, BATCH_SIZE - 2),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),
)
_input_retrieval_scores_wrong_targets = Input(
indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)),
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.randint(low=-2**31, high=2**31, size=(NUM_BATCHES, BATCH_SIZE)),
)
| [
"torch.rand",
"torch.randint"
] | 1.3.1 | BeyondTheProof/metrics | 8af688daff819a95f4cb3d757ffc919c86072ee9 |
1.3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from torch import Tensor, tensor
from torchmetrics.utilities.checks import _check_retrieval_functional_inputs
def retrieval_precision(preds: Tensor, target: Tensor, k: Optional[int] = None) -> Tensor:
"""
Computes the precision metric (for information retrieval),
as explained `here <https://en.wikipedia.org/wiki/Precision_and_recall#Precision>`__.
Precision is the fraction of relevant documents among all the retrieved documents.
``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``,
``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`,
otherwise an error is raised. If you want to measure Precision@K, ``k`` must be a positive integer.
Args:
preds: estimated probabilities of each document to be relevant.
target: ground truth about each document being relevant or not.
k: consider only the top k elements (default: None)
Returns:
a single-value tensor with the precision (at ``k``) of the predictions ``preds`` w.r.t. the labels ``target``.
Example:
>>> preds = tensor([0.2, 0.3, 0.5])
>>> target = tensor([True, False, True])
>>> retrieval_precision(preds, target, k=2)
tensor(0.5000)
"""
preds, target = _check_retrieval_functional_inputs(preds, target)
if k is None:
k = preds.shape[-1]
if not (isinstance(k, int) and k > 0):
raise ValueError("`k` has to be a positive integer or None")
if not target.sum():
return tensor(0.0, device=preds.device)
relevant = target[torch.argsort(preds, dim=-1, descending=True)][:k].sum().float()
return relevant / k
| [
"torch.argsort",
"torch.tensor"
] | 1.3.1 | BeyondTheProof/metrics | 8af688daff819a95f4cb3d757ffc919c86072ee9 |
1.3 | import torch
from torch import nn as nn
from torch.nn import functional as F
from basicsr.utils.registry import ARCH_REGISTRY
from .spynet_arch import SpyNet
from .basicvsr_arch import ConvResBlock, PSUpsample
from .edvr_arch import PredeblurModule, PCDAlignment, TSAFusion
from .arch_util import ResidualBlockNoBN, flow_warp, make_layer
@ARCH_REGISTRY.register()
class IconVSR(nn.Module):
"""IconVSR network for video super-resolution.
Args:
num_feat (int): Channel number of intermediate features.
Default: 64.
num_block (int): Block number of residual blocks in each propagation branch.
Default: 30.
keyframe_stride (int): Number determining the keyframes. If stride=5,
then the (0, 5, 10, 15, ...)-th frame will be the keyframes.
Default: 5.
temporal_padding (int): Number of frames to be padded at two ends of
the sequence. 2 for REDS and 3 for Vimeo-90K. Default: 2
spynet_path (str): The path of Pre-trained SPyNet model.
Default: None.
"""
def __init__(self,
num_feat=64, num_block=30,
keyframe_stride=5, temporal_padding=2,
spynet_path=None):
super(IconVSR, self).__init__()
self.num_feat = num_feat
self.t_pad = temporal_padding
self.kframe_stride = keyframe_stride
self.edvr = EDVRExtractor(num_frame=temporal_padding*2 + 1,
center_frame_idx=temporal_padding)
# Flow-based Feature Alignment
self.spynet = SpyNet(load_path=spynet_path)
# Coupled Propagation and Information-refill
self.backward_fuse = nn.Conv2d(num_feat * 2, num_feat, kernel_size=3, stride=1, padding=1, bias=True)
self.backward_resblocks = ConvResBlock(num_feat + 3, num_feat, num_block)
self.forward_fuse = nn.Conv2d(num_feat * 2, num_feat, kernel_size=3, stride=1, padding=1, bias=True)
self.forward_resblocks = ConvResBlock(num_feat + 3, num_feat, num_block)
# Pixel-Shuffle Upsampling
self.up1 = PSUpsample(num_feat, num_feat, scale_factor=2)
self.up2 = PSUpsample(num_feat, 64, scale_factor=2)
# The channel of the tail layers is 64
self.conv_hr = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.conv_last = nn.Conv2d(64, 3, kernel_size=3, stride=1, padding=1)
# Global Residual Learning
self.img_up = nn.Upsample(scale_factor=4, mode='bilinear', align_corners=False)
# Activation Function
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
def comp_flow(self, lrs):
"""Compute optical flow using SPyNet for feature warping.
Args:
lrs (tensor): LR frames, the shape is (n, t, c, h, w)
Return:
tuple(Tensor): Optical flow.
forward_flow refers to the flow from current frame to the previous frame.
backward_flow is the flow from current frame to the next frame.
"""
n, t, c, h, w = lrs.size()
forward_lrs = lrs[:, 1:, :, :, :].reshape(-1, c, h, w) # 'n t c h w -> (n t) c h w'
backward_lrs = lrs[:, :-1, :, :, :].reshape(-1, c, h, w) # 'n t c h w -> (n t) c h w')
forward_flow = self.spynet(forward_lrs, backward_lrs).view(n, t-1, 2, h, w)
backward_flow = self.spynet(backward_lrs, forward_lrs).view(n, t-1, 2, h, w)
return forward_flow, backward_flow
def extract_refill_features(self, lrs, keyframe_idx):
"""Compute the features for information refill.
We use EDVR-M to extract features from the selected keyframes
and its neighbor. The window size in EDVR-M is 5 for REDS and
7 for Vimeo-90K (following the settings in EDVR).
Args:
lrs (Tensor): The input LR sequence with shape (n, t, c, h, w).
keyframe_idx (list[int]): List of the indices of the selected
keyframes.
Returns:
dict: The features for information-refill. The keys are the
corresponding index.
"""
lrs_start = lrs[:, 1+self.t_pad : 1+self.t_pad*2].flip(1)
lrs_end = lrs[:, -1-self.t_pad*2 : -1-self.t_pad].flip(1)
lrs = torch.cat([lrs_start, lrs, lrs_end], dim=1)
num_frame = 2 * self.t_pad + 1
refill_feat = {}
for i in keyframe_idx:
refill_feat[i] = self.edvr(lrs[:, i:i + num_frame].contiguous())
return refill_feat
def spatial_padding(self, lrs):
""" Apply spatial pdding.
Since the PCD module in EDVR requires a resolution of a multiple of 4,
we use reflect padding on the LR frame to match the requirements..
Args:
lrs (Tensor): Input LR sequence with shape (n, t, c, h, w).
Returns:
Tensor: Padded LR sequence with shape (n, t, c, h_pad, w_pad).
"""
n, t, c, h, w = lrs.size()
pad_h = (4 - h % 4) % 4
pad_w = (4 - w % 4) % 4
# padding
lrs = lrs.view(-1, c, h, w)
lrs = F.pad(lrs, [0, pad_w, 0, pad_h], mode='reflect')
return lrs.view(n, t, c, h + pad_h, w + pad_w)
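    # e.g. (hypothetical) an input of h=63, w=130 is reflect-padded to 64 x 132,
    # so both spatial sizes become multiples of 4 as required by the PCD module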
def forward(self, lrs):
n, t, c, h_in, w_in = lrs.size()
assert h_in >= 64 and w_in >= 64, (
'The height and width of input should be at least 64, '
f'but got {h_in} and {w_in}.')
# Padding
lrs = self.spatial_padding(lrs)
h, w = lrs.size(3), lrs.size(4)
# get the keyframe for information-refill
keyframe_idx = list(range(0, t, self.kframe_stride))
if keyframe_idx[-1] != t-1:
keyframe_idx.append(t-1) # the last frame is a keyframe
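        # e.g. (hypothetical) t=12 with keyframe_stride=5 gives keyframe_idx = [0, 5, 10, 11]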
# compute flow and refill
forward_flow, backward_flow = self.comp_flow(lrs)
refill_feat = self.extract_refill_features(lrs, keyframe_idx)
# backward propgation
rlt = []
feat_prop = lrs.new_zeros(n, self.num_feat, h, w)
for i in range(t-1, -1, -1):
curr_lr = lrs[:, i, :, :, ]
if i < t-1:
flow = backward_flow[:, i, :, :, :]
feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))
if i in keyframe_idx:
feat_prop = torch.cat([feat_prop, refill_feat[i]], dim=1)
feat_prop = self.backward_fuse(feat_prop)
feat_prop = torch.cat([feat_prop, curr_lr], dim=1)
feat_prop = self.backward_resblocks(feat_prop)
rlt.append(feat_prop)
rlt = rlt[::-1]
# forward propgation
feat_prop = torch.zeros_like(feat_prop)
for i in range(0, t):
curr_lr = lrs[:, i, :, :, :]
if i > 0:
flow = forward_flow[:, i-1, :, :, :]
feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))
if i in keyframe_idx:
feat_prop = torch.cat([feat_prop, refill_feat[i]], dim=1)
feat_prop = self.forward_fuse(feat_prop)
feat_prop = torch.cat([curr_lr, rlt[i], feat_prop], dim=1)
feat_prop = self.forward_resblocks(feat_prop)
# Upsampling
            sr_rlt = self.lrelu(self.up1(feat_prop))
sr_rlt = self.lrelu(self.up2(sr_rlt))
sr_rlt = self.lrelu(self.conv_hr(sr_rlt))
sr_rlt = self.conv_last(sr_rlt)
# Global Residual Learning
base = self.img_up(curr_lr)
sr_rlt += base
rlt[i] = sr_rlt
return torch.stack(rlt, dim=1)[:, :, :, :4 * h_in, :4 * w_in]
class EDVRExtractor(nn.Module):
"""EDVR feature extractor for information-refill in IconVSR.
We use EDVR-M in IconVSR.
Paper:
EDVR: Video Restoration with Enhanced Deformable Convolutional Networks.
Args:
num_in_ch (int): Channel number of input image. Default: 3.
num_out_ch (int): Channel number of output image. Default: 3.
num_feat (int): Channel number of intermediate features. Default: 64.
num_frame (int): Number of input frames. Default: 5.
deformable_groups (int): Deformable groups. Defaults: 8.
num_extract_block (int): Number of blocks for feature extraction.
Default: 5.
center_frame_idx (int): The index of center frame. Frame counting from
0. Default: Middle of input frames.
hr_in (bool): Whether the input has high resolution. Default: False.
with_predeblur (bool): Whether has predeblur module.
Default: False.
with_tsa (bool): Whether has TSA module. Default: True.
"""
def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=64, num_frame=5,
deformable_groups=8, num_extract_block=5,
center_frame_idx=None, hr_in=None,
with_predeblur=False, with_tsa=True):
super(EDVRExtractor, self).__init__()
if center_frame_idx is None:
self.center_frame_idx = num_frame // 2
else:
self.center_frame_idx = center_frame_idx
self.hr_in = hr_in
self.with_predeblur = with_predeblur
self.with_tsa = with_tsa
# extract features for each frame
if self.with_predeblur:
self.pre_deblur = PredeblurModule(num_feat=num_feat, hr_in=self.hr_in)
self.conv_1x1 = nn.Conv2d(num_feat, num_feat, kernel_size=1, stride=1, padding=0, bias=True)
else:
self.conv_first = nn.Conv2d(num_in_ch, num_feat, kernel_size=3, stride=1, padding=1)
# extract pyramid features
self.feature_extraction = make_layer(ResidualBlockNoBN, num_extract_block, num_feat=num_feat)
self.conv_l2_1 = nn.Conv2d(num_feat, num_feat, kernel_size=3, stride=2, padding=1)
self.conv_l2_2 = nn.Conv2d(num_feat, num_feat, kernel_size=3, stride=1, padding=1)
self.conv_l3_1 = nn.Conv2d(num_feat, num_feat, kernel_size=3, stride=2, padding=1)
self.conv_l3_2 = nn.Conv2d(num_feat, num_feat, kernel_size=3, stride=1, padding=1)
# pcd and tsa module
self.pcd_align = PCDAlignment(num_feat=num_feat, deformable_groups=deformable_groups)
if self.with_tsa:
self.fusion = TSAFusion(
num_feat=num_feat,
num_frame=num_frame,
center_frame_idx=self.center_frame_idx)
else:
self.fusion = nn.Conv2d(num_frame * num_feat, num_feat, 1, 1)
# activation function
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
def forward(self, x):
n, t, c, h, w = x.size()
if self.hr_in:
assert h % 16 == 0 and w % 16 == 0, (
'The height and width must be multiple of 16.')
else:
assert h % 4 == 0 and w % 4 == 0, (
'The height and width must be multiple of 4.')
# extract features for each frame
# Level 1
if self.with_predeblur:
feat_l1 = self.conv_1x1(self.pre_deblur(x.view(-1, c, h, w)))
if self.hr_in:
h, w = h // 4, w // 4
else:
feat_l1 = self.lrelu(self.conv_first(x.view(-1, c, h, w)))
feat_l1 = self.feature_extraction(feat_l1)
# Level 2
feat_l2 = self.lrelu(self.conv_l2_1(feat_l1))
feat_l2 = self.lrelu(self.conv_l2_2(feat_l2))
# Level 3
feat_l3 = self.lrelu(self.conv_l3_1(feat_l2))
feat_l3 = self.lrelu(self.conv_l3_2(feat_l3))
feat_l1 = feat_l1.view(n, t, -1, h, w)
feat_l2 = feat_l2.view(n, t, -1, h // 2, w // 2)
feat_l3 = feat_l3.view(n, t, -1, h // 4, w // 4)
# PCD alignment
ref_feat_l = [ # reference feature list
feat_l1[:, self.center_frame_idx, :, :, :].clone(),
feat_l2[:, self.center_frame_idx, :, :, :].clone(),
feat_l3[:, self.center_frame_idx, :, :, :].clone()
]
aligned_feat = []
for i in range(t):
nbr_feat_l = [ # neighboring feature list
feat_l1[:, i, :, :, :].clone(), feat_l2[:, i, :, :, :].clone(),
feat_l3[:, i, :, :, :].clone()
]
aligned_feat.append(self.pcd_align(nbr_feat_l, ref_feat_l))
aligned_feat = torch.stack(aligned_feat, dim=1) # (n, t, c, h, w)
if not self.with_tsa:
aligned_feat = aligned_feat.view(n, -1, h, w)
feat = self.fusion(aligned_feat)
return feat
if __name__ == '__main__':
model = IconVSR()
lrs = torch.randn(3, 4, 3, 64, 64)
rlt = model(lrs)
print(rlt.size())
| [
"torch.cat",
"torch.stack",
"torch.nn.LeakyReLU",
"torch.nn.Upsample",
"torch.nn.Conv2d",
"torch.zeros_like",
"torch.nn.functional.pad",
"torch.randn"
] | 1.3 | IanYeung/ReCp | 1a7ace0e1ca3c262e24a222f3f0ab0d5674e9410 |
0.4 | from __future__ import print_function
from __future__ import division
import os
import sys
import time
import datetime
import os.path as osp
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.nn import functional as F
from torch.optim import lr_scheduler
from tensorboardX import SummaryWriter
from args import argument_parser, image_dataset_kwargs, optimizer_kwargs
from torchreid.data_manager import ImageDataManager
from torchreid import models
from torchreid.losses import CrossEntropyLoss, DeepSupervision
from torchreid.utils.iotools import save_checkpoint, check_isfile
from torchreid.utils.avgmeter import AverageMeter
from torchreid.utils.loggers import Logger, RankLogger
from torchreid.utils.torchtools import count_num_param, open_all_layers, open_specified_layers
from torchreid.utils.reidtools import visualize_ranked_results
from torchreid.utils.generaltools import set_random_seed
from torchreid.eval_metrics import evaluate, accuracy
from torchreid.optimizers import init_optimizer
def exp_name(cfg):
name = [
'e_' + cfg.prefix,
'S_' + '-'.join(cfg.source_names),
'T_' + '-'.join(cfg.target_names),
cfg.arch,
'E',
'' if cfg.resume == '' else 'r',
        '' if cfg.fixbase_epoch == 0 else 'warmup' + str(cfg.fixbase_epoch),
str(cfg.stepsize),
'm' + str(cfg.max_epoch),
'P',
'b' + str(cfg.train_batch_size),
cfg.optim,
'lr' + str(cfg.lr),
'wd' + str(cfg.weight_decay),
]
return '_'.join(name)
# read config
parser = argument_parser()
args = parser.parse_args()
args.fixbase_epoch = 0
args.arch = 'dpfl'
args.save_dir = exp_name(args)
def main():
global args
set_random_seed(args.seed)
if not args.use_avai_gpus: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
use_gpu = torch.cuda.is_available()
if args.use_cpu: use_gpu = False
log_name = 'log_test.txt' if args.evaluate else 'log_train.txt'
sys.stdout = Logger(osp.join(args.save_dir, log_name))
print("==========\nArgs:{}\n==========".format(args))
if use_gpu:
print("Currently using GPU {}".format(args.gpu_devices))
cudnn.benchmark = True
else:
print("Currently using CPU, however, GPU is highly recommended")
print("Initializing MultiScale data manager")
    assert args.train_batch_size % args.train_loss_batch_size == 0, "'{}' is not divisible by {}".format(args.train_batch_size, args.train_loss_batch_size)
dm = ImageDataManager(use_gpu, scales=[224,160], **image_dataset_kwargs(args))
trainloader, testloader_dict = dm.return_dataloaders()
# sys.exit(0)
print("Initializing model: {}".format(args.arch))
model = models.init_model(name=args.arch, num_classes=dm.num_train_pids, input_size=args.width, loss={'xent'}, use_gpu=use_gpu)
print("Model size: {:.3f} M".format(count_num_param(model)))
# print(model)
criterion = CrossEntropyLoss(num_classes=dm.num_train_pids, use_gpu=use_gpu, label_smooth=args.label_smooth)
optimizer = init_optimizer(model.parameters(), **optimizer_kwargs(args))
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=args.stepsize, gamma=args.gamma)
# # scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=3, verbose=True, threshold=1e-04)
if args.load_weights and check_isfile(args.load_weights): # load pretrained weights but ignore layers that don't match in size
checkpoint = torch.load(args.load_weights)
pretrain_dict = checkpoint['state_dict']
model_dict = model.state_dict()
pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size()}
model_dict.update(pretrain_dict)
model.load_state_dict(model_dict)
print("Loaded pretrained weights from '{}'".format(args.load_weights))
if args.resume and check_isfile(args.resume):
checkpoint = torch.load(args.resume)
model.load_state_dict(checkpoint['state_dict'])
args.start_epoch = checkpoint['epoch'] + 1
print("Loaded checkpoint from '{}'".format(args.resume))
print("- start_epoch: {}\n- rank1: {}".format(args.start_epoch, checkpoint['rank1']))
if use_gpu:
model = nn.DataParallel(model).cuda()
if args.evaluate:
print("Evaluate only")
for name in args.target_names:
print("Evaluating {} ...".format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
test_set = dm.return_testdataset_by_name(name)
rank1, mAP = test(model, test_set, name, queryloader, galleryloader, use_gpu, visualize=args.visualize_ranks)
return
start_time = time.time()
ranklogger = RankLogger(args.source_names, args.target_names)
maplogger = RankLogger(args.source_names, args.target_names)
train_time = 0
# Tensorboard
writer = SummaryWriter(log_dir=osp.join('runs', args.save_dir))
print("=> Start training")
if args.fixbase_epoch > 0:
print("Train {} for {} epochs while keeping other layers frozen".format(args.open_layers, args.fixbase_epoch))
initial_optim_state = optimizer.state_dict()
for epoch in range(args.fixbase_epoch):
start_train_time = time.time()
loss, prec1 = train(epoch, model, criterion, optimizer, trainloader, writer, use_gpu, fixbase=True)
writer.add_scalar('train/loss', loss, epoch+1)
writer.add_scalar('train/prec1', prec1, epoch+1)
print('Epoch: [{:02d}] [Average Loss:] {:.4f}\t [Average Prec.:] {:.2%}'.format(epoch+1, loss, prec1))
train_time += round(time.time() - start_train_time)
print("Done. All layers are open to train for {} epochs".format(args.max_epoch))
optimizer.load_state_dict(initial_optim_state)
args.start_epoch += args.fixbase_epoch
args.max_epoch += args.fixbase_epoch
for epoch in range(args.start_epoch, args.max_epoch):
start_train_time = time.time()
loss, prec1 = train(epoch, model, criterion, optimizer, trainloader, writer, use_gpu)
writer.add_scalar('train/loss', loss, epoch+1)
writer.add_scalar('train/prec1', prec1, epoch+1)
print('Epoch: [{:02d}] [Average Loss:] {:.4f}\t [Average Prec.:] {:.2%}'.format(epoch+1, loss, prec1))
train_time += round(time.time() - start_train_time)
scheduler.step()
if (epoch + 1) > args.start_eval and args.eval_freq > 0 and (epoch + 1) % args.eval_freq == 0 or (epoch + 1) == args.max_epoch:
print("=> Test")
for name in args.target_names:
print("Evaluating {} ...".format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
test_set = dm.return_testdataset_by_name(name)
if epoch+1 == args.max_epoch:
rank1, mAP = test(model, test_set, name, queryloader, galleryloader, use_gpu, visualize=True)
else:
rank1, mAP = test(model, test_set, name, queryloader, galleryloader, use_gpu)
writer.add_scalar(name + '_test/top1', rank1, epoch+1)
writer.add_scalar(name + '_test/mAP', mAP, epoch+1)
ranklogger.write(name, epoch + 1, rank1)
maplogger.write(name, epoch + 1, mAP)
if use_gpu:
state_dict = model.module.state_dict()
else:
state_dict = model.state_dict()
save_checkpoint({
'state_dict': state_dict,
'rank1': rank1,
'epoch': epoch,
}, False, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))
# save last checkpoint
save_checkpoint({
'state_dict': state_dict,
'rank1': rank1,
'epoch': epoch,
}, False, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))
elapsed = round(time.time() - start_time)
elapsed = str(datetime.timedelta(seconds=elapsed))
train_time = str(datetime.timedelta(seconds=train_time))
print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time))
ranklogger.show_summary()
maplogger.show_summary()
def train(epoch, model, criterion, optimizer, trainloader, writer, use_gpu, fixbase=False):
losses = AverageMeter()
precisions = AverageMeter()
batch_time = AverageMeter()
data_time = AverageMeter()
epoch_iterations = len(trainloader)
model.train()
if fixbase or args.always_fixbase:
open_specified_layers(model, args.open_layers)
else:
open_all_layers(model)
end = time.time()
for batch_idx, ((img1, img2), pids, _, _) in enumerate(trainloader):
data_time.update(time.time() - end)
if use_gpu:
img1, img2, pids = img1.cuda(), img2.cuda(), pids.cuda()
y_large, y_small, y_joint = model(img1, img2)
loss_batch = args.train_loss_batch_size
how_many_mini = args.train_batch_size // loss_batch
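        # e.g. (hypothetical) train_batch_size=64 with train_loss_batch_size=16 gives how_many_mini=4,
        # so the losses below are computed on 4 chunks of 16 samples each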
for mini_idx in range(how_many_mini):
start_index = mini_idx * loss_batch
end_index = start_index + loss_batch
mini_y_large = y_large[start_index:end_index, :]
mini_y_small = y_small[start_index:end_index, :]
mini_y_joint = y_joint[start_index:end_index, :]
mini_pids = pids[start_index:end_index]
loss_large = criterion(mini_y_large, mini_pids)
loss_small = criterion(mini_y_small, mini_pids)
loss_joint = criterion(mini_y_joint, mini_pids)
joint_prob = F.softmax(mini_y_joint, dim=1)
loss_joint_large = criterion(mini_y_large, joint_prob, one_hot=True)
loss_joint_small = criterion(mini_y_small, joint_prob, one_hot=True)
total_loss_large = loss_large + loss_joint_large #+
total_loss_small = loss_small + loss_joint_small #+
total_loss_joint = loss_joint #+
prec, = accuracy(mini_y_joint.data, mini_pids.data)
prec1 = prec[0] # get top 1
optimizer.zero_grad()
# total_loss_large.backward(retain_graph=True)
# total_loss_small.backward(retain_graph=True)
# total_loss_joint.backward()
# sum losses
loss = total_loss_joint + total_loss_small + total_loss_large
loss.backward(retain_graph=True)
optimizer.step()
loss_iter = epoch*epoch_iterations+batch_idx*how_many_mini+mini_idx
writer.add_scalar('iter/loss_small', loss_small, loss_iter)
writer.add_scalar('iter/loss_large', loss_large, loss_iter)
writer.add_scalar('iter/loss_joint', loss_joint, loss_iter)
writer.add_scalar('iter/loss_joint_small', loss_joint_small, loss_iter)
writer.add_scalar('iter/loss_joint_large', loss_joint_large, loss_iter)
writer.add_scalar('iter/total_loss_small', total_loss_small, loss_iter)
writer.add_scalar('iter/total_loss_large', total_loss_large, loss_iter)
writer.add_scalar('iter/total_loss_joint', total_loss_joint, loss_iter)
writer.add_scalar('iter/loss', loss, loss_iter)
losses.update(loss.item(), pids.size(0))
precisions.update(prec1, pids.size(0))
if (batch_idx*how_many_mini+mini_idx + 1) % args.print_freq == 0:
print('Epoch: [{0:02d}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec {prec.val:.2%} ({prec.avg:.2%})\t'.format(
epoch + 1, batch_idx + 1, len(trainloader), batch_time=batch_time,
data_time=data_time, loss=losses, prec=precisions))
batch_time.update(time.time() - end)
end = time.time()
return losses.avg, precisions.avg
def test(model, test_set, name, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20], visualize=False):
batch_time = AverageMeter()
model.eval()
with torch.no_grad():
qf, q_pids, q_camids = [], [], []
for batch_idx, ((img1, img2), pids, camids, _) in enumerate(queryloader):
if use_gpu: img1, img2 = img1.cuda(), img2.cuda()
end = time.time()
features = model(img1, img2)
batch_time.update(time.time() - end)
features = features.data.cpu()
qf.append(features)
q_pids.extend(pids)
q_camids.extend(camids)
qf = torch.cat(qf, 0)
q_pids = np.asarray(q_pids)
q_camids = np.asarray(q_camids)
print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))
gf, g_pids, g_camids = [], [], []
end = time.time()
for batch_idx, ((img1, img2), pids, camids, _) in enumerate(galleryloader):
if use_gpu: img1, img2 = img1.cuda(), img2.cuda()
end = time.time()
features = model(img1, img2)
batch_time.update(time.time() - end)
features = features.data.cpu()
gf.append(features)
g_pids.extend(pids)
g_camids.extend(camids)
gf = torch.cat(gf, 0)
g_pids = np.asarray(g_pids)
g_camids = np.asarray(g_camids)
print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))
print("=> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch_size))
m, n = qf.size(0), gf.size(0)
distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
distmat.addmm_(1, -2, qf, gf.t())
distmat = distmat.numpy()
print("Computing CMC and mAP")
cmc, mAP, all_AP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=args.use_metric_cuhk03)
if visualize:
visualize_ranked_results(
distmat, all_AP, test_set, name,
save_path=args.save_dir,
topk=100
)
print("Results ----------")
print("mAP: {:.1%}".format(mAP))
print("CMC curve")
for r in ranks:
print("Rank-{:<3}: {:.1%}".format(r, cmc[r-1]))
print("------------------")
return cmc[0], mAP
if __name__ == '__main__':
main()
| [
"torch.cat",
"torch.no_grad",
"torch.optim.lr_scheduler.MultiStepLR",
"torch.cuda.is_available",
"torch.load",
"torch.nn.functional.softmax",
"torch.nn.DataParallel",
"torch.pow"
] | 0.4.1 | aytackanaci/deep-vehicle-reid | 9f951288a38f8b295b5c77cc6c9b26f0632ecea3 |
1.0 | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import torch.nn as nn
from torch.nn import init
class BaseNetwork(nn.Module):
def __init__(self):
super(BaseNetwork, self).__init__()
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def print_network(self):
if isinstance(self, list):
self = self[0]
num_params = 0
for param in self.parameters():
num_params += param.numel()
print('Network [%s] was created. Total number of parameters: %.1f million. '
'To see the architecture, do print(network).'
% (type(self).__name__, num_params / 1000000))
def init_weights(self, init_type='normal', gain=0.02):
def init_func(m):
classname = m.__class__.__name__
if classname.find('BatchNorm2d') != -1:
if hasattr(m, 'weight') and m.weight is not None:
init.normal_(m.weight.data, 1.0, gain)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == 'xavier_uniform':
init.xavier_uniform_(m.weight.data, gain=1.0)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=gain)
elif init_type == 'none': # uses pytorch's default init method
m.reset_parameters()
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
self.apply(init_func)
# propagate to children
for m in self.children():
if hasattr(m, 'init_weights'):
m.init_weights(init_type, gain)
| [
"torch.nn.init.constant_",
"torch.nn.init.kaiming_normal_",
"torch.nn.init.xavier_uniform_",
"torch.nn.init.normal_",
"torch.nn.init.orthogonal_",
"torch.nn.init.xavier_normal_"
] | 1.0.0 | jiye-ML/CoCosNet | c4b3f44393462c8353c6c6952d7b05496298df1c |
1.7 | import argparse
import math
import sys
import time
import os
import socket
import statistics
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from model import MLMTask, MLMTask2, MLMTaskEmbedding, MLMTaskEncoder, MLMTaskHead
from cuda_local_pipeline import LocalSequential, sync_all_device
IS_SLURM = os.getenv('SLURM_LOCALID')
USE_TQDM = os.getenv('USE_TQDM', True if not IS_SLURM else False)
def collate_batch(batch_data, args, mask_id, cls_id):
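    # Reshape the flat token stream into a time-major (seq_len, batch_size) block before masking.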
batch_data = torch.tensor(batch_data).long().view(args.batch_size, -1).t().contiguous()
# Generate masks with args.mask_frac
data_len = batch_data.size(0)
ones_num = int(data_len * args.mask_frac)
zeros_num = data_len - ones_num
lm_mask = torch.cat([torch.zeros(zeros_num), torch.ones(ones_num)])
lm_mask = lm_mask[torch.randperm(data_len)]
batch_data = torch.cat((torch.tensor([[cls_id] * batch_data.size(1)]).long(), batch_data))
lm_mask = torch.cat((torch.tensor([0.0]), lm_mask))
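    # Collect the original token ids at masked positions as targets, then overwrite those positions
    # in the input with the <MASK> token id.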
targets = torch.stack([batch_data[i] for i in range(lm_mask.size(0)) if lm_mask[i]]).view(-1)
batch_data = batch_data.masked_fill(lm_mask.bool().unsqueeze(1), mask_id)
return batch_data, lm_mask, targets
def process_raw_data(raw_data, args):
_num = raw_data.size(0) // (args.batch_size * args.bptt)
raw_data = raw_data[:(_num * args.batch_size * args.bptt)]
return raw_data
def train(model, vocab, train_loss_log, train_data,
optimizer, criterion, ntokens, epoch, args):
model.train()
total_loss = 0.
start_time = time.time()
mask_id = vocab.stoi['<MASK>']
cls_id = vocab.stoi['<cls>']
train_loss_log.append(0.0)
dataloader = DataLoader(train_data, batch_size=args.batch_size * args.bptt,
shuffle=False, collate_fn=lambda b: collate_batch(b, args, mask_id, cls_id))
forward_pyth_elapsed = []
forward_cuda_elapsed = []
forward_comm_elapsed = []
forward_comp_elapsed = []
backward_pyth_elapsed = []
backward_cuda_elapsed = []
for batch, (data, lm_mask, targets) in enumerate(dataloader):
optimizer.zero_grad()
data = data.to(0)
targets = targets.to(args.gpus - 1)
data = data.transpose(0, 1)
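        # CUDA events time the forward pass on-device; the surrounding sync_all_device + time.time()
        # calls capture the Python-side wall-clock duration for comparison.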
fwd_tik = torch.cuda.Event(enable_timing=True)
fwd_tok = torch.cuda.Event(enable_timing=True)
sync_all_device(args.gpus)
forward_start_time = time.time()
fwd_tik.record()
output = model(data)
output = torch.stack([output[i] for i in range(lm_mask.size(0)) if lm_mask[i]])
loss = criterion(output.view(-1, ntokens), targets)
total_loss += loss.item()
fwd_tok.record()
fwd_tok.synchronize()
fwd_delay = fwd_tik.elapsed_time(fwd_tok)
forward_cuda_elapsed.append(fwd_delay)
forward_comp_elapsed.append(model.get_fwd_compute_delay())
forward_comm_elapsed.append(model.get_fwd_communication_delay()) # forward_comm_elapsed.append(fwd_delay - model.get_fwd_compute_delay())
sync_all_device(args.gpus)
forward_pyth_elapsed.append((time.time() - forward_start_time) * 1000)
bwd_tik = torch.cuda.Event(enable_timing=True)
bwd_tok = torch.cuda.Event(enable_timing=True)
backward_start_time = time.time()
bwd_tik.record()
loss.backward()
bwd_tok.record()
bwd_tok.synchronize()
bwd_delay = bwd_tik.elapsed_time(bwd_tok)
backward_cuda_elapsed.append(bwd_delay)
sync_all_device(args.gpus)
backward_pyth_elapsed.append((time.time() - backward_start_time) * 1000)
optimizer.step()
if (batch + 1) % args.log_interval == 0:
cur_loss = total_loss / args.log_interval
elapsed = time.time() - start_time
train_loss_log[-1] = cur_loss
num_of_batches = len(train_data) // (args.bptt * args.batch_size)
last = 10 # len(forward_comm_elapsed) // 2
f_comm_last = forward_comm_elapsed[-last:]
f_comm_last_mean = statistics.mean(f_comm_last)
f_comm_last_std = statistics.stdev(f_comm_last) if len(f_comm_last) > 1 else 0.0
f_comp_last = forward_comp_elapsed[-last:]
f_comp_last_mean = statistics.mean(f_comp_last)
f_comp_last_std = statistics.stdev(f_comp_last) if len(f_comp_last) > 1 else 0.0
f_last = forward_cuda_elapsed[-last:]
f_last_mean = statistics.mean(f_last)
f_last_std = statistics.stdev(f_last) if len(f_last) > 1 else 0.0
b_last = backward_cuda_elapsed[-last:]
b_last_mean = statistics.mean(b_last)
b_last_std = statistics.stdev(b_last) if len(b_last) > 1 else 0.0
print(
f"EPOCH:{epoch:2}|"
f"BATCH:{(batch + 1):3}/{num_of_batches:3}|"
f"LOSS:{cur_loss:5.2f}|"
"\t"
f"TIME:{(elapsed * 1000 / args.log_interval):10.2f} = {forward_pyth_elapsed[-1]:10.2f} + {backward_pyth_elapsed[-1]:10.2f}|"
"\t"
f"FORWARD:{forward_cuda_elapsed[-1]:10.2f}({f_last_mean:10.2f} ±{f_last_std:8.2f})=({f_comp_last_mean:10.2f} ±{f_comp_last_std:8.2f})+({f_comm_last_mean:10.2f} ±{f_comm_last_std:8.2f}) |"
"\t"
f"BACKWARD:{backward_cuda_elapsed[-1]:10.2f}({b_last_mean:10.2f} ±{b_last_std:8.2f})|"
)
total_loss = 0
start_time = time.time()
def run_main(args):
torch.manual_seed(args.seed)
import torchtext
if args.dataset == 'WikiText103':
from torchtext.experimental.datasets import WikiText103 as WLMDataset
elif args.dataset == 'WikiText2':
from torchtext.experimental.datasets import WikiText2 as WLMDataset
elif args.dataset == 'WMTNewsCrawl':
from torchtext.experimental.datasets import WMTNewsCrawl as WLMDataset
elif args.dataset == 'EnWik9':
from torchtext.datasets import EnWik9
elif args.dataset == 'BookCorpus':
from data import BookCorpus
else:
print("dataset for MLM task is not supported")
try:
vocab = torch.load(args.save_vocab)
except:
print(f"WLMDataset = {WLMDataset}")
train_dataset, valid_dataset, test_dataset = WLMDataset()
old_vocab = train_dataset.vocab
print(f"len(old_vocab) = {len(old_vocab)}")
vocab = torchtext.vocab.Vocab(counter=old_vocab.freqs,
specials=['<unk>', '<pad>', '<MASK>'])
with open(args.save_vocab, 'wb') as f:
torch.save(vocab, f)
if args.dataset == 'WikiText103' or args.dataset == 'WikiText2':
train_dataset, valid_dataset, test_dataset = WLMDataset(vocab=vocab)
train_dataset.data = torch.cat(tuple(filter(lambda t: t.numel() > 0, train_dataset)))
valid_dataset.data = torch.cat(tuple(filter(lambda t: t.numel() > 0, valid_dataset)))
test_dataset.data = torch.cat(tuple(filter(lambda t: t.numel() > 0, test_dataset)))
elif args.dataset == 'WMTNewsCrawl':
from torchtext.experimental.datasets import WikiText2
test_dataset, valid_dataset = WikiText2(vocab=vocab, split=('test', 'valid'))
valid_dataset.data = torch.cat(tuple(filter(lambda t: t.numel() > 0, valid_dataset)))
test_dataset.data = torch.cat(tuple(filter(lambda t: t.numel() > 0, test_dataset)))
train_dataset = WLMDataset(vocab=vocab, split='train')
train_dataset.data = torch.cat(tuple(filter(lambda t: t.numel() > 0, train_dataset)))
elif args.dataset == 'EnWik9':
enwik9 = EnWik9()
idx1, idx2 = int(len(enwik9) * 0.8), int(len(enwik9) * 0.9)
train_data = torch.tensor([vocab.stoi[_id]
for _id in enwik9[0:idx1]]).long()
val_data = torch.tensor([vocab.stoi[_id]
for _id in enwik9[idx1:idx2]]).long()
test_data = torch.tensor([vocab.stoi[_id]
for _id in enwik9[idx2:]]).long()
from torchtext.experimental.datasets import LanguageModelingDataset
train_dataset = LanguageModelingDataset(train_data, vocab, lambda x: x)
valid_dataset = LanguageModelingDataset(val_data, vocab, lambda x: x)
test_dataset = LanguageModelingDataset(test_data, vocab, lambda x: x)
elif args.dataset == 'BookCorpus':
train_dataset, valid_dataset, test_dataset = BookCorpus(vocab)
train_data = process_raw_data(train_dataset.data, args)
val_data = process_raw_data(valid_dataset.data, args)
test_data = process_raw_data(test_dataset.data, args)
ntokens = len(train_dataset.get_vocab())
print(f"Vocabulary size = {ntokens}")
if args.gpus == 1:
model = LocalSequential(
nn.Sequential(
MLMTask(ntokens, args.emsize, args.nhead, args.nhid, args.nlayers, args.dropout).to(0)
)
)
elif args.gpus == 2:
assert(args.nlayers % 2 == 0)
model = LocalSequential(
nn.Sequential(
MLMTaskEmbedding(ntokens, args.emsize).to(0),
MLMTaskEncoder(args.emsize, args.nhead, args.nhid, args.nlayers // 2, args.dropout).to(0),
),
nn.Sequential(
MLMTaskEncoder(args.emsize, args.nhead, args.nhid, args.nlayers // 2, args.dropout).to(1),
MLMTaskHead(ntokens, args.emsize).to(1),
),
)
else:
assert(args.nlayers % (args.gpus - 2) == 0)
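        # Pipeline split across >2 GPUs: embedding on GPU 0, encoder blocks divided evenly over the
        # middle GPUs, and the LM head on the last GPU.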
model = LocalSequential(
MLMTaskEmbedding(ntokens, args.emsize).to(0),
*(MLMTaskEncoder(args.emsize, args.nhead, args.nhid, args.nlayers // (args.gpus - 2), args.dropout).to(i) for i in range(1, args.gpus - 1)),
MLMTaskHead(ntokens, args.emsize).to(args.gpus - 1),
)
params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'Total parameters = {params // 10**6}M')
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
best_val_loss = None
train_loss_log, val_loss_log = [], []
for epoch in range(1, args.epochs + 1):
epoch_start_time = time.time()
train(model, train_dataset.vocab, train_loss_log, train_data,
optimizer, criterion, ntokens, epoch, args)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Pipeline experiments')
parser.add_argument('--emsize', type=int, default=768,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=3072,
help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=12,
help='number of layers')
parser.add_argument('--nhead', type=int, default=12,
help='the number of heads in the encoder/decoder of the transformer model')
parser.add_argument('--lr', type=float, default=0.1,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.1,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=8,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=32, metavar='N',
help='batch size')
parser.add_argument('--bptt', type=int, default=128,
help='sequence length')
parser.add_argument('--dropout', type=float, default=0.2,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--seed', type=int, default=5431916812,
help='random seed')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='report interval')
parser.add_argument('--save-vocab', type=str, default='torchtext_bert_vocab.pt',
help='path to save the vocab')
parser.add_argument('--mask_frac', type=float, default=0.15,
help='the fraction of masked tokens')
parser.add_argument('--dataset', type=str, default='WikiText2',
help='dataset used for MLM task')
parser.add_argument('--gpus', type=int, default=8,
help='number of GPUs per worker node to use')
args = parser.parse_args()
run_main(args)
| [
"torch.zeros",
"torch.cuda.Event",
"torch.save",
"torch.randperm",
"torch.ones",
"torch.manual_seed",
"torch.tensor",
"torch.load",
"torch.nn.CrossEntropyLoss"
] | 1.7.1 | imaginary-person/pipeline_experiments | 32d20f1b9a4192e75ed6ba709c9acd2e0cf23e06 |
1.7 | import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
import torch.multiprocessing as mp
import torch.distributed.rpc as rpc
import os
import concurrent.futures
from torch.distributed.optim import DistributedOptimizer
from torch.distributed.rpc import RRef
import torch.distributed.autograd as dist_autograd
from tqdm import tqdm
from rpc_framework import MyRPCPipeline, MyRPCPipelineWrapper
def LayerOnDevice(device):
return nn.Sequential(
nn.Conv2d(1, 16, 3, 1),
nn.ReLU(),
nn.Conv2d(16, 32, 3, 1),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Flatten(1),
nn.Linear(4608, 128),
nn.ReLU(),
nn.Linear(128, 10),
).to(device)
def run_main():
rref = rpc.remote("worker1", LayerOnDevice, args=("cuda:0",))
for _ in range(100):
x = torch.randn(100, 1, 28, 28).to("cuda:1")
actual = rref.remote().forward(x).to_here()
expected = rref.rpc_sync().forward(x)
assert((expected == actual).all())
def run_worker(rank, world_size):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '29500'
options = rpc.TensorPipeRpcBackendOptions(num_worker_threads=256)
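    # Device maps let TensorPipe RPC move CUDA tensors directly between processes: the master's cuda:1
    # is paired with worker1's cuda:0, and the worker sets the reverse mapping below.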
if rank == 0:
options.set_device_map("worker1", {1:0})
rpc.init_rpc(
"master",
rank=rank,
world_size=world_size,
rpc_backend_options=options
)
run_main()
else:
if rank == 1:
options.set_device_map("master", {0:1})
rpc.init_rpc(
f"worker{rank}",
rank=rank,
world_size=world_size,
rpc_backend_options=options
)
rpc.shutdown()
if __name__=="__main__":
gpus = 1
world_size = gpus + 1
mp.spawn(run_worker, args=(world_size,), nprocs=world_size, join=True)
| [
"torch.nn.Linear",
"torch.distributed.rpc.remote",
"torch.nn.MaxPool2d",
"torch.distributed.rpc.TensorPipeRpcBackendOptions",
"torch.multiprocessing.spawn",
"torch.nn.ReLU",
"torch.distributed.rpc.init_rpc",
"torch.nn.Conv2d",
"torch.distributed.rpc.shutdown",
"torch.randn",
"torch.nn.Flatten"
] | 1.7.1 | imaginary-person/pipeline_experiments | 32d20f1b9a4192e75ed6ba709c9acd2e0cf23e06 |
1.1 | import argparse
import datetime
import glob
import os
import re
import time
from pathlib import Path
import numpy as np
import torch
from tensorboardX import SummaryWriter
from eval_utils import eval_utils
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader
from pcdet.models import build_network
from pcdet.utils import common_utils
def parse_config():
parser = argparse.ArgumentParser(description='arg parser')
parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distributed training')
parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
help='set extra config keys if needed')
parser.add_argument('--max_waiting_mins', type=int, default=30, help='max waiting minutes')
parser.add_argument('--start_epoch', type=int, default=0, help='')
parser.add_argument('--eval_tag', type=str, default='default', help='eval tag for this experiment')
parser.add_argument('--eval_all', action='store_true', default=False, help='whether to evaluate all checkpoints')
parser.add_argument('--ckpt_dir', type=str, default=None, help='specify a ckpt directory to be evaluated if needed')
parser.add_argument('--save_to_file', action='store_true', default=False, help='')
args = parser.parse_args()
cfg_from_yaml_file(args.cfg_file, cfg)
cfg.TAG = Path(args.cfg_file).stem
cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1]) # remove 'cfgs' and 'xxxx.yaml'
np.random.seed(1024)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs, cfg)
return args, cfg
def eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=False):
# load checkpoint
model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test)
model.cuda()
# start evaluation
eval_utils.eval_one_epoch(
cfg, model, test_loader, epoch_id, logger, dist_test=dist_test,
result_dir=eval_output_dir, save_to_file=args.save_to_file
)
def get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args):
ckpt_list = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth'))
ckpt_list.sort(key=os.path.getmtime, reverse=True)
evaluated_ckpt_list = [float(x.strip()) for x in open(ckpt_record_file, 'r').readlines()]
for cur_ckpt in ckpt_list:
num_list = re.findall('checkpoint_epoch_(.*).pth', cur_ckpt)
if num_list.__len__() == 0:
continue
epoch_id = num_list[-1]
if 'optim' in epoch_id:
continue
if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch:
return epoch_id, cur_ckpt
return -1, None
def repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=False):
# evaluated ckpt record
ckpt_record_file = eval_output_dir / ('eval_list_%s.txt' % cfg.DATA_CONFIG.DATA_SPLIT['test'])
with open(ckpt_record_file, 'a'):
pass
# tensorboard log
if cfg.LOCAL_RANK == 0:
tb_log = SummaryWriter(log_dir=str(eval_output_dir / ('tensorboard_%s' % cfg.DATA_CONFIG.DATA_SPLIT['test'])))
total_time = 0
first_eval = True
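    # Poll the checkpoint directory: evaluate each new checkpoint as it appears, waiting up to
    # max_waiting_mins between checks before giving up.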
while True:
# check whether there is checkpoint which is not evaluated
cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args)
if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:
wait_second = 30
if cfg.LOCAL_RANK == 0:
print('Wait %s seconds for next check (progress: %.1f / %d minutes): %s \r'
% (wait_second, total_time * 1.0 / 60, args.max_waiting_mins, ckpt_dir), end='', flush=True)
time.sleep(wait_second)
total_time += 30
if total_time > args.max_waiting_mins * 60 and (first_eval is False):
break
continue
total_time = 0
first_eval = False
model.load_params_from_file(filename=cur_ckpt, logger=logger, to_cpu=dist_test)
model.cuda()
# start evaluation
cur_result_dir = eval_output_dir / ('epoch_%s' % cur_epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
tb_dict = eval_utils.eval_one_epoch(
cfg, model, test_loader, cur_epoch_id, logger, dist_test=dist_test,
result_dir=cur_result_dir, save_to_file=args.save_to_file
)
if cfg.LOCAL_RANK == 0:
for key, val in tb_dict.items():
tb_log.add_scalar(key, val, cur_epoch_id)
# record this epoch which has been evaluated
with open(ckpt_record_file, 'a') as f:
print('%s' % cur_epoch_id, file=f)
logger.info('Epoch %s has been evaluated' % cur_epoch_id)
def main():
args, cfg = parse_config()
if args.launcher == 'none':
dist_test = False
total_gpus = 1
else:
total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
args.tcp_port, args.local_rank, backend='nccl'
)
dist_test = True
if args.batch_size is None:
args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
else:
assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
args.batch_size = args.batch_size // total_gpus
output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
output_dir.mkdir(parents=True, exist_ok=True)
eval_output_dir = output_dir / 'eval'
if not args.eval_all:
num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
epoch_id = num_list[-1] if num_list.__len__() > 0 else 'no_number'
eval_output_dir = eval_output_dir / ('epoch_%s' % epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
else:
eval_output_dir = eval_output_dir / 'eval_all_default'
if args.eval_tag is not None:
eval_output_dir = eval_output_dir / args.eval_tag
eval_output_dir.mkdir(parents=True, exist_ok=True)
log_file = eval_output_dir / ('log_eval_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
# log to file
logger.info('**********************Start logging**********************')
gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
if dist_test:
logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
for key, val in vars(args).items():
logger.info('{:16} {}'.format(key, val))
log_config_to_file(cfg, logger=logger)
ckpt_dir = args.ckpt_dir if args.ckpt_dir is not None else output_dir / 'ckpt'
test_set, test_loader, sampler = build_dataloader(
dataset_cfg=cfg.DATA_CONFIG,
class_names=cfg.CLASS_NAMES,
batch_size=args.batch_size,
dist=dist_test, workers=args.workers, logger=logger, training=False
)
model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=test_set)
with torch.no_grad():
if args.eval_all:
repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=dist_test)
else:
eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=dist_test)
if __name__ == '__main__':
main()
| [
"torch.no_grad"
] | 1.1 | fjczx/OpenPCDet-lazurite | e3f17ab17b2c295e1786e34c6feb86adffe84b49 |
1.0 | import torch
import time
import sys
import os
import math
import random
from termcolor import colored
from .distributions import Empirical
from . import util, state, TraceMode, PriorInflation, InferenceEngine, InferenceNetwork, ImportanceWeighting, Optimizer, LearningRateScheduler, AddressDictionary
from .nn import InferenceNetwork as InferenceNetworkBase
from .nn import OnlineDataset, OfflineDataset, InferenceNetworkFeedForward, InferenceNetworkLSTM
from .remote import ModelServer
class Model():
def __init__(self, name='Unnamed pyprob model', address_dict_file_name=None):
super().__init__()
self.name = name
self._inference_network = None
if address_dict_file_name is None:
self._address_dictionary = None
else:
self._address_dictionary = AddressDictionary(address_dict_file_name)
def forward(self):
raise NotImplementedError()
def _trace_generator(self, trace_mode=TraceMode.PRIOR, prior_inflation=PriorInflation.DISABLED, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING, inference_network=None, observe=None, metropolis_hastings_trace=None, likelihood_importance=1., importance_weighting=ImportanceWeighting.IW0, *args, **kwargs):
state._init_traces(func=self.forward, trace_mode=trace_mode, prior_inflation=prior_inflation, inference_engine=inference_engine, inference_network=inference_network, observe=observe, metropolis_hastings_trace=metropolis_hastings_trace, address_dictionary=self._address_dictionary, likelihood_importance=likelihood_importance, importance_weighting=importance_weighting)
while True:
state._begin_trace()
result = self.forward(*args, **kwargs)
trace = state._end_trace(result)
yield trace
def _traces(self, num_traces=10, trace_mode=TraceMode.PRIOR, prior_inflation=PriorInflation.DISABLED, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING, inference_network=None, map_func=None, silent=False, observe=None, file_name=None, likelihood_importance=1., *args, **kwargs):
generator = self._trace_generator(trace_mode=trace_mode, prior_inflation=prior_inflation, inference_engine=inference_engine, inference_network=inference_network, observe=observe, likelihood_importance=likelihood_importance, *args, **kwargs)
traces = Empirical(file_name=file_name)
if map_func is None:
map_func = lambda trace: trace
time_start = time.time()
if (util._verbosity > 1) and not silent:
len_str_num_traces = len(str(num_traces))
print('Time spent | Time remain.| Progress | {} | Traces/sec'.format('Trace'.ljust(len_str_num_traces * 2 + 1)))
prev_duration = 0
for i in range(num_traces):
if (util._verbosity > 1) and not silent:
duration = time.time() - time_start
if (duration - prev_duration > util._print_refresh_rate) or (i == num_traces - 1):
prev_duration = duration
traces_per_second = (i + 1) / duration
print('{} | {} | {} | {}/{} | {:,.2f} '.format(util.days_hours_mins_secs_str(duration), util.days_hours_mins_secs_str((num_traces - i) / traces_per_second), util.progress_bar(i+1, num_traces), str(i+1).rjust(len_str_num_traces), num_traces, traces_per_second), end='\r')
sys.stdout.flush()
trace = next(generator)
if trace_mode == TraceMode.PRIOR:
log_weight = 1.
else:
log_weight = trace.log_importance_weight
traces.add(map_func(trace), log_weight)
if (util._verbosity > 1) and not silent:
print()
traces.finalize()
return traces
def get_trace(self, *args, **kwargs):
return next(self._trace_generator(*args, **kwargs))
def prior(self, num_traces=10, prior_inflation=PriorInflation.DISABLED, map_func=None, file_name=None, likelihood_importance=1., *args, **kwargs):
prior = self._traces(num_traces=num_traces, trace_mode=TraceMode.PRIOR, prior_inflation=prior_inflation, map_func=map_func, file_name=file_name, likelihood_importance=likelihood_importance, *args, **kwargs)
prior.rename('Prior, traces: {:,}'.format(prior.length))
prior.add_metadata(op='prior', num_traces=num_traces, prior_inflation=str(prior_inflation), likelihood_importance=likelihood_importance)
return prior
def prior_results(self, num_traces=10, prior_inflation=PriorInflation.DISABLED, map_func=lambda trace: trace.result, file_name=None, likelihood_importance=1., *args, **kwargs):
return self.prior(num_traces=num_traces, prior_inflation=prior_inflation, map_func=map_func, file_name=file_name, likelihood_importance=likelihood_importance, *args, **kwargs)
def posterior(self, num_traces=10, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING, initial_trace=None, map_func=None, observe=None, file_name=None, thinning_steps=None, likelihood_importance=1., *args, **kwargs):
if inference_engine == InferenceEngine.IMPORTANCE_SAMPLING:
posterior = self._traces(num_traces=num_traces, trace_mode=TraceMode.POSTERIOR, inference_engine=inference_engine, inference_network=None, map_func=map_func, observe=observe, file_name=file_name, likelihood_importance=likelihood_importance, *args, **kwargs)
posterior.rename('Posterior, IS, traces: {:,}, ESS: {:,.2f}'.format(posterior.length, posterior.effective_sample_size))
posterior.add_metadata(op='posterior', num_traces=num_traces, inference_engine=str(inference_engine), effective_sample_size=posterior.effective_sample_size, likelihood_importance=likelihood_importance)
elif inference_engine == InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK:
if self._inference_network is None:
raise RuntimeError('Cannot run inference engine IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK because no inference network for this model is available. Use learn_inference_network or load_inference_network first.')
with torch.no_grad():
posterior = self._traces(num_traces=num_traces, trace_mode=TraceMode.POSTERIOR, inference_engine=inference_engine, inference_network=self._inference_network, map_func=map_func, observe=observe, file_name=file_name, likelihood_importance=likelihood_importance, *args, **kwargs)
posterior.rename('Posterior, IC, traces: {:,}, train. traces: {:,}, ESS: {:,.2f}'.format(posterior.length, self._inference_network._total_train_traces, posterior.effective_sample_size))
posterior.add_metadata(op='posterior', num_traces=num_traces, inference_engine=str(inference_engine), effective_sample_size=posterior.effective_sample_size, likelihood_importance=likelihood_importance, train_traces=self._inference_network._total_train_traces)
else: # inference_engine == InferenceEngine.LIGHTWEIGHT_METROPOLIS_HASTINGS or inference_engine == InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS
posterior = Empirical(file_name=file_name)
if map_func is None:
map_func = lambda trace: trace
if initial_trace is None:
current_trace = next(self._trace_generator(trace_mode=TraceMode.POSTERIOR, inference_engine=inference_engine, observe=observe, *args, **kwargs))
else:
current_trace = initial_trace
time_start = time.time()
traces_accepted = 0
samples_reused = 0
samples_all = 0
if thinning_steps is None:
thinning_steps = 1
if util._verbosity > 1:
len_str_num_traces = len(str(num_traces))
print('Time spent | Time remain.| Progress | {} | Accepted|Smp reuse| Traces/sec'.format('Trace'.ljust(len_str_num_traces * 2 + 1)))
prev_duration = 0
for i in range(num_traces):
if util._verbosity > 1:
duration = time.time() - time_start
if (duration - prev_duration > util._print_refresh_rate) or (i == num_traces - 1):
prev_duration = duration
traces_per_second = (i + 1) / duration
print('{} | {} | {} | {}/{} | {} | {} | {:,.2f} '.format(util.days_hours_mins_secs_str(duration), util.days_hours_mins_secs_str((num_traces - i) / traces_per_second), util.progress_bar(i+1, num_traces), str(i+1).rjust(len_str_num_traces), num_traces, '{:,.2f}%'.format(100 * (traces_accepted / (i + 1))).rjust(7), '{:,.2f}%'.format(100 * samples_reused / max(1, samples_all)).rjust(7), traces_per_second), end='\r')
sys.stdout.flush()
candidate_trace = next(self._trace_generator(trace_mode=TraceMode.POSTERIOR, inference_engine=inference_engine, metropolis_hastings_trace=current_trace, observe=observe, *args, **kwargs))
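                # Lightweight (single-site) MH acceptance ratio: trace-length correction, change in
                # observed log-likelihood, log-prob adjustment for reused samples, plus the transition
                # log-prob at the resampled site.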
log_acceptance_ratio = math.log(current_trace.length_controlled) - math.log(candidate_trace.length_controlled) + candidate_trace.log_prob_observed - current_trace.log_prob_observed
for variable in candidate_trace.variables_controlled:
if variable.reused:
log_acceptance_ratio += torch.sum(variable.log_prob)
log_acceptance_ratio -= torch.sum(current_trace.variables_dict_address[variable.address].log_prob)
samples_reused += 1
samples_all += candidate_trace.length_controlled
if state._metropolis_hastings_site_transition_log_prob is None:
print(colored('Warning: trace did not hit the Metropolis Hastings site, ensure that the model is deterministic except pyprob.sample calls', 'red', attrs=['bold']))
else:
log_acceptance_ratio += torch.sum(state._metropolis_hastings_site_transition_log_prob)
# print(log_acceptance_ratio)
if math.log(random.random()) < float(log_acceptance_ratio):
traces_accepted += 1
current_trace = candidate_trace
# do thinning
if i % thinning_steps == 0:
posterior.add(map_func(current_trace))
if util._verbosity > 1:
print()
posterior.finalize()
posterior.rename('Posterior, {}, traces: {:,}{}, accepted: {:,.2f}%, sample reuse: {:,.2f}%'.format('LMH' if inference_engine == InferenceEngine.LIGHTWEIGHT_METROPOLIS_HASTINGS else 'RMH', posterior.length, '' if thinning_steps == 1 else ' (thinning steps: {:,})'.format(thinning_steps), 100 * (traces_accepted / num_traces), 100 * samples_reused / samples_all))
posterior.add_metadata(op='posterior', num_traces=num_traces, inference_engine=str(inference_engine), likelihood_importance=likelihood_importance, thinning_steps=thinning_steps, num_traces_accepted=traces_accepted, num_samples_reuised=samples_reused, num_samples=samples_all)
return posterior
def posterior_results(self, num_traces=10, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING, initial_trace=None, map_func=lambda trace: trace.result, observe=None, file_name=None, thinning_steps=None, *args, **kwargs):
return self.posterior(num_traces=num_traces, inference_engine=inference_engine, initial_trace=initial_trace, map_func=map_func, observe=observe, file_name=file_name, thinning_steps=thinning_steps, *args, **kwargs)
def reset_inference_network(self):
self._inference_network = None
def learn_inference_network(self, num_traces, num_traces_end=1e9, inference_network=InferenceNetwork.FEEDFORWARD, prior_inflation=PriorInflation.DISABLED, dataset_dir=None, dataset_valid_dir=None, observe_embeddings={}, batch_size=64, valid_size=None, valid_every=None, optimizer_type=Optimizer.ADAM, learning_rate_init=0.001, learning_rate_end=1e-6, learning_rate_scheduler_type=LearningRateScheduler.NONE, momentum=0.9, weight_decay=0., save_file_name_prefix=None, save_every_sec=600, pre_generate_layers=True, distributed_backend=None, distributed_params_sync_every_iter=10000, distributed_num_buckets=None, dataloader_offline_num_workers=0, stop_with_bad_loss=True, log_file_name=None, lstm_dim=512, lstm_depth=1, proposal_mixture_components=10):
if dataset_dir is None:
dataset = OnlineDataset(model=self, prior_inflation=prior_inflation)
else:
dataset = OfflineDataset(dataset_dir=dataset_dir)
if dataset_valid_dir is None:
dataset_valid = None
else:
dataset_valid = OfflineDataset(dataset_dir=dataset_valid_dir)
if self._inference_network is None:
print('Creating new inference network...')
if inference_network == InferenceNetwork.FEEDFORWARD:
self._inference_network = InferenceNetworkFeedForward(model=self, observe_embeddings=observe_embeddings, proposal_mixture_components=proposal_mixture_components)
elif inference_network == InferenceNetwork.LSTM:
self._inference_network = InferenceNetworkLSTM(model=self, observe_embeddings=observe_embeddings, lstm_dim=lstm_dim, lstm_depth=lstm_depth, proposal_mixture_components=proposal_mixture_components)
else:
raise ValueError('Unknown inference_network: {}'.format(inference_network))
if pre_generate_layers:
if dataset_valid_dir is not None:
self._inference_network._pre_generate_layers(dataset_valid, save_file_name_prefix=save_file_name_prefix)
if dataset_dir is not None:
self._inference_network._pre_generate_layers(dataset, save_file_name_prefix=save_file_name_prefix)
else:
print('Continuing to train existing inference network...')
print('Total number of parameters: {:,}'.format(self._inference_network._history_num_params[-1]))
self._inference_network.to(device=util._device)
self._inference_network.optimize(num_traces=num_traces, dataset=dataset, dataset_valid=dataset_valid, num_traces_end=num_traces_end, batch_size=batch_size, valid_every=valid_every, optimizer_type=optimizer_type, learning_rate_init=learning_rate_init, learning_rate_end=learning_rate_end, learning_rate_scheduler_type=learning_rate_scheduler_type, momentum=momentum, weight_decay=weight_decay, save_file_name_prefix=save_file_name_prefix, save_every_sec=save_every_sec, distributed_backend=distributed_backend, distributed_params_sync_every_iter=distributed_params_sync_every_iter, distributed_num_buckets=distributed_num_buckets, dataloader_offline_num_workers=dataloader_offline_num_workers, stop_with_bad_loss=stop_with_bad_loss, log_file_name=log_file_name)
def save_inference_network(self, file_name):
if self._inference_network is None:
raise RuntimeError('The model has no trained inference network.')
self._inference_network._save(file_name)
def load_inference_network(self, file_name):
self._inference_network = InferenceNetworkBase._load(file_name)
# The following is due to a temporary hack related with https://github.com/pytorch/pytorch/issues/9981 and can be deprecated by using dill as pickler with torch > 0.4.1
self._inference_network._model = self
def save_dataset(self, dataset_dir, num_traces, num_traces_per_file, prior_inflation=PriorInflation.DISABLED, *args, **kwargs):
if not os.path.exists(dataset_dir):
print('Directory does not exist, creating: {}'.format(dataset_dir))
os.makedirs(dataset_dir)
dataset = OnlineDataset(self, None, prior_inflation=prior_inflation)
dataset.save_dataset(dataset_dir=dataset_dir, num_traces=num_traces, num_traces_per_file=num_traces_per_file, *args, **kwargs)
class RemoteModel(Model):
def __init__(self, server_address='tcp://127.0.0.1:5555', before_forward_func=None, after_forward_func=None, *args, **kwargs):
self._server_address = server_address
self._model_server = None
        self._before_forward_func = before_forward_func # Optional method to run before each forward call of the remote model (simulator)
self._after_forward_func = after_forward_func # Optional method to run after each forward call of the remote model (simulator)
super().__init__(*args, **kwargs)
def close(self):
if self._model_server is not None:
self._model_server.close()
def forward(self):
if self._model_server is None:
self._model_server = ModelServer(self._server_address)
self.name = '{} running on {}'.format(self._model_server.model_name, self._model_server.system_name)
if self._before_forward_func is not None:
self._before_forward_func()
        ret = self._model_server.forward() # Calls the forward run of the remote model (simulator)
if self._after_forward_func is not None:
self._after_forward_func()
return ret
| [
"torch.no_grad",
"torch.sum"
] | 1.0.0 | SwapneelM/pyprob | 4d93441ea838c3491a49050ae05d218a34708e6d |
1.3 | import copy
import gc
import json
import os
import random
import warnings
from collections import defaultdict
from typing import Dict
import matplotlib.pyplot as plt
import scipy.misc
import habitat_sim
import magnum as mn
import quaternion
from habitat_sim.utils.common import quat_to_magnum, quat_from_magnum
from fastdtw import fastdtw
import gzip
from transformers.optimization import Adafactor
import lmdb
import msgpack_numpy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import tqdm
from habitat import Config, logger
from habitat.utils.visualizations.utils import append_text_to_image
from habitat_baselines.common.base_trainer import BaseRLTrainer
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.common.environments import get_env_class
from habitat_baselines.common.tensorboard_utils import TensorboardWriter
from habitat_baselines.common.utils import generate_video
from robo_vln_baselines.common.continuous_path_follower import (
ContinuousPathFollower,
track_waypoint
)
from habitat_extensions.utils import observations_to_image
from robo_vln_baselines.common.aux_losses import AuxLosses
from robo_vln_baselines.common.env_utils import (
construct_env,
construct_envs,
construct_envs_auto_reset_false,
SimpleRLEnv
)
from robo_vln_baselines.common.utils import transform_obs, batch_obs, batch_obs_data_collect, repackage_hidden, split_batch_tbptt, repackage_mini_batch
from robo_vln_baselines.models.seq2seq_highlevel_cma import Seq2Seq_HighLevel_CMA as Seq2Seq_HighLevel
from robo_vln_baselines.models.seq2seq_lowlevel import Seq2Seq_LowLevel
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import tensorflow as tf
class ObservationsDict(dict):
def pin_memory(self):
for k, v in self.items():
self[k] = v.pin_memory()
return self
def collate_fn(batch):
"""Each sample in batch: (
obs,
prev_actions,
oracle_actions,
        oracle_stop,
)
"""
def _pad_helper(t, max_len, fill_val=0):
pad_amount = max_len - t.size(0)
if pad_amount == 0:
return t
pad = torch.full_like(t[0:1], fill_val).expand(pad_amount, *t.size()[1:])
return torch.cat([t, pad], dim=0)
def _pad_instruction(t, max_len, fill_val=0):
pad_amount = max_len - t.size(1)
if pad_amount == 0:
return t
pad = torch.full_like(t[:,0], fill_val).expand(*t.size()[:1], pad_amount)
return torch.cat([t, pad], dim=1)
transposed = list(zip(*batch))
observations_batch = list(transposed[0])
prev_actions_batch = list(transposed[1])
corrected_actions_batch = list(transposed[2])
oracle_stop_batch = list(transposed[3])
N = len(corrected_actions_batch)
B = len(prev_actions_batch)
new_observations_batch = defaultdict(list)
for sensor in observations_batch[0]:
if sensor == 'instruction':
for bid in range(N):
new_observations_batch[sensor].append(observations_batch[bid][sensor])
else:
for bid in range(B):
new_observations_batch[sensor].append(observations_batch[bid][sensor])
observations_batch = new_observations_batch
max_traj_len = max(ele.size(0) for ele in prev_actions_batch)
max_insr_len = max(ele.size(1) for ele in observations_batch['instruction'])
for bid in range(B):
for sensor in observations_batch:
if sensor == 'instruction':
observations_batch[sensor][bid] = _pad_instruction(
observations_batch[sensor][bid], max_insr_len, fill_val=0.0
)
continue
observations_batch[sensor][bid] = _pad_helper(
observations_batch[sensor][bid], max_traj_len, fill_val=0.0
)
prev_actions_batch[bid] = _pad_helper(prev_actions_batch[bid], max_traj_len)
corrected_actions_batch[bid] = _pad_helper(
corrected_actions_batch[bid], max_traj_len, fill_val=0.0
)
oracle_stop_batch[bid] = _pad_helper(oracle_stop_batch[bid], max_traj_len, fill_val=-1.0)
for sensor in observations_batch:
observations_batch[sensor] = torch.stack(observations_batch[sensor], dim=1)
observations_batch[sensor] = observations_batch[sensor].transpose(1,0)
observations_batch[sensor] = observations_batch[sensor].contiguous().view(
-1, *observations_batch[sensor].size()[2:]
)
prev_actions_batch = torch.stack(prev_actions_batch, dim=1)
corrected_actions_batch = torch.stack(corrected_actions_batch, dim=1)
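    # Every timestep is marked not-done except the first, which is zeroed so the recurrent state is
    # reset at the start of each padded trajectory.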
not_done_masks = torch.ones_like(corrected_actions_batch, dtype=torch.float)
not_done_masks[0] = 0
oracle_stop_batch = torch.stack(oracle_stop_batch, dim=1)
prev_actions_batch = prev_actions_batch.transpose(1,0)
not_done_masks = not_done_masks.transpose(1,0)
corrected_actions_batch = corrected_actions_batch.transpose(1,0)
oracle_stop_batch = oracle_stop_batch.transpose(1,0)
observations_batch = ObservationsDict(observations_batch)
return (
observations_batch,
prev_actions_batch.contiguous().view(-1, 2),
not_done_masks.contiguous().view(-1, 2),
corrected_actions_batch.contiguous().view(-1,2),
oracle_stop_batch.contiguous().view(-1,1)
)
def _block_shuffle(lst, block_size):
blocks = [lst[i : i + block_size] for i in range(0, len(lst), block_size)]
random.shuffle(blocks)
return [ele for block in blocks for ele in block]
class IWTrajectoryDataset(torch.utils.data.IterableDataset):
def __init__(
self,
lmdb_features_dir,
use_iw,
inflection_weight_coef=1.0,
lmdb_map_size=1e9,
batch_size=1,
is_bert=False
):
super().__init__()
self.lmdb_features_dir = lmdb_features_dir
self.lmdb_map_size = lmdb_map_size
self.preload_size = batch_size * 100
self._preload = []
self.batch_size = batch_size
self.is_bert = is_bert
if use_iw:
self.inflec_weights = torch.tensor([1.0, inflection_weight_coef])
else:
self.inflec_weights = torch.tensor([1.0, 1.0])
with lmdb.open(
self.lmdb_features_dir,
map_size=int(self.lmdb_map_size),
readonly=True,
lock=False,
) as lmdb_env:
self.length = lmdb_env.stat()["entries"]
def _load_next(self):
if len(self._preload) == 0:
if len(self.load_ordering) == 0:
raise StopIteration
new_preload = []
lengths = []
with lmdb.open(
self.lmdb_features_dir,
map_size=int(self.lmdb_map_size),
readonly=True,
lock=False,
) as lmdb_env, lmdb_env.begin(buffers=True) as txn:
for _ in range(self.preload_size):
if len(self.load_ordering) == 0:
break
new_preload.append(
msgpack_numpy.unpackb(
txn.get(str(self.load_ordering.pop()).encode()), raw=False
)
)
lengths.append(len(new_preload[-1][0]))
sort_priority = list(range(len(lengths)))
random.shuffle(sort_priority)
sorted_ordering = list(range(len(lengths)))
sorted_ordering.sort(key=lambda k: (lengths[k], sort_priority[k]))
for idx in _block_shuffle(sorted_ordering, self.batch_size):
self._preload.append(new_preload[idx])
return self._preload.pop()
def __next__(self):
obs, prev_actions, oracle_actions, stop_step = self._load_next()
discrete_oracle_actions = obs['vln_oracle_action_sensor'].copy()
val = int(stop_step[-1])-1
discrete_oracle_actions[val:]=4
obs['vln_oracle_action_sensor'] = discrete_oracle_actions
oracle_stop = np.zeros_like(obs['vln_oracle_action_sensor'])
oracle_stop[val:] = 1
if self.is_bert:
instruction_batch = obs['instruction'][0]
instruction_batch = np.expand_dims(instruction_batch, axis=0)
obs['instruction'] = instruction_batch
else:
instruction_batch = obs['glove_tokens'][0]
instruction_batch = np.expand_dims(instruction_batch, axis=0)
obs['instruction'] = instruction_batch
del obs['glove_tokens']
for k, v in obs.items():
obs[k] = torch.from_numpy(v)
prev_actions = torch.from_numpy(prev_actions)
oracle_stop = torch.from_numpy(oracle_stop)
oracle_actions = torch.from_numpy(oracle_actions)
return (obs, prev_actions, oracle_actions, oracle_stop)
def __iter__(self):
worker_info = torch.utils.data.get_worker_info()
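        # Shard the LMDB entries across DataLoader workers so each worker iterates over its own
        # contiguous slice of the dataset.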
if worker_info is None:
start = 0
end = self.length
else:
per_worker = int(np.ceil(self.length / worker_info.num_workers))
start = per_worker * worker_info.id
end = min(start + per_worker, self.length)
# Reverse so we can use .pop()
self.load_ordering = list(
reversed(_block_shuffle(list(range(start, end)), self.preload_size))
)
return self
@baseline_registry.register_trainer(name="hierarchical_trainer")
class RoboDaggerTrainer(BaseRLTrainer):
def __init__(self, config=None):
super().__init__(config)
self.high_level = None
self.low_level = None
self.actor_critic = None
self.envs = None
self.device = (
torch.device("cuda", self.config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
self.device2 = (
torch.device("cuda:1")
if torch.cuda.is_available()
else torch.device("cpu")
)
self.lmdb_features_dir = self.config.DAGGER.LMDB_FEATURES_DIR.format(
split=config.TASK_CONFIG.DATASET.SPLIT
)
self.lmdb_eval_dir = self.config.DAGGER.LMDB_EVAL_DIR
def _setup_actor_critic_agent(
self, config: Config, load_from_ckpt: bool, ckpt_path: str
) -> None:
r"""Sets up actor critic and agent.
Args:
config: MODEL config
Returns:
None
"""
config.defrost()
config.TORCH_GPU_ID = self.config.TORCH_GPU_ID
config.freeze()
self.high_level = Seq2Seq_HighLevel(
observation_space=self.envs.observation_space,
num_actions=self.envs.action_space.n,
model_config=config,
batch_size = self.config.DAGGER.BATCH_SIZE,
)
self.low_level = Seq2Seq_LowLevel(
observation_space=self.envs.observation_space,
num_actions=2,
num_sub_tasks=self.envs.action_space.n,
model_config=config,
batch_size = self.config.DAGGER.BATCH_SIZE,
)
self.optimizer_high_level = torch.optim.AdamW(
self.high_level.parameters(), lr=self.config.DAGGER.LR, weight_decay=self.config.MODEL.TRANSFORMER.weight_decay)
self.optimizer_low_level = torch.optim.Adam(
self.low_level.parameters(), lr=self.config.DAGGER.LR,weight_decay=self.config.MODEL.TRANSFORMER.weight_decay
)
self.scheduler_high_level = torch.optim.lr_scheduler.CyclicLR(self.optimizer_high_level, base_lr=2e-6, max_lr=1e-4, step_size_up=1000,step_size_down=30000, cycle_momentum=False)
if not self.config.MODEL.TRANSFORMER.split_gpus:
self.high_level.to(self.device)
if load_from_ckpt:
ckpt_dict = self.load_checkpoint(ckpt_path, map_location="cpu")
self.high_level.load_state_dict(ckpt_dict["high_level_state_dict"])
self.low_level.load_state_dict(ckpt_dict["low_level_state_dict"])
logger.info(f"Loaded weights from checkpoint: {ckpt_path}")
logger.info("Finished setting up actor critic model.")
def save_checkpoint(self, file_name) -> None:
r"""Save checkpoint with specified name.
Args:
file_name: file name for checkpoint
Returns:
None
"""
checkpoint = {
"high_level_state_dict": self.high_level.state_dict(),
"low_level_state_dict": self.low_level.state_dict(),
"config": self.config,
}
torch.save(checkpoint, os.path.join(self.config.CHECKPOINT_FOLDER, file_name))
def load_checkpoint(self, checkpoint_path, *args, **kwargs) -> Dict:
r"""Load checkpoint of specified path as a dict.
Args:
checkpoint_path: path of target checkpoint
*args: additional positional args
**kwargs: additional keyword args
Returns:
dict containing checkpoint info
"""
return torch.load(checkpoint_path, *args, **kwargs)
def _update_dataset(self, data_it):
if torch.cuda.is_available():
with torch.cuda.device(self.device):
torch.cuda.empty_cache()
prev_actions = np.zeros((1,2))
done = False
vel_control = habitat_sim.physics.VelocityControl()
vel_control.controlling_lin_vel = True
vel_control.lin_vel_is_local = True
vel_control.controlling_ang_vel = True
vel_control.ang_vel_is_local = True
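        # Expert demonstrations: follow the ground-truth reference path with a waypoint-tracking
        # velocity controller and store the resulting (vel, omega) actions in the LMDB dataset.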
collected_eps = 0
with tqdm.tqdm(total=self.config.DAGGER.UPDATE_SIZE) as pbar, lmdb.open(
self.lmdb_features_dir, map_size=int(self.config.DAGGER.LMDB_MAP_SIZE)
) as lmdb_env, torch.no_grad():
start_id = lmdb_env.stat()["entries"]
txn = lmdb_env.begin(write=True)
stop_step=0
for episode in range(self.config.DAGGER.UPDATE_SIZE):
episode = []
observations = self.envs.reset()
observations = transform_obs(
observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID, is_bert=self.config.MODEL.INSTRUCTION_ENCODER.is_bert
)
reference_path = self.envs.habitat_env.current_episode.reference_path + [
self.envs.habitat_env.current_episode.goals[0].position
]
continuous_path_follower = ContinuousPathFollower(
self.envs.habitat_env._sim, reference_path, waypoint_threshold=0.4)
is_done = False
steps=0
stop_flag = False
valid_trajectories = True
while continuous_path_follower.progress < 1.0:
steps+=1
if is_done:
break
continuous_path_follower.update_waypoint()
agent_state = self.envs.habitat_env._sim.get_agent_state()
previous_rigid_state = habitat_sim.RigidState(
quat_to_magnum(agent_state.rotation), agent_state.position
)
if np.isnan(continuous_path_follower.waypoint).any() or np.isnan(previous_rigid_state.translation).any() or np.isnan(quaternion.as_euler_angles(quat_from_magnum(previous_rigid_state.rotation))).any():
valid_trajectories = False
break
vel,omega = track_waypoint(
continuous_path_follower.waypoint,
previous_rigid_state,
vel_control,
progress = continuous_path_follower.progress,
dt=self.config.DAGGER.time_step,
)
observations, reward, done, info = self.envs.step(vel_control)
episode_over, success = done
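                    # Once progress along the reference path exceeds 0.985, record the current step as
                    # the oracle stop step; it is stored with the episode and later used as the stop label.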
if continuous_path_follower.progress >0.985 and not stop_flag:
stop_step = steps
stop_flag = True
is_done = episode_over or (success and abs(vel)<0.005)
observations = transform_obs(
observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID, is_bert=self.config.MODEL.INSTRUCTION_ENCODER.is_bert
)
actions = np.expand_dims(np.asarray([vel,omega]), axis=0)
episode.append(
(
observations,
prev_actions,
actions,
stop_step
)
)
prev_actions = actions
# Save episode to LMDB directory
if valid_trajectories:
traj_obs = batch_obs_data_collect([step[0] for step in episode], device=torch.device("cpu"))
for k, v in traj_obs.items():
traj_obs[k] = v.numpy()
transposed_ep = [
traj_obs,
np.array([step[1] for step in episode], dtype=float),
np.array([step[2] for step in episode], dtype=float),
[step[3] for step in episode],
]
txn.put(
str(start_id + collected_eps).encode(),
msgpack_numpy.packb(transposed_ep, use_bin_type=True),
)
pbar.update()
collected_eps += 1
if (
collected_eps % self.config.DAGGER.LMDB_COMMIT_FREQUENCY
) == 0:
txn.commit()
txn = lmdb_env.begin(write=True)
episode = []
prev_actions = np.zeros((1,2))
txn.commit()
self.envs.close()
self.envs = None
def _update_agent(
self, observations, prev_actions, not_done_masks, corrected_actions, oracle_stop, high_recurrent_hidden_states,
low_recurrent_hidden_states, detached_state_low
):
self.optimizer_high_level.zero_grad()
self.optimizer_low_level.zero_grad()
high_level_criterion = nn.CrossEntropyLoss(ignore_index=-1, reduction="mean")
low_level_criterion = nn.MSELoss()
low_level_stop_criterion = nn.BCEWithLogitsLoss()
AuxLosses.clear()
high_recurrent_hidden_states = repackage_hidden(high_recurrent_hidden_states)
low_recurrent_hidden_states = repackage_hidden(low_recurrent_hidden_states)
batch = (observations, high_recurrent_hidden_states, prev_actions, not_done_masks)
output, high_recurrent_hidden_states = self.high_level(batch)
del batch
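        # Oracle action id 0 marks padded timesteps: zero their logits and shift the remaining
        # 1-indexed oracle actions down by one to match the 0-indexed cross-entropy targets.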
high_level_action_mask = observations['vln_oracle_action_sensor'] ==0
output = output.masked_fill_(high_level_action_mask, 0)
observations['vln_oracle_action_sensor'] = observations['vln_oracle_action_sensor'].squeeze(1).to(dtype=torch.int64)
high_level_loss = high_level_criterion(output,(observations['vln_oracle_action_sensor']-1))
high_level_loss.backward()
self.optimizer_high_level.step()
high_level_loss_data = high_level_loss.detach()
del output
self.low_level.to(self.device2)
observations = {
k: v.to(device=self.device2, non_blocking=True)
for k, v in observations.items()
}
discrete_actions = observations['vln_oracle_action_sensor']
discrete_action_mask = discrete_actions ==0
discrete_actions = (discrete_actions-1).masked_fill_(discrete_action_mask, 4)
del observations['vln_oracle_action_sensor']
batch = (observations,
low_recurrent_hidden_states,
prev_actions.to(
device=self.device2, non_blocking=True
),
not_done_masks.to(
device=self.device2, non_blocking=True
),
discrete_actions.view(-1))
del observations, prev_actions, not_done_masks
oracle_stop = oracle_stop.to(self.device2)
output, stop_out, low_recurrent_hidden_states = self.low_level(batch)
corrected_actions = corrected_actions.to(self.device2)
action_mask = corrected_actions==0
output = output.masked_fill_(action_mask, 0)
output = output.to(dtype=torch.float)
corrected_actions = corrected_actions.to(dtype=torch.float)
low_level_action_loss = low_level_criterion(output, corrected_actions)
mask = (oracle_stop!=-1)
oracle_stop = torch.masked_select(oracle_stop, mask)
stop_out = torch.masked_select(stop_out, mask)
low_level_stop_loss = low_level_stop_criterion(stop_out, oracle_stop)
low_level_loss = low_level_action_loss + low_level_stop_loss
low_level_loss.backward()
self.optimizer_low_level.step()
aux_loss_data =0
loss = (high_level_loss_data.item(), low_level_action_loss.detach().item(),
low_level_stop_loss.detach().item(), aux_loss_data)
return loss, high_recurrent_hidden_states, low_recurrent_hidden_states, detached_state_low
def _update_agent_val(
self, observations, prev_actions, not_done_masks, corrected_actions, oracle_stop, high_recurrent_hidden_states,
low_recurrent_hidden_states, detached_state_low
):
high_level_criterion = nn.CrossEntropyLoss(ignore_index=-1, reduction="mean")
low_level_criterion = nn.MSELoss()
low_level_stop_criterion = nn.BCEWithLogitsLoss()
AuxLosses.clear()
high_recurrent_hidden_states = repackage_hidden(high_recurrent_hidden_states)
low_recurrent_hidden_states = repackage_hidden(low_recurrent_hidden_states)
batch = (observations, high_recurrent_hidden_states, prev_actions, not_done_masks)
output, high_recurrent_hidden_states = self.high_level(batch)
del batch
high_level_action_mask = observations['vln_oracle_action_sensor'] ==0
output = output.masked_fill_(high_level_action_mask, 0)
observations['vln_oracle_action_sensor'] = observations['vln_oracle_action_sensor'].squeeze(1).to(dtype=torch.int64)
high_level_loss = high_level_criterion(output,(observations['vln_oracle_action_sensor']-1))
predicted = torch.argmax(output, dim=1)
corrected_mask = ~high_level_action_mask
correct = torch.masked_select((observations['vln_oracle_action_sensor']-1), corrected_mask)
predicted = torch.masked_select(predicted, corrected_mask)
accuracy = (predicted == correct).sum().item()
total = predicted.size(0)
del output
self.low_level.to(self.device2)
observations = {
k: v.to(device=self.device2, non_blocking=True)
for k, v in observations.items()
}
discrete_actions = observations['vln_oracle_action_sensor']
discrete_action_mask = discrete_actions ==0
discrete_actions = (discrete_actions-1).masked_fill_(discrete_action_mask, 4)
batch = (observations,
low_recurrent_hidden_states,
prev_actions.to(
device=self.device2, non_blocking=True
),
not_done_masks.to(
device=self.device2, non_blocking=True
),
discrete_actions.view(-1))
del observations, prev_actions, not_done_masks
oracle_stop = oracle_stop.to(self.device2)
output, stop_out, low_recurrent_hidden_states = self.low_level(batch)
corrected_actions = corrected_actions.to(self.device2)
action_mask = corrected_actions==0
output = output.masked_fill_(action_mask, 0)
output = output.to(dtype=torch.float)
corrected_actions = corrected_actions.to(dtype=torch.float)
low_level_action_loss = low_level_criterion(output, corrected_actions)
mask = (oracle_stop!=-1)
oracle_stop = torch.masked_select(oracle_stop, mask)
stop_out = torch.masked_select(stop_out, mask)
low_level_stop_loss = low_level_stop_criterion(stop_out, oracle_stop)
aux_loss_data =0
loss = (high_level_loss.item(), low_level_action_loss.item(),
low_level_stop_loss.item(), aux_loss_data)
return loss, high_recurrent_hidden_states, low_recurrent_hidden_states, detached_state_low, accuracy, total
def train_epoch(self, diter, length, batch_size, epoch, writer, train_steps):
loss, action_loss, aux_loss = 0, 0, 0
step_id = 0
self.high_level.train()
self.low_level.train()
for batch in tqdm.tqdm(
diter, total=length // batch_size, leave=False
):
( observations_batch,
prev_actions_batch,
not_done_masks,
corrected_actions_batch,
oracle_stop_batch
) = batch
high_recurrent_hidden_states = torch.zeros(
self.high_level.state_encoder.num_recurrent_layers,
self.config.DAGGER.BATCH_SIZE,
self.config.MODEL.STATE_ENCODER.hidden_size,
device=self.device,
)
low_recurrent_hidden_states = torch.zeros(
self.low_level.state_encoder.num_recurrent_layers,
self.config.DAGGER.BATCH_SIZE,
self.config.MODEL.STATE_ENCODER.hidden_size,
device=self.device2,
)
detached_state_low = None
batch_split = split_batch_tbptt(observations_batch, prev_actions_batch, not_done_masks,
corrected_actions_batch, oracle_stop_batch, self.config.DAGGER.tbptt_steps,
self.config.DAGGER.split_dim)
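# Assumption based on the name and on how the splits are consumed below:
# split_batch_tbptt chunks the trajectory batch into windows of
# DAGGER.tbptt_steps along split_dim, so hidden states are carried across
# windows while backpropagation is truncated at window boundaries.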
del observations_batch, prev_actions_batch, not_done_masks, corrected_actions_batch, batch
for split in batch_split:
( observations_batch,
prev_actions_batch,
not_done_masks,
corrected_actions_batch,
oracle_stop_batch
) = split
observations_batch = {
k: v.to(device=self.device, non_blocking=True)
for k, v in observations_batch.items()
}
try:
loss, high_recurrent_hidden_states, low_recurrent_hidden_states, detached_state_low= self._update_agent(
observations_batch,
prev_actions_batch.to(
device=self.device, non_blocking=True
),
not_done_masks.to(
device=self.device, non_blocking=True
),
corrected_actions_batch.to(
device=self.device, non_blocking=True
),
oracle_stop_batch.to(
device=self.device, non_blocking=True
),
high_recurrent_hidden_states,
low_recurrent_hidden_states,
detached_state_low
)
writer.add_scalar(f"Train High Level Action Loss", loss[0], train_steps)
writer.add_scalar(f"Train Low Level Action Loss", loss[1], train_steps)
writer.add_scalar(f"Train Low Level Stop Loss", loss[2], train_steps)
writer.add_scalar(f"Train Low_level Total Loss", loss[1]+loss[2], train_steps)
train_steps += 1
except:
logger.info(
"ERROR: failed to update agent. Updating agent with batch size of 1."
)
loss, action_loss, aux_loss = 0, 0, 0
prev_actions_batch = prev_actions_batch.cpu()
not_done_masks = not_done_masks.cpu()
corrected_actions_batch = corrected_actions_batch.cpu()
weights_batch = weights_batch.cpu()
observations_batch = {
k: v.cpu() for k, v in observations_batch.items()
}
for i in range(not_done_masks.size(0)):
output = self._update_agent(
{
k: v[i].to(
device=self.device, non_blocking=True
)
for k, v in observations_batch.items()
},
prev_actions_batch[i].to(
device=self.device, non_blocking=True
),
not_done_masks[i].to(
device=self.device, non_blocking=True
),
corrected_actions_batch[i].to(
device=self.device, non_blocking=True
),
weights_batch[i].to(
device=self.device, non_blocking=True
),
)
loss += output[0]
action_loss += output[1]
aux_loss += output[2]
self.scheduler_high_level.step()
# self.scheduler_low_level.step()
self.save_checkpoint(
f"ckpt.{self.config.DAGGER.EPOCHS + epoch}.pth"
)
return train_steps
def val_epoch(self, diter, length, batch_size, epoch, writer, val_steps):
loss, aux_loss = 0, 0
step_id = 0
val_high_losses = []
val_low_losses = []
self.high_level.eval()
self.low_level.eval()
correct_labels = 0
total_correct=0
with torch.no_grad():
for batch in tqdm.tqdm(
diter, total=length // batch_size, leave=False
):
( observations_batch,
prev_actions_batch,
not_done_masks,
corrected_actions_batch,
oracle_stop_batch
) = batch
high_recurrent_hidden_states = torch.zeros(
self.high_level.state_encoder.num_recurrent_layers,
self.config.DAGGER.BATCH_SIZE,
self.config.MODEL.STATE_ENCODER.hidden_size,
device=self.device,
)
low_recurrent_hidden_states = torch.zeros(
self.low_level.state_encoder.num_recurrent_layers,
self.config.DAGGER.BATCH_SIZE,
self.config.MODEL.STATE_ENCODER.hidden_size,
device=self.device2,
)
detached_state_low = None
batch_split = split_batch_tbptt(observations_batch, prev_actions_batch, not_done_masks,
corrected_actions_batch, oracle_stop_batch, self.config.DAGGER.tbptt_steps,
self.config.DAGGER.split_dim)
del observations_batch, prev_actions_batch, not_done_masks, corrected_actions_batch, batch
for split in batch_split:
( observations_batch,
prev_actions_batch,
not_done_masks,
corrected_actions_batch,
oracle_stop_batch
) = split
observations_batch = {
k: v.to(device=self.device, non_blocking=True)
for k, v in observations_batch.items()
}
loss, high_recurrent_hidden_states, low_recurrent_hidden_states, detached_state_low, correct, total= self._update_agent_val(
observations_batch,
prev_actions_batch.to(
device=self.device, non_blocking=True
),
not_done_masks.to(
device=self.device, non_blocking=True
),
corrected_actions_batch.to(
device=self.device, non_blocking=True
),
oracle_stop_batch.to(
device=self.device, non_blocking=True
),
high_recurrent_hidden_states,
low_recurrent_hidden_states,
detached_state_low
)
correct_labels+= correct
total_correct+=total
writer.add_scalar(f"Val High Level Action Loss", loss[0], val_steps)
writer.add_scalar(f"Val Low_level Total Loss", loss[1]+loss[2], val_steps)
val_steps += 1
val_high_losses.append(loss[0])
val_low_losses.append(loss[1]+loss[2])
final_accuracy = 100 * correct_labels / total_correct
writer.add_scalar(f"Val High level Loss epoch", np.mean(val_high_losses), epoch)
writer.add_scalar(f"Val Low level Loss epoch", np.mean(val_low_losses), epoch)
writer.add_scalar(f"Validation Accuracy", final_accuracy, epoch)
return val_steps
def train(self) -> None:
r"""Main method for training DAgger.
Returns:
None
"""
os.makedirs(self.lmdb_features_dir, exist_ok=True)
os.makedirs(self.config.CHECKPOINT_FOLDER, exist_ok=True)
if self.config.DAGGER.PRELOAD_LMDB_FEATURES:
try:
lmdb.open(self.lmdb_features_dir, readonly=True)
lmdb.open(self.lmdb_eval_dir, readonly=True)
except lmdb.Error as err:
logger.error("Cannot open database for teacher forcing preload.")
raise err
else:
with lmdb.open(
self.lmdb_features_dir, map_size=int(self.config.DAGGER.LMDB_MAP_SIZE)
) as lmdb_env, lmdb_env.begin(write=True) as txn:
txn.drop(lmdb_env.open_db())
split = self.config.TASK_CONFIG.DATASET.SPLIT
self.config.defrost()
self.config.TASK_CONFIG.TASK.NDTW.SPLIT = split
self.config.TASK_CONFIG.TASK.SDTW.SPLIT = split
# if doing teacher forcing, don't switch the scene until it is complete
if self.config.DAGGER.P == 1.0:
self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = (
-1
)
self.config.freeze()
if self.config.DAGGER.PRELOAD_LMDB_FEATURES:
# when preloading features, it's quicker to just load one env as we just
# need the observation space from it.
single_proc_config = self.config.clone()
single_proc_config.defrost()
single_proc_config.NUM_PROCESSES = 1
single_proc_config.freeze()
self.envs = construct_env(self.config)
else:
self.envs = construct_env(self.config)
self._setup_actor_critic_agent(
self.config.MODEL,
self.config.DAGGER.LOAD_FROM_CKPT,
self.config.DAGGER.CKPT_TO_LOAD,
)
logger.info(
"agent number of high level parameters: {}".format(
sum(param.numel() for param in self.high_level.parameters())
)
)
logger.info(
"agent number of low level parameters: {}".format(
sum(param.numel() for param in self.low_level.parameters())
)
)
if self.config.DAGGER.PRELOAD_LMDB_FEATURES:
self.envs.close()
del self.envs
self.envs = None
with TensorboardWriter(
self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs, purge_step=0
) as writer:
for dagger_it in range(self.config.DAGGER.ITERATIONS):
step_id = 0
if not self.config.DAGGER.PRELOAD_LMDB_FEATURES:
self._update_dataset(
dagger_it + (1 if self.config.DAGGER.LOAD_FROM_CKPT else 0)
)
if torch.cuda.is_available():
with torch.cuda.device(self.device):
torch.cuda.empty_cache()
gc.collect()
dataset = IWTrajectoryDataset(
self.lmdb_features_dir,
self.config.DAGGER.USE_IW,
inflection_weight_coef=self.config.MODEL.inflection_weight_coef,
lmdb_map_size=self.config.DAGGER.LMDB_MAP_SIZE,
batch_size=self.config.DAGGER.BATCH_SIZE,
is_bert = self.config.MODEL.INSTRUCTION_ENCODER.is_bert,
)
diter = torch.utils.data.DataLoader(
dataset,
batch_size=self.config.DAGGER.BATCH_SIZE,
shuffle=False,
collate_fn=collate_fn,
pin_memory=True,
drop_last=True, # drop last batch if smaller
num_workers=1,
)
dataset_eval = IWTrajectoryDataset(
self.lmdb_eval_dir,
self.config.DAGGER.USE_IW,
inflection_weight_coef=self.config.MODEL.inflection_weight_coef,
lmdb_map_size=self.config.DAGGER.LMDB_EVAL_SIZE,
batch_size=self.config.DAGGER.BATCH_SIZE,
is_bert = self.config.MODEL.INSTRUCTION_ENCODER.is_bert,
)
diter_eval = torch.utils.data.DataLoader(
dataset_eval,
batch_size=self.config.DAGGER.BATCH_SIZE,
shuffle=False,
collate_fn=collate_fn,
pin_memory=True,
drop_last=True, # drop last batch if smaller
num_workers=1,
)
train_steps = 0
val_steps = 0
AuxLosses.activate()
print("starting training loop")
for epoch in tqdm.trange(self.config.DAGGER.EPOCHS):
train_steps = self.train_epoch(diter, dataset.length, dataset.batch_size, epoch, writer, train_steps)
val_steps = self.val_epoch(diter_eval, dataset_eval.length, dataset_eval.batch_size, epoch, writer, val_steps)
AuxLosses.deactivate()
@staticmethod
def _pause_envs(
envs_to_pause,
envs,
recurrent_hidden_states,
not_done_masks,
prev_actions,
batch,
):
# pausing self.envs with no new episode
if len(envs_to_pause) > 0:
state_index = list(range(envs.num_envs))
for idx in reversed(envs_to_pause):
state_index.pop(idx)
envs.pause_at(idx)
# indexing along the batch dimensions
if recurrent_hidden_states:
recurrent_hidden_states = recurrent_hidden_states[:, state_index]
# recurrent_hidden_states = recurrent_hidden_states
not_done_masks = not_done_masks[state_index]
prev_actions = prev_actions[state_index]
for k, v in batch.items():
batch[k] = v[state_index]
return (envs, recurrent_hidden_states, not_done_masks, prev_actions, batch)
def _euclidean_distance(self, position_a, position_b):
return np.linalg.norm(np.array(position_b) - np.array(position_a), ord=2)
def _eval_checkpoint(
self, checkpoint_path: str, writer: TensorboardWriter, checkpoint_index: int = 0
) -> None:
r"""Evaluates a single checkpoint. Assumes episode IDs are unique.
Args:
checkpoint_path: path of checkpoint
writer: tensorboard writer object for logging to tensorboard
checkpoint_index: index of cur checkpoint for logging
Returns:
None
"""
logger.info(f"checkpoint_path: {checkpoint_path}")
if self.config.EVAL.USE_CKPT_CONFIG:
config = self._setup_eval_config(
self.load_checkpoint(checkpoint_path, map_location="cpu")["config"]
)
else:
config = self.config.clone()
config.defrost()
config.TASK_CONFIG.DATASET.SPLIT = config.EVAL.SPLIT
config.TASK_CONFIG.TASK.NDTW.SPLIT = config.EVAL.SPLIT
config.TASK_CONFIG.TASK.SDTW.SPLIT = config.EVAL.SPLIT
config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = True
config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = -1
if len(config.VIDEO_OPTION) > 0:
config.defrost()
config.TASK_CONFIG.TASK.MEASUREMENTS.append("TOP_DOWN_MAP")
config.TASK_CONFIG.TASK.MEASUREMENTS.append("COLLISIONS")
config.freeze()
gt_path = config.TASK_CONFIG.TASK.NDTW.GT_PATH.format(split=config.TASK_CONFIG.DATASET.SPLIT)
with gzip.open(gt_path, "rt") as f:
self.gt_json = json.load(f)
# setup agent
self.envs = construct_env(config)
self.device = (
torch.device("cuda", config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
self._setup_actor_critic_agent(config.MODEL, True, checkpoint_path)
vc = habitat_sim.physics.VelocityControl()
vc.controlling_lin_vel = True
vc.lin_vel_is_local = True
vc.controlling_ang_vel = True
vc.ang_vel_is_local = True
observations = self.envs.reset()
observations = transform_obs(
observations, config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID, is_bert=self.config.MODEL.INSTRUCTION_ENCODER.is_bert
)
observations = batch_obs(observations, self.device)
high_recurrent_hidden_states = torch.zeros(
self.high_level.state_encoder.num_recurrent_layers,
self.config.NUM_PROCESSES,
self.config.MODEL.STATE_ENCODER.hidden_size,
device=self.device,
)
low_recurrent_hidden_states = torch.zeros(
self.low_level.state_encoder.num_recurrent_layers,
self.config.NUM_PROCESSES,
self.config.MODEL.STATE_ENCODER.hidden_size,
device=self.device,
)
self.low_level.to(self.device)
prev_actions = torch.zeros(
config.NUM_PROCESSES, 2, device=self.device, dtype=torch.long
)
not_done_masks = torch.zeros(config.NUM_PROCESSES, 2, device=self.device)
stats_episodes = {} # dict of dicts that stores stats per episode
if len(config.VIDEO_OPTION) > 0:
rgb_frames = []
os.makedirs(config.VIDEO_DIR, exist_ok=True)
if config.PLOT_ATTENTION:
attention_weights = [[] for _ in range(config.NUM_PROCESSES)]
save_actions = [[] for _ in range(config.NUM_PROCESSES)]
self.high_level.eval()
self.low_level.eval()
k=0
ep_count = 0
min_2nd_dim = 1000
steps=0
locations=[]
detached_state_low = None
while (
len(stats_episodes) < config.EVAL.EPISODE_COUNT
):
current_episode = self.envs.habitat_env.current_episode
is_done = False
locations.append(self.envs.habitat_env._sim.get_agent_state().position.tolist())
with torch.no_grad():
batch = (observations, high_recurrent_hidden_states, prev_actions, not_done_masks)
output, high_recurrent_hidden_states = self.high_level(batch)
pred = torch.argmax(output, dim=1)
batch = (observations, low_recurrent_hidden_states,prev_actions, not_done_masks,pred)
output, stop_out, low_recurrent_hidden_states = self.low_level(batch)
prev_actions = output
not_done_masks = torch.ones(config.NUM_PROCESSES, 2, device=self.device)
lin_vel = output[:, 0]
vc.linear_velocity = mn.Vector3(0, 0, output[:,0].cpu().numpy())
max_turn_speed = 1.0
vc.angular_velocity = mn.Vector3(0, np.clip(output[:,1].cpu().numpy(), -max_turn_speed, max_turn_speed), 0)
observations, _, done, info = self.envs.step(vc)
episode_over, success = done
stop_pred = torch.round(torch.sigmoid(stop_out))
episode_success = success and (lin_vel<0.25 or stop_pred ==1)
is_done = episode_over or episode_success
steps+=1
if len(config.VIDEO_OPTION) > 0:
frame = observations_to_image(observations, info)
frame = append_text_to_image(
frame, current_episode.instruction.instruction_text
)
rgb_frames.append(frame)
if is_done or steps==self.config.TASK_CONFIG.ENVIRONMENT.MAX_EPISODE_STEPS:
# calculate nDTW here
detached_state_low = None
gt_locations = self.gt_json[str(current_episode.episode_id)]["locations"]
dtw_distance = fastdtw(locations, gt_locations, dist=self._euclidean_distance)[0]
nDTW = np.exp(-dtw_distance / (len(gt_locations) * config.TASK_CONFIG.TASK.NDTW.SUCCESS_DISTANCE))
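# i.e. nDTW = exp(-DTW(executed_path, gt_path) / (|gt_path| * d_th)), with
# d_th = TASK.NDTW.SUCCESS_DISTANCE; values near 1 mean the executed
# trajectory closely follows the ground-truth path.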
locations=[]
is_done = False
ep_count+=1
steps=0
stats_episodes[current_episode.episode_id] = info
stats_episodes[current_episode.episode_id]['ndtw'] = nDTW
if episode_success:
stats_episodes[current_episode.episode_id]['actual_success'] = 1.0
else:
stats_episodes[current_episode.episode_id]['actual_success'] = 0.0
print("Current episode ID:", current_episode.episode_id)
print("Episode Completed:", ep_count)
observations = self.envs.reset()
prev_actions = torch.zeros(
config.NUM_PROCESSES, 2, device=self.device, dtype=torch.long
)
not_done_masks = torch.zeros(config.NUM_PROCESSES, 2, device=self.device)
high_recurrent_hidden_states = torch.zeros(
self.high_level.state_encoder.num_recurrent_layers,
self.config.NUM_PROCESSES,
self.config.MODEL.STATE_ENCODER.hidden_size,
device=self.device,
)
low_recurrent_hidden_states = torch.zeros(
self.low_level.state_encoder.num_recurrent_layers,
self.config.NUM_PROCESSES,
self.config.MODEL.STATE_ENCODER.hidden_size,
device=self.device,
)
metrics={"SPL":round(
stats_episodes[current_episode.episode_id]["spl"], 6
) }
if len(config.VIDEO_OPTION) > 0:
time_step=30
generate_video(
video_option=config.VIDEO_OPTION,
video_dir=config.VIDEO_DIR,
images=rgb_frames,
episode_id=current_episode.episode_id,
checkpoint_idx=checkpoint_index,
metrics=metrics,
tb_writer=writer,
fps = int (1.0/time_step),
)
del stats_episodes[current_episode.episode_id]["top_down_map"]
del stats_episodes[current_episode.episode_id]["collisions"]
rgb_frames =[]
if config.PLOT_ATTENTION:
for j in range(len(attention_weights[i])):
attention_weights[i][j] = attention_weights[i][j][:,:min_2nd_dim]
attention_weights[i]= torch.cat(attention_weights[i], dim=0).cpu().numpy()
attention_to_image(
image_dir = config.VIDEO_DIR,
attention = attention_weights[i],
episode_id=current_episodes[i].episode_id,
checkpoint_idx=checkpoint_index,
metrics=metrics,
actions = save_actions[i]
)
attention_weights[i] = []
save_actions[i] =[]
observations = transform_obs(
observations, config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID, is_bert=self.config.MODEL.INSTRUCTION_ENCODER.is_bert
)
observations = batch_obs(observations, self.device)
k+=1
self.envs.close()
aggregated_stats = {}
num_episodes = len(stats_episodes)
for stat_key in next(iter(stats_episodes.values())).keys():
aggregated_stats[stat_key] = (
sum([v[stat_key] for v in stats_episodes.values()]) / num_episodes
)
split = config.TASK_CONFIG.DATASET.SPLIT
os.makedirs(config.EVAL.VAL_LOG_DIR, exist_ok=True)
val_log_path = os.path.join(config.EVAL.VAL_LOG_DIR,f"stats_ckpt_{checkpoint_index}_{split}.json")
with open(val_log_path, "w") as f:
json.dump(aggregated_stats, f, indent=4)
logger.info(f"Episodes evaluated: {num_episodes}")
checkpoint_num = checkpoint_index + 1
for k, v in aggregated_stats.items():
logger.info(f"Average episode {k}: {v:.6f}")
writer.add_scalar(f"eval_{split}_{k}", v, checkpoint_num)
| [
"torch.cat",
"torch.stack",
"torch.cuda.device",
"torch.ones",
"torch.cuda.is_available",
"torch.load",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.CrossEntropyLoss",
"torch.sigmoid",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.masked_select",
"torch.zeros",
"torch.device",
"torch.utils.data.get_worker_info",
"torch.full_like",
"torch.cuda.empty_cache",
"torch.argmax",
"torch.nn.MSELoss",
"torch.no_grad",
"torch.from_numpy",
"torch.ones_like",
"torch.optim.lr_scheduler.CyclicLR"
] | 1.3.1 | GT-RIPL/robo-vln | 286870a7d1095fe2607b524572587a48854bc970 |
1.8 | from unittest.mock import MagicMock, patch
import numpy as np
import torch
class _TestNN(torch.nn.Module):
def __init__(self, input_size, output_size):
super().__init__()
self.decoder = torch.nn.Linear(input_size, output_size)
self.forward_called = False
def forward(self, nn_input, hidden):
self.forward_called = True
output = self.decoder(nn_input)
return output, hidden
def _print_success_message():
print("Tests Passed")
class AssertTest:
def __init__(self, params):
self.assert_param_message = "\n".join(
[str(k) + ": " + str(v) + "" for k, v in params.items()]
)
def test(self, assert_condition, assert_message):
assert assert_condition, (
assert_message
+ "\n\nUnit Test Function Parameters\n"
+ self.assert_param_message
)
def test_create_lookup_tables(create_lookup_tables):
test_text = """
Moe_Szyslak Moe's Tavern Where the elite meet to drink
Bart_Simpson Eh yeah hello is Mike there Last name Rotch
Moe_Szyslak Hold on I'll check Mike Rotch Mike Rotch Hey has anybody seen Mike Rotch lately
Moe_Szyslak Listen you little puke One of these days I'm gonna catch you and I'm gonna carve my name on your back with an ice pick
Moe_Szyslak Whats the matter Homer You're not your normal effervescent self
Homer_Simpson I got my problems Moe Give me another one
Moe_Szyslak Homer hey you should not drink to forget your problems
Barney_Gumble Yeah you should only drink to enhance your social skills"""
test_text = test_text.lower()
test_text = test_text.split()
vocab_to_int, int_to_vocab = create_lookup_tables(test_text)
# Check types
assert isinstance(vocab_to_int, dict), "vocab_to_int is not a dictionary."
assert isinstance(int_to_vocab, dict), "int_to_vocab is not a dictionary."
# Compare lengths of dicts
assert len(vocab_to_int) == len(int_to_vocab), (
"Length of vocab_to_int and int_to_vocab don't match. "
"vocab_to_int is length {}. int_to_vocab is length {}".format(
len(vocab_to_int), len(int_to_vocab)
)
)
# Make sure the dicts have the same words
vocab_to_int_word_set = set(vocab_to_int.keys())
int_to_vocab_word_set = set(int_to_vocab.values())
assert not (vocab_to_int_word_set - int_to_vocab_word_set), (
"vocab_to_int and int_to_vocab don't have the same words."
"{} found in vocab_to_int, but not in int_to_vocab".format(
vocab_to_int_word_set - int_to_vocab_word_set
)
)
assert not (int_to_vocab_word_set - vocab_to_int_word_set), (
"vocab_to_int and int_to_vocab don't have the same words."
"{} found in int_to_vocab, but not in vocab_to_int".format(
int_to_vocab_word_set - vocab_to_int_word_set
)
)
# Make sure the dicts have the same word ids
vocab_to_int_word_id_set = set(vocab_to_int.values())
int_to_vocab_word_id_set = set(int_to_vocab.keys())
assert not (vocab_to_int_word_id_set - int_to_vocab_word_id_set), (
"vocab_to_int and int_to_vocab don't contain the same word ids."
"{} found in vocab_to_int, but not in int_to_vocab".format(
vocab_to_int_word_id_set - int_to_vocab_word_id_set
)
)
assert not (int_to_vocab_word_id_set - vocab_to_int_word_id_set), (
"vocab_to_int and int_to_vocab don't contain the same word ids."
"{} found in int_to_vocab, but not in vocab_to_int".format(
int_to_vocab_word_id_set - vocab_to_int_word_id_set
)
)
# Make sure the dicts make the same lookup
mismatches = [
(word, id, id, int_to_vocab[id])
for word, id in vocab_to_int.items()
if int_to_vocab[id] != word
]
assert (
not mismatches
), "Found {} mismatch(es). First mismatch: vocab_to_int[{}] = {} and int_to_vocab[{}] = {}".format(
len(mismatches), *mismatches[0]
)
assert (
len(vocab_to_int) > len(set(test_text)) / 2
), "The length of vocab seems too small. Found a length of {}".format(
len(vocab_to_int)
)
_print_success_message()
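# For reference, a minimal sketch (not part of this test module) of a
# create_lookup_tables implementation that would satisfy the checks above;
# sorting by frequency is an assumption, any consistent ordering works:
#
#     from collections import Counter
#     def create_lookup_tables(text):
#         counts = Counter(text)
#         sorted_vocab = sorted(counts, key=counts.get, reverse=True)
#         int_to_vocab = {ii: word for ii, word in enumerate(sorted_vocab)}
#         vocab_to_int = {word: ii for ii, word in int_to_vocab.items()}
#         return vocab_to_int, int_to_vocab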
def test_tokenize(token_lookup):
symbols = {".", ",", '"', ";", "!", "?", "(", ")", "-", "\n"}
token_dict = token_lookup()
# Check type
assert isinstance(token_dict, dict), f"Returned type is {type(token_dict)}."
# Check symbols
missing_symbols = symbols - set(token_dict.keys())
unknown_symbols = set(token_dict.keys()) - symbols
assert not missing_symbols, f"Missing symbols: {missing_symbols}"
assert not unknown_symbols, f"Unknown symbols: {unknown_symbols}"
# Check values type
bad_value_type = [
type(val) for val in token_dict.values() if not isinstance(val, str)
]
assert not bad_value_type, f"Found token as {bad_value_type[0]} type."
# Check for spaces
key_has_spaces = [k for k in token_dict.keys() if " " in k]
val_has_spaces = [val for val in token_dict.values() if " " in val]
assert (
not key_has_spaces
), 'The key "{}" includes spaces. Remove spaces from keys and values'.format(
key_has_spaces[0]
)
assert (
not val_has_spaces
), 'The value "{}" includes spaces. Remove spaces from keys and values'.format(
val_has_spaces[0]
)
# Check for symbols in values
symbol_val = ()
for symbol in symbols:
for val in token_dict.values():
if symbol in val:
symbol_val = (symbol, val)
assert (
not symbol_val
), "Don't use a symbol that will be replaced in your tokens. Found the symbol {} in value {}".format(
*symbol_val
)
_print_success_message()
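# For reference, a minimal sketch (not part of this test module) of a
# token_lookup implementation that would satisfy the checks above; the exact
# token spellings are assumptions:
#
#     def token_lookup():
#         return {
#             ".": "||period||", ",": "||comma||", '"': "||quotation_mark||",
#             ";": "||semicolon||", "!": "||exclamation_mark||", "?": "||question_mark||",
#             "(": "||left_paren||", ")": "||right_paren||",
#             "-": "||dash||", "\n": "||return||",
#         }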
def test_rnn(RNN, train_on_gpu):
batch_size = 50
sequence_length = 3
vocab_size = 20
output_size = 20
embedding_dim = 15
hidden_dim = 10
n_layers = 2
# create test RNN
# params: (vocab_size, output_size, embedding_dim, hidden_dim, n_layers)
rnn = RNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers)
# create test input
a = np.random.randint(vocab_size, size=(batch_size, sequence_length))
# b = torch.LongTensor(a)
b = torch.from_numpy(a)
hidden = rnn.init_hidden(batch_size)
if train_on_gpu:
rnn.cuda()
b = b.cuda()
output, hidden_out = rnn(b, hidden)
assert_test = AssertTest(
{
"Input Size": vocab_size,
"Output Size": output_size,
"Hidden Dim": hidden_dim,
"N Layers": n_layers,
"Batch Size": batch_size,
"Sequence Length": sequence_length,
"Input": b,
}
)
# initialization
correct_hidden_size = (n_layers, batch_size, hidden_dim)
if type(hidden) == tuple:
# LSTM
assert_condition = hidden[0].size() == correct_hidden_size
else:
# GRU
assert_condition = hidden.size() == correct_hidden_size
assert_message = "Wrong hidden state size. Expected type {}. Got type {}".format(
correct_hidden_size, hidden[0].size()
)
assert_test.test(assert_condition, assert_message)
# output of rnn
correct_hidden_size = (n_layers, batch_size, hidden_dim)
if type(hidden) == tuple:
# LSTM
assert_condition = hidden_out[0].size() == correct_hidden_size
else:
# GRU
assert_condition = hidden_out.size() == correct_hidden_size
assert_message = "Wrong hidden state size. Expected type {}. Got type {}".format(
correct_hidden_size, hidden_out[0].size()
)
assert_test.test(assert_condition, assert_message)
correct_output_size = (batch_size, output_size)
assert_condition = output.size() == correct_output_size
assert_message = "Wrong output size. Expected type {}. Got type {}".format(
correct_output_size, output.size()
)
assert_test.test(assert_condition, assert_message)
_print_success_message()
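# Contract exercised above (an assumption about the RNN class under test):
# rnn.init_hidden(batch_size) returns hidden state(s) of shape
# (n_layers, batch_size, hidden_dim) -- a tuple of two such tensors for an
# LSTM -- and rnn(input, hidden) returns (output of shape
# (batch_size, output_size), new hidden state).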
def test_forward_back_prop(RNN, forward_back_prop, train_on_gpu):
batch_size = 200
input_size = 20
output_size = 10
sequence_length = 3
embedding_dim = 15
hidden_dim = 10
n_layers = 2
learning_rate = 0.01
# create test RNN
rnn = RNN(input_size, output_size, embedding_dim, hidden_dim, n_layers)
mock_decoder = MagicMock(wraps=_TestNN(input_size, output_size))
if train_on_gpu:
mock_decoder.cuda()
mock_decoder_optimizer = MagicMock(
wraps=torch.optim.Adam(mock_decoder.parameters(), lr=learning_rate)
)
mock_criterion = MagicMock(wraps=torch.nn.CrossEntropyLoss())
with patch.object(
torch.autograd, "backward", wraps=torch.autograd.backward
) as mock_autograd_backward:
inp = torch.FloatTensor(np.random.rand(batch_size, input_size))
target = torch.LongTensor(np.random.randint(output_size, size=batch_size))
hidden = rnn.init_hidden(batch_size)
loss, hidden_out = forward_back_prop(
mock_decoder, mock_decoder_optimizer, mock_criterion, inp, target, hidden
)
if type(hidden_out) == tuple:
# LSTM
assert (
hidden_out[0][0] == hidden[0][0]
).sum() == batch_size * hidden_dim, (
"Returned hidden state is the incorrect size."
)
else:
# GRU
assert (
hidden_out[0] == hidden[0]
).sum() == batch_size * hidden_dim, (
"Returned hidden state is the incorrect size."
)
assert (
mock_decoder.zero_grad.called or mock_decoder_optimizer.zero_grad.called
), "Didn't set the gradients to 0."
assert mock_decoder.forward_called, "Forward propagation not called."
assert mock_autograd_backward.called, "Backward propagation not called"
assert mock_decoder_optimizer.step.called, "Optimization step not performed"
assert type(loss) == float, "Wrong return type. Expected {}, got {}".format(
float, type(loss)
)
_print_success_message()
| [
"torch.nn.Linear",
"torch.from_numpy",
"torch.nn.CrossEntropyLoss"
] | 1.8.1 | TeoZosa/deep-learning-v2-pytorch | 8e73c26f2ebf49769b798e9ff26bd90d7de69f7d |
0.3 | # pylint: disable=no-self-use,invalid-name,no-value-for-parameter
import os
from nltk import Tree
import torch
from torch.autograd import Variable
from allennlp.common.testing.model_test_case import ModelTestCase
from allennlp.models.constituency_parser import SpanInformation
class SpanConstituencyParserTest(ModelTestCase):
def setUp(self):
os.system("cd ./scripts/EVALB/ && make && cd ../../")
super(SpanConstituencyParserTest, self).setUp()
self.set_up_model("tests/fixtures/constituency_parser/constituency_parser.json",
"tests/fixtures/data/example_ptb.trees")
def tearDown(self):
os.system("rm scripts/EVALB/evalb")
super().tearDown()
def test_span_parser_can_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent()
def test_forward_can_handle_a_single_word_as_input(self):
# A very annoying edge case: the PTB has several single-word sentences.
# When running with a batch size of 1, we have to be very careful
# about how we .squeeze/.unsqueeze things to make sure it still runs.
text = {"tokens": Variable(torch.LongTensor([[1]]).long())}
pos_tags = Variable(torch.LongTensor([[1]]).long())
spans = Variable(torch.LongTensor([[[0, 0]]]))
label = Variable(torch.LongTensor([[1]]))
self.model(text, spans, [{"tokens": ["hello"]}], pos_tags, label)
def test_decode_runs(self):
self.model.eval()
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
decode_output_dict = self.model.decode(output_dict)
assert set(decode_output_dict.keys()) == {'spans', 'class_probabilities', 'trees',
'tokens', 'pos_tags', 'num_spans', 'loss'}
metrics = self.model.get_metrics(reset=True)
metric_keys = set(metrics.keys())
assert "evalb_precision" in metric_keys
assert "evalb_recall" in metric_keys
assert "evalb_f1_measure" in metric_keys
def test_resolve_overlap_conflicts_greedily(self):
spans = [SpanInformation(start=1, end=5, no_label_prob=0.7,
label_prob=0.2, label_index=2),
SpanInformation(start=2, end=7, no_label_prob=0.5,
label_prob=0.3, label_index=4)]
resolved_spans = self.model.resolve_overlap_conflicts_greedily(spans)
assert resolved_spans == [SpanInformation(start=2, end=7, no_label_prob=0.5,
label_prob=0.3, label_index=4)]
def test_construct_tree_from_spans(self):
# (S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))
tree_spans = [((0, 1), 'D'), ((1, 2), 'N'), ((0, 2), 'NP'),
((2, 3), 'V'), ((3, 4), 'D'), ((4, 5), 'N'),
((3, 5), 'NP'), ((2, 5), 'VP'), ((0, 5), 'S')]
sentence = ["the", "dog", "chased", "the", "cat"]
tree = self.model.construct_tree_from_spans({x:y for x, y in tree_spans}, sentence)
correct_tree = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
assert tree == correct_tree
def test_construct_tree_from_spans_handles_nested_labels(self):
# The tree construction should split the "S-NP" into (S (NP ...)).
tree_spans = [((0, 1), 'D'), ((1, 2), 'N'), ((0, 2), 'S-NP')]
sentence = ["the", "dog"]
tree = self.model.construct_tree_from_spans({x:y for x, y in tree_spans}, sentence)
correct_tree = Tree.fromstring("(S (NP (D the) (N dog)))")
assert tree == correct_tree
def test_tree_construction_with_too_few_spans_creates_trees_with_depth_one_word_nodes(self):
# We only have a partial tree here: (S (NP (D the) (N dog)). Decoding should
# recover this from the spans, whilst attaching all other words to the root node with
# XX POS tag labels, as the right hand side splits will not occur in tree_spans.
tree_spans = [((0, 1), 'D'), ((1, 2), 'N'), ((0, 2), 'NP'), ((0, 5), 'S')]
sentence = ["the", "dog", "chased", "the", "cat"]
tree = self.model.construct_tree_from_spans({x:y for x, y in tree_spans}, sentence)
correct_tree = Tree.fromstring("(S (NP (D the) (N dog)) (XX chased) (XX the) (XX cat))")
assert tree == correct_tree
| [
"torch.LongTensor"
] | 0.3.1 | unendin/allennlp | 0dcbaea6dbc6cc43e24a3564d6d37f8a1421484c |
0.3 | # pylint: disable=invalid-name,no-self-use
import pytest
import numpy
import torch
import torch.nn.init
from torch.nn.modules.rnn import LSTM
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.augmented_lstm import AugmentedLstm
from allennlp.nn import InitializerApplicator
from allennlp.nn.util import sort_batch_by_length
class TestAugmentedLSTM(AllenNlpTestCase):
def setUp(self):
super(TestAugmentedLSTM, self).setUp()
tensor = torch.rand([5, 7, 10])
tensor[0, 3:, :] = 0
tensor[1, 4:, :] = 0
tensor[2, 2:, :] = 0
tensor[3, 6:, :] = 0
tensor = torch.autograd.Variable(tensor)
sequence_lengths = torch.autograd.Variable(torch.LongTensor([3, 4, 2, 6, 7]))
self.random_tensor = tensor
self.sequence_lengths = sequence_lengths
def test_variable_length_sequences_return_correctly_padded_outputs(self):
sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(self.random_tensor, self.sequence_lengths)
tensor = pack_padded_sequence(sorted_tensor, sorted_sequence.data.tolist(), batch_first=True)
lstm = AugmentedLstm(10, 11)
output, _ = lstm(tensor)
output_sequence, _ = pad_packed_sequence(output, batch_first=True)
numpy.testing.assert_array_equal(output_sequence.data[1, 6:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[2, 4:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[3, 3:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[4, 2:, :].numpy(), 0.0)
def test_variable_length_sequences_run_backward_return_correctly_padded_outputs(self):
sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(self.random_tensor, self.sequence_lengths)
tensor = pack_padded_sequence(sorted_tensor, sorted_sequence.data.tolist(), batch_first=True)
lstm = AugmentedLstm(10, 11, go_forward=False)
output, _ = lstm(tensor)
output_sequence, _ = pad_packed_sequence(output, batch_first=True)
numpy.testing.assert_array_equal(output_sequence.data[1, 6:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[2, 4:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[3, 3:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[4, 2:, :].numpy(), 0.0)
def test_augmented_lstm_computes_same_function_as_pytorch_lstm(self):
augmented_lstm = AugmentedLstm(10, 11)
pytorch_lstm = LSTM(10, 11, num_layers=1, batch_first=True)
# Initialize all weights to be == 1.
initializer = InitializerApplicator([(".*", lambda tensor: torch.nn.init.constant(tensor, 1.))])
initializer(augmented_lstm)
initializer(pytorch_lstm)
initial_state = torch.autograd.Variable(torch.zeros([1, 5, 11]))
initial_memory = torch.autograd.Variable(torch.zeros([1, 5, 11]))
# Use bigger numbers to avoid floating point instability.
sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(self.random_tensor * 5., self.sequence_lengths)
lstm_input = pack_padded_sequence(sorted_tensor, sorted_sequence.data.tolist(), batch_first=True)
augmented_output, augmented_state = augmented_lstm(lstm_input, (initial_state, initial_memory))
pytorch_output, pytorch_state = pytorch_lstm(lstm_input, (initial_state, initial_memory))
pytorch_output_sequence, _ = pad_packed_sequence(pytorch_output, batch_first=True)
augmented_output_sequence, _ = pad_packed_sequence(augmented_output, batch_first=True)
numpy.testing.assert_array_almost_equal(pytorch_output_sequence.data.numpy(),
augmented_output_sequence.data.numpy(), decimal=4)
numpy.testing.assert_array_almost_equal(pytorch_state[0].data.numpy(),
augmented_state[0].data.numpy(), decimal=4)
numpy.testing.assert_array_almost_equal(pytorch_state[1].data.numpy(),
augmented_state[1].data.numpy(), decimal=4)
def test_augmented_lstm_works_with_highway_connections(self):
augmented_lstm = AugmentedLstm(10, 11, use_highway=True)
sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(self.random_tensor, self.sequence_lengths)
lstm_input = pack_padded_sequence(sorted_tensor, sorted_sequence.data.tolist(), batch_first=True)
augmented_lstm(lstm_input)
def test_augmented_lstm_throws_error_on_non_packed_sequence_input(self):
lstm = AugmentedLstm(3, 5)
tensor = torch.rand([5, 7, 9])
with pytest.raises(ConfigurationError):
lstm(tensor)
def test_augmented_lstm_is_initialized_with_correct_biases(self):
lstm = AugmentedLstm(2, 3)
true_state_bias = numpy.array([0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
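# Layout assumption: the state linearity emits 5 blocks of size hidden_size
# when use_highway=True (4 otherwise), and the block initialized to 1 is
# presumably the forget gate -- the usual trick to discourage forgetting
# early in training.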
numpy.testing.assert_array_equal(lstm.state_linearity.bias.data.numpy(), true_state_bias)
# Non-highway case.
lstm = AugmentedLstm(2, 3, use_highway=False)
true_state_bias = numpy.array([0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0])
numpy.testing.assert_array_equal(lstm.state_linearity.bias.data.numpy(), true_state_bias)
| [
"torch.zeros",
"torch.rand",
"torch.nn.init.constant",
"torch.nn.modules.rnn.LSTM",
"torch.autograd.Variable",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.LongTensor"
] | 0.3.1 | unendin/allennlp | 0dcbaea6dbc6cc43e24a3564d6d37f8a1421484c |
1.8 | # Copyright (c) 2020 Horizon Robotics and ALF Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MdqCriticNetworks"""
import gin
import functools
import math
import numpy as np
import torch
import torch.nn.functional as f
import torch.nn as nn
import alf
import alf.layers as layers
import alf.nest as nest
from alf.networks import Network, EncodingNetwork, ParallelEncodingNetwork
from alf.initializers import variance_scaling_init
from alf.tensor_specs import TensorSpec, BoundedTensorSpec
from alf.utils import common, spec_utils, tensor_utils
import alf.utils.math_ops as math_ops
from alf.utils.action_quantizer import ActionQuantizer
@gin.configurable
class MdqCriticNetwork(Network):
"""Create an instance of MdqCriticNetwork for estimating action-value
of continuous actions and action sampling used in the MDQ algorithm."""
def __init__(self,
input_tensor_spec,
action_qt: ActionQuantizer = None,
num_critic_replicas=2,
obs_encoding_layer_params=None,
pre_encoding_layer_params=None,
mid_encoding_layer_params=None,
post_encoding_layer_params=None,
free_form_fc_layer_params=None,
activation=torch.relu_,
kernel_initializer=None,
debug_summaries=False,
name="MdqCriticNetwork"):
"""Creates an instance of `MdqCriticNetwork` for estimating action-value
of continuous actions and action sampling.
Currently there are two branches of networks:
- free-form branch: a plain MLP for Q-learning
- adv-form branch: an advantage form of network for action
generation. It is trained by a target from the free-form net.
The adv-form branch has the following structures for flexibility:
obs -> [obs_encoding_net] -> encoded_obs
encoded_obs, action ->
[pre_encoding_nets] ->
[mid_shared_encoding_nets] ->
[post_encoding_nets] -> outputs
where the pre_encoding_nets and post_encoding_nets do not share
parameters across action dimensions while mid_shared_encoding_nets
shares parameters across action dimensions.
If the encoding_layer_params for a sub-net is None, that sub-net is
effectively neglected.
Furthermore, to enable parallel computation across action dimensions in
the case of value computation, we have both parallel and individual
versions for the nets without parameter sharing. For example, for
post_encoding_nets, we also have post_encoding_parallel_net, which is
essentially the equivalent form of post_encoding_nets but supports
parallel forwarding. The parameters of the two versions are synced.
The partial actions (a[0:i]) are zero-padded for both parallel and
individual networks to enable parallel computation.
For conciseness purpose, the following notations will be used when
convenient:
- B: batch size
- d: dimensionality of feature
- n: number of network replica
- action_dim: the dimensionality of actions
- action_bin: number of discrete bins for each action dim
Args:
input_tensor_spec: A tuple of TensorSpecs (observation_spec, action_spec)
representing the inputs.
action_qt (ActionQuantizer): action quantization module
num_critic_replicas (int): number of critic networks
obs_encoding_layer_params (tuple[int]): a tuple of integers
representing hidden FC layer sizes for encoding observations.
pre_encoding_layer_params (tuple[int]): a tuple of integers
representing hidden FC layer sizes for encoding concatenated
[encoded_observation, actions]. Parameters are not shared across
action dimensions
mid_encoding_layer_params (tuple[int]): a tuple of integers
representing hidden FC layer sizes for further encoding the outputs
from pre_encoding_net. The parameters are shared across action
dimensions.
post_encoding_layer_params (tuple[int]): a tuple of integers
representing hidden FC layer sizes for further encoding the outputs
from mid_encoding_net. The parameters are not shared across action
dimensions.
free_form_fc_layer_params (tuple[int]): a tuple of integers
representing hidden FC layer sizes for Q-learning. We refer to it as
the free form to differentiate it from the mdq-form of network,
which is structured.
activation (nn.functional): activation used for hidden layers. The
last layer will not be activated.
kernel_initializer (Callable): initializer for all the layers but
the last layer. If none is provided a variance_scaling_initializer
with uniform distribution will be used.
name (str):
"""
super().__init__(input_tensor_spec, name=name)
observation_spec, action_spec = input_tensor_spec
flat_action_spec = nest.flatten(action_spec)
if len(flat_action_spec) > 1:
raise ValueError(
'Only a single action is supported by this network')
self._single_action_spec = flat_action_spec[0]
if action_qt is None:
action_qt = ActionQuantizer(action_spec, "uniform", 15)
self._action_qt = action_qt
self._action_bins = self._action_qt._action_bins
# the logpi of the uniform prior used for KL computation
self._log_pi_uniform_prior = -np.log(self._action_bins)
self._action_dim = action_spec.shape[0] # control vector dim
self._num_critic_replicas = num_critic_replicas
self._obs_encoding_net = ParallelEncodingNetwork(
observation_spec,
self._num_critic_replicas,
fc_layer_params=obs_encoding_layer_params,
activation=activation,
kernel_initializer=kernel_initializer)
last_activation = math_ops.identity
last_kernel_initializer = functools.partial(torch.nn.init.uniform_, \
a=-0.003, b=0.003)
in_size = self._action_dim
self._pre_encoding_nets = []
for i in range(self._action_dim):
# output_spec.shape: [n, d]
self._pre_encoding_nets.append(
ParallelEncodingNetwork(
TensorSpec((self._obs_encoding_net.output_spec.shape[-1] +
in_size, )),
self._num_critic_replicas,
fc_layer_params=pre_encoding_layer_params,
activation=activation,
kernel_initializer=kernel_initializer))
# parallel along both critic and action dims without sharing parameters
# for each action dimension.
# input: [B, action_dim*n, d]: need to stack over dim1
# output: [B, action_dim*n, d']: need to unstack over dim1 for
# splitting over networks
self._pre_encoding_parallel_net = ParallelEncodingNetwork(
TensorSpec(
(self._obs_encoding_net.output_spec.shape[-1] + in_size, )),
self._num_critic_replicas * self._action_dim,
fc_layer_params=pre_encoding_layer_params,
activation=activation,
kernel_initializer=kernel_initializer)
# parallel along both critic and action dims with sharing parameters
# for each action dimension.
# input: [action_dim*B, n, d]: need to stack over dim0
# output: [action_dim*B, n, d']: need to unstack over dim0 for
# splitting over networks
self._mid_shared_encoding_nets = ParallelEncodingNetwork(
TensorSpec(
(self._pre_encoding_parallel_net.output_spec.shape[-1], )),
self._num_critic_replicas,
fc_layer_params=mid_encoding_layer_params,
activation=activation,
kernel_initializer=kernel_initializer)
out_size = self._mid_shared_encoding_nets.output_spec.shape[-1]
post_enc_out_size = self._action_qt.action_bins
self._post_encoding_nets = []
for i in range(self._action_dim):
self._post_encoding_nets.append(
ParallelEncodingNetwork(
TensorSpec((out_size, )),
self._num_critic_replicas,
fc_layer_params=post_encoding_layer_params,
activation=activation,
kernel_initializer=kernel_initializer,
last_layer_size=post_enc_out_size,
last_activation=last_activation,
last_kernel_initializer=last_kernel_initializer))
# parallel along both critic and action dims without sharing parameters
# for each action dimension.
# input: [B, action_dim*n, d]: need to stack over dim1
# output: [B, action_dim*n, d']: need to unstack over dim1 for
# splitting over networks
self._post_encoding_parallel_net = ParallelEncodingNetwork(
TensorSpec((out_size, )),
self._num_critic_replicas * self._action_dim,
fc_layer_params=post_encoding_layer_params,
activation=activation,
kernel_initializer=kernel_initializer,
last_layer_size=post_enc_out_size,
last_activation=last_activation,
last_kernel_initializer=last_kernel_initializer)
assert free_form_fc_layer_params is not None
self._free_form_q_net = ParallelEncodingNetwork(
TensorSpec((observation_spec.shape[-1] + self._action_dim, )),
self._num_critic_replicas,
fc_layer_params=free_form_fc_layer_params,
activation=activation,
kernel_initializer=kernel_initializer,
last_layer_size=1,
last_activation=math_ops.identity,
last_kernel_initializer=last_kernel_initializer)
MdqCriticNetwork._parallel_to_individual_network_sync(
self._pre_encoding_parallel_net,
self._pre_encoding_nets,
step=self._num_critic_replicas)
MdqCriticNetwork._parallel_to_individual_network_sync(
self._post_encoding_parallel_net,
self._post_encoding_nets,
step=self._num_critic_replicas)
self._output_spec = TensorSpec(())
self._debug_summaries = debug_summaries
@torch.no_grad()
def get_action(self, inputs, alpha, greedy):
"""Sample action from the distribution induced by the mdq-net.
Args:
inputs: A tuple of Tensors consistent with `input_tensor_spec`
alpha: the temperature used for the advantage computation
greedy (bool): If True, do greedy sampling by taking the mode of
the distribution. If False, do direct sampling from the
distribution.
Returns:
actions (torch.Tensor): a tensor of the shape [B, n, action_dim]
log_pi_per_dim (torch.Tensor): a tensor of the shape
[B, n, action_dim] representing the log_pi for each dimension
of the sampled multi-dimensional action
"""
observations = inputs
# [B, n, d]
t_shape = (observations.shape[0], self._num_critic_replicas,
self._action_dim)
actions = torch.zeros(t_shape)
log_pi_per_dim = torch.zeros(t_shape)
# [B, n, d]
encoded_obs, _ = self._obs_encoding_net(observations)
if actions.ndim == 2:
actions = tensor_utils.tensor_extend_new_dim(
actions, dim=1, n=self._num_critic_replicas)
action_padded = torch.zeros(t_shape)
for i in range(self._action_dim):
action_padded[..., 0:i] = actions[..., 0:i]
joint = torch.cat((encoded_obs, action_padded.detach()), -1)
action_values_i, _ = self._net_forward_individual(joint, alpha, i)
trans_action_values_i = self._transform_action_value(
action_values_i, alpha)
sampled_indices, sampled_log_pi = self._sample_action_from_value(
trans_action_values_i / alpha, alpha, greedy)
# convert index to action
actions[..., i] = self._action_qt.ind_to_action(sampled_indices)
log_pi_per_dim[..., i] = sampled_log_pi
return actions, log_pi_per_dim
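# Note: sampling is autoregressive across action dimensions: dimension i is
# drawn from the categorical distribution induced by the advantage head,
# conditioned on the observation and the already-sampled dimensions a[0:i],
# which occupy the first i slots of a zero-padded action vector concatenated
# with the encoded observation.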
def forward(self, inputs, alpha, state=(), free_form=False):
"""Computes action-value given an observation.
Args:
inputs: A tuple of Tensors consistent with `input_tensor_spec`
alpha: the temperature used for the advantage computation
state: empty, kept for API consistency
free_form (bool): use the free-form branch for computation if True;
default value is False
Returns:
Q_values (torch.Tensor):
- if free_form is True, its shape is [B, n]
- if free_form is False, its shape is [B, n, action_dim]
state: empty
"""
if free_form:
Q_values, state = self._free_form_q_net(inputs)
Q_values = Q_values.squeeze(2)
return Q_values, state
observations, actions = inputs
# observations: [B, d]
# encoded_obs: [B, n, d']
# Note that when obs_encoding_net is a dummy network
# (i.e., layer_params is None), d' is the same as d.
encoded_obs, _ = self._obs_encoding_net(observations)
if actions.ndim == 2:
# [B, action_dim] -> [B, n, action_dim]
actions = tensor_utils.tensor_extend_new_dim(
actions, dim=1, n=self._num_critic_replicas)
# [B, n, action_dim]
t_shape = (observations.shape[0], self._num_critic_replicas,
self._action_dim)
# [action_dim, B, n, 1]
Q_values = torch.zeros(self._action_dim, observations.shape[0],
self._num_critic_replicas, 1)
joint = torch.empty(0)
action_padded = torch.zeros(t_shape)
# prepare parallel-forwarding inputs
inputs_per_dim = []
for i in range(self._action_dim):
action_padded[..., 0:i] = actions[..., 0:i]
# concat (obs, action) for each action dimension
inputs_per_dim.append(
torch.cat((encoded_obs, action_padded.detach()), dim=-1))
# concat per dim input batch to a joint batch along dim1
# [B, action_dim*n, d]
joint = torch.cat(inputs_per_dim, dim=1)
# forward the joint batch
# action_values_per_dim: [action_dim, B, n, action_bin]
action_values_per_dim, _ = self._net_forward_parallel(
joint, alpha, batch_size=observations.shape[0])
trans_action_values_per_dim = self._transform_action_value(
action_values_per_dim, alpha)
for i in range(self._action_dim):
action_ind = self._action_qt.action_to_ind(actions[..., i])
if i == 0:
action_value_i = self._batched_index_select(
action_values_per_dim[i], -1, action_ind.long())
Q_values[i] = action_value_i
# KL-divergence
Q_values[i] = Q_values[i] - alpha * self._log_pi_uniform_prior
else:
selected_trans_action_value_i = self._batched_index_select(
trans_action_values_per_dim[i], -1, action_ind.long())
Q_values[i] = Q_values[i - 1] + selected_trans_action_value_i
# KL-divergence
Q_values[i] = Q_values[i] - alpha * self._log_pi_uniform_prior
# [action_dim, B, n, 1] -> [B, n, action_dim]
Q_values = Q_values.squeeze(3).permute(1, 2, 0)
return Q_values, state
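# In the loop above, Q[0] = Q(s, a_0) + alpha * log(action_bins) and, for i > 0,
# Q[i] = Q[i-1] + alpha * log pi_i(a_i | s, a_<i) + alpha * log(action_bins):
# _transform_action_value returns alpha * log pi, and subtracting
# alpha * _log_pi_uniform_prior (= -log(action_bins)) adds the per-dimension
# uniform-prior KL correction.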
def _net_forward_individual(self, inputs, alpha, i, state=()):
"""Individiual forwarding for a specified action dims for value computation.
Args:
inputs (torch.Tensor): a tensor of the shape [B, n, d]
alpha: the temperature used for the advantage computation
i (int): the specified action dim to perform forwarding
Returns:
action_values_i (torch.Tensor): a tensor of the shape [B, n, action_bin]
state: empty
"""
inputs, _ = self._pre_encoding_nets[i](inputs)
action_values_i, state = self._mid_shared_encoding_nets(inputs)
action_values_i, state = self._post_encoding_nets[i](action_values_i)
return action_values_i, state
def _net_forward_parallel(self, inputs, alpha, batch_size, state=()):
"""Parallel forwarding across action dims for value computation.
Args:
inputs (torch.Tensor): a tensor of the shape [B, action_dim*n, d]
with the data for each action dimension concatenated along
dim1 for parallel computation
alpha: the temperature used for the advantage computation
batch_size: the size of the original batch without stacking
all action dimensions
Returns:
action_values (torch.Tensor): a tensor of the shape
[action_dim, B, n, action_bin]
state: empty
"""
# [B, action_dim*n, d]
action_values_pre, _ = self._pre_encoding_parallel_net(inputs)
# [B, action_dim*n, d] -> [action_dim*B, n, d]
action_values_pre = self._reshape_from_ensemble_to_batch(
action_values_pre, batch_size)
action_values_mid, state = self._mid_shared_encoding_nets(
action_values_pre)
# [action_dim*B, n, d] -> [B, action_dim*n, d]
action_values_mid = self._reshape_from_batch_to_ensemble(
action_values_mid, batch_size)
action_values_final, _ = self._post_encoding_parallel_net(
action_values_mid)
# [B, action_dim*n, d]-> [B, action_dim, n, d] -> [action_dim, B, n, d]
action_values = action_values_final.view(batch_size, self._action_dim,
self._num_critic_replicas,
-1).transpose(0, 1)
return action_values, state
def _reshape_from_batch_to_ensemble(self, joint_batch, batch_size):
"""Reshape the joint batch of the shape [action_dim*B, n, d] to
[B, action_dim*n, d], i.e., separate and move the action dimension
axis from the batch dimension (dim0) to the ensemble dimension (dim1)
Args:
joint_batch (torch.Tensor): a tensor of the shape [action_dim*B, n, d]
with the data for each action dimension concatenated along the
batch dimension (dim0)
batch_size: the size of the original batch without stacking
all action dimensions
Returns:
reshaped_batch (torch.Tensor): a tensor of the shape
[B, action_dim*n, d]
"""
assert len(joint_batch.shape) == 3 and joint_batch.shape[:-1] == \
(self._action_dim * batch_size, self._num_critic_replicas)
d = joint_batch.shape[-1]
# [action_dim*B, n, d] -> [action_dim, B, n, d]
reshaped_batch = joint_batch.view(self._action_dim, batch_size,
self._num_critic_replicas, d)
# [action_dim, B, n, d] -> [B, action_dim, n, d] -> [B, action_dim*n, d]
reshaped_batch = reshaped_batch.transpose(0, 1).reshape(
batch_size, -1, d)
return reshaped_batch
def _reshape_from_ensemble_to_batch(self, joint_batch, batch_size):
"""Reshape the joint batch of the shape [B, action_dim*n, d] to
[action_dim*B, n, d], i.e., separate and move the action dimension
axis from the ensemble dimension (dim1) to the batch dimension (dim0)
Args:
joint_batch (torch.Tensor): a tensor of the shape [B, action_dim*n, d]
with the data for each action dimension concatenated along the
ensemble dimension (dim1)
batch_size: the size of the original batch without stacking
all action dimensions
Returns:
reshaped_batch (torch.Tensor): a tensor of the shape
[action_dim*B, n, d]
"""
assert len(joint_batch.shape) == 3 and joint_batch.shape[:-1] == \
(batch_size, self._action_dim * self._num_critic_replicas)
d = joint_batch.shape[-1]
# [B, action_dim*n, d] -> [B, action_dim, n, d]
reshaped_batch = joint_batch.view(batch_size, self._action_dim,
self._num_critic_replicas, d)
# [B, action_dim, n, d] -> [action_dim, B, n, d] -> [action_dim*B, n, d]
reshaped_batch = reshaped_batch.transpose(0, 1).reshape(
-1, self._num_critic_replicas, d)
return reshaped_batch
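# Shape walk-through with illustrative numbers: batch_size=32, action_dim=4,
# n=2 critics, d=256 gives [32, 8, 256] -> view -> [32, 4, 2, 256]
# -> transpose(0, 1) -> [4, 32, 2, 256] -> reshape -> [128, 2, 256],
# the inverse of _reshape_from_batch_to_ensemble.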
def _transform_action_value(self, action_values, alpha):
"""Transform raw action values to valid alpha * log_pi
Args:
action_values (torch.Tensor): raw action values computed from a
network, with the last dim as the distribution dimension
alpha: the temperature used for the transformation
Returns:
transformed_value (torch.Tensor): a tensor whose values equal
alpha * log_pi computed from the input action_values
"""
v_value = alpha * torch.logsumexp(
action_values / alpha, dim=-1, keepdim=True)
transformed_value = action_values - v_value
return transformed_value
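# Equivalently, transformed_value = alpha * log_softmax(action_values / alpha, dim=-1),
# i.e. the raw values are normalized into alpha * log pi of a Boltzmann
# distribution with temperature alpha.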
def _sample_action_from_value(self, logits, alpha, greedy=False):
"""Sample discrete action from given logits
Args:
logits (torch.Tensor): log pi of the discrete distribution with
the last dim as the distribution dimension
alpha: the temperature used for the transformation
greedy (bool): if True, do greedy sampling by taking the mode
of the distribution; otherwise, sample according
to the probability of the distribution
Returns:
sampled_ind (torch.Tensor): the indices of the sampled action
sampled_log_pi (torch.Tensor): the log prob of the sampled action
"""
if greedy:
sampled_log_pi, sampled_ind = torch.max(logits, dim=-1)
else:
batch_size = logits.shape[0]
# logits [B, n, d] -> [B*n, d]
batched_logits = logits.reshape(-1, self._action_bins)
dist = torch.distributions.categorical.Categorical(
logits=batched_logits)
# [1, B*n] -> [B, n]
sampled_ind = dist.sample((1, ))
sampled_log_pi = dist.log_prob(sampled_ind)
sampled_ind = sampled_ind.view(batch_size, -1)
sampled_log_pi = sampled_log_pi.view(batch_size, -1)
return sampled_ind, sampled_log_pi
def _batched_index_select(self, t, dim, inds):
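        # Gather, along `dim`, the entry of `t` selected by `inds` at each position;
        # `inds` is unsqueezed first, so the result keeps a trailing singleton dimension.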
expanded_ind = inds.unsqueeze(-1)
out = t.gather(dim, expanded_ind)
return out
@staticmethod
def _parallel_to_individual_network_sync(p_net, np_net, step):
"""Sync parameters from parallel version to indivisual version
Args:
p_net (ParallelNetwork): the parallel version of network
np_net (list[Network|ParallelNetwork]): a list of the individual
networks. Note that each individual network can also be an
instance of ParallelNetwork.
            step (int): the number of replicas contained in each individual network.
                For example:
- if the individual net is a plain network, step=1
- if the individual net is a parallel network, step = replica
of the individual net
"""
split_num = len(np_net)
for i in range(split_num):
for ws, wt in zip(p_net.parameters(), np_net[i].parameters()):
wt.data.copy_(ws[i * step:(i + 1) * step])
def get_uniform_prior_logpi(self):
return self._log_pi_uniform_prior
def sync_net(self):
MdqCriticNetwork._parallel_to_individual_network_sync(
self._pre_encoding_parallel_net, self._pre_encoding_nets,
self._num_critic_replicas)
MdqCriticNetwork._parallel_to_individual_network_sync(
self._post_encoding_parallel_net, self._post_encoding_nets,
self._num_critic_replicas)
| [
"torch.zeros",
"torch.cat",
"torch.max",
"torch.no_grad",
"torch.logsumexp",
"torch.distributions.categorical.Categorical",
"torch.empty"
] | 1.8.1 | Haichao-Zhang/alf_randperm_reproduce | d5223b7534ab20ca725aac940ad274ef806d1d3e |
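The `_transform_action_value` step in the record above is the heart of this critic: dividing by the temperature and subtracting `alpha * logsumexp` turns raw per-bin values into quantities that behave like `alpha * log_pi` of a softmax policy, so `exp(transformed / alpha)` sums to one over the last dimension. A minimal standalone check of that identity (the shapes and values here are illustrative, not taken from the repository):

import torch

alpha = 0.1
action_values = torch.randn(4, 2, 11)      # [B, n, d] raw per-bin values
v_value = alpha * torch.logsumexp(action_values / alpha, dim=-1, keepdim=True)
transformed = action_values - v_value       # plays the role of alpha * log_pi
probs = torch.exp(transformed / alpha)      # softmax over the last dimension
assert torch.allclose(probs.sum(dim=-1), torch.ones(4, 2), atol=1e-5)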
1.2 | # Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import horovod.spark.common._namedtuple_fix
import copy
import io
import numbers
import time
from pyspark import keyword_only
from pyspark.ml.param.shared import Param, Params
from pyspark.ml.util import MLWritable, MLReadable
from pyspark.sql import SparkSession
from horovod.runner.common.util import codec
from horovod.spark.common import util
from horovod.spark.common.estimator import HorovodEstimator, HorovodModel
from horovod.spark.common.params import EstimatorParams
from horovod.spark.common.serialization import \
HorovodParamsWriter, HorovodParamsReader
from horovod.spark.torch import remote
from horovod.spark.torch.util import deserialize_fn, serialize_fn, \
save_into_bio
import numpy as np
import torch
import torch.utils.data
def _torch_param_serialize(param_name, param_val):
if param_val is None:
return None
if param_name in [EstimatorParams.backend.name, EstimatorParams.store.name]:
# We do not serialize backend and store. These params have to be regenerated for each
# run of the pipeline
return None
elif param_name == EstimatorParams.model.name:
serialize = serialize_fn()
return serialize(param_val)
return codec.dumps_base64(param_val)
class TorchEstimatorParamsWriter(HorovodParamsWriter):
def saveImpl(self, path):
# Write the parameters
HorovodParamsWriter.saveMetadata(self.instance, path, self.sc,
param_serializer_fn=_torch_param_serialize)
class TorchEstimatorParamsWritable(MLWritable):
def write(self):
return TorchEstimatorParamsWriter(self)
class TorchEstimatorParamsReader(HorovodParamsReader):
def _deserialize_dict(self, dict_values):
deserialized_dict = dict()
for key, val in dict_values.items():
if val is None:
deserialized_dict[key] = None
elif key == EstimatorParams.model.name:
deserialize = deserialize_fn()
deserialized_dict[key] = deserialize(val)
else:
deserialized_dict[key] = codec.loads_base64(val)
return deserialized_dict
class TorchEstimatorParamsReadable(MLReadable):
@classmethod
def read(cls):
"""Returns a DefaultParamsReader instance for this class."""
return TorchEstimatorParamsReader(cls)
class TorchEstimator(HorovodEstimator, TorchEstimatorParamsWritable,
TorchEstimatorParamsReadable):
"""Spark Estimator for fitting PyTorch models to a DataFrame.
Args:
num_proc: Number of Horovod processes. Defaults to `spark.default.parallelism`.
model: PyTorch model to train.
backend: Optional Backend object for running distributed training function. Defaults to SparkBackend with
`num_proc` worker processes. Cannot be specified if `num_proc` is also provided.
store: Store object that abstracts reading and writing of intermediate data and run results.
optimizer: PyTorch optimizer to be converted into a `hvd.DistributedOptimizer` for training.
loss: PyTorch loss or list of losses.
loss_constructors: Optional functions that generate losses.
metrics: Optional metrics to record.
loss_weights: Optional list of float weight values to assign each loss.
sample_weight_col: Optional column indicating the weight of each sample.
gradient_compression: Gradient compression used by `hvd.DistributedOptimizer`.
feature_cols: Column names used as feature inputs to the model. Must be a list with each feature
mapping to a sequential argument in the model's forward() function.
input_shapes: List of shapes for each input tensor to the model.
validation: Optional validation column name (string) where every row in the column is either 1/True or 0/False,
or validation split (float) giving percent of data to be randomly selected for validation.
label_cols: Column names used as labels. Must be a list with one label for each output of the model.
batch_size: Number of rows from the DataFrame per batch.
val_batch_size: Number of rows from the DataFrame per batch for validation, if not set, will use batch_size.
epochs: Number of epochs to train.
verbose: Verbosity level [0, 2] (default: 1).
shuffle_buffer_size: Optional size of in-memory shuffle buffer in rows. Allocating a larger buffer size
increases randomness of shuffling at the cost of more host memory. Defaults to estimating
with an assumption of 4GB of memory per host.
partitions_per_process: Number of Parquet partitions to assign per worker process from `num_proc` (default: 10).
run_id: Optional unique ID for this run for organization in the Store. Will be automatically assigned if not
provided.
train_minibatch_fn: Optional custom function to execute within the training loop. Defaults to standard
gradient descent process.
train_steps_per_epoch: Number of steps to train each epoch. Useful for testing that model trains successfully.
Defaults to training the entire dataset each epoch.
validation_steps_per_epoch: Number of validation steps to perform each epoch.
transformation_fn: Optional function that takes a row as its parameter
and returns a modified row that is then fed into the
train or validation step. This transformation is
applied after batching. See Petastorm [TransformSpec](https://github.com/uber/petastorm/blob/master/petastorm/transform.py)
                       for more details. Note that this function constructs
another function which should perform the
transformation.
train_reader_num_workers: This parameter specifies the number of parallel processes that
                              read the training data from the data store and apply data
transformations to it. Increasing this number
will generally increase the reading rate but will also
increase the memory footprint. More processes are
particularly useful if the bandwidth to the data store is not
                              high enough, or users need to apply transformations such as
                              decompression or data augmentation to raw data.
val_reader_num_workers: Similar to the train_reader_num_workers.
"""
input_shapes = Param(Params._dummy(), 'input_shapes', 'input layer shapes')
loss_constructors = Param(Params._dummy(), 'loss_constructors',
'functions that construct the loss')
train_minibatch_fn = Param(Params._dummy(), 'train_minibatch_fn',
'functions that construct the minibatch train function for torch')
@keyword_only
def __init__(self,
num_proc=None,
model=None,
backend=None,
store=None,
optimizer=None,
loss=None,
loss_constructors=None,
metrics=None,
loss_weights=None,
sample_weight_col=None,
gradient_compression=None,
feature_cols=None,
input_shapes=None,
validation=None,
label_cols=None,
callbacks=None,
batch_size=None,
val_batch_size=None,
epochs=None,
verbose=1,
shuffle_buffer_size=None,
partitions_per_process=None,
run_id=None,
train_minibatch_fn=None,
train_steps_per_epoch=None,
validation_steps_per_epoch=None,
transformation_fn=None,
train_reader_num_workers=None,
val_reader_num_workers=None,
label_shapes=None):
super(TorchEstimator, self).__init__()
self._setDefault(loss_constructors=None,
input_shapes=None,
train_minibatch_fn=None,
transformation_fn=None)
kwargs = self._input_kwargs
if EstimatorParams.loss.name in kwargs and TorchEstimator.loss_constructors.name in kwargs:
raise ValueError("only one of loss_constructors and loss parameters can be specified.")
self.setParams(**kwargs)
def setTrainMinibatchFn(self, value):
return self._set(train_minibatch_fn=value)
def getTrainMinibatchFn(self):
return self.getOrDefault(self.train_minibatch_fn)
def setInputShapes(self, value):
return self._set(input_shapes=value)
def getInputShapes(self):
return self.getOrDefault(self.input_shapes)
def setLossConstructors(self, value):
return self._set(loss_constructors=value)
def getLossConstructors(self):
return self.getOrDefault(self.loss_constructors)
def _get_optimizer(self):
return self.getOrDefault(self.optimizer)
# Overwrites Model's getOptimizer method
def getOptimizer(self):
model = self.getModel()
if model:
optimizer = self._get_optimizer()
optimizer_cls = optimizer.__class__
optimizer_state = optimizer.state_dict()
            optimizer = optimizer_cls(model.parameters(), lr=1)
            optimizer.load_state_dict(optimizer_state)
            return optimizer
else:
return self._get_optimizer()
def _check_metadata_compatibility(self, metadata):
util.check_shape_compatibility(metadata,
self.getFeatureCols(),
self.getLabelCols(),
input_shapes=self.getInputShapes(),
label_shapes=self.getLabelShapes())
def _fit_on_prepared_data(self, backend, train_rows, val_rows, metadata, avg_row_size, dataset_idx=None):
self._check_params(metadata)
run_id = self.getRunId()
if run_id is None:
run_id = 'pytorch_' + str(int(time.time()))
last_checkpoint_state = None
if self._has_checkpoint(run_id):
last_checkpoint_state = self._load_checkpoint(run_id)
# Model parameters
model_pre_train = self.getModel()
model_state = model_pre_train.state_dict()
serialized_model = serialize_fn()(model_pre_train)
# Optimizer parameters
optimizer = self._get_optimizer()
optimizer_cls = optimizer.__class__
optimizer_state = optimizer.state_dict()
# Combine model and optimizer state
model_opt_state = {'model': model_state, 'optimizer': optimizer_state} \
if last_checkpoint_state is None else last_checkpoint_state
model_opt_state_serialized = save_into_bio(model_opt_state, torch.save)
trainer = remote.RemoteTrainer(self, metadata, last_checkpoint_state, run_id, dataset_idx)
handle = backend.run(trainer,
args=(serialized_model, optimizer_cls, model_opt_state_serialized,
train_rows, val_rows, avg_row_size),
env={})
return self._create_model(handle, run_id, metadata)
def _load_checkpoint(self, run_id):
store = self.getStore()
last_ckpt_path = store.get_checkpoint_path(run_id)
if self.getVerbose():
print('Resuming training from last checkpoint: {}'.format(last_ckpt_path))
ckpt_file = io.BytesIO(store.read(last_ckpt_path))
return torch.load(ckpt_file)
def _create_model(self, run_results, run_id, metadata):
history, serialized_checkpoint = run_results[0]
serialized_checkpoint.seek(0)
best_checkpoint = torch.load(serialized_checkpoint, map_location=torch.device('cpu'))
model = copy.deepcopy(self.getModel())
optimizer = copy.deepcopy(self.getOptimizer())
model.load_state_dict(best_checkpoint['model'])
optimizer.load_state_dict(best_checkpoint['optimizer'])
return self.get_model_class()(**self._get_model_kwargs(
model, history, optimizer, run_id, metadata))
def get_model_class(self):
return TorchModel
def _get_model_kwargs(self, model, history, optimizer, run_id, metadata):
return dict(history=history,
model=model,
optimizer=optimizer,
feature_columns=self.getFeatureCols(),
input_shapes=self.getInputShapes(),
label_columns=self.getLabelCols(),
run_id=run_id,
_metadata=metadata,
loss=self.getLoss(),
loss_constructors=self.getLossConstructors())
class TorchModel(HorovodModel, TorchEstimatorParamsWritable, TorchEstimatorParamsReadable):
"""Spark Transformer wrapping a PyTorch model, used for making predictions on a DataFrame.
Retrieve the underlying PyTorch model by calling `torch_model.getModel()`.
Args:
history: List of metrics, one entry per epoch during training.
model: Trained PyTorch model.
feature_columns: List of feature column names.
label_columns: List of label column names.
optimizer: PyTorch optimizer used during training, containing updated state.
run_id: ID of the run used to train the model.
loss: PyTorch loss(es).
loss_constructors: PyTorch loss constructors.
"""
optimizer = Param(Params._dummy(), 'optimizer', 'optimizer')
input_shapes = Param(Params._dummy(), 'input_shapes', 'input layer shapes')
loss = Param(Params._dummy(), 'loss', 'loss')
loss_constructors = Param(Params._dummy(), 'loss_constructors',
'functions that construct the loss')
@keyword_only
def __init__(self,
history=None,
model=None,
feature_columns=None,
input_shapes=None,
label_columns=None,
optimizer=None,
run_id=None,
_metadata=None,
loss=None,
loss_constructors=None):
super(TorchModel, self).__init__()
if label_columns:
self.setOutputCols([col + '__output' for col in label_columns])
self._setDefault(optimizer=None,
loss=None,
loss_constructors=None,
input_shapes=None)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def setLoss(self, value):
return self._set(loss=value)
def getLoss(self):
return self.getOrDefault(self.loss)
def setLossConstructors(self, value):
return self._set(loss_constructors=value)
def getLossConstructors(self):
return self.getOrDefault(self.loss_constructors)
def setInputShapes(self, value):
return self._set(input_shapes=value)
def getInputShapes(self):
return self.getOrDefault(self.input_shapes)
def setOptimizer(self, value):
return self._set(optimizer=value)
def _get_optimizer(self):
return self.getOrDefault(self.optimizer)
def getOptimizer(self):
model = self.getModel()
if model:
_optimizer = self._get_optimizer()
optimizer_cls = _optimizer.__class__
optimizer_state = _optimizer.state_dict()
            optimizer = optimizer_cls(model.parameters(), lr=1)
            optimizer.load_state_dict(optimizer_state)
            return optimizer
else:
return self._get_optimizer()
    # To run locally on OS X, you need to export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES
def _transform(self, df):
model_pre_predict = self.getModel()
model_pre_predict.eval()
deserialize = deserialize_fn()
serialize = serialize_fn()
serialized_model = serialize(model_pre_predict)
input_shapes = self.getInputShapes()
label_cols = self.getLabelColumns()
output_cols = self.getOutputCols()
feature_cols = self.getFeatureColumns()
metadata = self._get_metadata()
def predict(rows):
from pyspark import Row
from pyspark.ml.linalg import DenseVector, SparseVector
model = deserialize(serialized_model)
# Perform predictions.
for row in rows:
fields = row.asDict().copy()
# Note: if the col is SparseVector, torch.tensor(col) correctly converts it to a
# dense torch tensor.
data = [torch.tensor([row[col]]).reshape(shape) for
col, shape in zip(feature_cols, input_shapes)]
with torch.no_grad():
preds = model(*data)
if not isinstance(preds, list) and not isinstance(preds, tuple):
preds = [preds]
for label_col, output_col, pred in zip(label_cols, output_cols, preds):
meta = metadata[label_col]
col_type = meta['spark_data_type']
# dtype for dense and spark tensor is always np.float64
if col_type == DenseVector:
shape = np.prod(pred.shape)
flattened_pred = pred.reshape(shape, )
field = DenseVector(flattened_pred)
elif col_type == SparseVector:
shape = meta['shape']
flattened_pred = pred.reshape(shape, )
nonzero_indices = flattened_pred.nonzero()[0]
field = SparseVector(shape, nonzero_indices,
flattened_pred[nonzero_indices])
elif pred.shape.numel() == 1:
# If the column is scalar type, int, float, etc.
value = pred.item()
python_type = util.spark_scalar_to_python_type(col_type)
if issubclass(python_type, numbers.Integral):
value = round(value)
field = python_type(value)
else:
field = DenseVector(pred.reshape(-1))
fields[output_col] = field
yield Row(**fields)
spark0 = SparkSession._instantiatedSession
# Get a limited DF and make predictions and get the schema of the final DF
limited_pred_rdd = df.limit(100000).rdd.mapPartitions(predict)
limited_pred_df = spark0.createDataFrame(limited_pred_rdd, samplingRatio=1)
final_output_schema = limited_pred_df.schema
        # Spark has to infer whether a field is nullable or not from a limited number of samples.
# It does not always get it right. We copy the nullable boolean variable for the fields
# from the original dataframe to the final DF schema.
nullables = {field.name: field.nullable for field in df.schema.fields}
for field in final_output_schema.fields:
if field.name in nullables:
field.nullable = nullables[field.name]
pred_rdd = df.rdd.mapPartitions(predict)
# Use the schema from previous section to construct the final DF with prediction
return spark0.createDataFrame(pred_rdd, schema=final_output_schema)
| [
"torch.device",
"torch.no_grad",
"torch.tensor",
"torch.load"
] | 1.2.0 | aoyandong/horovod | e94d8ea0dff8c2b45698cfe4fabb2e6553d0b9a8 |
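For orientation, a hedged sketch of how the estimator in the record above is typically wired together. The model, optimizer, store path, DataFrames, and column names are placeholders; only keyword arguments that appear in the `__init__` signature above are used, and the import paths and `Store.create` factory are assumptions about the public horovod API rather than facts taken from this file:

import torch.nn as nn
import torch.optim as optim
from horovod.spark.common.store import Store    # assumed import path
from horovod.spark.torch import TorchEstimator  # assumed import path

model = nn.Sequential(nn.Linear(2, 8), nn.ReLU(), nn.Linear(8, 1))
optimizer = optim.Adam(model.parameters(), lr=1e-3)
store = Store.create('/tmp/horovod_work')        # placeholder working directory

est = TorchEstimator(
    num_proc=2,                                  # placeholder worker count
    store=store,
    model=model,
    optimizer=optimizer,
    loss=nn.MSELoss(),
    feature_cols=['features'],                   # placeholder Spark column names
    label_cols=['label'],
    input_shapes=[[-1, 2]],
    batch_size=32,
    epochs=5,
    verbose=1)

torch_model = est.fit(train_df)                  # train_df: an existing Spark DataFrame
predictions = torch_model.transform(test_df)     # adds a 'label__output' column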
1.3 | import torch
from torch.distributions import constraints
from torch.distributions.utils import lazy_property
from pyro.distributions.torch_distribution import TorchDistribution
from pyro.distributions.util import broadcast_shape
class MaskedConstraint(constraints.Constraint):
"""
Combines two constraints interleaved elementwise by a mask.
:param torch.Tensor mask: boolean mask tensor (of dtype ``torch.bool``)
:param torch.constraints.Constraint constraint0: constraint that holds
wherever ``mask == 0``
:param torch.constraints.Constraint constraint1: constraint that holds
wherever ``mask == 1``
"""
def __init__(self, mask, constraint0, constraint1):
self.mask = mask
self.constraint0 = constraint0
self.constraint1 = constraint1
def check(self, value):
result = self.constraint0.check(value)
mask = self.mask.expand(result.shape) if result.shape != self.mask.shape else self.mask
result[mask] = self.constraint1.check(value)[mask]
return result
class MaskedMixture(TorchDistribution):
"""
A masked deterministic mixture of two distributions.
This is useful when the mask is sampled from another distribution,
possibly correlated across the batch. Often the mask can be
marginalized out via enumeration.
Example::
change_point = pyro.sample("change_point",
dist.Categorical(torch.ones(len(data) + 1)),
infer={'enumerate': 'parallel'})
        mask = torch.arange(len(data), dtype=torch.long) >= change_point
with pyro.plate("data", len(data)):
pyro.sample("obs", MaskedMixture(mask, dist1, dist2), obs=data)
:param torch.Tensor mask: A byte tensor toggling between ``component0``
and ``component1``.
:param pyro.distributions.TorchDistribution component0: a distribution
for batch elements ``mask == 0``.
:param pyro.distributions.TorchDistribution component1: a distribution
for batch elements ``mask == 1``.
"""
arg_constraints = {} # nothing can be constrained
def __init__(self, mask, component0, component1, validate_args=None):
if not torch.is_tensor(mask) or mask.dtype != torch.bool:
raise ValueError('Expected mask to be a BoolTensor but got {}'.format(type(mask)))
if component0.event_shape != component1.event_shape:
raise ValueError('components event_shape disagree: {} vs {}'
.format(component0.event_shape, component1.event_shape))
batch_shape = broadcast_shape(mask.shape, component0.batch_shape, component1.batch_shape)
if mask.shape != batch_shape:
mask = mask.expand(batch_shape)
if component0.batch_shape != batch_shape:
component0 = component0.expand(batch_shape)
if component1.batch_shape != batch_shape:
component1 = component1.expand(batch_shape)
self.mask = mask
self.component0 = component0
self.component1 = component1
super(MaskedMixture, self).__init__(batch_shape, component0.event_shape, validate_args)
# We need to disable _validate_sample on each component since samples are only valid on the
# component from which they are drawn. Instead we perform validation using a MaskedConstraint.
self.component0._validate_args = False
self.component1._validate_args = False
@property
def has_rsample(self):
return self.component0.has_rsample and self.component1.has_rsample
@constraints.dependent_property
def support(self):
if self.component0.support is self.component1.support:
return self.component0.support
return MaskedConstraint(self.mask, self.component0.support, self.component1.support)
def expand(self, batch_shape):
try:
return super(MaskedMixture, self).expand(batch_shape)
except NotImplementedError:
mask = self.mask.expand(batch_shape)
component0 = self.component0.expand(batch_shape)
component1 = self.component1.expand(batch_shape)
return type(self)(mask, component0, component1)
def sample(self, sample_shape=torch.Size()):
mask = self.mask.expand(sample_shape + self.batch_shape) if sample_shape else self.mask
result = self.component0.sample(sample_shape)
result[mask] = self.component1.sample(sample_shape)[mask]
return result
def rsample(self, sample_shape=torch.Size()):
mask = self.mask.expand(sample_shape + self.batch_shape) if sample_shape else self.mask
result = self.component0.rsample(sample_shape)
result[mask] = self.component1.rsample(sample_shape)[mask]
return result
def log_prob(self, value):
value_shape = broadcast_shape(value.shape, self.batch_shape + self.event_shape)
if value.shape != value_shape:
value = value.expand(value_shape)
if self._validate_args:
self._validate_sample(value)
mask_shape = value_shape[:len(value_shape) - len(self.event_shape)]
mask = self.mask
if mask.shape != mask_shape:
mask = mask.expand(mask_shape)
result = self.component0.log_prob(value)
result[mask] = self.component1.log_prob(value)[mask]
return result
@lazy_property
def mean(self):
result = self.component0.mean.clone()
result[self.mask] = self.component1.mean[self.mask]
return result
@lazy_property
def variance(self):
result = self.component0.variance.clone()
result[self.mask] = self.component1.variance[self.mask]
return result
| [
"torch.Size",
"torch.is_tensor"
] | 1.3.0 | Capri2014/pyro | 546f9010aeb2308ae566726b1cec67a7b4fda9c2 |
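A small usage sketch for the masked mixture defined above. It assumes the class is exposed as `pyro.distributions.MaskedMixture`; the mask and component parameters are illustrative:

import torch
import pyro.distributions as dist

mask = torch.tensor([False, True, False])
component0 = dist.Normal(torch.zeros(3), torch.ones(3))          # used where mask == 0
component1 = dist.Normal(torch.full((3,), 5.0), torch.ones(3))   # used where mask == 1
mixture = dist.MaskedMixture(mask, component0, component1)

x = mixture.sample()        # entry 1 drawn near 5, entries 0 and 2 near 0
logp = mixture.log_prob(x)  # per-element log probabilities, shape (3,)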
1.3 | import math
import torch
import torch.nn as nn
from torch.distributions import MultivariateNormal, constraints
import pyro.distributions as dist
from pyro.contrib.timeseries.base import TimeSeriesModel
from pyro.nn import PyroParam, pyro_method
from pyro.ops.ssm_gp import MaternKernel
from pyro.ops.tensor_utils import block_diag_embed
class IndependentMaternGP(TimeSeriesModel):
"""
A time series model in which each output dimension is modeled independently
with a univariate Gaussian Process with a Matern kernel. The targets are assumed
to be evenly spaced in time. Training and inference are logarithmic in the length
of the time series T.
:param float nu: The order of the Matern kernel; one of 0.5, 1.5 or 2.5.
:param float dt: The time spacing between neighboring observations of the time series.
:param int obs_dim: The dimension of the targets at each time step.
:param torch.Tensor length_scale_init: optional initial values for the kernel length scale
given as a ``obs_dim``-dimensional tensor
:param torch.Tensor kernel_scale_init: optional initial values for the kernel scale
given as a ``obs_dim``-dimensional tensor
:param torch.Tensor obs_noise_scale_init: optional initial values for the observation noise scale
given as a ``obs_dim``-dimensional tensor
"""
def __init__(self, nu=1.5, dt=1.0, obs_dim=1,
length_scale_init=None, kernel_scale_init=None,
obs_noise_scale_init=None):
self.nu = nu
self.dt = dt
self.obs_dim = obs_dim
if obs_noise_scale_init is None:
obs_noise_scale_init = 0.2 * torch.ones(obs_dim)
assert obs_noise_scale_init.shape == (obs_dim,)
super().__init__()
self.kernel = MaternKernel(nu=nu, num_gps=obs_dim,
length_scale_init=length_scale_init,
kernel_scale_init=kernel_scale_init)
self.obs_noise_scale = PyroParam(obs_noise_scale_init,
constraint=constraints.positive)
obs_matrix = [1.0] + [0.0] * (self.kernel.state_dim - 1)
self.register_buffer("obs_matrix", torch.tensor(obs_matrix).unsqueeze(-1))
def _get_init_dist(self):
return torch.distributions.MultivariateNormal(self.obs_matrix.new_zeros(self.obs_dim, self.kernel.state_dim),
self.kernel.stationary_covariance().squeeze(-3))
def _get_obs_dist(self):
return dist.Normal(self.obs_matrix.new_zeros(self.obs_dim, 1, 1),
self.obs_noise_scale.unsqueeze(-1).unsqueeze(-1)).to_event(1)
def _get_dist(self):
"""
Get the :class:`~pyro.distributions.GaussianHMM` distribution that corresponds
to ``obs_dim``-many independent Matern GPs.
"""
trans_matrix, process_covar = self.kernel.transition_matrix_and_covariance(dt=self.dt)
trans_dist = MultivariateNormal(self.obs_matrix.new_zeros(self.obs_dim, 1, self.kernel.state_dim),
process_covar.unsqueeze(-3))
trans_matrix = trans_matrix.unsqueeze(-3)
return dist.GaussianHMM(self._get_init_dist(), trans_matrix, trans_dist,
self.obs_matrix, self._get_obs_dist())
@pyro_method
def log_prob(self, targets):
"""
:param torch.Tensor targets: A 2-dimensional tensor of real-valued targets
of shape ``(T, obs_dim)``, where ``T`` is the length of the time series and ``obs_dim``
is the dimension of the real-valued ``targets`` at each time step
:returns torch.Tensor: A 1-dimensional tensor of log probabilities of shape ``(obs_dim,)``
"""
assert targets.dim() == 2 and targets.size(-1) == self.obs_dim
return self._get_dist().log_prob(targets.t().unsqueeze(-1))
@torch.no_grad()
def _filter(self, targets):
"""
Return the filtering state for the associated state space model.
"""
assert targets.dim() == 2 and targets.size(-1) == self.obs_dim
return self._get_dist().filter(targets.t().unsqueeze(-1))
@torch.no_grad()
def _forecast(self, dts, filtering_state, include_observation_noise=True):
"""
Internal helper for forecasting.
"""
assert dts.dim() == 1
dts = dts.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
trans_matrix, process_covar = self.kernel.transition_matrix_and_covariance(dt=dts)
trans_matrix = trans_matrix[..., 0:1]
predicted_mean = torch.matmul(filtering_state.loc.unsqueeze(-2), trans_matrix).squeeze(-2)[..., 0]
predicted_function_covar = torch.matmul(trans_matrix.transpose(-1, -2), torch.matmul(
filtering_state.covariance_matrix, trans_matrix))[..., 0, 0] + \
process_covar[..., 0, 0]
if include_observation_noise:
predicted_function_covar = predicted_function_covar + self.obs_noise_scale.pow(2.0)
return predicted_mean, predicted_function_covar
@pyro_method
def forecast(self, targets, dts):
"""
:param torch.Tensor targets: A 2-dimensional tensor of real-valued targets
of shape ``(T, obs_dim)``, where ``T`` is the length of the time series and ``obs_dim``
is the dimension of the real-valued targets at each time step. These
represent the training data that are conditioned on for the purpose of making
forecasts.
:param torch.Tensor dts: A 1-dimensional tensor of times to forecast into the future,
with zero corresponding to the time of the final target ``targets[-1]``.
:returns torch.distributions.Normal: Returns a predictive Normal distribution with batch shape ``(S,)`` and
event shape ``(obs_dim,)``, where ``S`` is the size of ``dts``.
"""
filtering_state = self._filter(targets)
predicted_mean, predicted_covar = self._forecast(dts, filtering_state)
return torch.distributions.Normal(predicted_mean, predicted_covar.sqrt())
class LinearlyCoupledMaternGP(TimeSeriesModel):
"""
A time series model in which each output dimension is modeled as a linear combination
of shared univariate Gaussian Processes with Matern kernels.
In more detail, the generative process is:
:math:`y_i(t) = \\sum_j A_{ij} f_j(t) + \\epsilon_i(t)`
The targets :math:`y_i` are assumed to be evenly spaced in time. Training and inference
are logarithmic in the length of the time series T.
:param float nu: The order of the Matern kernel; one of 0.5, 1.5 or 2.5.
:param float dt: The time spacing between neighboring observations of the time series.
:param int obs_dim: The dimension of the targets at each time step.
:param int num_gps: The number of independent GPs that are mixed to model the time series.
        Typical values might be :math:`N_{\\rm gp} \\in [D_{\\rm obs} / 2, D_{\\rm obs}]`
:param torch.Tensor length_scale_init: optional initial values for the kernel length scale
given as a ``num_gps``-dimensional tensor
:param torch.Tensor kernel_scale_init: optional initial values for the kernel scale
given as a ``num_gps``-dimensional tensor
:param torch.Tensor obs_noise_scale_init: optional initial values for the observation noise scale
given as a ``obs_dim``-dimensional tensor
"""
def __init__(self, nu=1.5, dt=1.0, obs_dim=2, num_gps=1,
length_scale_init=None, kernel_scale_init=None,
obs_noise_scale_init=None):
self.nu = nu
self.dt = dt
assert obs_dim > 1, "If obs_dim==1 you should use IndependentMaternGP"
self.obs_dim = obs_dim
self.num_gps = num_gps
if obs_noise_scale_init is None:
obs_noise_scale_init = 0.2 * torch.ones(obs_dim)
assert obs_noise_scale_init.shape == (obs_dim,)
self.dt = dt
self.obs_dim = obs_dim
self.num_gps = num_gps
super().__init__()
self.kernel = MaternKernel(nu=nu, num_gps=num_gps,
length_scale_init=length_scale_init,
kernel_scale_init=kernel_scale_init)
self.full_state_dim = num_gps * self.kernel.state_dim
self.obs_noise_scale = PyroParam(obs_noise_scale_init,
constraint=constraints.positive)
self.A = nn.Parameter(0.3 * torch.randn(self.num_gps, self.obs_dim))
def _get_obs_matrix(self):
# (num_gps, obs_dim) => (state_dim * num_gps, obs_dim)
return self.A.repeat_interleave(self.kernel.state_dim, dim=0) * \
self.A.new_tensor([1.0] + [0.0] * (self.kernel.state_dim - 1)).repeat(self.num_gps).unsqueeze(-1)
def _stationary_covariance(self):
return block_diag_embed(self.kernel.stationary_covariance())
def _get_init_dist(self):
loc = self.A.new_zeros(self.full_state_dim)
return MultivariateNormal(loc, self._stationary_covariance())
def _get_obs_dist(self):
loc = self.A.new_zeros(self.obs_dim)
return dist.Normal(loc, self.obs_noise_scale).to_event(1)
def _get_dist(self):
"""
Get the :class:`~pyro.distributions.GaussianHMM` distribution that corresponds
to a :class:`LinearlyCoupledMaternGP`.
"""
trans_matrix, process_covar = self.kernel.transition_matrix_and_covariance(dt=self.dt)
trans_matrix = block_diag_embed(trans_matrix)
process_covar = block_diag_embed(process_covar)
loc = self.A.new_zeros(self.full_state_dim)
trans_dist = MultivariateNormal(loc, process_covar)
return dist.GaussianHMM(self._get_init_dist(), trans_matrix, trans_dist,
self._get_obs_matrix(), self._get_obs_dist())
@pyro_method
def log_prob(self, targets):
"""
:param torch.Tensor targets: A 2-dimensional tensor of real-valued targets
of shape ``(T, obs_dim)``, where ``T`` is the length of the time series and ``obs_dim``
is the dimension of the real-valued ``targets`` at each time step
:returns torch.Tensor: a (scalar) log probability
"""
assert targets.dim() == 2 and targets.size(-1) == self.obs_dim
return self._get_dist().log_prob(targets)
@torch.no_grad()
def _filter(self, targets):
"""
Return the filtering state for the associated state space model.
"""
assert targets.dim() == 2 and targets.size(-1) == self.obs_dim
return self._get_dist().filter(targets)
@torch.no_grad()
def _forecast(self, dts, filtering_state, include_observation_noise=True, full_covar=True):
"""
Internal helper for forecasting.
"""
assert dts.dim() == 1
dts = dts.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
trans_mat, process_covar = self.kernel.transition_matrix_and_covariance(dt=dts)
trans_mat = block_diag_embed(trans_mat) # S x full_state_dim x full_state_dim
process_covar = block_diag_embed(process_covar) # S x full_state_dim x full_state_dim
obs_matrix = self._get_obs_matrix() # full_state_dim x obs_dim
trans_obs = torch.matmul(trans_mat, obs_matrix) # S x full_state_dim x obs_dim
predicted_mean = torch.matmul(filtering_state.loc.unsqueeze(-2), trans_obs).squeeze(-2)
predicted_function_covar = torch.matmul(trans_obs.transpose(-1, -2),
torch.matmul(filtering_state.covariance_matrix,
trans_obs))
predicted_function_covar = predicted_function_covar + \
torch.matmul(obs_matrix.transpose(-1, -2), torch.matmul(process_covar, obs_matrix))
if include_observation_noise:
obs_noise = self.obs_noise_scale.pow(2.0).diag_embed()
predicted_function_covar = predicted_function_covar + obs_noise
if not full_covar:
predicted_function_covar = predicted_function_covar.diagonal(dim1=-1, dim2=-2)
return predicted_mean, predicted_function_covar
@pyro_method
def forecast(self, targets, dts):
"""
:param torch.Tensor targets: A 2-dimensional tensor of real-valued targets
of shape ``(T, obs_dim)``, where ``T`` is the length of the time series and ``obs_dim``
is the dimension of the real-valued targets at each time step. These
represent the training data that are conditioned on for the purpose of making
forecasts.
:param torch.Tensor dts: A 1-dimensional tensor of times to forecast into the future,
with zero corresponding to the time of the final target ``targets[-1]``.
:returns torch.distributions.MultivariateNormal: Returns a predictive MultivariateNormal
distribution with batch shape ``(S,)`` and event shape ``(obs_dim,)``,
where ``S`` is the size of ``dts``.
"""
filtering_state = self._filter(targets)
predicted_mean, predicted_covar = self._forecast(dts, filtering_state)
return MultivariateNormal(predicted_mean, predicted_covar)
class DependentMaternGP(TimeSeriesModel):
"""
A time series model in which each output dimension is modeled as a univariate Gaussian Process
with a Matern kernel. The different output dimensions become correlated because the Gaussian
Processes are driven by a correlated Wiener process; see reference [1] for details.
If, in addition, `linearly_coupled` is True, additional correlation is achieved through
linear mixing as in :class:`LinearlyCoupledMaternGP`. The targets are assumed to be evenly
spaced in time. Training and inference are logarithmic in the length of the time series T.
:param float nu: The order of the Matern kernel; must be 1.5.
:param float dt: The time spacing between neighboring observations of the time series.
:param int obs_dim: The dimension of the targets at each time step.
:param bool linearly_coupled: Whether to linearly mix the various gaussian processes in the likelihood.
Defaults to False.
:param torch.Tensor length_scale_init: optional initial values for the kernel length scale
given as a ``obs_dim``-dimensional tensor
:param torch.Tensor obs_noise_scale_init: optional initial values for the observation noise scale
given as a ``obs_dim``-dimensional tensor
References
[1] "Dependent Matern Processes for Multivariate Time Series," Alexander Vandenberg-Rodes, Babak Shahbaba.
"""
def __init__(self, nu=1.5, dt=1.0, obs_dim=1, linearly_coupled=False,
length_scale_init=None, obs_noise_scale_init=None):
if nu != 1.5:
raise NotImplementedError("The only supported value of nu is 1.5")
self.dt = dt
self.obs_dim = obs_dim
if obs_noise_scale_init is None:
obs_noise_scale_init = 0.2 * torch.ones(obs_dim)
assert obs_noise_scale_init.shape == (obs_dim,)
super().__init__()
self.kernel = MaternKernel(nu=nu, num_gps=obs_dim,
length_scale_init=length_scale_init)
self.full_state_dim = self.kernel.state_dim * obs_dim
# we demote self.kernel.kernel_scale from being a nn.Parameter
# since the relevant scales are now encoded in the wiener noise matrix
del self.kernel.kernel_scale
self.kernel.register_buffer("kernel_scale", torch.ones(obs_dim))
self.obs_noise_scale = PyroParam(obs_noise_scale_init,
constraint=constraints.positive)
self.wiener_noise_tril = PyroParam(torch.eye(obs_dim) +
0.03 * torch.randn(obs_dim, obs_dim).tril(-1),
constraint=constraints.lower_cholesky)
if linearly_coupled:
self.obs_matrix = nn.Parameter(0.3 * torch.randn(self.obs_dim, self.obs_dim))
else:
obs_matrix = torch.zeros(self.full_state_dim, obs_dim)
for i in range(obs_dim):
obs_matrix[self.kernel.state_dim * i, i] = 1.0
self.register_buffer("obs_matrix", obs_matrix)
def _get_obs_matrix(self):
if self.obs_matrix.size(0) == self.obs_dim:
# (num_gps, obs_dim) => (state_dim * num_gps, obs_dim)
selector = [1.0] + [0.0] * (self.kernel.state_dim - 1)
return self.obs_matrix.repeat_interleave(self.kernel.state_dim, dim=0) * \
self.obs_matrix.new_tensor(selector).repeat(self.obs_dim).unsqueeze(-1)
else:
return self.obs_matrix
def _get_init_dist(self, stationary_covariance):
return torch.distributions.MultivariateNormal(self.obs_matrix.new_zeros(self.full_state_dim),
stationary_covariance)
def _get_obs_dist(self):
return dist.Normal(self.obs_matrix.new_zeros(self.obs_dim),
self.obs_noise_scale).to_event(1)
def _get_wiener_cov(self):
chol = self.wiener_noise_tril
wiener_cov = torch.mm(chol, chol.t()).reshape(self.obs_dim, 1, self.obs_dim, 1)
wiener_cov = wiener_cov * wiener_cov.new_ones(self.kernel.state_dim, 1, self.kernel.state_dim)
return wiener_cov.reshape(self.full_state_dim, self.full_state_dim)
def _stationary_covariance(self):
rho_j = math.sqrt(3.0) / self.kernel.length_scale.unsqueeze(-1).unsqueeze(-1)
rho_i = rho_j.unsqueeze(-1)
block = 2.0 * self.kernel.mask00 + \
(rho_i - rho_j) * (self.kernel.mask01 - self.kernel.mask10) + \
(2.0 * rho_i * rho_j) * self.kernel.mask11
block = block / (rho_i + rho_j).pow(3.0)
block = block.transpose(-2, -3).reshape(self.full_state_dim, self.full_state_dim)
return self._get_wiener_cov() * block
def _get_trans_dist(self, trans_matrix, stationary_covariance):
covar = stationary_covariance - torch.matmul(trans_matrix.transpose(-1, -2),
torch.matmul(stationary_covariance, trans_matrix))
return MultivariateNormal(covar.new_zeros(self.full_state_dim), covar)
def _trans_matrix_distribution_stat_covar(self, dts):
stationary_covariance = self._stationary_covariance()
trans_matrix = self.kernel.transition_matrix(dt=dts)
trans_matrix = block_diag_embed(trans_matrix)
trans_dist = self._get_trans_dist(trans_matrix, stationary_covariance)
return trans_matrix, trans_dist, stationary_covariance
def _get_dist(self):
"""
Get the :class:`~pyro.distributions.GaussianHMM` distribution that corresponds to a :class:`DependentMaternGP`
"""
trans_matrix, trans_dist, stat_covar = self._trans_matrix_distribution_stat_covar(self.dt)
return dist.GaussianHMM(self._get_init_dist(stat_covar), trans_matrix,
trans_dist, self._get_obs_matrix(), self._get_obs_dist())
@pyro_method
def log_prob(self, targets):
"""
:param torch.Tensor targets: A 2-dimensional tensor of real-valued targets
of shape ``(T, obs_dim)``, where ``T`` is the length of the time series and ``obs_dim``
is the dimension of the real-valued ``targets`` at each time step
:returns torch.Tensor: A (scalar) log probability
"""
assert targets.dim() == 2 and targets.size(-1) == self.obs_dim
return self._get_dist().log_prob(targets)
@torch.no_grad()
def _filter(self, targets):
"""
Return the filtering state for the associated state space model.
"""
assert targets.dim() == 2 and targets.size(-1) == self.obs_dim
return self._get_dist().filter(targets)
@torch.no_grad()
def _forecast(self, dts, filtering_state, include_observation_noise=True):
"""
Internal helper for forecasting.
"""
assert dts.dim() == 1
dts = dts.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
trans_matrix, trans_dist, _ = self._trans_matrix_distribution_stat_covar(dts)
obs_matrix = self._get_obs_matrix()
trans_obs = torch.matmul(trans_matrix, obs_matrix)
predicted_mean = torch.matmul(filtering_state.loc.unsqueeze(-2), trans_obs).squeeze(-2)
predicted_function_covar = torch.matmul(trans_obs.transpose(-1, -2),
torch.matmul(filtering_state.covariance_matrix, trans_obs)) + \
torch.matmul(obs_matrix.t(), torch.matmul(trans_dist.covariance_matrix, obs_matrix))
if include_observation_noise:
predicted_function_covar = predicted_function_covar + self.obs_noise_scale.pow(2.0)
return predicted_mean, predicted_function_covar
@pyro_method
def forecast(self, targets, dts):
"""
:param torch.Tensor targets: A 2-dimensional tensor of real-valued targets
of shape ``(T, obs_dim)``, where ``T`` is the length of the time series and ``obs_dim``
is the dimension of the real-valued targets at each time step. These
represent the training data that are conditioned on for the purpose of making
forecasts.
:param torch.Tensor dts: A 1-dimensional tensor of times to forecast into the future,
with zero corresponding to the time of the final target ``targets[-1]``.
:returns torch.distributions.MultivariateNormal: Returns a predictive MultivariateNormal
distribution with batch shape ``(S,)`` and event shape ``(obs_dim,)``, where ``S`` is the size of ``dts``.
"""
filtering_state = self._filter(targets)
predicted_mean, predicted_covar = self._forecast(dts, filtering_state)
return MultivariateNormal(predicted_mean, predicted_covar)
| [
"torch.zeros",
"torch.no_grad",
"torch.ones",
"torch.eye",
"torch.tensor",
"torch.distributions.MultivariateNormal",
"torch.matmul",
"torch.randn"
] | 1.3.0 | Capri2014/pyro | 546f9010aeb2308ae566726b1cec67a7b4fda9c2 |
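A hedged sketch of scoring and forecasting with the independent Matern GP from the record above. The import path and the data are placeholders, and the parameter-fitting loop (e.g. optimizing the kernel and noise scales with an optimizer) is elided:

import torch
from pyro.contrib.timeseries import IndependentMaternGP  # assumed import path

T, obs_dim = 200, 3
targets = torch.randn(T, obs_dim)           # placeholder training series

gp = IndependentMaternGP(nu=1.5, dt=1.0, obs_dim=obs_dim)
logp = gp.log_prob(targets)                 # shape (obs_dim,), one GP per output dim
dts = torch.tensor([1.0, 2.0, 3.0])         # times past targets[-1]
pred = gp.forecast(targets, dts)            # predictive Normal for the 3 future times
mean, std = pred.mean, pred.stddev          # each of shape (3, obs_dim)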
1.3 | import torch
from torch.distributions import constraints
from pyro.contrib.gp.kernels.kernel import Kernel
from pyro.nn.module import PyroParam
class Constant(Kernel):
r"""
Implementation of Constant kernel:
:math:`k(x, z) = \sigma^2.`
"""
def __init__(self, input_dim, variance=None, active_dims=None):
super(Constant, self).__init__(input_dim, active_dims)
variance = torch.tensor(1.) if variance is None else variance
self.variance = PyroParam(variance, constraints.positive)
def forward(self, X, Z=None, diag=False):
if diag:
return self.variance.expand(X.size(0))
if Z is None:
Z = X
return self.variance.expand(X.size(0), Z.size(0))
class WhiteNoise(Kernel):
r"""
Implementation of WhiteNoise kernel:
:math:`k(x, z) = \sigma^2 \delta(x, z),`
where :math:`\delta` is a Dirac delta function.
"""
def __init__(self, input_dim, variance=None, active_dims=None):
super(WhiteNoise, self).__init__(input_dim, active_dims)
variance = torch.tensor(1.) if variance is None else variance
self.variance = PyroParam(variance, constraints.positive)
def forward(self, X, Z=None, diag=False):
if diag:
return self.variance.expand(X.size(0))
if Z is None:
return self.variance.expand(X.size(0)).diag()
else:
return X.data.new_zeros(X.size(0), Z.size(0))
| [
"torch.tensor"
] | 1.3.0 | Capri2014/pyro | 546f9010aeb2308ae566726b1cec67a7b4fda9c2 |
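A quick shape check for the two kernels above; the import path is assumed and the inputs are illustrative:

import torch
from pyro.contrib.gp.kernels import Constant, WhiteNoise  # assumed import path

X = torch.randn(5, 2)
Z = torch.randn(3, 2)

k_const = Constant(input_dim=2, variance=torch.tensor(2.0))
k_noise = WhiteNoise(input_dim=2, variance=torch.tensor(0.1))

K1 = k_const(X, Z)   # shape (5, 3): every entry equals 2.0
K2 = k_noise(X)      # shape (5, 5): 0.1 on the diagonal, zeros elsewhere
K3 = k_noise(X, Z)   # shape (5, 3): all zeros, X and Z are treated as distinct points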
1.3 | """
This example demonstrates the functionality of `pyro.contrib.minipyro`,
which is a minimal implementation of the Pyro Probabilistic Programming
Language that was created for didactic purposes.
"""
import argparse
import torch
from pyro.generic import distributions as dist
# We use the pyro.generic interface to support dynamic choice of backend.
from pyro.generic import infer, ops, optim, pyro, pyro_backend
def main(args):
# Define a basic model with a single Normal latent random variable `loc`
# and a batch of Normally distributed observations.
def model(data):
loc = pyro.sample("loc", dist.Normal(0., 1.))
with pyro.plate("data", len(data), dim=-1):
pyro.sample("obs", dist.Normal(loc, 1.), obs=data)
# Define a guide (i.e. variational distribution) with a Normal
# distribution over the latent random variable `loc`.
def guide(data):
guide_loc = pyro.param("guide_loc", torch.tensor(0.))
guide_scale = ops.exp(pyro.param("guide_scale_log", torch.tensor(0.)))
pyro.sample("loc", dist.Normal(guide_loc, guide_scale))
# Generate some data.
torch.manual_seed(0)
data = torch.randn(100) + 3.0
# Because the API in minipyro matches that of Pyro proper,
# training code works with generic Pyro implementations.
with pyro_backend(args.backend):
# Construct an SVI object so we can do variational inference on our
# model/guide pair.
Elbo = infer.JitTrace_ELBO if args.jit else infer.Trace_ELBO
elbo = Elbo()
adam = optim.Adam({"lr": args.learning_rate})
svi = infer.SVI(model, guide, adam, elbo)
# Basic training loop
pyro.get_param_store().clear()
for step in range(args.num_steps):
loss = svi.step(data)
if step % 100 == 0:
print("step {} loss = {}".format(step, loss))
# Report the final values of the variational parameters
# in the guide after training.
for name in pyro.get_param_store():
value = pyro.param(name)
print("{} = {}".format(name, value.detach().cpu().numpy()))
# For this simple (conjugate) model we know the exact posterior. In
# particular we know that the variational distribution should be
# centered near 3.0. So let's check this explicitly.
assert (pyro.param("guide_loc") - 3.0).abs() < 0.1
if __name__ == "__main__":
assert pyro.__version__.startswith('1.1.0')
parser = argparse.ArgumentParser(description="Mini Pyro demo")
parser.add_argument("-b", "--backend", default="minipyro")
parser.add_argument("-n", "--num-steps", default=1001, type=int)
parser.add_argument("-lr", "--learning-rate", default=0.02, type=float)
parser.add_argument("--jit", action="store_true")
args = parser.parse_args()
main(args)
| [
"torch.manual_seed",
"torch.tensor",
"torch.randn"
] | 1.3.0 | Capri2014/pyro | 546f9010aeb2308ae566726b1cec67a7b4fda9c2 |
1.3 | import torch
import pyro.distributions as dist
from pyro.ops.hessian import hessian
from tests.common import assert_equal
def test_hessian_mvn():
tmp = torch.randn(3, 10)
cov = torch.matmul(tmp, tmp.t())
mvn = dist.MultivariateNormal(cov.new_zeros(3), cov)
x = torch.randn(3, requires_grad=True)
y = mvn.log_prob(x)
assert_equal(hessian(y, x), -mvn.precision_matrix)
def test_hessian_multi_variables():
x = torch.randn(3, requires_grad=True)
z = torch.randn(3, requires_grad=True)
y = (x ** 2 * z + z ** 3).sum()
H = hessian(y, (x, z))
Hxx = (2 * z).diag()
Hxz = (2 * x).diag()
Hzz = (6 * z).diag()
target_H = torch.cat([torch.cat([Hxx, Hxz]), torch.cat([Hxz, Hzz])], dim=1)
assert_equal(H, target_H)
| [
"torch.cat",
"torch.randn"
] | 1.3.0 | Capri2014/pyro | 546f9010aeb2308ae566726b1cec67a7b4fda9c2 |
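For the second test above, the target Hessian can be checked by hand. With :math:`y = \sum_i (x_i^2 z_i + z_i^3)`, the first derivatives are :math:`\partial y/\partial x_i = 2 x_i z_i` and :math:`\partial y/\partial z_i = x_i^2 + 3 z_i^2`, so every second-derivative block is diagonal:

:math:`\frac{\partial^2 y}{\partial x_i \partial x_j} = 2 z_i \delta_{ij} \;\Rightarrow\; H_{xx} = \mathrm{diag}(2z), \qquad \frac{\partial^2 y}{\partial x_i \partial z_j} = 2 x_i \delta_{ij} \;\Rightarrow\; H_{xz} = \mathrm{diag}(2x), \qquad \frac{\partial^2 y}{\partial z_i \partial z_j} = 6 z_i \delta_{ij} \;\Rightarrow\; H_{zz} = \mathrm{diag}(6z)`

which is exactly the block matrix [[Hxx, Hxz], [Hxz, Hzz]] assembled with torch.cat in the test.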
1.0 | '''Evaluation script'''
import argparse
from pathlib import Path
import numpy as np
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
from PIL import Image
from albumentations import Compose
from albumentations import CenterCrop
from torchvision.transforms.functional import to_pil_image
from road_roughness_prediction.segmentation import datasets
from road_roughness_prediction.segmentation import models
from road_roughness_prediction.segmentation import logging
from road_roughness_prediction.segmentation import utils
import road_roughness_prediction.tools.torch as torch_tools
def evaluate(net, loader: DataLoader, criterion, device, save_dir, category_type):
net.eval()
losses = []
image_writer = ImageWriter(save_dir, category_type)
with torch.no_grad():
for batch in tqdm(loader):
X = batch['X'].to(device)
Y = batch['Y'].to(device)
out = net.forward(X)
losses.append(criterion(out, Y).item())
image_writer.write_batch_images(batch, out.cpu())
mean_loss = np.mean(losses)
print(f'loss: {mean_loss:.4f}')
class ImageWriter:
def __init__(self, save_dir: Path, category_type):
self._counter = 0
self.category_type = category_type
self.is_binary = category_type == datasets.surface_types.BinaryCategory
self.input_dir = save_dir / 'input'
self.output_dir = save_dir / 'output'
self.target_dir = save_dir / 'target'
self.blend_output_dir = save_dir / 'blend_output'
self.blend_target_dir = save_dir / 'blend_target'
dirs = [
self.input_dir,
self.output_dir,
self.target_dir,
self.blend_output_dir,
self.blend_target_dir,
]
for dir_ in dirs:
if not dir_.exists():
dir_.mkdir()
def write_batch_images(self, batch, out):
if self.is_binary:
self._write_batch_images_binary(batch, out)
else:
self._write_batch_images_multi(batch, out)
def _write_batch_images_binary(self, batch, out):
'''Write batch-wise data into images'''
X = batch['X']
Y = batch['Y']
# out: [n_batch, 1, height, width]
out_seg = (np.array(out.squeeze(dim=1)) > 0.5).astype(np.uint8)
n_batches = X.shape[0]
for i in range(n_batches):
file_name = f'{self._counter:05d}'
input_img = to_pil_image(logging.normalize(X[i, ::]))
save_path = self.input_dir / (file_name + '.jpg')
input_img.save(save_path)
out_seg_img = out_seg[i, ::]
out_seg_index_img = utils.create_index_image(out_seg_img)
save_path = self.output_dir / (file_name + '.png')
out_seg_index_img.save(save_path)
target_img = np.array(Y[i, ::].squeeze()).astype(np.uint8)
target_index_img = utils.create_index_image(target_img)
save_path = self.target_dir / (file_name + '.png')
target_index_img.save(save_path)
blend_output_img = self._blend_image(input_img, out_seg_index_img)
save_path = self.blend_output_dir / (file_name + '.jpg')
blend_output_img.save(save_path)
blend_target_img = self._blend_image(input_img, target_index_img)
save_path = self.blend_target_dir / (file_name + '.jpg')
blend_target_img.save(save_path)
self._counter += 1
def _write_batch_images_multi(self, batch, out):
'''Write batch-wise data into images'''
X = batch['X']
Y = batch['Y']
# out: [n_batch, n_class, height, width]
out_seg = out.argmax(1)
n_batches = X.shape[0]
for i in range(n_batches):
file_name = f'{self._counter:05d}'
input_img = to_pil_image(logging.normalize(X[i, ::]))
save_path = self.input_dir / (file_name + '.jpg')
input_img.save(save_path)
out_seg_img = np.array(out_seg[i, ::]).astype(np.uint8)
out_seg_index_img = utils.create_index_image(out_seg_img)
save_path = self.output_dir / (file_name + '.png')
out_seg_index_img.save(save_path)
target_img = np.array(Y[i, ::]).astype(np.uint8)
target_index_img = utils.create_index_image(target_img)
save_path = self.target_dir / (file_name + '.png')
target_index_img.save(save_path)
blend_output_img = self._blend_image(input_img, out_seg_index_img)
save_path = self.blend_output_dir / (file_name + '.jpg')
blend_output_img.save(save_path)
blend_target_img = self._blend_image(input_img, target_index_img)
save_path = self.blend_target_dir / (file_name + '.jpg')
blend_target_img.save(save_path)
self._counter += 1
def _blend_image(self, original, segmented):
blend = Image.blend(original.convert('RGB'), segmented.convert('RGB'), alpha=0.2)
return blend
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--weight-path', required=True)
parser.add_argument('--image-dirs', required=True, type=str, nargs='+')
parser.add_argument('--mask-dirs', required=True, type=str, nargs='+')
parser.add_argument('--model-name', type=str, default='unet11')
parser.add_argument('--dataset-type', type=str, default='base')
parser.add_argument('--save-path', default='forward')
parser.add_argument('--category-type', default='binary', choices=['binary', 'simple'])
parser.add_argument('--cpu', action='store_true')
parser.add_argument('--device-id', type=int, default=0)
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--input-size', type=int, nargs=2, default=(640, 640))
parser.add_argument('--jaccard-weight', type=float, default=0.3)
args = parser.parse_args()
print(args)
image_dirs = [Path(p) for p in args.image_dirs]
mask_dirs = [Path(p) for p in args.mask_dirs]
for data_dir in (image_dirs + mask_dirs):
assert data_dir.exists(), f'{str(data_dir)} does not exist.'
device = torch_tools.get_device(args.cpu, args.device_id)
torch_tools.set_seeds(args.seed, device)
weight_path = Path(args.weight_path)
category_type = datasets.surface_types.from_string(args.category_type)
save_path = Path(args.save_path)
if not save_path.exists():
save_path.mkdir(parents=True)
net = models.load_model(args.model_name, category_type).to(device)
state_dict = torch.load(weight_path, map_location=device)
net.load_state_dict(state_dict=state_dict)
input_size = args.input_size
transform = Compose([
CenterCrop(*input_size),
])
dataset = datasets.create_dataset(
args.dataset_type,
image_dirs,
mask_dirs,
category_type,
transform,
)
loader = DataLoader(dataset, batch_size=1, shuffle=False)
criterion = models.loss.get_criterion(category_type, args.jaccard_weight)
evaluate(net, loader, criterion, device, save_path, category_type)
if __name__ == '__main__':
main()
| [
"torch.no_grad",
"torch.utils.data.DataLoader",
"torch.load"
] | 1.0.1 | mknz/dsr-road-roughness-prediction | 5f56b6ba5da70a09f2c967b7f32c740072e20ed1 |
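A hedged example invocation of the evaluation script above. The script name and the paths are placeholders; only flags defined in the argparse block are used:

python evaluate.py \
    --weight-path runs/unet11_best.pt \
    --image-dirs data/val/images \
    --mask-dirs data/val/masks \
    --model-name unet11 \
    --category-type simple \
    --input-size 640 640 \
    --save-path forward_val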
1.1 | import pytest
import torch
from espnet2.tts.fastspeech2 import FastSpeech2
@pytest.mark.parametrize("postnet_layers", [0, 1])
@pytest.mark.parametrize("reduction_factor", [1, 2, 3])
@pytest.mark.parametrize(
"spk_embed_dim, spk_embed_integration_type",
[(None, "add"), (2, "add"), (2, "concat")],
)
@pytest.mark.parametrize("use_gst", [True, False])
@pytest.mark.parametrize(
"use_masking, use_weighted_masking", [[True, False], [False, True]]
)
def test_fastspeech2(
postnet_layers,
reduction_factor,
spk_embed_dim,
spk_embed_integration_type,
use_gst,
use_masking,
use_weighted_masking,
):
model = FastSpeech2(
idim=10,
odim=5,
adim=4,
aheads=2,
elayers=1,
eunits=4,
dlayers=1,
dunits=4,
postnet_layers=postnet_layers,
postnet_chans=4,
postnet_filts=5,
reduction_factor=reduction_factor,
duration_predictor_layers=2,
duration_predictor_chans=4,
duration_predictor_kernel_size=3,
energy_predictor_layers=2,
energy_predictor_chans=4,
energy_predictor_kernel_size=3,
energy_predictor_dropout=0.5,
energy_embed_kernel_size=9,
energy_embed_dropout=0.5,
pitch_predictor_layers=2,
pitch_predictor_chans=4,
pitch_predictor_kernel_size=3,
pitch_predictor_dropout=0.5,
pitch_embed_kernel_size=9,
pitch_embed_dropout=0.5,
spk_embed_dim=spk_embed_dim,
spk_embed_integration_type=spk_embed_integration_type,
use_gst=use_gst,
gst_tokens=2,
gst_heads=4,
gst_conv_layers=2,
gst_conv_chans_list=[2, 4],
gst_conv_kernel_size=3,
gst_conv_stride=2,
gst_gru_layers=1,
gst_gru_units=4,
use_masking=use_masking,
use_weighted_masking=use_weighted_masking,
)
inputs = dict(
text=torch.randint(1, 10, (2, 2)),
text_lengths=torch.tensor([2, 1], dtype=torch.long),
speech=torch.randn(2, 4 * reduction_factor, 5),
speech_lengths=torch.tensor([4, 2], dtype=torch.long) * reduction_factor,
durations=torch.tensor([[2, 2, 0], [2, 0, 0]], dtype=torch.long),
pitch=torch.tensor([[2, 2, 0], [2, 0, 0]], dtype=torch.float).unsqueeze(-1),
energy=torch.tensor([[2, 2, 0], [2, 0, 0]], dtype=torch.float).unsqueeze(-1),
# NOTE(kan-bayashi): +1 for eos
durations_lengths=torch.tensor([2 + 1, 1 + 1], dtype=torch.long),
pitch_lengths=torch.tensor([2 + 1, 1 + 1], dtype=torch.long),
energy_lengths=torch.tensor([2 + 1, 1 + 1], dtype=torch.long),
)
if spk_embed_dim is not None:
inputs.update(spembs=torch.randn(2, spk_embed_dim))
loss, *_ = model(**inputs)
loss.backward()
with torch.no_grad():
model.eval()
inputs = dict(
text=torch.randint(0, 10, (2,)),
)
if use_gst:
inputs.update(speech=torch.randn(5, 5))
if spk_embed_dim is not None:
inputs.update(spembs=torch.randn(spk_embed_dim))
model.inference(**inputs)
# teacher forcing
inputs.update(durations=torch.tensor([2, 2, 0], dtype=torch.long))
inputs.update(pitch=torch.tensor([2, 2, 0], dtype=torch.float).unsqueeze(-1))
inputs.update(energy=torch.tensor([2, 2, 0], dtype=torch.float).unsqueeze(-1))
model.inference(**inputs, use_teacher_forcing=True)
| [
"torch.randint",
"torch.no_grad",
"torch.tensor",
"torch.randn"
] | 1.1.0 | jefflai108/espnet | a51f21cb94a4dead2300a8a13adb92ffdfbafbe8 |
1.0 | # coding=utf-8
# Copyright 2021 The Fairseq Authors, Microsoft Research, and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch WavLM model."""
import math
import warnings
from typing import Optional, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...deepspeed import is_deepspeed_zero3_enabled
from ...modeling_outputs import (
BaseModelOutput,
CausalLMOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
Wav2Vec2BaseModelOutput,
XVectorOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import torch_int_div
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_wavlm import WavLMConfig
logger = logging.get_logger(__name__)
_HIDDEN_STATES_START_POSITION = 2
# General docstring
_CONFIG_FOR_DOC = "WavLMConfig"
_PROCESSOR_FOR_DOC = "Wav2Vec2Processor"
# Base docstring
_CHECKPOINT_FOR_DOC = "patrickvonplaten/wavlm-libri-clean-100h-base-plus"
_EXPECTED_OUTPUT_SHAPE = [1, 292, 768]
# CTC docstring
_CTC_EXPECTED_OUTPUT = "'mister quilter is the aposle of the middle classes and we are glad to welcome his gospel'"
_CTC_EXPECTED_LOSS = 12.51
# Audio class docstring
_FEAT_EXTRACTOR_FOR_DOC = "Wav2Vec2FeatureExtractor"
_SEQ_CLASS_CHECKPOINT = "hf-internal-testing/tiny-random-wavlm"
_SEQ_CLASS_EXPECTED_OUTPUT = "'no'" # TODO(anton) - could you quickly fine-tune a KS WavLM Model
_SEQ_CLASS_EXPECTED_LOSS = 0.7 # TODO(anton) - could you quickly fine-tune a KS WavLM Model
# Frame class docstring
_FRAME_CLASS_CHECKPOINT = "microsoft/wavlm-base-plus-sd"
_FRAME_EXPECTED_OUTPUT = [0, 0]
# Speaker Verification docstring
_XVECTOR_CHECKPOINT = "microsoft/wavlm-base-plus-sv"
_XVECTOR_EXPECTED_OUTPUT = 0.97
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST = [
"microsoft/wavlm-base",
"microsoft/wavlm-base-plus",
"microsoft/wavlm-large",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
]
# Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices
def _compute_mask_indices(
shape: Tuple[int, int],
mask_prob: float,
mask_length: int,
attention_mask: Optional[torch.LongTensor] = None,
min_masks: int = 0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
CPU as part of the preprocessing during training.
Args:
shape: The shape for which to compute masks. This should be of a tuple of size 2 where
the first element is the batch size and the second element is the length of the axis to span.
mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
independently generated mask spans of length `mask_length` is computed by
`mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
actual percentage will be smaller.
mask_length: size of the mask
min_masks: minimum number of masked spans
attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
each batch dimension.
"""
batch_size, sequence_length = shape
if mask_length < 1:
raise ValueError("`mask_length` has to be bigger than 0.")
if mask_length > sequence_length:
raise ValueError(
f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
f" and `sequence_length`: {sequence_length}`"
)
# epsilon is used for probabilistic rounding
epsilon = np.random.rand(1).item()
def compute_num_masked_span(input_length):
"""Given input length, compute how many spans should be masked"""
num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
num_masked_span = max(num_masked_span, min_masks)
# make sure num masked span <= sequence_length
if num_masked_span * mask_length > sequence_length:
num_masked_span = sequence_length // mask_length
# make sure num_masked span is also <= input_length - (mask_length - 1)
if input_length - (mask_length - 1) < num_masked_span:
num_masked_span = max(input_length - (mask_length - 1), 0)
return num_masked_span
# compute number of masked spans in batch
input_lengths = (
attention_mask.sum(-1).detach().tolist()
if attention_mask is not None
else [sequence_length for _ in range(batch_size)]
)
# SpecAugment mask to fill
    spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
spec_aug_mask_idxs = []
max_num_masked_span = compute_num_masked_span(sequence_length)
if max_num_masked_span == 0:
return spec_aug_mask
for input_length in input_lengths:
# compute num of masked spans for this input
num_masked_span = compute_num_masked_span(input_length)
# get random indices to mask
spec_aug_mask_idx = np.random.choice(
np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
)
# pick first sampled index that will serve as a dummy index to pad vector
# to ensure same dimension for all batches due to probabilistic rounding
# Picking first sample just pads those vectors twice.
if len(spec_aug_mask_idx) == 0:
            # this case can only happen if `input_length` is strictly smaller than
# `sequence_length` in which case the last token has to be a padding
# token which we can use as a dummy mask id
dummy_mask_idx = sequence_length - 1
else:
dummy_mask_idx = spec_aug_mask_idx[0]
spec_aug_mask_idx = np.concatenate(
[spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
)
spec_aug_mask_idxs.append(spec_aug_mask_idx)
spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
# expand masked indices to masked spans
spec_aug_mask_idxs = np.broadcast_to(
spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
)
spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
    # add offset to the starting indexes so that the indexes now create a span
offsets = np.arange(mask_length)[None, None, :]
offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
batch_size, max_num_masked_span * mask_length
)
spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
# ensure that we cannot have indices larger than sequence_length
if spec_aug_mask_idxs.max() > sequence_length - 1:
spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
# scatter indices to mask
np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
return spec_aug_mask
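# Editor's note: a minimal sketch (not part of the original file) showing how the SpecAugment
# mask above could be exercised in isolation; the batch size, sequence length, mask probability,
# and span length below are illustrative assumptions only.
def _example_compute_mask_indices():
    # mask a (batch=2, seq_len=10) grid with 50% mask probability and spans of length 2
    mask = _compute_mask_indices(shape=(2, 10), mask_prob=0.5, mask_length=2)
    # the result is a boolean numpy array marking the masked time steps
    assert mask.shape == (2, 10)
    return mask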
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->WavLM
class WavLMNoLayerNormConvLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = nn.Conv1d(
self.in_conv_dim,
self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
stride=config.conv_stride[layer_id],
bias=config.conv_bias,
)
self.activation = ACT2FN[config.feat_extract_activation]
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->WavLM
class WavLMLayerNormConvLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = nn.Conv1d(
self.in_conv_dim,
self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
stride=config.conv_stride[layer_id],
bias=config.conv_bias,
)
self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
self.activation = ACT2FN[config.feat_extract_activation]
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = hidden_states.transpose(-2, -1)
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states.transpose(-2, -1)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->WavLM
class WavLMGroupNormConvLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = nn.Conv1d(
self.in_conv_dim,
self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
stride=config.conv_stride[layer_id],
bias=config.conv_bias,
)
self.activation = ACT2FN[config.feat_extract_activation]
self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->WavLM
class WavLMPositionalConvEmbedding(nn.Module):
def __init__(self, config):
super().__init__()
self.conv = nn.Conv1d(
config.hidden_size,
config.hidden_size,
kernel_size=config.num_conv_pos_embeddings,
padding=config.num_conv_pos_embeddings // 2,
groups=config.num_conv_pos_embedding_groups,
)
if is_deepspeed_zero3_enabled():
import deepspeed
with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
deepspeed.zero.register_external_parameter(self, self.conv.weight_v)
deepspeed.zero.register_external_parameter(self, self.conv.weight_g)
else:
self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
self.padding = WavLMSamePadLayer(config.num_conv_pos_embeddings)
self.activation = ACT2FN[config.feat_extract_activation]
def forward(self, hidden_states):
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.conv(hidden_states)
hidden_states = self.padding(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = hidden_states.transpose(1, 2)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->WavLM
class WavLMSamePadLayer(nn.Module):
def __init__(self, num_conv_pos_embeddings):
super().__init__()
self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
def forward(self, hidden_states):
if self.num_pad_remove > 0:
hidden_states = hidden_states[:, :, : -self.num_pad_remove]
return hidden_states
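# Editor's note: an illustrative sketch (not part of the original file). With an even,
# wav2vec2-style kernel width of 128 the positional convolution yields one extra frame,
# which WavLMSamePadLayer trims; the tensor sizes here are hypothetical.
def _example_same_pad_layer():
    pad_layer = WavLMSamePadLayer(num_conv_pos_embeddings=128)
    hidden_states = torch.randn(2, 4, 6)  # (batch, channels, frames + 1)
    trimmed = pad_layer(hidden_states)
    assert trimmed.shape == (2, 4, 5)
    return trimmed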
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->WavLM
class WavLMFeatureEncoder(nn.Module):
"""Construct the features from raw audio waveform"""
def __init__(self, config):
super().__init__()
if config.feat_extract_norm == "group":
conv_layers = [WavLMGroupNormConvLayer(config, layer_id=0)] + [
WavLMNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1)
]
elif config.feat_extract_norm == "layer":
conv_layers = [WavLMLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)]
else:
raise ValueError(
f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
)
self.conv_layers = nn.ModuleList(conv_layers)
self.gradient_checkpointing = False
self._requires_grad = True
def _freeze_parameters(self):
for param in self.parameters():
param.requires_grad = False
self._requires_grad = False
def forward(self, input_values):
hidden_states = input_values[:, None]
# make sure hidden_states require grad for gradient_checkpointing
if self._requires_grad and self.training:
hidden_states.requires_grad = True
for conv_layer in self.conv_layers:
if self._requires_grad and self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(conv_layer),
hidden_states,
)
else:
hidden_states = conv_layer(hidden_states)
return hidden_states
class WavLMFeatureExtractor(WavLMFeatureEncoder):
def __init__(self, config):
super().__init__(config)
warnings.warn(
f"The class `{self.__class__.__name__}` has been depreciated "
"and will be removed in Transformers v5. "
f"Use `{self.__class__.__bases__[0].__name__}` instead.",
FutureWarning,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureProjection with Wav2Vec2->WavLM
class WavLMFeatureProjection(nn.Module):
def __init__(self, config):
super().__init__()
self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
self.dropout = nn.Dropout(config.feat_proj_dropout)
def forward(self, hidden_states):
# non-projected hidden states are needed for quantization
norm_hidden_states = self.layer_norm(hidden_states)
hidden_states = self.projection(norm_hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states, norm_hidden_states
class WavLMAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
num_buckets: int = 320,
max_distance: int = 800,
has_relative_position_bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.out_proj = nn.Linear(embed_dim, embed_dim)
self.num_buckets = num_buckets
self.max_distance = max_distance
self.gru_rel_pos_const = nn.Parameter(torch.ones(1, self.num_heads, 1, 1))
self.gru_rel_pos_linear = nn.Linear(self.head_dim, 8)
if has_relative_position_bias:
self.rel_attn_embed = nn.Embedding(self.num_buckets, self.num_heads)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_bias: Optional[torch.Tensor] = None,
output_attentions: bool = False,
index=0,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Attention layer with relative attention"""
bsz, tgt_len, _ = hidden_states.size()
# first pass of attention layer creates position bias
if position_bias is None:
position_bias = self.compute_bias(tgt_len, tgt_len)
position_bias = (
position_bias.unsqueeze(0).repeat(bsz, 1, 1, 1).view(bsz * self.num_heads, tgt_len, tgt_len)
)
# Compute relative position bias:
        # 1) reshape hidden_states
gated_hidden_states = hidden_states.view(hidden_states.shape[:-1] + (self.num_heads, -1))
gated_hidden_states = gated_hidden_states.permute(0, 2, 1, 3)
# 2) project hidden states
relative_position_proj = self.gru_rel_pos_linear(gated_hidden_states)
relative_position_proj = relative_position_proj.view(gated_hidden_states.shape[:-1] + (2, 4)).sum(-1)
# 3) compute gate for position bias from projected hidden states
gate_a, gate_b = torch.sigmoid(relative_position_proj).chunk(2, dim=-1)
gate_output = gate_a * (gate_b * self.gru_rel_pos_const - 1.0) + 2.0
# 4) apply gate to position bias to compute gated position_bias
gated_position_bias = gate_output.view(bsz * self.num_heads, -1, 1) * position_bias
gated_position_bias = gated_position_bias.view((-1, tgt_len, tgt_len))
attn_output, attn_weights = self.torch_multi_head_self_attention(
hidden_states, attention_mask, gated_position_bias, output_attentions
)
return attn_output, attn_weights, position_bias
def torch_multi_head_self_attention(
self,
hidden_states: torch.FloatTensor,
attention_mask: Union[torch.LongTensor, torch.BoolTensor],
gated_position_bias: torch.FloatTensor,
output_attentions: bool,
    ) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
"""simple wrapper around torch's multi_head_attention_forward function"""
# self-attention assumes q = k = v
query = key = value = hidden_states.transpose(0, 1)
key_padding_mask = attention_mask.ne(1) if attention_mask is not None else None
# disable bias and add_zero_attn
bias_k = bias_v = None
add_zero_attn = False
# PyTorch 1.3.0 has F.multi_head_attention_forward defined
# so no problem with backwards compatibility
attn_output, attn_weights = F.multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
torch.empty([0]),
torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
bias_k,
bias_v,
add_zero_attn,
self.dropout,
self.out_proj.weight,
self.out_proj.bias,
self.training,
key_padding_mask,
output_attentions,
gated_position_bias,
use_separate_proj_weight=True,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
)
# [Seq_Len, Batch Size, ...] -> [Batch Size, Seq_Len, ...]
attn_output = attn_output.transpose(0, 1)
if attn_weights is not None:
# IMPORTANT: Attention weights are averaged weights
# here which should not be the case. This is an open issue
# on PyTorch: https://github.com/pytorch/pytorch/issues/32590
attn_weights = attn_weights[:, None].broadcast_to(
attn_weights.shape[:1] + (self.num_heads,) + attn_weights.shape[1:]
)
return attn_output, attn_weights
def compute_bias(self, query_length: int, key_length: int) -> torch.FloatTensor:
context_position = torch.arange(query_length, dtype=torch.long)[:, None]
memory_position = torch.arange(key_length, dtype=torch.long)[None, :]
relative_position = memory_position - context_position
relative_position_bucket = self._relative_positions_bucket(relative_position)
relative_position_bucket = relative_position_bucket.to(self.rel_attn_embed.weight.device)
values = self.rel_attn_embed(relative_position_bucket)
values = values.permute([2, 0, 1])
return values
def _relative_positions_bucket(self, relative_positions: torch.FloatTensor) -> torch.FloatTensor:
num_buckets = self.num_buckets // 2
relative_buckets = (relative_positions > 0).to(torch.long) * num_buckets
relative_positions = torch.abs(relative_positions)
max_exact = num_buckets // 2
is_small = relative_positions < max_exact
relative_positions_if_large = torch.log(relative_positions.float() / max_exact)
relative_positions_if_large = relative_positions_if_large / math.log(self.max_distance / max_exact)
relative_positions_if_large = relative_positions_if_large * (num_buckets - max_exact)
        relative_position_if_large = (max_exact + relative_positions_if_large).to(torch.long)
        relative_position_if_large = torch.min(
            relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
        )
        relative_buckets += torch.where(is_small, relative_positions, relative_position_if_large)
return relative_buckets
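# Editor's note: a small sketch (not part of the original file) of the bucketed relative
# position bias computed by WavLMAttention above; the embedding dimension, head count, and
# sequence length are illustrative assumptions.
def _example_relative_position_bias():
    attn = WavLMAttention(embed_dim=8, num_heads=2)
    # one bias value per head and per (query, key) position pair
    bias = attn.compute_bias(query_length=4, key_length=4)
    assert bias.shape == (2, 4, 4)  # (num_heads, query_length, key_length)
    return bias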
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->WavLM
class WavLMFeedForward(nn.Module):
def __init__(self, config):
super().__init__()
self.intermediate_dropout = nn.Dropout(config.activation_dropout)
self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.output_dropout = nn.Dropout(config.hidden_dropout)
def forward(self, hidden_states):
hidden_states = self.intermediate_dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
hidden_states = self.intermediate_dropout(hidden_states)
hidden_states = self.output_dense(hidden_states)
hidden_states = self.output_dropout(hidden_states)
return hidden_states
class WavLMEncoderLayer(nn.Module):
def __init__(self, config: WavLMConfig, has_relative_position_bias: bool = True):
super().__init__()
self.attention = WavLMAttention(
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
num_buckets=config.num_buckets,
max_distance=config.max_bucket_distance,
has_relative_position_bias=has_relative_position_bias,
)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.feed_forward = WavLMFeedForward(config)
self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states, attention_mask=None, position_bias=None, output_attentions=False, index=0):
attn_residual = hidden_states
hidden_states, attn_weights, position_bias = self.attention(
hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
output_attentions=output_attentions,
index=index,
)
hidden_states = self.dropout(hidden_states)
hidden_states = attn_residual + hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states + self.feed_forward(hidden_states)
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states, position_bias)
if output_attentions:
outputs += (attn_weights,)
return outputs
class WavLMEncoderLayerStableLayerNorm(nn.Module):
def __init__(self, config: WavLMConfig, has_relative_position_bias: bool = True):
super().__init__()
self.attention = WavLMAttention(
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
num_buckets=config.num_buckets,
max_distance=config.max_bucket_distance,
has_relative_position_bias=has_relative_position_bias,
)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.feed_forward = WavLMFeedForward(config)
self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states, attention_mask=None, position_bias=None, output_attentions=False):
attn_residual = hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states, attn_weights, position_bias = self.attention(
hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
output_attentions=output_attentions,
)
hidden_states = self.dropout(hidden_states)
hidden_states = attn_residual + hidden_states
hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))
outputs = (hidden_states, position_bias)
if output_attentions:
outputs += (attn_weights,)
return outputs
class WavLMEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.pos_conv_embed = WavLMPositionalConvEmbedding(config)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layers = nn.ModuleList(
[WavLMEncoderLayer(config, has_relative_position_bias=(i == 0)) for i in range(config.num_hidden_layers)]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
# make sure padded tokens output 0
hidden_states[~attention_mask] = 0.0
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
position_bias = None
for i, layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = np.random.uniform(0, 1)
skip_the_layer = self.training and i > 0 and (dropout_probability < self.config.layerdrop)
if not skip_the_layer or deepspeed_zero3_is_enabled:
# under deepspeed zero3 all gpus must run in sync
if self.gradient_checkpointing and self.training:
# create gradient checkpointing function
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer),
hidden_states,
attention_mask,
position_bias,
)
else:
layer_outputs = layer(
hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
output_attentions=output_attentions,
index=i,
)
hidden_states, position_bias = layer_outputs[:2]
if skip_the_layer:
layer_outputs = (None, None)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
class WavLMEncoderStableLayerNorm(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.pos_conv_embed = WavLMPositionalConvEmbedding(config)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layers = nn.ModuleList(
[
WavLMEncoderLayerStableLayerNorm(config, has_relative_position_bias=(i == 0))
for i in range(config.num_hidden_layers)
]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
# make sure padded tokens are not attended to
hidden_states[~attention_mask] = 0
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
hidden_states = self.dropout(hidden_states)
deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
position_bias = None
for i, layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = np.random.uniform(0, 1)
skip_the_layer = self.training and i > 0 and (dropout_probability < self.config.layerdrop)
if not skip_the_layer or deepspeed_zero3_is_enabled:
# under deepspeed zero3 all gpus must run in sync
# XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication
if self.gradient_checkpointing and self.training:
# create gradient checkpointing function
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer),
hidden_states,
attention_mask,
position_bias,
)
else:
layer_outputs = layer(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
position_bias=position_bias,
)
hidden_states, position_bias = layer_outputs[:2]
if skip_the_layer:
layer_outputs = (None, None)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[2],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions
)
class WavLMGumbelVectorQuantizer(nn.Module):
"""
Vector quantization using gumbel softmax. See [CATEGORICAL REPARAMETERIZATION WITH
GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information.
"""
def __init__(self, config):
super().__init__()
self.num_groups = config.num_codevector_groups
self.num_vars = config.num_codevectors_per_group
if config.codevector_dim % self.num_groups != 0:
raise ValueError(
f"`config.codevector_dim {config.codevector_dim} must be divisible"
f" by `config.num_codevector_groups` {self.num_groups} "
"for concatenation."
)
# storage for codebook variables (codewords)
self.codevectors = nn.Parameter(
torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups)
)
self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars)
# can be decayed for training
self.temperature = 2
@staticmethod
def _compute_perplexity(probs):
marginal_probs = probs.mean(dim=0)
perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()
return perplexity
def forward(self, hidden_states):
batch_size, sequence_length, hidden_size = hidden_states.shape
# project to codevector dim
hidden_states = self.weight_proj(hidden_states)
hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1)
if self.training:
            # sample code vector probs via gumbel in a differentiable way
codevector_probs = nn.functional.gumbel_softmax(hidden_states.float(), tau=self.temperature, hard=True)
codevector_probs = codevector_probs.type_as(hidden_states)
# compute perplexity
codevector_soft_dist = torch.softmax(
hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1
)
perplexity = self._compute_perplexity(codevector_soft_dist)
else:
# take argmax in non-differentiable way
            # compute hard codevector distribution (one hot)
codevector_idx = hidden_states.argmax(dim=-1)
codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_(
-1, codevector_idx.view(-1, 1), 1.0
)
codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)
perplexity = self._compute_perplexity(codevector_probs)
codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)
# use probs to retrieve codevectors
codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors
codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)
codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1)
return codevectors, perplexity
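# Editor's note: an illustrative sketch (not part of the original file) of the codebook
# perplexity used above; a uniform distribution over 4 codes in each of 2 groups gives a
# perplexity of roughly 4 per group, i.e. about 8 in total. All sizes are hypothetical.
def _example_codevector_perplexity():
    probs = torch.full((8, 2, 4), 0.25)  # (batch * sequence, groups, codes)
    perplexity = WavLMGumbelVectorQuantizer._compute_perplexity(probs)
    assert torch.isclose(perplexity, torch.tensor(8.0), atol=1e-3)
    return perplexity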
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Adapter with Wav2Vec2->WavLM
class WavLMAdapter(nn.Module):
def __init__(self, config):
super().__init__()
# feature dim might need to be down-projected
if config.output_hidden_size != config.hidden_size:
self.proj = nn.Linear(config.hidden_size, config.output_hidden_size)
self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size)
else:
self.proj = self.proj_layer_norm = None
self.layers = nn.ModuleList(WavLMAdapterLayer(config) for _ in range(config.num_adapter_layers))
self.layerdrop = config.layerdrop
def forward(self, hidden_states):
# down project hidden_states if necessary
if self.proj is not None and self.proj_layer_norm is not None:
hidden_states = self.proj(hidden_states)
hidden_states = self.proj_layer_norm(hidden_states)
hidden_states = hidden_states.transpose(1, 2)
for layer in self.layers:
layerdrop_prob = np.random.random()
if not self.training or (layerdrop_prob > self.layerdrop):
hidden_states = layer(hidden_states)
hidden_states = hidden_states.transpose(1, 2)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2AdapterLayer with Wav2Vec2->WavLM
class WavLMAdapterLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.conv = nn.Conv1d(
config.output_hidden_size,
2 * config.output_hidden_size,
config.adapter_kernel_size,
stride=config.adapter_stride,
padding=1,
)
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = nn.functional.glu(hidden_states, dim=1)
return hidden_states
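# Editor's note: a small sketch (not part of the original file) of the gated linear unit
# applied in the adapter layer above; nn.functional.glu halves the channel dimension by
# gating one half with a sigmoid of the other. The shapes here are hypothetical.
def _example_adapter_glu():
    hidden_states = torch.randn(2, 8, 5)  # (batch, 2 * channels, frames)
    gated = nn.functional.glu(hidden_states, dim=1)
    assert gated.shape == (2, 4, 5)
    return gated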
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PreTrainedModel with Wav2Vec2->WavLM, wav2vec2->wavlm
class WavLMPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = WavLMConfig
base_model_prefix = "wavlm"
main_input_name = "input_values"
_keys_to_ignore_on_load_missing = [r"position_ids"]
supports_gradient_checkpointing = True
def _init_weights(self, module):
"""Initialize the weights"""
# gumbel softmax requires special init
if isinstance(module, WavLMGumbelVectorQuantizer):
module.weight_proj.weight.data.normal_(mean=0.0, std=1)
module.weight_proj.bias.data.zero_()
nn.init.uniform_(module.codevectors)
elif isinstance(module, WavLMPositionalConvEmbedding):
nn.init.normal_(
module.conv.weight,
mean=0,
std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
)
nn.init.constant_(module.conv.bias, 0)
elif isinstance(module, WavLMFeatureProjection):
k = math.sqrt(1 / module.projection.in_features)
nn.init.uniform_(module.projection.weight, a=-k, b=k)
nn.init.uniform_(module.projection.bias, a=-k, b=k)
elif isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, nn.Conv1d):
nn.init.kaiming_normal_(module.weight)
if module.bias is not None:
k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
nn.init.uniform_(module.bias, a=-k, b=k)
def _get_feat_extract_output_lengths(
self, input_lengths: Union[torch.LongTensor, int], add_adapter: Optional[bool] = None
):
"""
Computes the output length of the convolutional layers
"""
add_adapter = self.config.add_adapter if add_adapter is None else add_adapter
def _conv_out_length(input_length, kernel_size, stride):
# 1D convolutional layer output length formula taken
# from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
return torch_int_div(input_length - kernel_size, stride) + 1
for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
if add_adapter:
for _ in range(self.config.num_adapter_layers):
input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride)
return input_lengths
def _get_feature_vector_attention_mask(
self, feature_vector_length: int, attention_mask: torch.LongTensor, add_adapter=None
):
# Effectively attention_mask.sum(-1), but not inplace to be able to run
        # in inference mode.
non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter)
output_lengths = output_lengths.to(torch.long)
batch_size = attention_mask.shape[0]
attention_mask = torch.zeros(
(batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
)
        # these two operations make sure that all values before the output length indices are attended to
attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
return attention_mask
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (WavLMEncoder, WavLMEncoderStableLayerNorm, WavLMFeatureEncoder)):
module.gradient_checkpointing = value
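# Editor's note: two illustrative sketches (not part of the original file) for the helpers
# above. The kernel/stride values mirror the common wav2vec2-style feature extractor and are
# assumptions here, not values read from a WavLMConfig.
def _example_feat_extract_output_length():
    # one second of 16 kHz audio shrinks to 49 feature frames (a hop of roughly 20 ms)
    length = 16000
    for kernel, stride in zip((10, 3, 3, 3, 3, 2, 2), (5, 2, 2, 2, 2, 2, 2)):
        length = (length - kernel) // stride + 1
    assert length == 49
    return length
def _example_feature_vector_attention_mask():
    # the flip/cumsum/flip trick turns a "last valid frame" marker into a prefix mask;
    # hypothetical output lengths of 3 and 5 over 6 feature frames
    mask = torch.zeros(2, 6, dtype=torch.long)
    mask[(torch.arange(2), torch.tensor([3, 5]) - 1)] = 1
    mask = mask.flip([-1]).cumsum(-1).flip([-1]).bool()
    # row 0 -> [True, True, True, False, False, False]
    # row 1 -> [True, True, True, True, True, False]
    return mask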
WAVLM_START_DOCSTRING = r"""
    WavLM was proposed in [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech
    Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu,
    Furu Wei, Michael Zeng, et al.
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, etc.).
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`WavLMConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
WAVLM_INPUTS_DOCSTRING = r"""
Args:
input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file
into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile library (*pip install
            soundfile*). To prepare the array into *input_values*, the [`Wav2Vec2Processor`] should be used for padding
            and conversion into a tensor of type *torch.FloatTensor*. See [`Wav2Vec2Processor.__call__`] for details.
attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
<Tip warning={true}>
`attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==
True`. For all models whose processor has `config.return_attention_mask == False`, `attention_mask` should
**not** be passed to avoid degraded performance when doing batched inference. For such models
`input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these
models also yield slightly different results depending on whether `input_values` is padded or not.
</Tip>
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare WavLM Model transformer outputting raw hidden-states without any specific head on top.",
WAVLM_START_DOCSTRING,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM, WavLMBaseModelOutput->Wav2Vec2BaseModelOutput
class WavLMModel(WavLMPreTrainedModel):
def __init__(self, config: WavLMConfig):
super().__init__(config)
self.config = config
self.feature_extractor = WavLMFeatureEncoder(config)
self.feature_projection = WavLMFeatureProjection(config)
# model only needs masking vector if mask prob is > 0.0
if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())
if config.do_stable_layer_norm:
self.encoder = WavLMEncoderStableLayerNorm(config)
else:
self.encoder = WavLMEncoder(config)
self.adapter = WavLMAdapter(config) if config.add_adapter else None
# Initialize weights and apply final processing
self.post_init()
def freeze_feature_extractor(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
"Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning,
)
self.freeze_feature_encoder()
def freeze_feature_encoder(self):
"""
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
self.feature_extractor._freeze_parameters()
def _mask_hidden_states(
self,
hidden_states: torch.FloatTensor,
mask_time_indices: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
):
"""
Masks extracted features along time axis and/or along feature axis according to
[SpecAugment](https://arxiv.org/abs/1904.08779).
"""
# `config.apply_spec_augment` can set masking to False
if not getattr(self.config, "apply_spec_augment", True):
return hidden_states
# generate indices & apply SpecAugment along time axis
batch_size, sequence_length, hidden_size = hidden_states.size()
if mask_time_indices is not None:
# apply SpecAugment along time axis with given mask_time_indices
hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
elif self.config.mask_time_prob > 0 and self.training:
mask_time_indices = _compute_mask_indices(
(batch_size, sequence_length),
mask_prob=self.config.mask_time_prob,
mask_length=self.config.mask_time_length,
attention_mask=attention_mask,
min_masks=self.config.mask_time_min_masks,
)
mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
if self.config.mask_feature_prob > 0 and self.training:
# generate indices & apply SpecAugment along feature axis
mask_feature_indices = _compute_mask_indices(
(batch_size, hidden_size),
mask_prob=self.config.mask_feature_prob,
mask_length=self.config.mask_feature_length,
min_masks=self.config.mask_feature_min_masks,
)
mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
hidden_states[mask_feature_indices] = 0
return hidden_states
@add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_PROCESSOR_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=Wav2Vec2BaseModelOutput,
config_class=_CONFIG_FOR_DOC,
modality="audio",
expected_output=_EXPECTED_OUTPUT_SHAPE,
)
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
mask_time_indices: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, Wav2Vec2BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
extract_features = self.feature_extractor(input_values)
extract_features = extract_features.transpose(1, 2)
if attention_mask is not None:
# compute reduced attention_mask corresponding to feature vectors
attention_mask = self._get_feature_vector_attention_mask(
extract_features.shape[1], attention_mask, add_adapter=False
)
hidden_states, extract_features = self.feature_projection(extract_features)
hidden_states = self._mask_hidden_states(
hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask
)
encoder_outputs = self.encoder(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = encoder_outputs[0]
if self.adapter is not None:
hidden_states = self.adapter(hidden_states)
if not return_dict:
return (hidden_states, extract_features) + encoder_outputs[1:]
return Wav2Vec2BaseModelOutput(
last_hidden_state=hidden_states,
extract_features=extract_features,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
@add_start_docstrings(
"""WavLM Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
WAVLM_START_DOCSTRING,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM
class WavLMForCTC(WavLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.wavlm = WavLMModel(config)
self.dropout = nn.Dropout(config.final_dropout)
if config.vocab_size is None:
raise ValueError(
f"You are trying to instantiate {self.__class__} with a configuration that "
"does not define the vocabulary size of the language model head. Please "
"instantiate the model as follows: `WavLMForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
"or define `vocab_size` of your model's configuration."
)
output_hidden_size = (
config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
)
self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)
# Initialize weights and apply final processing
self.post_init()
def freeze_feature_extractor(self):
"""
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
"Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning,
)
self.freeze_feature_encoder()
def freeze_feature_encoder(self):
"""
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
self.wavlm.feature_extractor._freeze_parameters()
@add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_PROCESSOR_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=CausalLMOutput,
config_class=_CONFIG_FOR_DOC,
expected_output=_CTC_EXPECTED_OUTPUT,
expected_loss=_CTC_EXPECTED_LOSS,
)
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
) -> Union[Tuple, CausalLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
config.vocab_size - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.wavlm(
input_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
hidden_states = self.dropout(hidden_states)
logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
if labels.max() >= self.config.vocab_size:
raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
# retrieve loss input_lengths from attention_mask
attention_mask = (
attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
)
input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
# assuming that padded tokens are filled with -100
# when not being attended to
labels_mask = labels >= 0
target_lengths = labels_mask.sum(-1)
flattened_targets = labels.masked_select(labels_mask)
# ctc_loss doesn't support fp16
log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
with torch.backends.cudnn.flags(enabled=False):
loss = nn.functional.ctc_loss(
log_probs,
flattened_targets,
input_lengths,
target_lengths,
blank=self.config.pad_token_id,
reduction=self.config.ctc_loss_reduction,
zero_infinity=self.config.ctc_zero_infinity,
)
if not return_dict:
output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutput(
loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
)
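# Editor's note: a minimal sketch (not part of the original file) of the CTC label convention
# used in the loss above: padded label positions are filled with -100 and dropped before the
# loss is computed. The label values below are hypothetical.
def _example_ctc_label_masking():
    labels = torch.tensor([[5, 2, 9, -100], [7, 3, -100, -100]])
    labels_mask = labels >= 0
    target_lengths = labels_mask.sum(-1)                   # tensor([3, 2])
    flattened_targets = labels.masked_select(labels_mask)  # tensor([5, 2, 9, 7, 3])
    return flattened_targets, target_lengths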
@add_start_docstrings(
"""
WavLM Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like
SUPERB Keyword Spotting.
""",
WAVLM_START_DOCSTRING,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM
class WavLMForSequenceClassification(WavLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
if hasattr(config, "add_adapter") and config.add_adapter:
raise ValueError(
"Sequence classification does not support the use of WavLM adapters (config.add_adapter=True)"
)
self.wavlm = WavLMModel(config)
num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
if config.use_weighted_layer_sum:
self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
def freeze_feature_extractor(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
"Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning,
)
self.freeze_feature_encoder()
def freeze_feature_encoder(self):
"""
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
self.wavlm.feature_extractor._freeze_parameters()
def freeze_base_model(self):
"""
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
"""
for param in self.wavlm.parameters():
param.requires_grad = False
@add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_FEAT_EXTRACTOR_FOR_DOC,
checkpoint=_SEQ_CLASS_CHECKPOINT,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
modality="audio",
expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
)
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
) -> Union[Tuple, SequenceClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
outputs = self.wavlm(
input_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if self.config.use_weighted_layer_sum:
hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
hidden_states = torch.stack(hidden_states, dim=1)
norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
else:
hidden_states = outputs[0]
hidden_states = self.projector(hidden_states)
if attention_mask is None:
pooled_output = hidden_states.mean(dim=1)
else:
padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
hidden_states[~padding_mask] = 0.0
pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
WavLM Model with a frame classification head on top for tasks like Speaker Diarization.
""",
WAVLM_START_DOCSTRING,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForAudioFrameClassification with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM
class WavLMForAudioFrameClassification(WavLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
if hasattr(config, "add_adapter") and config.add_adapter:
raise ValueError(
"Audio frame classification does not support the use of WavLM adapters (config.add_adapter=True)"
)
self.wavlm = WavLMModel(config)
num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
if config.use_weighted_layer_sum:
self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def freeze_feature_extractor(self):
"""
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
"Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning,
)
self.freeze_feature_encoder()
def freeze_feature_encoder(self):
"""
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
self.wavlm.feature_extractor._freeze_parameters()
def freeze_base_model(self):
"""
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
"""
for param in self.wavlm.parameters():
param.requires_grad = False
@add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_FEAT_EXTRACTOR_FOR_DOC,
checkpoint=_FRAME_CLASS_CHECKPOINT,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
modality="audio",
expected_output=_FRAME_EXPECTED_OUTPUT,
)
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, TokenClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
outputs = self.wavlm(
input_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if self.config.use_weighted_layer_sum:
hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
hidden_states = torch.stack(hidden_states, dim=1)
norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
else:
hidden_states = outputs[0]
logits = self.classifier(hidden_states)
if not return_dict:
output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
return output
return TokenClassifierOutput(
loss=None,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.AMSoftmaxLoss
class AMSoftmaxLoss(nn.Module):
def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4):
super(AMSoftmaxLoss, self).__init__()
self.scale = scale
self.margin = margin
self.num_labels = num_labels
self.weight = nn.Parameter(torch.randn(input_dim, num_labels), requires_grad=True)
self.loss = nn.CrossEntropyLoss()
def forward(self, hidden_states, labels):
labels = labels.flatten()
weight = nn.functional.normalize(self.weight, dim=0)
hidden_states = nn.functional.normalize(hidden_states, dim=1)
cos_theta = torch.mm(hidden_states, weight)
psi = cos_theta - self.margin
onehot = nn.functional.one_hot(labels, self.num_labels)
logits = self.scale * torch.where(onehot.bool(), psi, cos_theta)
loss = self.loss(logits, labels)
return loss
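# Editor's note: an illustrative sketch (not part of the original file) of the additive-margin
# softmax loss above; the feature dimension, label count, and batch size are arbitrary assumptions.
def _example_am_softmax_loss():
    criterion = AMSoftmaxLoss(input_dim=16, num_labels=4)
    hidden_states = torch.randn(8, 16)
    labels = torch.randint(0, 4, (8,))
    # the margin is subtracted from the target-class cosine similarity before scaling by 30
    return criterion(hidden_states, labels)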
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.TDNNLayer
class TDNNLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id]
self.out_conv_dim = config.tdnn_dim[layer_id]
self.kernel_size = config.tdnn_kernel[layer_id]
self.dilation = config.tdnn_dilation[layer_id]
self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim)
self.activation = nn.ReLU()
def forward(self, hidden_states):
hidden_states = hidden_states.unsqueeze(1)
hidden_states = nn.functional.unfold(
hidden_states,
(self.kernel_size, self.in_conv_dim),
stride=(1, self.in_conv_dim),
dilation=(self.dilation, 1),
)
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.kernel(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
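# Illustrative note, not part of the original file: the unfold call above turns a 1D TDNN into a single
# matrix multiply. For an input of shape (batch, time, in_conv_dim), each output step sees `kernel_size`
# frames spaced `dilation` apart, so the time axis shrinks to time - dilation * (kernel_size - 1).
def _tdnn_shape_example(layer: "TDNNLayer", batch_size: int = 2, num_frames: int = 50) -> torch.Size:
    frames = torch.randn(batch_size, num_frames, layer.in_conv_dim)  # hypothetical frame-level features
    out = layer(frames)
    return out.shape  # (batch_size, num_frames - layer.dilation * (layer.kernel_size - 1), layer.out_conv_dim)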
@add_start_docstrings(
"""
WavLM Model with an XVector feature extraction head on top for tasks like Speaker Verification.
""",
WAVLM_START_DOCSTRING,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForXVector with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM
class WavLMForXVector(WavLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.wavlm = WavLMModel(config)
num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
if config.use_weighted_layer_sum:
self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0])
tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))]
self.tdnn = nn.ModuleList(tdnn_layers)
self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim)
self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim)
self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels)
self.init_weights()
def freeze_feature_extractor(self):
"""
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
self.freeze_feature_encoder()
def freeze_feature_encoder(self):
"""
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
self.wavlm.feature_extractor._freeze_parameters()
def freeze_base_model(self):
"""
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
"""
for param in self.wavlm.parameters():
param.requires_grad = False
def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
"""
Computes the output length of the TDNN layers
"""
def _conv_out_length(input_length, kernel_size, stride):
# 1D convolutional layer output length formula taken
# from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
return (input_length - kernel_size) // stride + 1
for kernel_size in self.config.tdnn_kernel:
input_lengths = _conv_out_length(input_lengths, kernel_size, 1)
return input_lengths
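    # Worked example (illustrative, not part of the original file): with stride 1 the formula above removes
    # (kernel_size - 1) frames per TDNN layer, so for an assumed config.tdnn_kernel = (5, 3, 3, 1, 1) an
    # input of 200 frames becomes 200 -> 196 -> 194 -> 192 -> 192 -> 192. Note that, as written, the
    # computation does not account for the dilation used inside TDNNLayer.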
@add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_FEAT_EXTRACTOR_FOR_DOC,
checkpoint=_XVECTOR_CHECKPOINT,
output_type=XVectorOutput,
config_class=_CONFIG_FOR_DOC,
modality="audio",
expected_output=_XVECTOR_EXPECTED_OUTPUT,
)
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
) -> Union[Tuple, XVectorOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
outputs = self.wavlm(
input_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if self.config.use_weighted_layer_sum:
hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
hidden_states = torch.stack(hidden_states, dim=1)
norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
else:
hidden_states = outputs[0]
hidden_states = self.projector(hidden_states)
for tdnn_layer in self.tdnn:
hidden_states = tdnn_layer(hidden_states)
# Statistic Pooling
if attention_mask is None:
mean_features = hidden_states.mean(dim=1)
std_features = hidden_states.std(dim=1)
else:
feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1))
tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths)
mean_features = []
std_features = []
for i, length in enumerate(tdnn_output_lengths):
mean_features.append(hidden_states[i, :length].mean(dim=0))
std_features.append(hidden_states[i, :length].std(dim=0))
mean_features = torch.stack(mean_features)
std_features = torch.stack(std_features)
statistic_pooling = torch.cat([mean_features, std_features], dim=-1)
output_embeddings = self.feature_extractor(statistic_pooling)
logits = self.classifier(output_embeddings)
loss = None
if labels is not None:
loss = self.objective(logits, labels)
if not return_dict:
output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:]
return ((loss,) + output) if loss is not None else output
return XVectorOutput(
loss=loss,
logits=logits,
embeddings=output_embeddings,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
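# Illustrative usage sketch, not part of the original file: speaker verification typically compares the
# `embeddings` returned above with cosine similarity against a tuned threshold. The 0.86 threshold is a
# placeholder assumption, not a value prescribed by this module.
def _xvector_verification_example(model: "WavLMForXVector", input_values_1, input_values_2, threshold: float = 0.86) -> bool:
    with torch.no_grad():
        embeddings_1 = model(input_values_1).embeddings
        embeddings_2 = model(input_values_2).embeddings
    similarity = nn.functional.cosine_similarity(embeddings_1, embeddings_2, dim=-1)
    return bool((similarity > threshold).all())  # True if the utterances are judged to come from the same speaker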
| [
"torch.nn.Linear",
"torch.nn.functional.unfold",
"torch.cat",
"torch.stack",
"torch.nn.ModuleList",
"torch.nn.init.kaiming_normal_",
"torch.ones",
"torch.nn.CrossEntropyLoss",
"torch.where",
"torch.nn.functional.ctc_loss",
"torch.sigmoid",
"torch.nn.LayerNorm",
"torch.nn.Conv1d",
"torch.nn.init.constant_",
"torch.FloatTensor",
"torch.abs",
"torch.tensor",
"torch.empty",
"torch.zeros",
"torch.nn.functional.one_hot",
"torch.nn.GroupNorm",
"torch.mm",
"torch.nn.ReLU",
"torch.full_like",
"torch.randn",
"torch.nn.functional.log_softmax",
"torch.nn.functional.softmax",
"torch.nn.init.uniform_",
"torch.nn.utils.weight_norm",
"torch.log",
"torch.backends.cudnn.flags",
"torch.nn.functional.glu",
"torch.nn.Dropout",
"torch.nn.functional.normalize",
"torch.arange",
"torch.ones_like",
"torch.nn.Embedding"
] | 1.0 | Lemswasabi/transformers | 1762ded30a49649bdd5f8f5ee38b46dea051026a |
1.6 | from torch.nn import Embedding
import pytest
from fixtures import *
from perceiver_pytorch.modalities import InputModalityWithEmbedding
from perceiver_pytorch.multi_modality_with_text_perceiver import MultiModalityWithTextPerceiver
def test_embedding_for_layer(text_inputs):
text_modality = InputModalityWithEmbedding(
name='text',
input_channels=1, # 1 channel for long ids representing tokens
input_axis=1, # number of axes, 2 for images
num_freq_bands=6, # number of freq bands, with original value (2 * K + 1)
max_freq=8., # maximum frequency, hyperparameter depending on how fine the data is
embedding=Embedding(32000, text_embedding_dim)
)
    assert text_inputs.size() == (3, 512, 1)
    embedded = text_modality.embedding(text_inputs)
    assert embedded.size() == (3, 512, 1, 256)
    assert text_modality.embedding_for_layer(embedded=embedded.squeeze(2), layer_index=0, depth=4).size() == (3, 512, 256 // 4)
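# Note (illustrative, not part of the original test): embedding_for_layer is expected to give each of the
# `depth` perceiver layers its own slice of the token embedding, so with text_embedding_dim = 256 and
# depth = 4 every layer receives a (3, 512, 64) chunk, which is exactly what the assertion above checks.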
def test_multimodality_forward_image_text(image_inputs,
text_inputs,
targets):
image_modality = InputModalityWithEmbedding(
name='image',
input_channels=3, # number of channels for each token of the input
input_axis=2, # number of axes, 2 for images
num_freq_bands=6, # number of freq bands, with original value (2 * K + 1)
max_freq=4., # maximum frequency, hyperparameter depending on how fine the data is
)
text_modality = InputModalityWithEmbedding(
name='text',
input_channels=1, # 1 channel for long ids representing tokens
input_axis=1, # number of axes, 2 for images
num_freq_bands=6, # number of freq bands, with original value (2 * K + 1)
max_freq=8., # maximum frequency, hyperparameter depending on how fine the data is
embedding=Embedding(32000, text_embedding_dim)
)
model = MultiModalityWithTextPerceiver(
modalities=(image_modality, text_modality),
depth=depth, # depth of net
num_latent_blocks_per_layer=2,
num_latents=12,
# number of latents, or induced set points, or centroids. different papers giving it different names
latent_dim=64, # latent dimension
cross_heads=1, # number of heads for cross attention. paper said 1
latent_heads=8, # number of heads for latent self attention, 8
cross_dim_head=64,
latent_dim_head=64,
num_classes=num_classes, # output number of classes
attn_dropout=0.,
ff_dropout=0.,
weight_tie_layers=True,
# whether to weight tie layers (optional, as indicated in the diagram)
)
result = model({'image': image_inputs,
'text': text_inputs})
assert result is not None
| [
"torch.nn.Embedding"
] | 1.6 | fac2003/perceiver-pytorch | b07d5154c5dee63684c59f57d02a1b405701845f |
1.9 | #! /usr/bin/env python
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
from typing import Any, Dict, List, Union
import numpy as np
import torch
from ludwig.constants import (
ACCURACY,
CATEGORY,
COLUMN,
FILL_WITH_CONST,
HIDDEN,
HITS_AT_K,
LOGITS,
LOSS,
MISSING_VALUE_STRATEGY_OPTIONS,
NAME,
PREDICTIONS,
PROBABILITIES,
PROBABILITY,
PROC_COLUMN,
PROJECTION_INPUT,
SOFTMAX_CROSS_ENTROPY,
SUM,
TIED,
TYPE,
)
from ludwig.features.base_feature import BaseFeatureMixin, InputFeature, OutputFeature, PredictModule
from ludwig.utils import output_feature_utils
from ludwig.utils.eval_utils import ConfusionMatrix
from ludwig.utils.math_utils import int_type, softmax
from ludwig.utils.misc_utils import set_default_value, set_default_values
from ludwig.utils.strings_utils import create_vocabulary, UNKNOWN_SYMBOL
logger = logging.getLogger(__name__)
class _CategoryPreprocessing(torch.nn.Module):
def __init__(self, metadata: Dict[str, Any]):
super().__init__()
self.str2idx = metadata["str2idx"]
self.unk = self.str2idx[UNKNOWN_SYMBOL]
def forward(self, v: Union[List[str], torch.Tensor]):
if isinstance(v, torch.Tensor):
raise ValueError(f"Unsupported input: {v}")
indices = [self.str2idx.get(s.strip(), self.unk) for s in v]
return torch.tensor(indices, dtype=torch.int32)
class _CategoryPostprocessing(torch.nn.Module):
def __init__(self, metadata: Dict[str, Any]):
super().__init__()
self.idx2str = {i: v for i, v in enumerate(metadata["idx2str"])}
self.predictions_key = PREDICTIONS
self.probabilities_key = PROBABILITIES
self.unk = ""
def forward(self, preds: Dict[str, torch.Tensor]) -> Dict[str, Any]:
predictions = preds[self.predictions_key]
inv_preds = [self.idx2str.get(pred, self.unk) for pred in predictions]
return {
self.predictions_key: inv_preds,
self.probabilities_key: preds[self.probabilities_key],
}
class _CategoryPredict(PredictModule):
def forward(self, inputs: Dict[str, torch.Tensor], feature_name: str) -> Dict[str, torch.Tensor]:
logits = output_feature_utils.get_output_feature_tensor(inputs, feature_name, self.logits_key)
probabilities = torch.softmax(logits, -1)
predictions = torch.argmax(logits, -1)
predictions = predictions.long()
# EXPECTED SHAPE OF RETURNED TENSORS
# predictions: [batch_size]
# probabilities: [batch_size, num_classes]
# logits: [batch_size, num_classes]
return {self.predictions_key: predictions, self.probabilities_key: probabilities, self.logits_key: logits}
class CategoryFeatureMixin(BaseFeatureMixin):
@staticmethod
def type():
return CATEGORY
@staticmethod
def preprocessing_defaults():
return {
"most_common": 10000,
"lowercase": False,
"missing_value_strategy": FILL_WITH_CONST,
"fill_value": UNKNOWN_SYMBOL,
}
@staticmethod
def preprocessing_schema():
return {
"most_common": {"type": "integer", "minimum": 0},
"lowercase": {"type": "boolean"},
"missing_value_strategy": {"type": "string", "enum": MISSING_VALUE_STRATEGY_OPTIONS},
"fill_value": {"type": "string"},
"computed_fill_value": {"type": "string"},
}
@staticmethod
def cast_column(column, backend):
return column
@staticmethod
def get_feature_meta(column, preprocessing_parameters, backend):
column = column.astype(str)
idx2str, str2idx, str2freq, _, _, _, _ = create_vocabulary(
column,
"stripped",
num_most_frequent=preprocessing_parameters["most_common"],
lowercase=preprocessing_parameters["lowercase"],
add_special_symbols=False,
processor=backend.df_engine,
)
return {"idx2str": idx2str, "str2idx": str2idx, "str2freq": str2freq, "vocab_size": len(str2idx)}
@staticmethod
def feature_data(column, metadata):
return column.map(
lambda x: (
metadata["str2idx"][x.strip()]
if x.strip() in metadata["str2idx"]
else metadata["str2idx"][UNKNOWN_SYMBOL]
)
).astype(int_type(metadata["vocab_size"]))
@staticmethod
def add_feature_data(
feature_config, input_df, proc_df, metadata, preprocessing_parameters, backend, skip_save_processed_input
):
proc_df[feature_config[PROC_COLUMN]] = CategoryFeatureMixin.feature_data(
input_df[feature_config[COLUMN]].astype(str),
metadata[feature_config[NAME]],
)
return proc_df
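# Illustrative sketch, not part of the original file: feature_data maps raw category strings to integer
# ids through metadata["str2idx"], falling back to the <UNK> id for unseen values. The pandas import and
# the toy vocabulary below are assumptions made only for this example.
def _category_feature_data_example():
    import pandas as pd
    metadata = {"str2idx": {UNKNOWN_SYMBOL: 0, "cat": 1, "dog": 2}, "vocab_size": 3}
    column = pd.Series(["cat", "dog", "bird"])  # "bird" was never seen, so it maps to the <UNK> id
    return CategoryFeatureMixin.feature_data(column, metadata)  # -> ids [1, 2, 0] with a small integer dtype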
class CategoryInputFeature(CategoryFeatureMixin, InputFeature):
encoder = "dense"
def __init__(self, feature, encoder_obj=None):
super().__init__(feature)
self.overwrite_defaults(feature)
if encoder_obj:
self.encoder_obj = encoder_obj
else:
self.encoder_obj = self.initialize_encoder(feature)
def forward(self, inputs):
assert isinstance(inputs, torch.Tensor)
assert (
inputs.dtype == torch.int8
or inputs.dtype == torch.int16
or inputs.dtype == torch.int32
or inputs.dtype == torch.int64
)
assert len(inputs.shape) == 1 or (len(inputs.shape) == 2 and inputs.shape[1] == 1)
if len(inputs.shape) == 1:
inputs = inputs.unsqueeze(dim=1)
if inputs.dtype == torch.int8 or inputs.dtype == torch.int16:
inputs = inputs.type(torch.int)
encoder_output = self.encoder_obj(inputs)
return {"encoder_output": encoder_output}
@property
def input_dtype(self):
return torch.int32
@property
def input_shape(self) -> torch.Size:
return torch.Size([1])
@property
def output_shape(self) -> torch.Size:
return torch.Size(self.encoder_obj.output_shape)
@staticmethod
def update_config_with_metadata(input_feature, feature_metadata, *args, **kwargs):
input_feature["vocab"] = feature_metadata["idx2str"]
@staticmethod
def populate_defaults(input_feature):
set_default_value(input_feature, TIED, None)
@staticmethod
def create_preproc_module(metadata: Dict[str, Any]) -> torch.nn.Module:
return _CategoryPreprocessing(metadata)
class CategoryOutputFeature(CategoryFeatureMixin, OutputFeature):
decoder = "classifier"
loss = {TYPE: SOFTMAX_CROSS_ENTROPY}
metric_functions = {LOSS: None, ACCURACY: None, HITS_AT_K: None}
default_validation_metric = ACCURACY
num_classes = 0
top_k = 3
def __init__(self, feature, output_features: Dict[str, OutputFeature]):
super().__init__(feature, output_features)
self.overwrite_defaults(feature)
self.decoder_obj = self.initialize_decoder(feature)
self._setup_loss()
self._setup_metrics()
def logits(self, inputs, **kwargs): # hidden
hidden = inputs[HIDDEN]
# EXPECTED SHAPES FOR RETURNED TENSORS
# logits: shape [batch_size, num_classes]
# hidden: shape [batch_size, size of final fully connected layer]
return {LOGITS: self.decoder_obj(hidden), PROJECTION_INPUT: hidden}
def create_predict_module(self) -> PredictModule:
return _CategoryPredict()
def get_prediction_set(self):
return {PREDICTIONS, PROBABILITIES, LOGITS}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.input_size])
@classmethod
def get_output_dtype(cls):
return torch.int64
@property
def output_shape(self) -> torch.Size:
return torch.Size([1])
def metric_kwargs(self):
return dict(top_k=self.top_k)
@staticmethod
def update_config_with_metadata(output_feature, feature_metadata, *args, **kwargs):
output_feature["num_classes"] = feature_metadata["vocab_size"]
output_feature["top_k"] = min(output_feature["num_classes"], output_feature["top_k"])
if isinstance(output_feature[LOSS]["class_weights"], (list, tuple)):
if len(output_feature[LOSS]["class_weights"]) != output_feature["num_classes"]:
raise ValueError(
"The length of class_weights ({}) is not compatible with "
"the number of classes ({}) for feature {}. "
"Check the metadata JSON file to see the classes "
"and their order and consider there needs to be a weight "
"for the <UNK> class too.".format(
len(output_feature[LOSS]["class_weights"]),
output_feature["num_classes"],
output_feature[COLUMN],
)
)
if isinstance(output_feature[LOSS]["class_weights"], dict):
if feature_metadata["str2idx"].keys() != output_feature[LOSS]["class_weights"].keys():
raise ValueError(
"The class_weights keys ({}) are not compatible with "
"the classes ({}) of feature {}. "
"Check the metadata JSON file to see the classes "
"and consider there needs to be a weight "
"for the <UNK> class too.".format(
output_feature[LOSS]["class_weights"].keys(),
feature_metadata["str2idx"].keys(),
output_feature[COLUMN],
)
)
else:
class_weights = output_feature[LOSS]["class_weights"]
idx2str = feature_metadata["idx2str"]
class_weights_list = [class_weights[s] for s in idx2str]
output_feature[LOSS]["class_weights"] = class_weights_list
if output_feature[LOSS]["class_similarities_temperature"] > 0:
if "class_similarities" in output_feature[LOSS]:
similarities = output_feature[LOSS]["class_similarities"]
temperature = output_feature[LOSS]["class_similarities_temperature"]
curr_row = 0
first_row_length = 0
is_first_row = True
for row in similarities:
if is_first_row:
first_row_length = len(row)
is_first_row = False
curr_row += 1
else:
curr_row_length = len(row)
if curr_row_length != first_row_length:
raise ValueError(
"The length of row {} of the class_similarities "
"of {} is {}, different from the length of "
"the first row {}. All rows must have "
"the same length.".format(
curr_row, output_feature[COLUMN], curr_row_length, first_row_length
)
)
else:
curr_row += 1
all_rows_length = first_row_length
if all_rows_length != len(similarities):
raise ValueError(
"The class_similarities matrix of {} has "
"{} rows and {} columns, "
"their number must be identical.".format(
output_feature[COLUMN], len(similarities), all_rows_length
)
)
if all_rows_length != output_feature["num_classes"]:
raise ValueError(
"The size of the class_similarities matrix of {} is "
"{}, different from the number of classes ({}). "
"Check the metadata JSON file to see the classes "
"and their order and "
"consider <UNK> class too.".format(
output_feature[COLUMN], all_rows_length, output_feature["num_classes"]
)
)
similarities = np.array(similarities, dtype=np.float32)
for i in range(len(similarities)):
similarities[i, :] = softmax(similarities[i, :], temperature=temperature)
output_feature[LOSS]["class_similarities"] = similarities
else:
raise ValueError(
"class_similarities_temperature > 0, "
"but no class_similarities are provided "
"for feature {}".format(output_feature[COLUMN])
)
@staticmethod
def calculate_overall_stats(predictions, targets, train_set_metadata):
overall_stats = {}
confusion_matrix = ConfusionMatrix(targets, predictions[PREDICTIONS], labels=train_set_metadata["idx2str"])
overall_stats["confusion_matrix"] = confusion_matrix.cm.tolist()
overall_stats["overall_stats"] = confusion_matrix.stats()
overall_stats["per_class_stats"] = confusion_matrix.per_class_stats()
return overall_stats
def postprocess_predictions(
self,
predictions,
metadata,
output_directory,
backend,
):
predictions_col = f"{self.feature_name}_{PREDICTIONS}"
if predictions_col in predictions:
if "idx2str" in metadata:
predictions[predictions_col] = backend.df_engine.map_objects(
predictions[predictions_col], lambda pred: metadata["idx2str"][pred]
)
probabilities_col = f"{self.feature_name}_{PROBABILITIES}"
if probabilities_col in predictions:
prob_col = f"{self.feature_name}_{PROBABILITY}"
predictions[prob_col] = predictions[probabilities_col].map(max)
predictions[probabilities_col] = backend.df_engine.map_objects(
predictions[probabilities_col], lambda pred: pred.tolist()
)
if "idx2str" in metadata:
for i, label in enumerate(metadata["idx2str"]):
key = f"{probabilities_col}_{label}"
# Use default param to force a capture before the loop completes, see:
# https://stackoverflow.com/questions/2295290/what-do-lambda-function-closures-capture
predictions[key] = backend.df_engine.map_objects(
predictions[probabilities_col],
lambda prob, i=i: prob[i],
)
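                    # Note (illustrative, not in the original code): without the `i=i` default argument above,
                    # every lambda would close over the same loop variable and all probability columns would read
                    # the last value of `i`. Compare:
                    #     fns = [lambda: i for i in range(3)]      # [f() for f in fns] == [2, 2, 2]
                    #     fns = [lambda i=i: i for i in range(3)]  # [f() for f in fns] == [0, 1, 2]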
top_k_col = f"{self.feature_name}_predictions_top_k"
if top_k_col in predictions:
if "idx2str" in metadata:
predictions[top_k_col] = backend.df_engine.map_objects(
predictions[top_k_col], lambda pred_top_k: [metadata["idx2str"][pred] for pred in pred_top_k]
)
return predictions
@staticmethod
def populate_defaults(output_feature):
# If Loss is not defined, set an empty dictionary
set_default_value(output_feature, LOSS, {})
# Populate the default values for LOSS if they aren't defined already
set_default_values(
output_feature[LOSS],
{
TYPE: "softmax_cross_entropy",
"labels_smoothing": 0,
"class_weights": 1,
"robust_lambda": 0,
"confidence_penalty": 0,
"class_similarities_temperature": 0,
"weight": 1,
},
)
set_default_values(
output_feature, {"top_k": 3, "dependencies": [], "reduce_input": SUM, "reduce_dependencies": SUM}
)
@staticmethod
def create_postproc_module(metadata: Dict[str, Any]) -> torch.nn.Module:
return _CategoryPostprocessing(metadata)
| [
"torch.Size",
"torch.softmax",
"torch.tensor",
"torch.argmax"
] | 1.9.0 | hfurkanbozkurt/ludwig | bfcbd52237c73702764e733ede4351e0146394bd |
1.7 | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.networks.layers import HilbertTransform
from monai.utils import OptionalImportError
from tests.utils import SkipIfModule, SkipIfNoModule, skip_if_no_cuda
def create_expected_numpy_output(input_datum, **kwargs):
x = np.fft.fft(input_datum.cpu().numpy() if input_datum.device.type == "cuda" else input_datum.numpy(), **kwargs)
f = np.fft.fftfreq(x.shape[kwargs["axis"]])
u = np.heaviside(f, 0.5)
new_dims_before = kwargs["axis"]
new_dims_after = len(x.shape) - kwargs["axis"] - 1
for _ in range(new_dims_before):
u = np.expand_dims(u, 0)
for _ in range(new_dims_after):
u = np.expand_dims(u, -1)
ht = np.fft.ifft(x * 2 * u, axis=kwargs["axis"])
return ht
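# Background note (illustrative, not part of the original test): the reference above builds the analytic
# signal x_a = x + i * H{x} in the frequency domain by zeroing negative-frequency bins and doubling the
# positive ones (np.heaviside(f, 0.5) gives weights 0 / 0.5 / 1, so 2 * u leaves the DC bin at 1), then
# applying the inverse FFT along the requested axis. HilbertTransform is expected to match this reference
# to within the absolute tolerance used in the test cases below.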
cpu = torch.device("cpu")
n_samples = 500
hann_windowed_sine = np.sin(2 * np.pi * 10 * np.linspace(0, 1, n_samples)) * np.hanning(n_samples)
# CPU TEST DATA
cpu_input_data = {}
cpu_input_data["1D"] = torch.as_tensor(hann_windowed_sine, device=cpu).unsqueeze(0).unsqueeze(0)
cpu_input_data["2D"] = (
torch.as_tensor(np.stack([hann_windowed_sine] * 10, axis=1), device=cpu).unsqueeze(0).unsqueeze(0)
)
cpu_input_data["3D"] = (
torch.as_tensor(np.stack([np.stack([hann_windowed_sine] * 10, axis=1)] * 10, axis=2), device=cpu)
.unsqueeze(0)
.unsqueeze(0)
)
cpu_input_data["1D 2CH"] = torch.as_tensor(np.stack([hann_windowed_sine] * 10, axis=1), device=cpu).unsqueeze(0)
cpu_input_data["2D 2CH"] = torch.as_tensor(
np.stack([np.stack([hann_windowed_sine] * 10, axis=1)] * 10, axis=2), device=cpu
).unsqueeze(0)
# SINGLE-CHANNEL CPU VALUE TESTS
TEST_CASE_1D_SINE_CPU = [
{}, # args (empty, so use default)
    cpu_input_data["1D"],  # Input data: Hann-windowed sine
    create_expected_numpy_output(cpu_input_data["1D"], axis=2),  # Expected output: analytic signal via FFT
1e-5, # absolute tolerance
]
TEST_CASE_2D_SINE_CPU = [
{}, # args (empty, so use default)
    cpu_input_data["2D"],  # Input data: 2D stack of Hann-windowed sines
    create_expected_numpy_output(cpu_input_data["2D"], axis=2),  # Expected output: analytic signal via FFT
1e-5, # absolute tolerance
]
TEST_CASE_3D_SINE_CPU = [
{}, # args (empty, so use default)
    cpu_input_data["3D"],  # Input data: 3D stack of Hann-windowed sines
create_expected_numpy_output(cpu_input_data["3D"], axis=2),
1e-5, # absolute tolerance
]
# MULTICHANNEL CPU VALUE TESTS, PROCESS ALONG FIRST SPATIAL AXIS
TEST_CASE_1D_2CH_SINE_CPU = [
{}, # args (empty, so use default)
    cpu_input_data["1D 2CH"],  # Input data: multichannel Hann-windowed sines
create_expected_numpy_output(cpu_input_data["1D 2CH"], axis=2),
1e-5, # absolute tolerance
]
TEST_CASE_2D_2CH_SINE_CPU = [
{}, # args (empty, so use default)
    cpu_input_data["2D 2CH"],  # Input data: multichannel 2D Hann-windowed sines
create_expected_numpy_output(cpu_input_data["2D 2CH"], axis=2),
1e-5, # absolute tolerance
]
# GPU TEST DATA
if torch.cuda.is_available():
gpu = torch.device("cuda")
gpu_input_data = {}
gpu_input_data["1D"] = torch.as_tensor(hann_windowed_sine, device=gpu).unsqueeze(0).unsqueeze(0)
gpu_input_data["2D"] = (
torch.as_tensor(np.stack([hann_windowed_sine] * 10, axis=1), device=gpu).unsqueeze(0).unsqueeze(0)
)
gpu_input_data["3D"] = (
torch.as_tensor(np.stack([np.stack([hann_windowed_sine] * 10, axis=1)] * 10, axis=2), device=gpu)
.unsqueeze(0)
.unsqueeze(0)
)
gpu_input_data["1D 2CH"] = torch.as_tensor(np.stack([hann_windowed_sine] * 10, axis=1), device=gpu).unsqueeze(0)
gpu_input_data["2D 2CH"] = torch.as_tensor(
np.stack([np.stack([hann_windowed_sine] * 10, axis=1)] * 10, axis=2), device=gpu
).unsqueeze(0)
# SINGLE CHANNEL GPU VALUE TESTS
TEST_CASE_1D_SINE_GPU = [
{}, # args (empty, so use default)
        gpu_input_data["1D"],  # Input data: Hann-windowed sine
        create_expected_numpy_output(gpu_input_data["1D"], axis=2),  # Expected output: analytic signal via FFT
1e-5, # absolute tolerance
]
TEST_CASE_2D_SINE_GPU = [
{}, # args (empty, so use default)
        gpu_input_data["2D"],  # Input data: 2D stack of Hann-windowed sines
        create_expected_numpy_output(gpu_input_data["2D"], axis=2),  # Expected output: analytic signal via FFT
1e-5, # absolute tolerance
]
TEST_CASE_3D_SINE_GPU = [
{}, # args (empty, so use default)
        gpu_input_data["3D"],  # Input data: 3D stack of Hann-windowed sines
        create_expected_numpy_output(gpu_input_data["3D"], axis=2),  # Expected output: analytic signal via FFT
1e-5, # absolute tolerance
]
# MULTICHANNEL GPU VALUE TESTS, PROCESS ALONG FIRST SPATIAL AXIS
TEST_CASE_1D_2CH_SINE_GPU = [
{}, # args (empty, so use default)
        gpu_input_data["1D 2CH"],  # Input data: multichannel Hann-windowed sines
create_expected_numpy_output(gpu_input_data["1D 2CH"], axis=2),
1e-5, # absolute tolerance
]
TEST_CASE_2D_2CH_SINE_GPU = [
{}, # args (empty, so use default)
        gpu_input_data["2D 2CH"],  # Input data: multichannel 2D Hann-windowed sines
create_expected_numpy_output(gpu_input_data["2D 2CH"], axis=2),
1e-5, # absolute tolerance
]
# TESTS CHECKING PADDING, AXIS SELECTION ETC ARE COVERED BY test_detect_envelope.py
@SkipIfNoModule("torch.fft")
class TestHilbertTransformCPU(unittest.TestCase):
@parameterized.expand(
[
TEST_CASE_1D_SINE_CPU,
TEST_CASE_2D_SINE_CPU,
TEST_CASE_3D_SINE_CPU,
TEST_CASE_1D_2CH_SINE_CPU,
TEST_CASE_2D_2CH_SINE_CPU,
]
)
def test_value(self, arguments, image, expected_data, atol):
result = HilbertTransform(**arguments)(image)
result = result.squeeze(0).squeeze(0).numpy()
np.testing.assert_allclose(result, expected_data.squeeze(), atol=atol)
@skip_if_no_cuda
@SkipIfNoModule("torch.fft")
class TestHilbertTransformGPU(unittest.TestCase):
@parameterized.expand(
[]
if not torch.cuda.is_available()
else [
TEST_CASE_1D_SINE_GPU,
TEST_CASE_2D_SINE_GPU,
TEST_CASE_3D_SINE_GPU,
TEST_CASE_1D_2CH_SINE_GPU,
TEST_CASE_2D_2CH_SINE_GPU,
],
skip_on_empty=True,
)
def test_value(self, arguments, image, expected_data, atol):
result = HilbertTransform(**arguments)(image)
result = result.squeeze(0).squeeze(0).cpu().numpy()
np.testing.assert_allclose(result, expected_data.squeeze(), atol=atol)
@SkipIfModule("torch.fft")
class TestHilbertTransformNoFFTMod(unittest.TestCase):
def test_no_fft_module_error(self):
self.assertRaises(OptionalImportError, HilbertTransform(), torch.randn(1, 1, 10))
if __name__ == "__main__":
unittest.main()
| [
"torch.device",
"torch.cuda.is_available",
"torch.as_tensor",
"torch.randn"
] | 1.7 | function2-llx/MONAI | 4cddaa830b61b88ec78e089bb5f21e05bb1a78f4 |
1.6 | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from parameterized import parameterized
from monai.transforms import FillHolesd
from monai.utils.enums import CommonKeys
from tests.utils import TEST_NDARRAYS, assert_allclose, clone
grid_1_raw = [[1, 1, 1], [1, 0, 1], [1, 1, 1]]
grid_2_raw = [[0, 1, 0], [1, 0, 1], [0, 1, 0]]
grid_3_raw = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
grid_4_raw = [[0, 1, 0], [1, 1, 1], [0, 1, 0]]
grid_1 = torch.tensor([grid_1_raw])
grid_2 = torch.tensor([grid_2_raw])
grid_3 = torch.tensor([grid_3_raw])
grid_4 = torch.tensor([grid_4_raw])
grid_5 = torch.tensor([[[1, 1, 1], [1, 0, 0], [1, 1, 1]]])
grid_6 = torch.tensor([[[1, 1, 2, 2, 2], [1, 0, 2, 0, 2], [1, 1, 2, 2, 2]]])
grid_7 = torch.tensor([[[1, 1, 2, 2, 2], [1, 0, 2, 2, 2], [1, 1, 2, 2, 2]]])
TEST_CASE_0 = ["enclosed_default_full_connectivity_default_applied_labels", {}, grid_1, grid_3]
TEST_CASE_1 = ["enclosed_full_connectivity_default_applied_labels", {"connectivity": 2}, grid_1, grid_3]
TEST_CASE_2 = [
"enclosed_full_connectivity_applied_labels_same_single",
{"connectivity": 2, "applied_labels": 1},
grid_1,
grid_3,
]
TEST_CASE_3 = [
"enclosed_full_connectivity_applied_labels_same_list",
{"connectivity": 2, "applied_labels": [1]},
grid_1,
grid_3,
]
TEST_CASE_4 = [
"enclosed_full_connectivity_applied_labels_other_single",
{"connectivity": 2, "applied_labels": 2},
grid_1,
grid_1,
]
TEST_CASE_5 = [
"enclosed_full_connectivity_applied_labels_other_list",
{"connectivity": 2, "applied_labels": [2]},
grid_1,
grid_1,
]
TEST_CASE_6 = [
"enclosed_full_connectivity_applied_labels_same_and_other",
{"connectivity": 2, "applied_labels": [1, 2]},
grid_1,
grid_3,
]
TEST_CASE_7 = ["enclosed_connectivity_1_default_applied_labels", {"connectivity": 1}, grid_1, grid_3]
TEST_CASE_8 = ["enclosed_connectivity_1_default_applied_labels", {"connectivity": 1}, grid_2, grid_4]
TEST_CASE_9 = ["open_full_connectivity_default_applied_labels", {"connectivity": 2}, grid_2, grid_2]
TEST_CASE_10 = ["open_to_edge_connectivity_1_default_applied_labels", {"connectivity": 1}, grid_5, grid_5]
TEST_CASE_11 = ["open_to_other_label_connectivity_1_default_applied_labels", {"connectivity": 1}, grid_6, grid_7]
TEST_CASE_12 = [
"open_to_other_label_connectivity_1_applied_labels_other",
{"connectivity": 1, "applied_labels": 1},
grid_6,
grid_6,
]
TEST_CASE_13 = [
"numpy_enclosed_default_full_connectivity_default_applied_labels",
{},
grid_1.cpu().numpy(),
grid_3.cpu().numpy(),
]
TEST_CASE_14 = [
"3D_enclosed_full_connectivity_default_applied_labels",
{"connectivity": 3},
torch.tensor([[grid_3_raw, grid_1_raw, grid_3_raw]]),
torch.tensor([[grid_3_raw, grid_3_raw, grid_3_raw]]),
]
TEST_CASE_15 = [
"3D_enclosed_connectivity_1_default_applied_labels",
{"connectivity": 1},
torch.tensor([[grid_4_raw, grid_2_raw, grid_4_raw]]),
torch.tensor([[grid_4_raw, grid_4_raw, grid_4_raw]]),
]
TEST_CASE_16 = [
"3D_open_full_connectivity_default_applied_labels",
{"connectivity": 3},
torch.tensor([[grid_4_raw, grid_2_raw, grid_4_raw]]),
torch.tensor([[grid_4_raw, grid_2_raw, grid_4_raw]]),
]
TEST_CASE_17 = [
"3D_open_to_edge_connectivity_1_default_applied_labels",
{"connectivity": 1},
torch.tensor([[grid_1_raw, grid_1_raw, grid_3_raw]]),
torch.tensor([[grid_1_raw, grid_1_raw, grid_3_raw]]),
]
TEST_CASE_18 = [
"enclosed_full_connectivity_applied_labels_with_background",
{"connectivity": 2, "applied_labels": [0, 1]},
grid_1,
grid_3,
]
TEST_CASE_19 = [
"enclosed_full_connectivity_applied_labels_only_background",
{"connectivity": 2, "applied_labels": [0]},
grid_1,
grid_1,
]
TEST_CASE_20 = [
"one-hot_enclosed_connectivity_1_default_applied_labels",
{"connectivity": 1},
torch.tensor([grid_1_raw, grid_1_raw, grid_2_raw]),
torch.tensor([grid_1_raw, grid_3_raw, grid_4_raw]),
]
TEST_CASE_21 = [
"one-hot_enclosed_connectivity_1_applied_labels_2",
{"connectivity": 1, "applied_labels": [2]},
torch.tensor([grid_1_raw, grid_1_raw, grid_2_raw]),
torch.tensor([grid_1_raw, grid_1_raw, grid_4_raw]),
]
TEST_CASE_22 = [
"one-hot_full_connectivity_applied_labels_2",
{"connectivity": 2},
torch.tensor([grid_1_raw, grid_1_raw, grid_2_raw]),
torch.tensor([grid_1_raw, grid_3_raw, grid_2_raw]),
]
VALID_CASES = [
TEST_CASE_0,
TEST_CASE_1,
TEST_CASE_2,
TEST_CASE_3,
TEST_CASE_4,
TEST_CASE_5,
TEST_CASE_6,
TEST_CASE_7,
TEST_CASE_8,
TEST_CASE_9,
TEST_CASE_10,
TEST_CASE_11,
TEST_CASE_12,
TEST_CASE_13,
TEST_CASE_14,
TEST_CASE_15,
TEST_CASE_16,
TEST_CASE_17,
TEST_CASE_18,
TEST_CASE_19,
TEST_CASE_20,
TEST_CASE_21,
TEST_CASE_22,
]
ITEST_CASE_1 = ["invalid_image_data_type", {}, [[[[1, 1, 1]]]], NotImplementedError]
INVALID_CASES = [ITEST_CASE_1]
class TestFillHoles(unittest.TestCase):
@parameterized.expand(VALID_CASES)
def test_correct_results(self, _, args, input_image, expected):
key = CommonKeys.IMAGE
converter = FillHolesd(keys=key, **args)
for p in TEST_NDARRAYS:
result = converter({key: p(clone(input_image))})[key]
assert_allclose(result, p(expected))
@parameterized.expand(INVALID_CASES)
def test_raise_exception(self, _, args, input_image, expected_error):
key = CommonKeys.IMAGE
with self.assertRaises(expected_error):
converter = FillHolesd(keys=key, **args)
if isinstance(input_image, torch.Tensor) and torch.cuda.is_available():
_ = converter({key: clone(input_image).cuda()})[key]
else:
_ = converter({key: clone(input_image)})[key]
if __name__ == "__main__":
unittest.main()
| [
"torch.cuda.is_available",
"torch.tensor"
] | 1.6 | function2-llx/MONAI | 2fef7ff5c064a9ff6b6d6b4f2323180afed99934 |
1.6 | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.data import CacheDataset, DataLoader, Dataset
from monai.transforms import Compose, DataStatsd, Randomizable, SimulateDelayd
from monai.utils import set_determinism
TEST_CASE_1 = [[{"image": np.asarray([1, 2, 3])}, {"image": np.asarray([4, 5])}]]
TEST_CASE_2 = [[{"label": torch.as_tensor([[3], [2]])}, {"label": np.asarray([[1], [2]])}]]
class TestDataLoader(unittest.TestCase):
def test_values(self):
datalist = [
{"image": "spleen_19.nii.gz", "label": "spleen_label_19.nii.gz"},
{"image": "spleen_31.nii.gz", "label": "spleen_label_31.nii.gz"},
]
transform = Compose(
[
DataStatsd(keys=["image", "label"], data_shape=False, value_range=False, data_value=True),
SimulateDelayd(keys=["image", "label"], delay_time=0.1),
]
)
dataset = CacheDataset(data=datalist, transform=transform, cache_rate=0.5, cache_num=1)
n_workers = 0 if sys.platform == "win32" else 2
dataloader = DataLoader(dataset=dataset, batch_size=2, num_workers=n_workers)
for d in dataloader:
self.assertEqual(d["image"][0], "spleen_19.nii.gz")
self.assertEqual(d["image"][1], "spleen_31.nii.gz")
self.assertEqual(d["label"][0], "spleen_label_19.nii.gz")
self.assertEqual(d["label"][1], "spleen_label_31.nii.gz")
@parameterized.expand([TEST_CASE_1, TEST_CASE_2])
def test_exception(self, datalist):
dataset = Dataset(data=datalist, transform=None)
dataloader = DataLoader(dataset=dataset, batch_size=2, num_workers=0)
with self.assertRaisesRegex((TypeError, RuntimeError), "Collate error on the key"):
for _ in dataloader:
pass
class _RandomDataset(torch.utils.data.Dataset, Randomizable):
def __getitem__(self, index):
return self.R.randint(0, 1000, (1,))
def __len__(self):
return 8
class TestLoaderRandom(unittest.TestCase):
"""
Testing data loader working with the randomizable interface
"""
def setUp(self):
set_determinism(0)
def tearDown(self):
set_determinism(None)
def test_randomize(self):
dataset = _RandomDataset()
dataloader = DataLoader(dataset, batch_size=2, num_workers=3)
output = []
for _ in range(2):
for batch in dataloader:
output.extend(batch.data.numpy().flatten().tolist())
self.assertListEqual(output, [594, 170, 524, 778, 370, 906, 292, 589, 762, 763, 156, 886, 42, 405, 221, 166])
if __name__ == "__main__":
unittest.main()
| [
"torch.as_tensor"
] | 1.6 | function2-llx/MONAI | e0db5a564225a7cb62e7a23df97267019006302f |
1.6 | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from parameterized import parameterized
from monai.transforms import VoteEnsemble
from tests.utils import TEST_NDARRAYS, assert_allclose
TESTS = []
for p in TEST_NDARRAYS:
# shape: [2, 1, 1]
TESTS.append(
[
{"num_classes": None},
[p(torch.tensor([[[1]], [[0]]])), p(torch.tensor([[[1]], [[0]]])), p(torch.tensor([[[0]], [[1]]]))],
p(torch.tensor([[[1.0]], [[0.0]]])),
]
)
# shape: [1, 2, 1, 1]
TESTS.append(
[
{"num_classes": None},
p(
torch.stack(
[torch.tensor([[[[1]], [[0]]]]), torch.tensor([[[[1]], [[0]]]]), torch.tensor([[[[0]], [[1]]]])]
)
),
p(torch.tensor([[[[1.0]], [[0.0]]]])),
]
)
# shape: [1, 2, 1]
TESTS.append(
[
{"num_classes": 3},
[p(torch.tensor([[[0], [2]]])), p(torch.tensor([[[0], [2]]])), p(torch.tensor([[[1], [1]]]))],
p(torch.tensor([[[0], [2]]])),
]
)
# shape: [1, 2, 1]
TESTS.append(
[
{"num_classes": 5},
[p(torch.tensor([[[0], [2]]])), p(torch.tensor([[[0], [2]]])), p(torch.tensor([[[1], [1]]]))],
p(torch.tensor([[[0], [2]]])),
]
)
# shape: [1]
TESTS.append(
[{"num_classes": 3}, [p(torch.tensor([2])), p(torch.tensor([2])), p(torch.tensor([1]))], p(torch.tensor([2]))]
)
# shape: 1
TESTS.append([{"num_classes": 3}, [p(torch.tensor(2)), p(torch.tensor(2)), p(torch.tensor(1))], p(torch.tensor(2))])
class TestVoteEnsemble(unittest.TestCase):
@parameterized.expand(TESTS)
def test_value(self, input_param, img, expected_value):
result = VoteEnsemble(**input_param)(img)
if isinstance(img, torch.Tensor):
self.assertIsInstance(result, torch.Tensor)
self.assertEqual(result.device, img.device)
assert_allclose(result, expected_value)
if __name__ == "__main__":
unittest.main()
| [
"torch.tensor"
] | 1.6 | function2-llx/MONAI | e0db5a564225a7cb62e7a23df97267019006302f |
1.7 | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, Iterable, Sequence
import torch
from torch.utils.data import DataLoader
from monai.config import IgniteInfo, KeysCollection
from monai.engines.utils import IterationEvents, default_metric_cmp_fn, default_prepare_batch
from monai.engines.workflow import Workflow
from monai.inferers import Inferer, SimpleInferer
from monai.networks.utils import eval_mode, train_mode
from monai.transforms import Transform
from monai.utils import ForwardMode, ensure_tuple, min_version, optional_import
from monai.utils.enums import CommonKeys as Keys
from monai.utils.module import look_up_option
if TYPE_CHECKING:
from ignite.engine import Engine, EventEnum
from ignite.metrics import Metric
else:
Engine, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine")
Metric, _ = optional_import("ignite.metrics", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Metric")
EventEnum, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "EventEnum")
__all__ = ["Evaluator", "SupervisedEvaluator", "EnsembleEvaluator"]
class Evaluator(Workflow):
"""
Base class for all kinds of evaluators, inherits from Workflow.
Args:
device: an object representing the device on which to run.
        val_data_loader: the data loader the Ignite engine runs over; must be an Iterable or a torch.DataLoader.
        epoch_length: number of iterations for one epoch, defaults to `len(val_data_loader)`.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function to parse expected data (usually `image`, `label` and other network args)
from `engine.state.batch` for every iteration, for more details please refer to:
https://pytorch.org/ignite/generated/ignite.engine.create_supervised_trainer.html.
iteration_update: the callable function for every iteration, expect to accept `engine`
and `engine.state.batch` as inputs, return data will be stored in `engine.state.output`.
if not provided, use `self._iteration()` instead. for more details please refer to:
https://pytorch.org/ignite/generated/ignite.engine.engine.Engine.html.
postprocessing: execute additional transformation for the model output data.
Typically, several Tensor based transforms composed by `Compose`.
key_val_metric: compute metric when every iteration completed, and save average value to
engine.state.metrics when epoch completed. key_val_metric is the main metric to compare and save the
checkpoint into files.
additional_metrics: more Ignite metrics that also attach to Ignite Engine.
metric_cmp_fn: function to compare current key metric with previous best key metric value,
it must accept 2 args (current_metric, previous_best) and return a bool result: if `True`, will update
`best_metric` and `best_metric_epoch` with current metric and epoch, default to `greater than`.
val_handlers: every handler is a set of Ignite Event-Handlers, must have `attach` function, like:
CheckpointHandler, StatsHandler, etc.
amp: whether to enable auto-mixed-precision evaluation, default is False.
mode: model forward mode during evaluation, should be 'eval' or 'train',
which maps to `model.eval()` or `model.train()`, default to 'eval'.
event_names: additional custom ignite events that will register to the engine.
new events can be a list of str or `ignite.engine.events.EventEnum`.
event_to_attr: a dictionary to map an event to a state attribute, then add to `engine.state`.
for more details, check: https://pytorch.org/ignite/generated/ignite.engine.engine.Engine.html
#ignite.engine.engine.Engine.register_events.
decollate: whether to decollate the batch-first data to a list of data after model computation,
recommend `decollate=True` when `postprocessing` uses components from `monai.transforms`.
default to `True`.
to_kwargs: dict of other args for `prepare_batch` API when converting the input data, except for
`device`, `non_blocking`.
amp_kwargs: dict of the args for `torch.cuda.amp.autocast()` API, for more details:
https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.autocast.
"""
def __init__(
self,
device: torch.device,
val_data_loader: Iterable | DataLoader,
epoch_length: int | None = None,
non_blocking: bool = False,
prepare_batch: Callable = default_prepare_batch,
iteration_update: Callable[[Engine, Any], Any] | None = None,
postprocessing: Transform | None = None,
key_val_metric: dict[str, Metric] | None = None,
additional_metrics: dict[str, Metric] | None = None,
metric_cmp_fn: Callable = default_metric_cmp_fn,
val_handlers: Sequence | None = None,
amp: bool = False,
mode: ForwardMode | str = ForwardMode.EVAL,
event_names: list[str | EventEnum] | None = None,
event_to_attr: dict | None = None,
decollate: bool = True,
to_kwargs: dict | None = None,
amp_kwargs: dict | None = None,
) -> None:
super().__init__(
device=device,
max_epochs=1,
data_loader=val_data_loader,
epoch_length=epoch_length,
non_blocking=non_blocking,
prepare_batch=prepare_batch,
iteration_update=iteration_update,
postprocessing=postprocessing,
key_metric=key_val_metric,
additional_metrics=additional_metrics,
metric_cmp_fn=metric_cmp_fn,
handlers=val_handlers,
amp=amp,
event_names=event_names,
event_to_attr=event_to_attr,
decollate=decollate,
to_kwargs=to_kwargs,
amp_kwargs=amp_kwargs,
)
mode = look_up_option(mode, ForwardMode)
if mode == ForwardMode.EVAL:
self.mode = eval_mode
elif mode == ForwardMode.TRAIN:
self.mode = train_mode
else:
raise ValueError(f"unsupported mode: {mode}, should be 'eval' or 'train'.")
def run(self, global_epoch: int = 1) -> None:
"""
Execute validation/evaluation based on Ignite Engine.
Args:
global_epoch: the overall epoch if during a training. evaluator engine can get it from trainer.
"""
# init env value for current validation process
self.state.max_epochs = global_epoch
self.state.epoch = global_epoch - 1
self.state.iteration = 0
super().run()
def get_validation_stats(self) -> dict[str, float]:
return {"best_validation_metric": self.state.best_metric, "best_validation_epoch": self.state.best_metric_epoch}
class SupervisedEvaluator(Evaluator):
"""
Standard supervised evaluation method with image and label(optional), inherits from evaluator and Workflow.
Args:
device: an object representing the device on which to run.
        val_data_loader: the data loader the Ignite engine runs over; must be an Iterable, typically a torch.DataLoader.
        network: the network to evaluate, should be a regular PyTorch `torch.nn.Module`.
        epoch_length: number of iterations for one epoch, defaults to `len(val_data_loader)`.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function to parse expected data (usually `image`, `label` and other network args)
from `engine.state.batch` for every iteration, for more details please refer to:
https://pytorch.org/ignite/generated/ignite.engine.create_supervised_trainer.html.
iteration_update: the callable function for every iteration, expect to accept `engine`
and `engine.state.batch` as inputs, return data will be stored in `engine.state.output`.
if not provided, use `self._iteration()` instead. for more details please refer to:
https://pytorch.org/ignite/generated/ignite.engine.engine.Engine.html.
inferer: inference method that execute model forward on input data, like: SlidingWindow, etc.
postprocessing: execute additional transformation for the model output data.
Typically, several Tensor based transforms composed by `Compose`.
key_val_metric: compute metric when every iteration completed, and save average value to
engine.state.metrics when epoch completed. key_val_metric is the main metric to compare and save the
checkpoint into files.
additional_metrics: more Ignite metrics that also attach to Ignite Engine.
metric_cmp_fn: function to compare current key metric with previous best key metric value,
it must accept 2 args (current_metric, previous_best) and return a bool result: if `True`, will update
`best_metric` and `best_metric_epoch` with current metric and epoch, default to `greater than`.
val_handlers: every handler is a set of Ignite Event-Handlers, must have `attach` function, like:
CheckpointHandler, StatsHandler, etc.
amp: whether to enable auto-mixed-precision evaluation, default is False.
mode: model forward mode during evaluation, should be 'eval' or 'train',
which maps to `model.eval()` or `model.train()`, default to 'eval'.
event_names: additional custom ignite events that will register to the engine.
new events can be a list of str or `ignite.engine.events.EventEnum`.
event_to_attr: a dictionary to map an event to a state attribute, then add to `engine.state`.
for more details, check: https://pytorch.org/ignite/generated/ignite.engine.engine.Engine.html
#ignite.engine.engine.Engine.register_events.
decollate: whether to decollate the batch-first data to a list of data after model computation,
recommend `decollate=True` when `postprocessing` uses components from `monai.transforms`.
default to `True`.
to_kwargs: dict of other args for `prepare_batch` API when converting the input data, except for
`device`, `non_blocking`.
amp_kwargs: dict of the args for `torch.cuda.amp.autocast()` API, for more details:
https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.autocast.
"""
def __init__(
self,
device: torch.device,
val_data_loader: Iterable | DataLoader,
network: torch.nn.Module,
epoch_length: int | None = None,
non_blocking: bool = False,
prepare_batch: Callable = default_prepare_batch,
iteration_update: Callable[[Engine, Any], Any] | None = None,
inferer: Inferer | None = None,
postprocessing: Transform | None = None,
key_val_metric: dict[str, Metric] | None = None,
additional_metrics: dict[str, Metric] | None = None,
metric_cmp_fn: Callable = default_metric_cmp_fn,
val_handlers: Sequence | None = None,
amp: bool = False,
mode: ForwardMode | str = ForwardMode.EVAL,
event_names: list[str | EventEnum] | None = None,
event_to_attr: dict | None = None,
decollate: bool = True,
to_kwargs: dict | None = None,
amp_kwargs: dict | None = None,
) -> None:
super().__init__(
device=device,
val_data_loader=val_data_loader,
epoch_length=epoch_length,
non_blocking=non_blocking,
prepare_batch=prepare_batch,
iteration_update=iteration_update,
postprocessing=postprocessing,
key_val_metric=key_val_metric,
additional_metrics=additional_metrics,
metric_cmp_fn=metric_cmp_fn,
val_handlers=val_handlers,
amp=amp,
mode=mode,
event_names=event_names,
event_to_attr=event_to_attr,
decollate=decollate,
to_kwargs=to_kwargs,
amp_kwargs=amp_kwargs,
)
self.network = network
self.inferer = SimpleInferer() if inferer is None else inferer
def _iteration(self, engine: SupervisedEvaluator, batchdata: dict[str, torch.Tensor]):
"""
callback function for the Supervised Evaluation processing logic of 1 iteration in Ignite Engine.
Return below items in a dictionary:
- IMAGE: image Tensor data for model input, already moved to device.
- LABEL: label Tensor data corresponding to the image, already moved to device.
- PRED: prediction result of model.
Args:
engine: `SupervisedEvaluator` to execute operation for an iteration.
batchdata: input data for this iteration, usually can be dictionary or tuple of Tensor data.
Raises:
ValueError: When ``batchdata`` is None.
"""
if batchdata is None:
raise ValueError("Must provide batch data for current iteration.")
batch = engine.prepare_batch(batchdata, engine.state.device, engine.non_blocking, **engine.to_kwargs)
if len(batch) == 2:
inputs, targets = batch
args: tuple = ()
kwargs: dict = {}
else:
inputs, targets, args, kwargs = batch
# put iteration outputs into engine.state
engine.state.output = {Keys.IMAGE: inputs, Keys.LABEL: targets}
# execute forward computation
with engine.mode(engine.network):
if engine.amp:
with torch.cuda.amp.autocast(**engine.amp_kwargs):
engine.state.output[Keys.PRED] = engine.inferer(inputs, engine.network, *args, **kwargs)
else:
engine.state.output[Keys.PRED] = engine.inferer(inputs, engine.network, *args, **kwargs)
engine.fire_event(IterationEvents.FORWARD_COMPLETED)
engine.fire_event(IterationEvents.MODEL_COMPLETED)
return engine.state.output
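# Illustrative usage sketch, not part of the original file: all names below are placeholders. A minimal
# evaluation run only needs a device, a validation DataLoader yielding dicts with CommonKeys.IMAGE and
# CommonKeys.LABEL, and a network; metrics, postprocessing and handlers from the docstring above are optional.
def _run_supervised_evaluation(net: torch.nn.Module, val_loader: DataLoader, device: torch.device) -> dict:
    evaluator = SupervisedEvaluator(device=device, val_data_loader=val_loader, network=net)
    evaluator.run()  # a single pass over val_loader (evaluators fix max_epochs to 1)
    return evaluator.state.metrics  # empty unless key_val_metric / additional_metrics were provided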
class EnsembleEvaluator(Evaluator):
"""
Ensemble evaluation for multiple models, inherits from evaluator and Workflow.
It accepts a list of models for inference and outputs a list of predictions for further operations.
Args:
device: an object representing the device on which to run.
        val_data_loader: the data loader the Ignite engine runs over; must be an Iterable, typically a torch.DataLoader.
        epoch_length: number of iterations for one epoch, defaults to `len(val_data_loader)`.
        networks: networks to evaluate in order in the evaluator, each should be a regular PyTorch `torch.nn.Module`.
pred_keys: the keys to store every prediction data.
the length must exactly match the number of networks.
if None, use "pred_{index}" as key corresponding to N networks, index from `0` to `N-1`.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function to parse expected data (usually `image`, `label` and other network args)
from `engine.state.batch` for every iteration, for more details please refer to:
https://pytorch.org/ignite/generated/ignite.engine.create_supervised_trainer.html.
iteration_update: the callable function for every iteration, expect to accept `engine`
and `engine.state.batch` as inputs, return data will be stored in `engine.state.output`.
if not provided, use `self._iteration()` instead. for more details please refer to:
https://pytorch.org/ignite/generated/ignite.engine.engine.Engine.html.
inferer: inference method that execute model forward on input data, like: SlidingWindow, etc.
postprocessing: execute additional transformation for the model output data.
Typically, several Tensor based transforms composed by `Compose`.
key_val_metric: compute metric when every iteration completed, and save average value to
engine.state.metrics when epoch completed. key_val_metric is the main metric to compare and save the
checkpoint into files.
additional_metrics: more Ignite metrics that also attach to Ignite Engine.
metric_cmp_fn: function to compare current key metric with previous best key metric value,
it must accept 2 args (current_metric, previous_best) and return a bool result: if `True`, will update
`best_metric` and `best_metric_epoch` with current metric and epoch, default to `greater than`.
val_handlers: every handler is a set of Ignite Event-Handlers, must have `attach` function, like:
CheckpointHandler, StatsHandler, etc.
amp: whether to enable auto-mixed-precision evaluation, default is False.
mode: model forward mode during evaluation, should be 'eval' or 'train',
which maps to `model.eval()` or `model.train()`, default to 'eval'.
event_names: additional custom ignite events that will register to the engine.
new events can be a list of str or `ignite.engine.events.EventEnum`.
event_to_attr: a dictionary to map an event to a state attribute, then add to `engine.state`.
for more details, check: https://pytorch.org/ignite/generated/ignite.engine.engine.Engine.html
#ignite.engine.engine.Engine.register_events.
decollate: whether to decollate the batch-first data to a list of data after model computation,
recommend `decollate=True` when `postprocessing` uses components from `monai.transforms`.
default to `True`.
to_kwargs: dict of other args for `prepare_batch` API when converting the input data, except for
`device`, `non_blocking`.
amp_kwargs: dict of the args for `torch.cuda.amp.autocast()` API, for more details:
https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.autocast.
"""
def __init__(
self,
device: torch.device,
val_data_loader: Iterable | DataLoader,
networks: Sequence[torch.nn.Module],
pred_keys: KeysCollection | None = None,
epoch_length: int | None = None,
non_blocking: bool = False,
prepare_batch: Callable = default_prepare_batch,
iteration_update: Callable[[Engine, Any], Any] | None = None,
inferer: Inferer | None = None,
postprocessing: Transform | None = None,
key_val_metric: dict[str, Metric] | None = None,
additional_metrics: dict[str, Metric] | None = None,
metric_cmp_fn: Callable = default_metric_cmp_fn,
val_handlers: Sequence | None = None,
amp: bool = False,
mode: ForwardMode | str = ForwardMode.EVAL,
event_names: list[str | EventEnum] | None = None,
event_to_attr: dict | None = None,
decollate: bool = True,
to_kwargs: dict | None = None,
amp_kwargs: dict | None = None,
) -> None:
super().__init__(
device=device,
val_data_loader=val_data_loader,
epoch_length=epoch_length,
non_blocking=non_blocking,
prepare_batch=prepare_batch,
iteration_update=iteration_update,
postprocessing=postprocessing,
key_val_metric=key_val_metric,
additional_metrics=additional_metrics,
metric_cmp_fn=metric_cmp_fn,
val_handlers=val_handlers,
amp=amp,
mode=mode,
event_names=event_names,
event_to_attr=event_to_attr,
decollate=decollate,
to_kwargs=to_kwargs,
amp_kwargs=amp_kwargs,
)
self.networks = ensure_tuple(networks)
self.pred_keys = (
[f"{Keys.PRED}_{i}" for i in range(len(self.networks))] if pred_keys is None else ensure_tuple(pred_keys)
)
if len(self.pred_keys) != len(self.networks):
raise ValueError("length of `pred_keys` must be same as the length of `networks`.")
self.inferer = SimpleInferer() if inferer is None else inferer
def _iteration(self, engine: EnsembleEvaluator, batchdata: dict[str, torch.Tensor]):
"""
callback function for the Supervised Evaluation processing logic of 1 iteration in Ignite Engine.
Return below items in a dictionary:
- IMAGE: image Tensor data for model input, already moved to device.
- LABEL: label Tensor data corresponding to the image, already moved to device.
- pred_keys[0]: prediction result of network 0.
- pred_keys[1]: prediction result of network 1.
- ... ...
- pred_keys[N]: prediction result of network N.
Args:
engine: `EnsembleEvaluator` to execute operation for an iteration.
batchdata: input data for this iteration, usually can be dictionary or tuple of Tensor data.
Raises:
ValueError: When ``batchdata`` is None.
"""
if batchdata is None:
raise ValueError("Must provide batch data for current iteration.")
batch = engine.prepare_batch(batchdata, engine.state.device, engine.non_blocking, **engine.to_kwargs)
if len(batch) == 2:
inputs, targets = batch
args: tuple = ()
kwargs: dict = {}
else:
inputs, targets, args, kwargs = batch
# put iteration outputs into engine.state
engine.state.output = {Keys.IMAGE: inputs, Keys.LABEL: targets}
for idx, network in enumerate(engine.networks):
with engine.mode(network):
if engine.amp:
with torch.cuda.amp.autocast(**engine.amp_kwargs):
if isinstance(engine.state.output, dict):
engine.state.output.update(
{engine.pred_keys[idx]: engine.inferer(inputs, network, *args, **kwargs)}
)
else:
if isinstance(engine.state.output, dict):
engine.state.output.update(
{engine.pred_keys[idx]: engine.inferer(inputs, network, *args, **kwargs)}
)
engine.fire_event(IterationEvents.FORWARD_COMPLETED)
engine.fire_event(IterationEvents.MODEL_COMPLETED)
return engine.state.output
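# A minimal usage sketch for the class above, assuming `net_a` and `net_b` are trained
# torch.nn.Module instances and `val_loader` yields dicts with "image"/"label" keys;
# all three names are illustrative placeholders, not part of the MONAI source.
def _example_build_ensemble_evaluator(net_a, net_b, val_loader):
    evaluator = EnsembleEvaluator(
        device=torch.device("cpu"),
        val_data_loader=val_loader,
        networks=[net_a, net_b],
        pred_keys=["pred_a", "pred_b"],  # one key per network, lengths must match
        inferer=SimpleInferer(),
    )
    # after evaluator.run(), each iteration output stores predictions under "pred_a"/"pred_b"
    return evaluator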
| [
"torch.cuda.amp.autocast"
] | 1.7 | function2-llx/MONAI | 4cddaa830b61b88ec78e089bb5f21e05bb1a78f4 |
1.7 | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Sequence, Tuple, Type, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from torch.nn import LayerNorm
from monai.networks.blocks import MLPBlock as Mlp
from monai.networks.blocks import PatchEmbed, UnetOutBlock, UnetrBasicBlock, UnetrUpBlock
from monai.networks.layers import DropPath, trunc_normal_
from monai.utils import ensure_tuple_rep, optional_import
rearrange, _ = optional_import("einops", name="rearrange")
class SwinUNETR(nn.Module):
"""
Swin UNETR based on: "Hatamizadeh et al.,
Swin UNETR: Swin Transformers for Semantic Segmentation of Brain Tumors in MRI Images
<https://arxiv.org/abs/2201.01266>"
"""
def __init__(
self,
img_size: Union[Sequence[int], int],
in_channels: int,
out_channels: int,
depths: Sequence[int] = (2, 2, 2, 2),
num_heads: Sequence[int] = (3, 6, 12, 24),
feature_size: int = 24,
norm_name: Union[Tuple, str] = "instance",
drop_rate: float = 0.0,
attn_drop_rate: float = 0.0,
dropout_path_rate: float = 0.0,
normalize: bool = True,
use_checkpoint: bool = False,
spatial_dims: int = 3,
) -> None:
"""
Args:
img_size: dimension of input image.
in_channels: dimension of input channels.
out_channels: dimension of output channels.
feature_size: dimension of network feature size.
depths: number of layers in each stage.
num_heads: number of attention heads.
norm_name: feature normalization type and arguments.
drop_rate: dropout rate.
attn_drop_rate: attention dropout rate.
dropout_path_rate: drop path rate.
normalize: normalize output intermediate features in each stage.
use_checkpoint: use gradient checkpointing for reduced memory usage.
spatial_dims: number of spatial dims.
Examples::
# for 3D single channel input with size (96,96,96), 4-channel output and feature size of 48.
>>> net = SwinUNETR(img_size=(96,96,96), in_channels=1, out_channels=4, feature_size=48)
# for 3D 4-channel input with size (128,128,128), 3-channel output and (2,4,2,2) layers in each stage.
>>> net = SwinUNETR(img_size=(128,128,128), in_channels=4, out_channels=3, depths=(2,4,2,2))
# for 2D 3-channel input with size (96,96), 2-channel output and gradient checkpointing.
>>> net = SwinUNETR(img_size=(96,96), in_channels=3, out_channels=2, use_checkpoint=True, spatial_dims=2)
"""
super().__init__()
img_size = ensure_tuple_rep(img_size, spatial_dims)
patch_size = ensure_tuple_rep(2, spatial_dims)
window_size = ensure_tuple_rep(7, spatial_dims)
if not (spatial_dims == 2 or spatial_dims == 3):
raise ValueError("spatial dimension should be 2 or 3.")
for m, p in zip(img_size, patch_size):
for i in range(5):
if m % np.power(p, i + 1) != 0:
raise ValueError("input image size (img_size) should be divisible by stage-wise image resolution.")
if not (0 <= drop_rate <= 1):
raise ValueError("dropout rate should be between 0 and 1.")
if not (0 <= attn_drop_rate <= 1):
raise ValueError("attention dropout rate should be between 0 and 1.")
if not (0 <= dropout_path_rate <= 1):
raise ValueError("drop path rate should be between 0 and 1.")
if feature_size % 12 != 0:
raise ValueError("feature_size should be divisible by 12.")
self.normalize = normalize
self.swinViT = SwinTransformer(
in_chans=in_channels,
embed_dim=feature_size,
window_size=window_size,
patch_size=patch_size,
depths=depths,
num_heads=num_heads,
mlp_ratio=4.0,
qkv_bias=True,
drop_rate=drop_rate,
attn_drop_rate=attn_drop_rate,
drop_path_rate=dropout_path_rate,
norm_layer=nn.LayerNorm,
use_checkpoint=use_checkpoint,
spatial_dims=spatial_dims,
)
self.encoder1 = UnetrBasicBlock(
spatial_dims=spatial_dims,
in_channels=in_channels,
out_channels=feature_size,
kernel_size=3,
stride=1,
norm_name=norm_name,
res_block=True,
)
self.encoder2 = UnetrBasicBlock(
spatial_dims=spatial_dims,
in_channels=feature_size,
out_channels=feature_size,
kernel_size=3,
stride=1,
norm_name=norm_name,
res_block=True,
)
self.encoder3 = UnetrBasicBlock(
spatial_dims=spatial_dims,
in_channels=2 * feature_size,
out_channels=2 * feature_size,
kernel_size=3,
stride=1,
norm_name=norm_name,
res_block=True,
)
self.encoder4 = UnetrBasicBlock(
spatial_dims=spatial_dims,
in_channels=4 * feature_size,
out_channels=4 * feature_size,
kernel_size=3,
stride=1,
norm_name=norm_name,
res_block=True,
)
self.encoder10 = UnetrBasicBlock(
spatial_dims=spatial_dims,
in_channels=16 * feature_size,
out_channels=16 * feature_size,
kernel_size=3,
stride=1,
norm_name=norm_name,
res_block=True,
)
self.decoder5 = UnetrUpBlock(
spatial_dims=spatial_dims,
in_channels=16 * feature_size,
out_channels=8 * feature_size,
kernel_size=3,
upsample_kernel_size=2,
norm_name=norm_name,
res_block=True,
)
self.decoder4 = UnetrUpBlock(
spatial_dims=spatial_dims,
in_channels=feature_size * 8,
out_channels=feature_size * 4,
kernel_size=3,
upsample_kernel_size=2,
norm_name=norm_name,
res_block=True,
)
self.decoder3 = UnetrUpBlock(
spatial_dims=spatial_dims,
in_channels=feature_size * 4,
out_channels=feature_size * 2,
kernel_size=3,
upsample_kernel_size=2,
norm_name=norm_name,
res_block=True,
)
self.decoder2 = UnetrUpBlock(
spatial_dims=spatial_dims,
in_channels=feature_size * 2,
out_channels=feature_size,
kernel_size=3,
upsample_kernel_size=2,
norm_name=norm_name,
res_block=True,
)
self.decoder1 = UnetrUpBlock(
spatial_dims=spatial_dims,
in_channels=feature_size,
out_channels=feature_size,
kernel_size=3,
upsample_kernel_size=2,
norm_name=norm_name,
res_block=True,
)
self.out = UnetOutBlock(
spatial_dims=spatial_dims, in_channels=feature_size, out_channels=out_channels
) # type: ignore
def load_from(self, weights):
with torch.no_grad():
self.swinViT.patch_embed.proj.weight.copy_(weights["state_dict"]["module.patch_embed.proj.weight"])
self.swinViT.patch_embed.proj.bias.copy_(weights["state_dict"]["module.patch_embed.proj.bias"])
for bname, block in self.swinViT.layers1[0].blocks.named_children():
block.load_from(weights, n_block=bname, layer="layers1")
self.swinViT.layers1[0].downsample.reduction.weight.copy_(
weights["state_dict"]["module.layers1.0.downsample.reduction.weight"]
)
self.swinViT.layers1[0].downsample.norm.weight.copy_(
weights["state_dict"]["module.layers1.0.downsample.norm.weight"]
)
self.swinViT.layers1[0].downsample.norm.bias.copy_(
weights["state_dict"]["module.layers1.0.downsample.norm.bias"]
)
for bname, block in self.swinViT.layers2[0].blocks.named_children():
block.load_from(weights, n_block=bname, layer="layers2")
self.swinViT.layers2[0].downsample.reduction.weight.copy_(
weights["state_dict"]["module.layers2.0.downsample.reduction.weight"]
)
self.swinViT.layers2[0].downsample.norm.weight.copy_(
weights["state_dict"]["module.layers2.0.downsample.norm.weight"]
)
self.swinViT.layers2[0].downsample.norm.bias.copy_(
weights["state_dict"]["module.layers2.0.downsample.norm.bias"]
)
for bname, block in self.swinViT.layers3[0].blocks.named_children():
block.load_from(weights, n_block=bname, layer="layers3")
self.swinViT.layers3[0].downsample.reduction.weight.copy_(
weights["state_dict"]["module.layers3.0.downsample.reduction.weight"]
)
self.swinViT.layers3[0].downsample.norm.weight.copy_(
weights["state_dict"]["module.layers3.0.downsample.norm.weight"]
)
self.swinViT.layers3[0].downsample.norm.bias.copy_(
weights["state_dict"]["module.layers3.0.downsample.norm.bias"]
)
for bname, block in self.swinViT.layers4[0].blocks.named_children():
block.load_from(weights, n_block=bname, layer="layers4")
self.swinViT.layers4[0].downsample.reduction.weight.copy_(
weights["state_dict"]["module.layers4.0.downsample.reduction.weight"]
)
self.swinViT.layers4[0].downsample.norm.weight.copy_(
weights["state_dict"]["module.layers4.0.downsample.norm.weight"]
)
self.swinViT.layers4[0].downsample.norm.bias.copy_(
weights["state_dict"]["module.layers4.0.downsample.norm.bias"]
)
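# A brief usage sketch for load_from above (the file name below is hypothetical): it expects
# a checkpoint dict with a "state_dict" entry whose keys carry a "module." prefix, e.g.
#   weights = torch.load("pretrained_swin.pt", map_location="cpu")
#   model.load_from(weights)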
def forward(self, x_in):
hidden_states_out = self.swinViT(x_in, self.normalize)
enc0 = self.encoder1(x_in)
enc1 = self.encoder2(hidden_states_out[0])
enc2 = self.encoder3(hidden_states_out[1])
enc3 = self.encoder4(hidden_states_out[2])
dec4 = self.encoder10(hidden_states_out[4])
dec3 = self.decoder5(dec4, hidden_states_out[3])
dec2 = self.decoder4(dec3, enc3)
dec1 = self.decoder3(dec2, enc2)
dec0 = self.decoder2(dec1, enc1)
out = self.decoder1(dec0, enc0)
logits = self.out(out)
return logits
def window_partition(x, window_size):
"""window partition operation based on: "Liu et al.,
Swin Transformer: Hierarchical Vision Transformer using Shifted Windows
<https://arxiv.org/abs/2103.14030>"
https://github.com/microsoft/Swin-Transformer
Args:
x: input tensor.
window_size: local window size.
"""
x_shape = x.size()
if len(x_shape) == 5:
b, d, h, w, c = x_shape
x = x.view(
b,
d // window_size[0],
window_size[0],
h // window_size[1],
window_size[1],
w // window_size[2],
window_size[2],
c,
)
windows = (
x.permute(0, 1, 3, 5, 2, 4, 6, 7).contiguous().view(-1, window_size[0] * window_size[1] * window_size[2], c)
)
elif len(x_shape) == 4:
b, h, w, c = x.shape
x = x.view(b, h // window_size[0], window_size[0], w // window_size[1], window_size[1], c)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0] * window_size[1], c)
return windows
def window_reverse(windows, window_size, dims):
"""window reverse operation based on: "Liu et al.,
Swin Transformer: Hierarchical Vision Transformer using Shifted Windows
<https://arxiv.org/abs/2103.14030>"
https://github.com/microsoft/Swin-Transformer
Args:
windows: windows tensor.
window_size: local window size.
dims: dimension values.
"""
if len(dims) == 4:
b, d, h, w = dims
x = windows.view(
b,
d // window_size[0],
h // window_size[1],
w // window_size[2],
window_size[0],
window_size[1],
window_size[2],
-1,
)
x = x.permute(0, 1, 4, 2, 5, 3, 6, 7).contiguous().view(b, d, h, w, -1)
elif len(dims) == 3:
b, h, w = dims
x = windows.view(b, h // window_size[0], w // window_size[1], window_size[0], window_size[1], -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(b, h, w, -1)
return x
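# A minimal self-check sketch for window_partition / window_reverse above, assuming a 2D
# feature map whose height and width are already divisible by the window size; the helper
# name is illustrative and not part of MONAI.
def _example_window_roundtrip():
    b, h, w, c = 2, 14, 14, 8
    win = (7, 7)
    x = torch.randn(b, h, w, c)
    windows = window_partition(x, win)  # shape: (b * 4 windows, 7 * 7, c)
    x_back = window_reverse(windows, win, (b, h, w))
    return torch.allclose(x, x_back)  # True: the two ops invert each other here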
def get_window_size(x_size, window_size, shift_size=None):
"""Computing window size based on: "Liu et al.,
Swin Transformer: Hierarchical Vision Transformer using Shifted Windows
<https://arxiv.org/abs/2103.14030>"
https://github.com/microsoft/Swin-Transformer
Args:
x_size: input size.
window_size: local window size.
shift_size: window shifting size.
"""
use_window_size = list(window_size)
if shift_size is not None:
use_shift_size = list(shift_size)
for i in range(len(x_size)):
if x_size[i] <= window_size[i]:
use_window_size[i] = x_size[i]
if shift_size is not None:
use_shift_size[i] = 0
if shift_size is None:
return tuple(use_window_size)
else:
return tuple(use_window_size), tuple(use_shift_size)
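# A worked example of the clamping above (values follow from the function body, they are
# not quoted from the MONAI docs): when a spatial extent is smaller than the configured
# window, that window dimension is clamped and its shift is zeroed.
#   get_window_size((4, 16, 16), (7, 7, 7), (3, 3, 3)) == ((4, 7, 7), (0, 3, 3))
#   get_window_size((4, 16, 16), (7, 7, 7)) == (4, 7, 7)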
class WindowAttention(nn.Module):
"""
Window based multi-head self attention module with relative position bias based on: "Liu et al.,
Swin Transformer: Hierarchical Vision Transformer using Shifted Windows
<https://arxiv.org/abs/2103.14030>"
https://github.com/microsoft/Swin-Transformer
"""
def __init__(
self,
dim: int,
num_heads: int,
window_size: Sequence[int],
qkv_bias: bool = False,
attn_drop: float = 0.0,
proj_drop: float = 0.0,
) -> None:
"""
Args:
dim: number of feature channels.
num_heads: number of attention heads.
window_size: local window size.
qkv_bias: add a learnable bias to query, key, value.
attn_drop: attention dropout rate.
proj_drop: dropout rate of output.
"""
super().__init__()
self.dim = dim
self.window_size = window_size
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim**-0.5
mesh_args = torch.meshgrid.__kwdefaults__
if len(self.window_size) == 3:
self.relative_position_bias_table = nn.Parameter(
torch.zeros(
(2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1),
num_heads,
)
)
coords_d = torch.arange(self.window_size[0])
coords_h = torch.arange(self.window_size[1])
coords_w = torch.arange(self.window_size[2])
if mesh_args is not None:
coords = torch.stack(torch.meshgrid(coords_d, coords_h, coords_w, indexing="ij"))
else:
coords = torch.stack(torch.meshgrid(coords_d, coords_h, coords_w))
coords_flatten = torch.flatten(coords, 1)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = relative_coords.permute(1, 2, 0).contiguous()
relative_coords[:, :, 0] += self.window_size[0] - 1
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 2] += self.window_size[2] - 1
relative_coords[:, :, 0] *= (2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1)
relative_coords[:, :, 1] *= 2 * self.window_size[2] - 1
elif len(self.window_size) == 2:
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)
)
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
if mesh_args is not None:
coords = torch.stack(torch.meshgrid(coords_h, coords_w, indexing="ij"))
else:
coords = torch.stack(torch.meshgrid(coords_h, coords_w))
coords_flatten = torch.flatten(coords, 1)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = relative_coords.permute(1, 2, 0).contiguous()
relative_coords[:, :, 0] += self.window_size[0] - 1
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1)
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=0.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask):
b, n, c = x.shape
qkv = self.qkv(x).reshape(b, n, 3, self.num_heads, c // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
q = q * self.scale
attn = q @ k.transpose(-2, -1)
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index[:n, :n].reshape(-1)
].reshape(n, n, -1)
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nw = mask.shape[0]
attn = attn.view(b // nw, nw, self.num_heads, n, n) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, n, n)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(b, n, c)
x = self.proj(x)
x = self.proj_drop(x)
return x
class SwinTransformerBlock(nn.Module):
"""
Swin Transformer block based on: "Liu et al.,
Swin Transformer: Hierarchical Vision Transformer using Shifted Windows
<https://arxiv.org/abs/2103.14030>"
https://github.com/microsoft/Swin-Transformer
"""
def __init__(
self,
dim: int,
num_heads: int,
window_size: Sequence[int],
shift_size: Sequence[int],
mlp_ratio: float = 4.0,
qkv_bias: bool = True,
drop: float = 0.0,
attn_drop: float = 0.0,
drop_path: float = 0.0,
act_layer: str = "GELU",
norm_layer: Type[LayerNorm] = nn.LayerNorm, # type: ignore
use_checkpoint: bool = False,
) -> None:
"""
Args:
dim: number of feature channels.
num_heads: number of attention heads.
window_size: local window size.
shift_size: window shift size.
mlp_ratio: ratio of mlp hidden dim to embedding dim.
qkv_bias: add a learnable bias to query, key, value.
drop: dropout rate.
attn_drop: attention dropout rate.
drop_path: stochastic depth rate.
act_layer: activation layer.
norm_layer: normalization layer.
use_checkpoint: use gradient checkpointing for reduced memory usage.
"""
super().__init__()
self.dim = dim
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
self.use_checkpoint = use_checkpoint
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim,
window_size=self.window_size,
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=drop,
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(hidden_size=dim, mlp_dim=mlp_hidden_dim, act=act_layer, dropout_rate=drop, dropout_mode="swin")
def forward_part1(self, x, mask_matrix):
x_shape = x.size()
x = self.norm1(x)
if len(x_shape) == 5:
b, d, h, w, c = x.shape
window_size, shift_size = get_window_size((d, h, w), self.window_size, self.shift_size)
pad_l = pad_t = pad_d0 = 0
pad_d1 = (window_size[0] - d % window_size[0]) % window_size[0]
pad_b = (window_size[1] - h % window_size[1]) % window_size[1]
pad_r = (window_size[2] - w % window_size[2]) % window_size[2]
x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b, pad_d0, pad_d1))
_, dp, hp, wp, _ = x.shape
dims = [b, dp, hp, wp]
elif len(x_shape) == 4:
b, h, w, c = x.shape
window_size, shift_size = get_window_size((h, w), self.window_size, self.shift_size)
pad_l = pad_t = 0
pad_b = (window_size[0] - h % window_size[0]) % window_size[0]
pad_r = (window_size[1] - w % window_size[1]) % window_size[1]
x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
_, hp, wp, _ = x.shape
dims = [b, hp, wp]
if any(i > 0 for i in shift_size):
if len(x_shape) == 5:
shifted_x = torch.roll(x, shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), dims=(1, 2, 3))
elif len(x_shape) == 4:
shifted_x = torch.roll(x, shifts=(-shift_size[0], -shift_size[1]), dims=(1, 2))
attn_mask = mask_matrix
else:
shifted_x = x
attn_mask = None
x_windows = window_partition(shifted_x, window_size)
attn_windows = self.attn(x_windows, mask=attn_mask)
attn_windows = attn_windows.view(-1, *(window_size + (c,)))
shifted_x = window_reverse(attn_windows, window_size, dims)
if any(i > 0 for i in shift_size):
if len(x_shape) == 5:
x = torch.roll(shifted_x, shifts=(shift_size[0], shift_size[1], shift_size[2]), dims=(1, 2, 3))
elif len(x_shape) == 4:
x = torch.roll(shifted_x, shifts=(shift_size[0], shift_size[1]), dims=(1, 2))
else:
x = shifted_x
if len(x_shape) == 5:
if pad_d1 > 0 or pad_r > 0 or pad_b > 0:
x = x[:, :d, :h, :w, :].contiguous()
elif len(x_shape) == 4:
if pad_r > 0 or pad_b > 0:
x = x[:, :h, :w, :].contiguous()
return x
def forward_part2(self, x):
return self.drop_path(self.mlp(self.norm2(x)))
def load_from(self, weights, n_block, layer):
root = f"module.{layer}.0.blocks.{n_block}."
block_names = [
"norm1.weight",
"norm1.bias",
"attn.relative_position_bias_table",
"attn.relative_position_index",
"attn.qkv.weight",
"attn.qkv.bias",
"attn.proj.weight",
"attn.proj.bias",
"norm2.weight",
"norm2.bias",
"mlp.fc1.weight",
"mlp.fc1.bias",
"mlp.fc2.weight",
"mlp.fc2.bias",
]
with torch.no_grad():
self.norm1.weight.copy_(weights["state_dict"][root + block_names[0]])
self.norm1.bias.copy_(weights["state_dict"][root + block_names[1]])
self.attn.relative_position_bias_table.copy_(weights["state_dict"][root + block_names[2]])
self.attn.relative_position_index.copy_(weights["state_dict"][root + block_names[3]])
self.attn.qkv.weight.copy_(weights["state_dict"][root + block_names[4]])
self.attn.qkv.bias.copy_(weights["state_dict"][root + block_names[5]])
self.attn.proj.weight.copy_(weights["state_dict"][root + block_names[6]])
self.attn.proj.bias.copy_(weights["state_dict"][root + block_names[7]])
self.norm2.weight.copy_(weights["state_dict"][root + block_names[8]])
self.norm2.bias.copy_(weights["state_dict"][root + block_names[9]])
self.mlp.linear1.weight.copy_(weights["state_dict"][root + block_names[10]])
self.mlp.linear1.bias.copy_(weights["state_dict"][root + block_names[11]])
self.mlp.linear2.weight.copy_(weights["state_dict"][root + block_names[12]])
self.mlp.linear2.bias.copy_(weights["state_dict"][root + block_names[13]])
def forward(self, x, mask_matrix):
shortcut = x
if self.use_checkpoint:
x = checkpoint.checkpoint(self.forward_part1, x, mask_matrix)
else:
x = self.forward_part1(x, mask_matrix)
x = shortcut + self.drop_path(x)
if self.use_checkpoint:
x = x + checkpoint.checkpoint(self.forward_part2, x)
else:
x = x + self.forward_part2(x)
return x
class PatchMerging(nn.Module):
"""
Patch merging layer based on: "Liu et al.,
Swin Transformer: Hierarchical Vision Transformer using Shifted Windows
<https://arxiv.org/abs/2103.14030>"
https://github.com/microsoft/Swin-Transformer
"""
def __init__(
self, dim: int, norm_layer: Type[LayerNorm] = nn.LayerNorm, spatial_dims: int = 3
) -> None: # type: ignore
"""
Args:
dim: number of feature channels.
norm_layer: normalization layer.
spatial_dims: number of spatial dims.
"""
super().__init__()
self.dim = dim
if spatial_dims == 3:
self.reduction = nn.Linear(8 * dim, 2 * dim, bias=False)
self.norm = norm_layer(8 * dim)
elif spatial_dims == 2:
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x):
x_shape = x.size()
if len(x_shape) == 5:
b, d, h, w, c = x_shape
pad_input = (h % 2 == 1) or (w % 2 == 1) or (d % 2 == 1)
if pad_input:
x = F.pad(x, (0, 0, 0, d % 2, 0, w % 2, 0, h % 2))
x0 = x[:, 0::2, 0::2, 0::2, :]
x1 = x[:, 1::2, 0::2, 0::2, :]
x2 = x[:, 0::2, 1::2, 0::2, :]
x3 = x[:, 0::2, 0::2, 1::2, :]
x4 = x[:, 1::2, 0::2, 1::2, :]
x5 = x[:, 1::2, 1::2, 0::2, :]
x6 = x[:, 0::2, 1::2, 1::2, :]
x7 = x[:, 1::2, 1::2, 1::2, :]
x = torch.cat([x0, x1, x2, x3, x4, x5, x6, x7], -1)
elif len(x_shape) == 4:
b, h, w, c = x_shape
pad_input = (h % 2 == 1) or (w % 2 == 1)
if pad_input:
x = F.pad(x, (0, 0, 0, w % 2, 0, h % 2))
x0 = x[:, 0::2, 0::2, :]
x1 = x[:, 1::2, 0::2, :]
x2 = x[:, 0::2, 1::2, :]
x3 = x[:, 1::2, 1::2, :]
x = torch.cat([x0, x1, x2, x3], -1)
x = self.norm(x)
x = self.reduction(x)
return x
def compute_mask(dims, window_size, shift_size, device):
"""Computing region masks based on: "Liu et al.,
Swin Transformer: Hierarchical Vision Transformer using Shifted Windows
<https://arxiv.org/abs/2103.14030>"
https://github.com/microsoft/Swin-Transformer
Args:
dims: dimension values.
window_size: local window size.
shift_size: shift size.
device: device.
"""
cnt = 0
if len(dims) == 3:
d, h, w = dims
img_mask = torch.zeros((1, d, h, w, 1), device=device)
for d in slice(-window_size[0]), slice(-window_size[0], -shift_size[0]), slice(-shift_size[0], None):
for h in slice(-window_size[1]), slice(-window_size[1], -shift_size[1]), slice(-shift_size[1], None):
for w in slice(-window_size[2]), slice(-window_size[2], -shift_size[2]), slice(-shift_size[2], None):
img_mask[:, d, h, w, :] = cnt
cnt += 1
elif len(dims) == 2:
h, w = dims
img_mask = torch.zeros((1, h, w, 1), device=device)
for h in slice(-window_size[0]), slice(-window_size[0], -shift_size[0]), slice(-shift_size[0], None):
for w in slice(-window_size[1]), slice(-window_size[1], -shift_size[1]), slice(-shift_size[1], None):
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, window_size)
mask_windows = mask_windows.squeeze(-1)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
return attn_mask
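# A short note on the helper above (a reading of the code, not quoted from the MONAI docs):
# each shifted-window region receives a distinct integer id in img_mask; attn_mask is then
# 0 for token pairs drawn from the same region and -100.0 (an effectively -inf logit) for
# pairs from different regions, matching how it is added to the logits in WindowAttention.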
class BasicLayer(nn.Module):
"""
Basic Swin Transformer layer in one stage based on: "Liu et al.,
Swin Transformer: Hierarchical Vision Transformer using Shifted Windows
<https://arxiv.org/abs/2103.14030>"
https://github.com/microsoft/Swin-Transformer
"""
def __init__(
self,
dim: int,
depth: int,
num_heads: int,
window_size: Sequence[int],
drop_path: list,
mlp_ratio: float = 4.0,
qkv_bias: bool = False,
drop: float = 0.0,
attn_drop: float = 0.0,
norm_layer: Type[LayerNorm] = nn.LayerNorm, # type: ignore
downsample: Union[Type[nn.Module], None] = None,  # type: ignore
use_checkpoint: bool = False,
) -> None:
"""
Args:
dim: number of feature channels.
depth: number of layers in this stage.
num_heads: number of attention heads.
window_size: local window size.
drop_path: stochastic depth rate.
mlp_ratio: ratio of mlp hidden dim to embedding dim.
qkv_bias: add a learnable bias to query, key, value.
drop: dropout rate.
attn_drop: attention dropout rate.
norm_layer: normalization layer.
downsample: downsample layer at the end of the layer.
use_checkpoint: use gradient checkpointing for reduced memory usage.
"""
super().__init__()
self.window_size = window_size
self.shift_size = tuple(i // 2 for i in window_size)
self.no_shift = tuple(0 for i in window_size)
self.depth = depth
self.use_checkpoint = use_checkpoint
self.blocks = nn.ModuleList(
[
SwinTransformerBlock(
dim=dim,
num_heads=num_heads,
window_size=self.window_size,
shift_size=self.no_shift if (i % 2 == 0) else self.shift_size,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
drop=drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer,
use_checkpoint=use_checkpoint,
)
for i in range(depth)
]
)
self.downsample = downsample
if self.downsample is not None:
self.downsample = downsample(dim=dim, norm_layer=norm_layer, spatial_dims=len(self.window_size))
def forward(self, x):
x_shape = x.size()
if len(x_shape) == 5:
b, c, d, h, w = x_shape
window_size, shift_size = get_window_size((d, h, w), self.window_size, self.shift_size)
x = rearrange(x, "b c d h w -> b d h w c")
dp = int(np.ceil(d / window_size[0])) * window_size[0]
hp = int(np.ceil(h / window_size[1])) * window_size[1]
wp = int(np.ceil(w / window_size[2])) * window_size[2]
attn_mask = compute_mask([dp, hp, wp], window_size, shift_size, x.device)
for blk in self.blocks:
x = blk(x, attn_mask)
x = x.view(b, d, h, w, -1)
if self.downsample is not None:
x = self.downsample(x)
x = rearrange(x, "b d h w c -> b c d h w")
elif len(x_shape) == 4:
b, c, h, w = x_shape
window_size, shift_size = get_window_size((h, w), self.window_size, self.shift_size)
x = rearrange(x, "b c h w -> b h w c")
hp = int(np.ceil(h / window_size[0])) * window_size[0]
wp = int(np.ceil(w / window_size[1])) * window_size[1]
attn_mask = compute_mask([hp, wp], window_size, shift_size, x.device)
for blk in self.blocks:
x = blk(x, attn_mask)
x = x.view(b, h, w, -1)
if self.downsample is not None:
x = self.downsample(x)
x = rearrange(x, "b h w c -> b c h w")
return x
class SwinTransformer(nn.Module):
"""
Swin Transformer based on: "Liu et al.,
Swin Transformer: Hierarchical Vision Transformer using Shifted Windows
<https://arxiv.org/abs/2103.14030>"
https://github.com/microsoft/Swin-Transformer
"""
def __init__(
self,
in_chans: int,
embed_dim: int,
window_size: Sequence[int],
patch_size: Sequence[int],
depths: Sequence[int],
num_heads: Sequence[int],
mlp_ratio: float = 4.0,
qkv_bias: bool = True,
drop_rate: float = 0.0,
attn_drop_rate: float = 0.0,
drop_path_rate: float = 0.0,
norm_layer: Type[LayerNorm] = nn.LayerNorm, # type: ignore
patch_norm: bool = False,
use_checkpoint: bool = False,
spatial_dims: int = 3,
) -> None:
"""
Args:
in_chans: dimension of input channels.
embed_dim: number of linear projection output channels.
window_size: local window size.
patch_size: patch size.
depths: number of layers in each stage.
num_heads: number of attention heads.
mlp_ratio: ratio of mlp hidden dim to embedding dim.
qkv_bias: add a learnable bias to query, key, value.
drop_rate: dropout rate.
attn_drop_rate: attention dropout rate.
drop_path_rate: stochastic depth rate.
norm_layer: normalization layer.
patch_norm: add normalization after patch embedding.
use_checkpoint: use gradient checkpointing for reduced memory usage.
spatial_dims: spatial dimension.
"""
super().__init__()
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.patch_norm = patch_norm
self.window_size = window_size
self.patch_size = patch_size
self.patch_embed = PatchEmbed(
patch_size=self.patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None, # type: ignore
spatial_dims=spatial_dims,
)
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
self.layers1 = nn.ModuleList()
self.layers2 = nn.ModuleList()
self.layers3 = nn.ModuleList()
self.layers4 = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(
dim=int(embed_dim * 2**i_layer),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=self.window_size,
drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
drop=drop_rate,
attn_drop=attn_drop_rate,
norm_layer=norm_layer,
downsample=PatchMerging,
use_checkpoint=use_checkpoint,
)
if i_layer == 0:
self.layers1.append(layer)
elif i_layer == 1:
self.layers2.append(layer)
elif i_layer == 2:
self.layers3.append(layer)
elif i_layer == 3:
self.layers4.append(layer)
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
def proj_out(self, x, normalize=False):
if normalize:
x_shape = x.size()
if len(x_shape) == 5:
n, ch, d, h, w = x_shape
x = rearrange(x, "n c d h w -> n d h w c")
x = F.layer_norm(x, [ch])
x = rearrange(x, "n d h w c -> n c d h w")
elif len(x_shape) == 4:
n, ch, h, w = x_shape
x = rearrange(x, "n c h w -> n h w c")
x = F.layer_norm(x, [ch])
x = rearrange(x, "n h w c -> n c h w")
return x
def forward(self, x, normalize=True):
x0 = self.patch_embed(x)
x0 = self.pos_drop(x0)
x0_out = self.proj_out(x0, normalize)
x1 = self.layers1[0](x0.contiguous())
x1_out = self.proj_out(x1, normalize)
x2 = self.layers2[0](x1.contiguous())
x2_out = self.proj_out(x2, normalize)
x3 = self.layers3[0](x2.contiguous())
x3_out = self.proj_out(x3, normalize)
x4 = self.layers4[0](x3.contiguous())
x4_out = self.proj_out(x4, normalize)
return [x0_out, x1_out, x2_out, x3_out, x4_out]
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.nn.Identity",
"torch.cat",
"torch.roll",
"torch.nn.ModuleList",
"torch.nn.Softmax",
"torch.arange",
"torch.nn.functional.layer_norm",
"torch.no_grad",
"torch.meshgrid",
"torch.utils.checkpoint.checkpoint",
"torch.flatten",
"torch.nn.functional.pad"
] | 1.7 | function2-llx/MONAI | 4cddaa830b61b88ec78e089bb5f21e05bb1a78f4 |
1.6 | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from parameterized import parameterized
from monai.engines import PrepareBatchExtraInput, SupervisedEvaluator
from tests.utils import assert_allclose
TEST_CASE_0 = [
{"extra_keys": "extra1"},
{"x": torch.tensor([1, 2]), "t1": torch.tensor([5, 6]), "t2": None, "t3": None},
]
TEST_CASE_1 = [
{"extra_keys": ["extra1", "extra3"]},
{"x": torch.tensor([1, 2]), "t1": torch.tensor([5, 6]), "t2": "test", "t3": None},
]
TEST_CASE_2 = [
{"extra_keys": {"t1": "extra2", "t2": "extra3", "t3": "extra1"}},
{"x": torch.tensor([1, 2]), "t1": 16, "t2": "test", "t3": torch.tensor([5, 6])},
]
class TestNet(torch.nn.Module):
def forward(self, x: torch.Tensor, t1=None, t2=None, t3=None):
return {"x": x, "t1": t1, "t2": t2, "t3": t3}
class TestPrepareBatchExtraInput(unittest.TestCase):
@parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_2])
def test_content(self, input_args, expected_value):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dataloader = [
{
"image": torch.tensor([1, 2]),
"label": torch.tensor([3, 4]),
"extra1": torch.tensor([5, 6]),
"extra2": 16,
"extra3": "test",
}
]
# set up engine
evaluator = SupervisedEvaluator(
device=device,
val_data_loader=dataloader,
epoch_length=1,
network=TestNet(),
non_blocking=True,
prepare_batch=PrepareBatchExtraInput(**input_args),
decollate=False,
)
evaluator.run()
output = evaluator.state.output
assert_allclose(output["image"], torch.tensor([1, 2], device=device))
assert_allclose(output["label"], torch.tensor([3, 4], device=device))
for k, v in output["pred"].items():
if isinstance(v, torch.Tensor):
assert_allclose(v, expected_value[k].to(device))
else:
self.assertEqual(v, expected_value[k])
if __name__ == "__main__":
unittest.main()
| [
"torch.cuda.is_available",
"torch.tensor"
] | 1.6 | function2-llx/MONAI | e0db5a564225a7cb62e7a23df97267019006302f |
1.6 | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
from unittest import skipUnless
import torch
from ignite.engine import Engine
from parameterized import parameterized
from monai.config import IgniteInfo
from monai.data import Dataset
from monai.handlers import GarbageCollector
from monai.utils import min_version, optional_import
Events, has_ignite = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Events")
TEST_CASE_0 = [[0, 1, 2], "epoch"]
TEST_CASE_1 = [[0, 1, 2], "iteration"]
TEST_CASE_2 = [[0, 1, 2], Events.EPOCH_COMPLETED]
class TestHandlerGarbageCollector(unittest.TestCase):
@skipUnless(has_ignite, "Requires ignite")
@parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_2])
def test_content(self, data, trigger_event):
# set up engine
gb_count_dict = {}
def _train_func(engine, batch):
# store garbage collection counts
if trigger_event == Events.EPOCH_COMPLETED or trigger_event.lower() == "epoch":
if engine.state.iteration % engine.state.epoch_length == 1:
gb_count_dict[engine.state.epoch] = gc.get_count()
elif trigger_event.lower() == "iteration":
gb_count_dict[engine.state.iteration] = gc.get_count()
engine = Engine(_train_func)
# set up testing handler
dataset = Dataset(data, transform=None)
data_loader = torch.utils.data.DataLoader(dataset, batch_size=1)
GarbageCollector(trigger_event=trigger_event, log_level=30).attach(engine)
engine.run(data_loader, max_epochs=5)
first_count = 0
for iter, gb_count in gb_count_dict.items():
# At least one zero-generation object is collected
# self.assertGreaterEqual(gb_count[0], 0)
if iter > 1:
# Since we are collecting all objects from all generations manually at each call,
# starting from the second call, there shouldn't be any 1st and 2nd
# generation objects available to collect.
self.assertEqual(gb_count[1], first_count)
self.assertEqual(gb_count[2], first_count)
if __name__ == "__main__":
unittest.main()
| [
"torch.utils.data.DataLoader"
] | 1.6 | function2-llx/MONAI | e0db5a564225a7cb62e7a23df97267019006302f |
1.7 | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
# Adapted from https://github.com/pytorch/vision/blob/main/torchvision/models/detection/_utils.py
# which has the following license...
# https://github.com/pytorch/vision/blob/main/LICENSE
#
# BSD 3-Clause License
# Copyright (c) Soumith Chintala 2016,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This script is modified from torchvision to support N-D images,
https://github.com/pytorch/vision/blob/main/torchvision/models/detection/_utils.py
"""
import math
from typing import Sequence, Tuple, Union
import torch
from torch import Tensor
from monai.data.box_utils import COMPUTE_DTYPE, CenterSizeMode, StandardMode, convert_box_mode, is_valid_box_values
from monai.utils.module import look_up_option
def encode_boxes(gt_boxes: Tensor, proposals: Tensor, weights: Tensor) -> Tensor:
"""
Encode a set of proposals with respect to some reference ground truth (gt) boxes.
Args:
gt_boxes: gt boxes, Nx4 or Nx6 torch tensor. The box mode is assumed to be ``StandardMode``
proposals: boxes to be encoded, Nx4 or Nx6 torch tensor. The box mode is assumed to be ``StandardMode``
weights: the weights for ``(cx, cy, w, h) or (cx,cy,cz, w,h,d)``
Return:
encoded gt, target of box regression that is used to convert proposals into gt_boxes, Nx4 or Nx6 torch tensor.
"""
if gt_boxes.shape[0] != proposals.shape[0]:
raise ValueError("gt_boxes.shape[0] should be equal to proposals.shape[0].")
spatial_dims = look_up_option(len(weights), [4, 6]) // 2
if not is_valid_box_values(gt_boxes):
raise ValueError("gt_boxes is not valid. Please check if it contains empty boxes.")
if not is_valid_box_values(proposals):
raise ValueError("proposals is not valid. Please check if it contains empty boxes.")
# implementation starts here
ex_cccwhd: Tensor = convert_box_mode(proposals, src_mode=StandardMode, dst_mode=CenterSizeMode) # type: ignore
gt_cccwhd: Tensor = convert_box_mode(gt_boxes, src_mode=StandardMode, dst_mode=CenterSizeMode) # type: ignore
targets_dxyz = (
weights[:spatial_dims].unsqueeze(0)
* (gt_cccwhd[:, :spatial_dims] - ex_cccwhd[:, :spatial_dims])
/ ex_cccwhd[:, spatial_dims:]
)
targets_dwhd = weights[spatial_dims:].unsqueeze(0) * torch.log(
gt_cccwhd[:, spatial_dims:] / ex_cccwhd[:, spatial_dims:]
)
targets = torch.cat((targets_dxyz, targets_dwhd), dim=1)
# torch.log may cause NaN or Inf
if torch.isnan(targets).any() or torch.isinf(targets).any():
raise ValueError("targets is NaN or Inf.")
return targets
class BoxCoder:
"""
This class encodes and decodes a set of bounding boxes into
the representation used for training the regressors.
Args:
weights: 4-element tuple or 6-element tuple
boxes_xform_clip: high threshold to prevent sending too large values into torch.exp()
Example:
.. code-block:: python
box_coder = BoxCoder(weights=[1., 1., 1., 1., 1., 1.])
gt_boxes = torch.tensor([[1,2,1,4,5,6],[1,3,2,7,8,9]])
proposals = gt_boxes + torch.rand(gt_boxes.shape)
rel_gt_boxes = box_coder.encode_single(gt_boxes, proposals)
gt_back = box_coder.decode_single(rel_gt_boxes, proposals)
# We expect gt_back to be equal to gt_boxes
"""
def __init__(self, weights: Tuple[float], boxes_xform_clip: Union[float, None] = None) -> None:
if boxes_xform_clip is None:
boxes_xform_clip = math.log(1000.0 / 16)
self.spatial_dims = look_up_option(len(weights), [4, 6]) // 2
self.weights = weights
self.boxes_xform_clip = boxes_xform_clip
def encode(self, gt_boxes: Sequence[Tensor], proposals: Sequence[Tensor]) -> Tuple[Tensor]:
"""
Encode a set of proposals with respect to some ground truth (gt) boxes.
Args:
gt_boxes: list of gt boxes, Nx4 or Nx6 torch tensor. The box mode is assumed to be ``StandardMode``
proposals: list of boxes to be encoded, each element is Mx4 or Mx6 torch tensor.
The box mode is assumed to be ``StandardMode``
Return:
A tuple of encoded gt, target of box regression that is used to
convert proposals into gt_boxes, Nx4 or Nx6 torch tensor.
"""
boxes_per_image = [len(b) for b in gt_boxes]
# concat the lists to do computation
concat_gt_boxes = torch.cat(tuple(gt_boxes), dim=0)
concat_proposals = torch.cat(tuple(proposals), dim=0)
concat_targets = self.encode_single(concat_gt_boxes, concat_proposals)
# split to tuple
targets: Tuple[Tensor] = concat_targets.split(boxes_per_image, 0)
return targets
def encode_single(self, gt_boxes: Tensor, proposals: Tensor) -> Tensor:
"""
Encode proposals with respect to ground truth (gt) boxes.
Args:
gt_boxes: gt boxes, Nx4 or Nx6 torch tensor. The box mode is assumed to be ``StandardMode``
proposals: boxes to be encoded, Nx4 or Nx6 torch tensor. The box mode is assumed to be ``StandardMode``
Return:
encoded gt, target of box regression that is used to convert proposals into gt_boxes, Nx4 or Nx6 torch tensor.
"""
dtype = gt_boxes.dtype
device = gt_boxes.device
weights = torch.as_tensor(self.weights, dtype=dtype, device=device)
targets = encode_boxes(gt_boxes, proposals, weights)
return targets
def decode(self, rel_codes: Tensor, reference_boxes: Sequence[Tensor]) -> Tensor:
"""
From a set of original reference_boxes and encoded relative box offsets, get the decoded boxes.
Args:
rel_codes: encoded boxes, Nx4 or Nx6 torch tensor.
reference_boxes: a list of reference boxes, each element is Mx4 or Mx6 torch tensor.
The box mode is assumed to be ``StandardMode``
Return:
decoded boxes, Nx1x4 or Nx1x6 torch tensor. The box mode will be ``StandardMode``
"""
if not isinstance(reference_boxes, Sequence) or (not isinstance(rel_codes, torch.Tensor)):
raise ValueError("Input arguments wrong type.")
boxes_per_image = [b.size(0) for b in reference_boxes]
# concat the lists to do computation
concat_boxes = torch.cat(tuple(reference_boxes), dim=0)
box_sum = 0
for val in boxes_per_image:
box_sum += val
if box_sum > 0:
rel_codes = rel_codes.reshape(box_sum, -1)
pred_boxes = self.decode_single(rel_codes, concat_boxes)
if box_sum > 0:
pred_boxes = pred_boxes.reshape(box_sum, -1, 2 * self.spatial_dims)
return pred_boxes
def decode_single(self, rel_codes: Tensor, reference_boxes: Tensor) -> Tensor:
"""
From a set of original boxes and encoded relative box offsets, get the decoded boxes.
Args:
rel_codes: encoded boxes, Nx(4*num_box_reg) or Nx(6*num_box_reg) torch tensor.
reference_boxes: reference boxes, Nx4 or Nx6 torch tensor. The box mode is assumed to be ``StandardMode``
Return:
decoded boxes, Nx(4*num_box_reg) or Nx(6*num_box_reg) torch tensor. The box mode will be ``StandardMode``
"""
reference_boxes = reference_boxes.to(rel_codes.dtype)
offset = reference_boxes.shape[-1]
pred_boxes = []
boxes_cccwhd = convert_box_mode(reference_boxes, src_mode=StandardMode, dst_mode=CenterSizeMode)
for axis in range(self.spatial_dims):
whd_axis = boxes_cccwhd[:, axis + self.spatial_dims]
ctr_xyz_axis = boxes_cccwhd[:, axis]
dxyz_axis = rel_codes[:, axis::offset] / self.weights[axis]
dwhd_axis = rel_codes[:, self.spatial_dims + axis :: offset] / self.weights[axis + self.spatial_dims]
# Prevent sending too large values into torch.exp()
dwhd_axis = torch.clamp(dwhd_axis.to(COMPUTE_DTYPE), max=self.boxes_xform_clip)
pred_ctr_xyx_axis = dxyz_axis * whd_axis[:, None] + ctr_xyz_axis[:, None]
pred_whd_axis = torch.exp(dwhd_axis) * whd_axis[:, None]
pred_whd_axis = pred_whd_axis.to(dxyz_axis.dtype)
# When convert float32 to float16, Inf or Nan may occur
if torch.isnan(pred_whd_axis).any() or torch.isinf(pred_whd_axis).any():
raise ValueError("pred_whd_axis is NaN or Inf.")
# Distance from center to box's corner.
c_to_c_whd_axis = (
torch.tensor(0.5, dtype=pred_ctr_xyx_axis.dtype, device=pred_whd_axis.device) * pred_whd_axis
)
pred_boxes.append(pred_ctr_xyx_axis - c_to_c_whd_axis)
pred_boxes.append(pred_ctr_xyx_axis + c_to_c_whd_axis)
pred_boxes = pred_boxes[::2] + pred_boxes[1::2]
pred_boxes_final = torch.stack(pred_boxes, dim=2).flatten(1)
return pred_boxes_final
| [
"torch.cat",
"torch.stack",
"torch.isnan",
"torch.tensor",
"torch.isinf",
"torch.as_tensor",
"torch.log",
"torch.exp"
] | 1.7 | function2-llx/MONAI | 4cddaa830b61b88ec78e089bb5f21e05bb1a78f4 |
1.10 | import torch.nn as nn
from torch.optim import Adam
from src.models.Seq2seq import Seq2Seq
from src.models.Decoder import Decoder, OneStepDecoder, OneStepDecoderWithAttention, DecoderWithAttention
from src.models.Encoder import Encoder, EncoderAttention
from src.models.Attention import Attention
from src.data.config import *
def create_seq2seq(src_vocab, tgt_vocab):
"""
Creates encoder, decoder, defines optimizer, and loss function.
:param src_vocab: torchtext.vocab.vocab.Vocab
source language vocabulary
:param tgt_vocab: torchtext.vocab.vocab.Vocab
target language vocabulary
:return: model, optimizer, criterion
see : https://datascience.stackexchange.com/questions/10250/what-is-the-difference-between-objective-error-criterion-cost-loss-fun/10263
"""
# vocabularies size
src_vocab__len = len(src_vocab)
tgt_vocab__len = len(tgt_vocab)
# encoder model
encoder = Encoder(src_vocab__len, EMBEDDING_SIZE, HIDDEN_DIM, N_LAYERS, DROPOUT)
# one step decoder model
one_step_decoder = OneStepDecoder(tgt_vocab__len, EMBEDDING_SIZE, HIDDEN_DIM)
# decoder model
decoder = Decoder(one_step_decoder, device=DEVICE)
# encoder -> decoder
seq2seq = Seq2Seq(encoder, decoder)
# move the model to device
seq2seq.to(DEVICE)
# Adam optimizer
optimizer = Adam(seq2seq.parameters())
# ignore padding indices
# TGT_PAD_IDX = tgt_vocab.lookup_indices([SPECIAL_SYMBOLS[PAD_IDX]])[0]
TGT_PAD_IDX = 1
# loss function
criterion = nn.CrossEntropyLoss(ignore_index=TGT_PAD_IDX)
return seq2seq, optimizer, criterion
def create_seq2seq_with_att(src_vocab, tgt_vocab):
"""
Creates encoder, decoder, defines optimizer, and loss function with the attention mechanism
:param src_vocab: torchtext.vocab.vocab.Vocab
source language vocabulary
:param tgt_vocab: torchtext.vocab.vocab.Vocab
target language vocabulary
:return: model, optimizer, criterion
see : https://datascience.stackexchange.com/questions/10250/what-is-the-difference-between-objective-error-criterion-cost-loss-fun/10263
"""
# vocabularies size
src_vocab__len = len(src_vocab.vocab)
tgt_vocab__len = len(tgt_vocab.vocab)
# encoder model
encoder = EncoderAttention(src_vocab__len, EMBEDDING_SIZE, HIDDEN_DIM, N_LAYERS, DROPOUT)
# attention model
attention = Attention(HIDDEN_DIM, HIDDEN_DIM)
# one step decoder model
one_step_decoder = OneStepDecoderWithAttention(tgt_vocab__len, EMBEDDING_SIZE, HIDDEN_DIM, HIDDEN_DIM, attention)
# decoder model
decoder = DecoderWithAttention(one_step_decoder, device='cpu')
# encoder -> decoder
seq2seq = Seq2Seq(encoder, decoder)
# move the model to device
seq2seq.to('cpu')
# Adam optimizer
optimizer = Adam(seq2seq.parameters())
# ignore padding indices
# TGT_PAD_IDX = tgt_vocab.lookup_indices([SPECIAL_SYMBOLS[PAD_IDX]])[0]
TGT_PAD_IDX = 1
# loss function
criterion = nn.CrossEntropyLoss(ignore_index=TGT_PAD_IDX)
return seq2seq, optimizer, criterion
| [
"torch.nn.CrossEntropyLoss"
] | 1.10.1 | mhannani/ZinVert | d54e1ab1980ed70945c34d2ceb294d559126f623 |
1.4 | import os
import time
import torch
import numpy as np
from PIL import Image, ImageDraw
from matplotlib import pyplot as plt
from torch.utils.data import DataLoader, random_split
from data_utils import MyTestDataset, get_test_transforms
from models import Darknet
from utils import non_max_suppression
from conf.settings import BASE_DIR
models_path = os.path.join(BASE_DIR, "models")
images_path = os.path.join(BASE_DIR, "images")
if __name__ == "__main__":
attempt = 4
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"Running on {device}...")
model = Darknet(os.path.join(BASE_DIR, "yolo_v3/config/yolov3-custom.cfg")).to(device)
model.load_state_dict(torch.load(os.path.join(models_path, "yolo_v3_4_17.pt"), map_location=device))
model2 = Darknet(os.path.join(BASE_DIR, "yolo_v3/config/yolov3-custom.cfg")).to(device)
model2.load_state_dict(torch.load(os.path.join(models_path, "yolo_v3_4_20.pt"), map_location=device))
model3 = Darknet(os.path.join(BASE_DIR, "yolo_v3/config/yolov3-custom.cfg")).to(device)
model3.load_state_dict(torch.load(os.path.join(models_path, "yolo_v3_4_25.pt"), map_location=device))
dataset = MyTestDataset(split='stage1_train', transforms=get_test_transforms(rescale_size=(416, 416)))
test_loader = DataLoader(dataset, batch_size=1, num_workers=0, shuffle=False)
model.eval()
model2.eval()
model3.eval()
for i, (image, targets) in enumerate(test_loader):
image = image[0].to(device=device)
name = targets["name"][0]
start_time = time.time()
with torch.no_grad():
outputs = model(image)
outputs2 = model2(image)
outputs3 = model3(image)
outputs = non_max_suppression(outputs, conf_thres=0.5)
outputs2 = non_max_suppression(outputs2, conf_thres=0.5)
outputs3 = non_max_suppression(outputs3, conf_thres=0.5)
elapsed_time = time.time() - start_time
if outputs[0] is not None:
boxes = outputs[0][:, 0:4]
boxes2 = outputs2[0][:, 0:4]
boxes3 = outputs3[0][:, 0:4]
else:
continue
image_copy = Image.fromarray(image.cpu().numpy()[0, 0, :, :])
if image_copy.mode != "RGB":
image_copy = image_copy.convert("RGB")
draw = ImageDraw.Draw(image_copy)
for box in boxes:
x0, y0, x1, y1 = box
draw.rectangle([(x0, y0), (x1, y1)], outline=(255, 0, 255))
image_copy2 = Image.fromarray(image.cpu().numpy()[0, 0, :, :])
if image_copy2.mode != "RGB":
image_copy2 = image_copy2.convert("RGB")
draw = ImageDraw.Draw(image_copy2)
for box in boxes2:
x0, y0, x1, y1 = box
draw.rectangle([(x0, y0), (x1, y1)], outline=(255, 0, 255))
image_copy3 = Image.fromarray(image.cpu().numpy()[0, 0, :, :])
if image_copy3.mode != "RGB":
image_copy3 = image_copy3.convert("RGB")
draw = ImageDraw.Draw(image_copy3)
for box in boxes3:
x0, y0, x1, y1 = box
draw.rectangle([(x0, y0), (x1, y1)], outline=(255, 0, 255))
# image_copy.show()
# image_copy.save(os.path.join(images_path, f"yolo_v3/{attempt}/images/{name}.png"))
print(f"{name}, time: {elapsed_time}")
fig = plt.figure(dpi=400)
ax1 = fig.add_subplot(1, 3, 1)
ax1.imshow(image_copy)
ax2 = fig.add_subplot(1, 3, 2)
ax2.imshow(image_copy2)
ax3 = fig.add_subplot(1, 3, 3)
ax3.imshow(image_copy3)
plt.show()
| [
"torch.no_grad",
"torch.cuda.is_available",
"torch.utils.data.DataLoader"
] | 1.4.0 | Ondrados/bachelor-thesis | 1ce6f40dfdeadbdcc31a1cce785962f9cf3145fd |
1.5 | """
=====================
Vector autoregression
=====================
This example demonstrates how one can validate :code:`deepdow` on synthetic data.
We choose to model our returns with the vector autoregression model (VAR).
This model links future returns to lagged returns with a linear
model. See [Lütkepohl2005]_ for more details. We use a stable VAR
process with 12 lags and 8 assets, that is
.. math::
r_t = A_1 r_{t-1} + ... + A_{12} r_{t-12}
For this specific task, we use the :code:`LinearNet` network. It is very similar to VAR since it tries to find a linear
model of all lagged variables. However, it also has purely deep learning components like dropout, batch
normalization and softmax allocator.
To put the performance of our network into context, we create a benchmark **VARTrue** that has access to the true
parameters of the VAR process. We create a simple investment rule of investing all resources into the asset with the
highest future returns. Additionally, we also consider other benchmarks
- equally weighted portfolio
- inverse volatility
- random allocation
References
----------
.. [Lütkepohl2005]
Lütkepohl, Helmut. New introduction to multiple time series analysis. Springer Science & Business Media, 2005.
.. warning::
Note that we are using the :code:`statsmodels` package to simulate the VAR process.
"""
import numpy as np
import torch
import matplotlib.pyplot as plt
from statsmodels.tsa.vector_ar.var_model import VARProcess, forecast
from deepdow.benchmarks import OneOverN, Benchmark, InverseVolatility, Random
from deepdow.callbacks import EarlyStoppingCallback
from deepdow.data import InRAMDataset, RigidDataLoader
from deepdow.losses import MeanReturns, SquaredWeights
from deepdow.nn import LinearNet
from deepdow.experiments import Run
class VARTrue(Benchmark):
"""Benchmark representing the ground truth return process.
Parameters
----------
process : statsmodels.tsa.vector_ar.var_model.VARProcess
The ground truth VAR process that generates the returns.
"""
def __init__(self, process):
self.process = process
def __call__(self, x):
"""Invest all money into the asset with the highest return over the horizon."""
n_samples, n_channels, lookback, n_assets = x.shape
assert n_channels == 1
x_np = x.detach().numpy() # (n_samples, n_channels, lookback, n_assets)
weights_list = [forecast(x_np[i, 0], self.process.coefs, None, 1).argmax() for i in range(n_samples)]
result = torch.zeros(n_samples, n_assets).to(x.dtype)
for i, w_ix in enumerate(weights_list):
result[i, w_ix] = 1
return result
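# Usage sketch (hypothetical shapes, not from the original script): given a tensor x of shape
# (n_samples, 1, lookback, n_assets), VARTrue(process)(x) returns one-hot weights of shape
# (n_samples, n_assets), with all mass on the asset with the highest one-step-ahead forecast.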
coefs = np.load('../examples/var_coefs.npy') # (lookback, n_assets, n_assets) = (12, 8, 8)
# Parameters
lookback, _, n_assets = coefs.shape
gap, horizon = 0, 1
batch_size = 256
# Simulate returns
process = VARProcess(coefs, None, np.eye(n_assets) * 1e-5)
data = process.simulate_var(10000)
n_timesteps = len(data)
# Create features and targets
X_list, y_list = [], []
for i in range(lookback, n_timesteps - horizon - gap + 1):
X_list.append(data[i - lookback: i, :])
y_list.append(data[i + gap: i + gap + horizon, :])
X = np.stack(X_list, axis=0)[:, None, ...]
y = np.stack(y_list, axis=0)[:, None, ...]
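# X has shape (n_samples, 1, lookback, n_assets) and y has shape (n_samples, 1, horizon, n_assets),
# i.e. the 4-D (sample, channel, time, asset) layout used throughout deepdow.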
# Setup deepdow framework
dataset = InRAMDataset(X, y)
network = LinearNet(1, lookback, n_assets, p=0.5)
dataloader = RigidDataLoader(dataset,
indices=list(range(5000)),
batch_size=batch_size,
lookback=lookback)
val_dataloaders = {'train': dataloader,
'val': RigidDataLoader(dataset,
indices=list(range(5020, 9800)),
batch_size=batch_size,
lookback=lookback)}
run = Run(network,
100 * MeanReturns(),
dataloader,
val_dataloaders=val_dataloaders,
metrics={'sqweights': SquaredWeights()},
benchmarks={'1overN': OneOverN(),
'VAR': VARTrue(process),
'Random': Random(),
'InverseVol': InverseVolatility()},
optimizer=torch.optim.Adam(network.parameters(), amsgrad=True),
callbacks=[EarlyStoppingCallback('val', 'loss')]
)
history = run.launch(40)
fig, ax = plt.subplots(1, 1)
ax.set_title('Validation loss')
per_epoch_results = history.metrics.groupby(['dataloader', 'metric', 'model', 'epoch'])['value'].mean()['val']['loss']
our = per_epoch_results['network']
our.plot(ax=ax, label='network')
ax.hlines(y=per_epoch_results['VAR'], xmin=0, xmax=len(our), color='red', label='VAR')
ax.hlines(y=per_epoch_results['1overN'], xmin=0, xmax=len(our), color='green', label='1overN')
ax.hlines(y=per_epoch_results['Random'], xmin=0, xmax=len(our), color='yellow', label='Random')
ax.hlines(y=per_epoch_results['InverseVol'], xmin=0, xmax=len(our), color='black', label='InverseVol')
plt.legend()
| [
"torch.zeros"
] | 1.5 | owen1998-liu/deepdow | a815fb8072015b79e90aef619c196ea9a9389e7f |
1.4 | import torch
import torch.nn.functional as F
import torchvision
from torchvision import transforms
import os
import numpy as np
import time
from src import utils as ut
from sklearn.metrics import confusion_matrix
import skimage
from src import wrappers
from torchvision import transforms
from haven import haven_utils as hu
class ClfWrapper(torch.nn.Module):
def __init__(self, model, opt):
super().__init__()
self.model = model
self.opt = opt
def train_on_loader(self, train_loader):
return wrappers.train_on_loader(self, train_loader)
def val_on_loader(self, val_loader):
val_monitor = ClfMonitor()
return wrappers.val_on_loader(self, val_loader, val_monitor=val_monitor)
def vis_on_loader(self, vis_loader, savedir):
return wrappers.vis_on_loader(self, vis_loader, savedir=savedir)
def train_on_batch(self, batch, **extras):
self.opt.zero_grad()
labels = batch["labels"].cuda()
logits = self.model.forward(batch["images"].cuda())
loss_clf = F.binary_cross_entropy_with_logits(logits.squeeze(),
labels.squeeze().float(), reduction="mean")
loss_clf.backward()
self.opt.step()
return {"loss_clf":loss_clf.item()}
def val_on_batch(self, batch, **extras):
pred_clf = self.predict_on_batch(batch)
return (pred_clf.cpu().numpy().ravel() != batch["labels"].numpy().ravel())
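    # val_on_batch returns a boolean error mask (True where the prediction disagrees with the
    # label); ClfMonitor below accumulates these masks and reports their average as "val_clf".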
def predict_on_batch(self, batch):
images = batch["images"].cuda()
n = images.shape[0]
logits = self.model.forward(images)
return (torch.sigmoid(logits) > 0.5).float()
def vis_on_batch(self, batch, savedir_image):
self.eval()
# clf
pred_labels = float(self.predict_on_batch(batch))
img = hu.get_image(batch["image_original"], denorm="rgb")
hu.save_image(savedir_image, np.array(img))
hu.save_json(savedir_image.replace(".png",".json"),
{"pred_label":float(pred_labels), "gt_label": float(batch["labels"])})
class ClfMonitor:
def __init__(self):
self.corrects = 0
self.n_samples = 0
def add(self, corrects):
self.corrects += corrects.sum()
self.n_samples += corrects.shape[0]
def get_avg_score(self):
return {"val_clf": self.corrects/ self.n_samples} | [
"torch.sigmoid"
] | 1.4.0 | AliKhoda/DeepFish | 6769e83ab0b586e49f48e28f70607d33b5c36718 |
0.4 | from torch import nn
from torch.autograd import Variable
import torch
from utils import score, BCELoss
import numpy as np
UNK_IDX = 0
EOS_IDX = 2
class SigmoidSiameseRNN(nn.Module):
def __init__(self, config, data_config):
super(SigmoidSiameseRNN, self).__init__()
self.mode = config['mode']
self.l = self.mode[0] + 'len'
self.vocab_size = data_config[self.mode+'_size']
self.embed_size = config['embed_size']
self.hidden_size = config['hidden_size']
self.num_layers = config['num_layers']
self.bidirectional = config['bidirectional']
self.pos_weight = config['pos_weight']
self.config = config
self.data_config = data_config
self.embed = nn.Embedding(self.vocab_size, self.embed_size, padding_idx=EOS_IDX)
self.rnn = nn.LSTM(input_size=self.embed_size, hidden_size=self.hidden_size, \
num_layers=self.num_layers, batch_first=True, dropout=0.2)
self.rnn_rvs = nn.LSTM(input_size=self.embed_size, hidden_size=self.hidden_size, \
num_layers=self.num_layers, batch_first=True, dropout=0.2)
self.dropout = nn.Dropout(config['dropout'])
self.dropout2 = nn.Dropout(config['dropout2'])
self.linear_in_size = self.hidden_size
self.lstm_size = self.hidden_size
if self.bidirectional:
self.lstm_size *= 2
self.linear_in_size *= 2
if config['sim_fun'] == 'dense+':
self.linear_in_size = config['plus_size']
self.linear_in_size *= 2
if config['sim_fun'] in ['dense', 'dense+']:
self.linear_in_size = self.linear_in_size + 7 + 124 #similarity:5; len:4->2; word_bool:124
self.linear2_in_size = config['l1_size']
#self.linear3_in_size = config['l2_size']
self.linear = nn.Linear(self.linear_in_size, self.linear2_in_size)
self.linear2 = nn.Linear(self.linear2_in_size, 1)
#self.linear3 = nn.Linear(self.linear3_in_size, 1)
if config['sim_fun'] == 'dense+':
self.dense_plus = nn.Linear(self.lstm_size, config['plus_size'])
if self.config['sim_fun'] == 'dense+':
self.bn = nn.BatchNorm1d(config['plus_size'])
else:
self.bn = nn.BatchNorm1d(self.lstm_size)
self.bn_feats = nn.BatchNorm1d(self.linear_in_size)
self.bn2 = nn.BatchNorm1d(self.linear2_in_size)
self.softmax = nn.Softmax(dim=1)
self.tanh = nn.Tanh()
self.relu = nn.ReLU()
self.selu = nn.SELU()
        self.prelu = nn.PReLU()
        self.sigmoid = nn.Sigmoid()  # used by get_proba() to map logits to probabilities
self.BCELoss = BCELoss
self.optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, self.parameters()), lr=0.001)
self._init_weights()
def _init_weights(self):
nn.init.normal_(self.embed.weight[1:])
nn.init.xavier_normal_(self.linear.weight)
#nn.init.xavier_normal_(self.linear2.weight)
init_fun = nn.init.orthogonal_
for i in range(self.num_layers):
for j in range(4):
init_fun(getattr(self.rnn, 'weight_ih_l{0}'.format(i))[j*self.hidden_size:(j+1)*self.hidden_size])
init_fun(getattr(self.rnn, 'weight_hh_l{0}'.format(i))[j*self.hidden_size:(j+1)*self.hidden_size])
if self.bidirectional:
init_fun(getattr(self.rnn_rvs, 'weight_ih_l{0}'.format(i))[j*self.hidden_size:(j+1)*self.hidden_size])
init_fun(getattr(self.rnn_rvs, 'weight_hh_l{0}'.format(i))[j*self.hidden_size:(j+1)*self.hidden_size])
getattr(self.rnn, 'bias_ih_l{0}'.format(i))[self.hidden_size:2*self.hidden_size].data.fill_(1.)
getattr(self.rnn, 'bias_hh_l{0}'.format(i))[self.hidden_size:2*self.hidden_size].data.fill_(1.)
if self.bidirectional:
getattr(self.rnn_rvs, 'bias_ih_l{0}'.format(i))[self.hidden_size:2*self.hidden_size].data.fill_(1.)
getattr(self.rnn_rvs, 'bias_hh_l{0}'.format(i))[self.hidden_size:2*self.hidden_size].data.fill_(1.)
if self.config['sim_fun'] == 'dense+':
nn.init.xavier_normal_(self.dense_plus.weight)
def forward(self, data):
batch_size = data['s1_char'].size()[0]
row_idx = torch.arange(0, batch_size).long()
s1_embed = self.embed(data['s1_'+self.mode])
s2_embed = self.embed(data['s2_'+self.mode])
s1_embed = self.dropout(s1_embed)
s2_embed = self.dropout(s2_embed)
s1_out, s1_hidden = self.rnn(s1_embed)
s2_out, s2_hidden = self.rnn(s2_embed)
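        # Reduce the LSTM outputs to a fixed-size sentence vector: 'last' keeps the final hidden
        # state, 'avg' mean-pools over the unpadded length, 'max' max-pools over all timesteps.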
if self.config['representation'] == 'last': # last hidden state
s1_out = torch.squeeze(s1_out[row_idx, data['s1_'+self.l]-1, :], 1)
s2_out = torch.squeeze(s2_out[row_idx, data['s2_'+self.l]-1, :], 1)
elif self.config['representation'] == 'avg': # average of all hidden states
s1_outs = []
s2_outs = []
for i in range(batch_size):
s1_outs.append(torch.mean(s1_out[i][:data['s1_'+self.l][i]], dim=0))
s2_outs.append(torch.mean(s2_out[i][:data['s2_'+self.l][i]], dim=0))
s1_outs = torch.stack(s1_outs)
s2_outs = torch.stack(s2_outs)
elif self.config['representation'] == 'max':
s1_out, _ = torch.max(s1_out, 1)
s2_out, _ = torch.max(s2_out, 1)
if self.bidirectional:
s1_embed_rvs = self.embed(data['s1_'+self.mode+'_rvs'])
s2_embed_rvs = self.embed(data['s2_'+self.mode+'_rvs'])
s1_embed_rvs = self.dropout(s1_embed_rvs)
s2_embed_rvs = self.dropout(s2_embed_rvs)
s1_out_rvs, _ = self.rnn_rvs(s1_embed_rvs)
s2_out_rvs, _ = self.rnn_rvs(s2_embed_rvs)
if self.config['representation'] == 'last': # last hidden state
s1_out_rvs = torch.squeeze(s1_out_rvs[row_idx, data['s1_'+self.l]-1, :], 1)
s2_out_rvs = torch.squeeze(s2_out_rvs[row_idx, data['s2_'+self.l]-1, :], 1)
s1_outs = torch.cat((s1_out, s1_out_rvs), dim=1)
s2_outs = torch.cat((s2_out, s2_out_rvs), dim=1)
elif self.config['representation'] == 'avg': # average of all hidden states
s1_outs_rvs = []
s2_outs_rvs = []
for i in range(batch_size):
s1_outs_rvs.append(torch.mean(s1_out_rvs[i][:data['s1_'+self.l][i]], dim=0))
s2_outs_rvs.append(torch.mean(s2_out_rvs[i][:data['s2_'+self.l][i]], dim=0))
s1_outs = torch.cat((torch.stack(s1_outs_rvs), s1_outs), dim=1)
s2_outs = torch.cat((torch.stack(s2_outs_rvs), s2_outs), dim=1)
elif self.config['representation'] == 'max':
s1_out_rvs, _ = torch.max(s1_out_rvs, 1)
s2_out_rvs, _ = torch.max(s2_out_rvs, 1)
s1_outs = torch.cat((s1_out, s1_out_rvs), dim=1)
s2_outs = torch.cat((s2_out, s2_out_rvs), dim=1)
if self.config['sim_fun'] == 'cosine':
out = nn.functional.cosine_similarity(s1_outs, s2_outs)
elif self.config['sim_fun'] == 'cosine+':
pass
elif self.config['sim_fun'] == 'exp':
out = torch.exp(torch.neg(torch.norm(s1_outs-s2_outs, p=1, dim=1)))
elif self.config['sim_fun'] == 'gesd':
out = torch.rsqrt(torch.norm(s1_outs-s2_outs, p=2, dim=1))
out = out * (1./ (1.+torch.exp(-1*(torch.bmm(s1_outs.unsqueeze(1), s2_outs.unsqueeze(2)).squeeze()+1.))))
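            # GESD-style similarity: the product of a Euclidean-distance term (rsqrt of the L2
            # distance) and a sigmoid of the shifted dot product between the two sentence vectors.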
elif self.config['sim_fun'] in ['dense', 'dense+']:
if self.config['sim_fun'] == 'dense+':
s1_outs = self.dropout2(s1_outs)
s2_outs = self.dropout2(s2_outs)
s1_outs = self.dense_plus(s1_outs)
s2_outs = self.dense_plus(s2_outs)
# BN
#s1_outs = self.bn(s1_outs)
#s2_outs = self.bn(s2_outs)
s1_outs = self.tanh(s1_outs)
s2_outs = self.tanh(s2_outs)
sfeats = self.sfeats(data)
pair_feats = self.pair_feats(data)
#feats = torch.cat(((s1_outs-s2_outs)*(s1_outs-s2_outs), s1_outs * s2_outs, sfeats, pair_feats), dim=1)
feats = torch.cat((s1_outs, s2_outs, (s1_outs-s2_outs)*(s1_outs-s2_outs), s1_outs * s2_outs, sfeats, pair_feats), dim=1)
feats = self.bn_feats(feats)
#feats = self.dropout2(feats)
out1 = self.linear(feats)
out1 = self.selu(out1)
out1 = self.bn2(out1)
out1 = self.dropout2(out1)
#out2 = self.dropout2(self.prelu(self.linear2(out1)))
out = torch.squeeze(self.linear2(out1), 1)
return out
def sfeats(self, data):
""" Sentence level features """
s1_feats = data['s1_feats'].type(torch.FloatTensor)
s2_feats = data['s2_feats'].type(torch.FloatTensor)
feats = torch.abs(s1_feats-s2_feats).float()
if self.config['use_cuda']:
feats = feats.cuda(self.config['cuda_num'])
return feats
def pair_feats(self, data):
feats = data['pair_feats']
if self.config['use_cuda']:
feats = feats.cuda(self.config['cuda_num'])
return feats
def contrastive_loss(self, sims, labels, margin=0.3):
"""
Args:
sims: similarity between two sentences
labels: 1D tensor of 0 or 1
            margin: negative pairs add sims[i]**2 to the loss only when sims[i] > margin
"""
batch_size = labels.size()[0]
if len(sims.size()) == 0:
sims = torch.unsqueeze(sims, dim=0)
loss = torch.tensor(0.)
if self.config['use_cuda']:
loss = loss.cuda(self.config['cuda_num'])
for i, l in enumerate(labels):
loss += l*(1-sims[i])*(1-sims[i])*self.config['pos_weight']
if sims[i] > margin:
loss += (1-l)*sims[i] * sims[i]
loss = loss/batch_size
return loss
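    # A vectorized sketch of the same computation (not part of the original code), assuming
    # `sims` and `labels` are 1-D float tensors of equal length:
    #
    #     pos = labels * (1 - sims) ** 2 * self.config['pos_weight']
    #     neg = (1 - labels) * (sims > margin).float() * sims ** 2
    #     loss = (pos + neg).mean()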
def load_vectors(self, char=None, word=None):
print("Use pretrained embedding")
if char is not None:
self.embed.weight = nn.Parameter(torch.FloatTensor(char))
if word is not None:
self.embed.weight = nn.Parameter(torch.FloatTensor(word))
def get_proba(self, out):
if self.config['sim_fun'] in ['dense', 'dense+']:
sim = self.tanh(out)
proba = self.sigmoid(out)
elif self.config['sim_fun'] == 'gesd':
sim = out
proba = out
else:
sim = out
proba = sim/2.+0.5
return sim, proba
def train_step(self, data):
out = self.forward(data)
sim, proba = self.get_proba(out)
# constractive loss
loss = 0.
if 'ce' in self.config['loss']:
loss += self.config['ce_alpha'] * self.BCELoss(proba, data['target'], [1., self.pos_weight])
if 'cl' in self.config['loss']:
loss += self.contrastive_loss(proba, data['target'], margin=self.config['cl_margin'])
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
return loss.item()
def evaluate(self, data):
out = self.forward(data)
sim, proba = self.get_proba(out)
loss = 0.
if 'ce' in self.config['loss']:
loss += self.config['ce_alpha'] * self.BCELoss(proba, data['target'], [1., self.pos_weight])
if 'cl' in self.config['loss']:
loss += self.contrastive_loss(proba, data['target'], margin=self.config['cl_margin'])
return proba.tolist(), data['label'].tolist(), loss.item()
def test(self, data):
out = self.forward(data)
sim, proba = self.get_proba(out)
pred = proba.item()
return pred, data['sid'].item()
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.SELU",
"torch.nn.LSTM",
"torch.stack",
"torch.squeeze",
"torch.nn.Softmax",
"torch.norm",
"torch.FloatTensor",
"torch.unsqueeze",
"torch.abs",
"torch.nn.init.normal_",
"torch.tensor",
"torch.nn.init.xavier_normal_",
"torch.max",
"torch.nn.Tanh",
"torch.nn.ReLU",
"torch.nn.functional.cosine_similarity",
"torch.mean",
"torch.nn.Dropout",
"torch.arange",
"torch.nn.BatchNorm1d",
"torch.nn.PReLU",
"torch.nn.Embedding"
] | 0.4.0 | alanwang93/ATEC2018-NLP-PyTorch | 8e00c6af1d3e1db7ab4433a0587784e45f830347 |
1.7 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.utils.data._utils.collate import default_collate
from flash.core.data.io.input import DataKeys
from flash.core.data.io.input_transform import InputTransform
from flash.core.data.transforms import ApplyToKeys, merge_transforms
from flash.core.utilities.imports import _TORCHAUDIO_AVAILABLE, _TORCHVISION_AVAILABLE
if _TORCHVISION_AVAILABLE:
from torchvision import transforms as T
if _TORCHAUDIO_AVAILABLE:
from torchaudio import transforms as TAudio
def default_transforms(spectrogram_size: Tuple[int, int]) -> Dict[str, Callable]:
"""The default transforms for audio classification for spectrograms: resize the spectrogram, convert the
spectrogram and target to a tensor, and collate the batch."""
return {
"per_sample_transform": nn.Sequential(
ApplyToKeys(DataKeys.INPUT, T.Compose([T.ToTensor(), T.Resize(spectrogram_size)])),
ApplyToKeys(DataKeys.TARGET, torch.as_tensor),
),
"collate": default_collate,
}
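# Usage sketch (hypothetical, not from the original module): the returned mapping is consumed by
# flash's data pipeline, roughly as
#
#     transforms = default_transforms((128, 128))
#     per_sample, collate = transforms["per_sample_transform"], transforms["collate"]
#
# where `per_sample` expects the usual flash sample dict keyed by DataKeys.INPUT / DataKeys.TARGET.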
def train_default_transforms(
spectrogram_size: Tuple[int, int], time_mask_param: Optional[int], freq_mask_param: Optional[int]
) -> Dict[str, Callable]:
"""During training we apply the default transforms with optional ``TimeMasking`` and ``Frequency Masking``."""
augs = []
if time_mask_param is not None:
augs.append(ApplyToKeys(DataKeys.INPUT, TAudio.TimeMasking(time_mask_param=time_mask_param)))
if freq_mask_param is not None:
augs.append(ApplyToKeys(DataKeys.INPUT, TAudio.FrequencyMasking(freq_mask_param=freq_mask_param)))
if len(augs) > 0:
return merge_transforms(default_transforms(spectrogram_size), {"per_sample_transform": nn.Sequential(*augs)})
return default_transforms(spectrogram_size)
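# For example (hypothetical values), train_default_transforms((128, 128), time_mask_param=30,
# freq_mask_param=10) layers SpecAugment-style time and frequency masking on top of the default
# resize/to-tensor pipeline, while passing None for both simply falls back to default_transforms.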
@dataclass
class AudioClassificationInputTransform(InputTransform):
spectrogram_size: Tuple[int, int] = (128, 128)
time_mask_param: Optional[int] = None
freq_mask_param: Optional[int] = None
def train_input_per_sample_transform(self) -> Callable:
transforms = []
if self.time_mask_param is not None:
transforms.append(TAudio.TimeMasking(time_mask_param=self.time_mask_param))
if self.freq_mask_param is not None:
transforms.append(TAudio.FrequencyMasking(freq_mask_param=self.freq_mask_param))
transforms += [T.ToTensor(), T.Resize(self.spectrogram_size)]
return T.Compose(transforms)
def input_per_sample_transform(self) -> Callable:
return T.Compose([T.ToTensor(), T.Resize(self.spectrogram_size)])
def target_per_sample_transform(self) -> Callable:
return torch.as_tensor
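# Usage sketch (hypothetical values): AudioClassificationInputTransform(spectrogram_size=(64, 64),
# time_mask_param=30) resizes every spectrogram to 64x64 and, for the training input transform
# only, additionally applies torchaudio's TimeMasking.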
| [
"torch.nn.Sequential"
] | 1.7.1 | MikeTrizna/lightning-flash | 8d68c32a20d5910a255b6fc9ef6851b091cb6ed6 |