version | code | apis | full_version | repo_name | hexsha
---|---|---|---|---|---
1.4 | import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
from typing import Any, Dict, List, Type, Union, Optional
from tianshou.policy import PGPolicy
from tianshou.data import Batch, ReplayBuffer, to_torch_as, to_numpy
class A2CPolicy(PGPolicy):
"""Implementation of Synchronous Advantage Actor-Critic. arXiv:1602.01783.
:param torch.nn.Module actor: the actor network following the rules in
:class:`~tianshou.policy.BasePolicy`. (s -> logits)
:param torch.nn.Module critic: the critic network. (s -> V(s))
:param torch.optim.Optimizer optim: the optimizer for actor and critic
network.
:param dist_fn: distribution class for computing the action.
:type dist_fn: Type[torch.distributions.Distribution]
:param float discount_factor: in [0, 1]. Default to 0.99.
:param float vf_coef: weight for value loss. Default to 0.5.
:param float ent_coef: weight for entropy loss. Default to 0.01.
:param float max_grad_norm: clipping gradients in back propagation.
Default to None.
:param float gae_lambda: in [0, 1], param for Generalized Advantage
Estimation. Default to 0.95.
:param bool reward_normalization: normalize the reward to Normal(0, 1).
Default to False.
:param int max_batchsize: the maximum size of the batch when computing GAE,
depends on the size of available memory and the memory cost of the
model; should be as large as possible within the memory constraint.
Default to 256.
.. seealso::
Please refer to :class:`~tianshou.policy.BasePolicy` for more detailed
explanation.
"""
def __init__(
self,
actor: torch.nn.Module,
critic: torch.nn.Module,
optim: torch.optim.Optimizer,
dist_fn: Type[torch.distributions.Distribution],
discount_factor: float = 0.99,
vf_coef: float = 0.5,
ent_coef: float = 0.01,
max_grad_norm: Optional[float] = None,
gae_lambda: float = 0.95,
reward_normalization: bool = False,
max_batchsize: int = 256,
**kwargs: Any
) -> None:
super().__init__(None, optim, dist_fn, discount_factor, **kwargs)
self.actor = actor
self.critic = critic
assert 0.0 <= gae_lambda <= 1.0, "GAE lambda should be in [0, 1]."
self._lambda = gae_lambda
self._weight_vf = vf_coef
self._weight_ent = ent_coef
self._grad_norm = max_grad_norm
self._batch = max_batchsize
self._rew_norm = reward_normalization
def process_fn(
self, batch: Batch, buffer: ReplayBuffer, indice: np.ndarray
) -> Batch:
if self._lambda in [0.0, 1.0]:
return self.compute_episodic_return(
batch, buffer, indice,
None, gamma=self._gamma, gae_lambda=self._lambda)
v_ = []
with torch.no_grad():
for b in batch.split(self._batch, shuffle=False, merge_last=True):
v_.append(to_numpy(self.critic(b.obs_next)))
v_ = np.concatenate(v_, axis=0)
return self.compute_episodic_return(
batch, buffer, indice, v_,
gamma=self._gamma, gae_lambda=self._lambda, rew_norm=self._rew_norm)
def forward(
self,
batch: Batch,
state: Optional[Union[dict, Batch, np.ndarray]] = None,
**kwargs: Any
) -> Batch:
"""Compute action over the given batch data.
:return: A :class:`~tianshou.data.Batch` which has 4 keys:
* ``act`` the action.
* ``logits`` the network's raw output.
* ``dist`` the action distribution.
* ``state`` the hidden state.
.. seealso::
Please refer to :meth:`~tianshou.policy.BasePolicy.forward` for
more detailed explanation.
"""
logits, h = self.actor(batch.obs, state=state, info=batch.info)
if isinstance(logits, tuple):
dist = self.dist_fn(*logits)
else:
dist = self.dist_fn(logits)
act = dist.sample()
return Batch(logits=logits, act=act, state=h, dist=dist)
def learn( # type: ignore
self, batch: Batch, batch_size: int, repeat: int, **kwargs: Any
) -> Dict[str, List[float]]:
losses, actor_losses, vf_losses, ent_losses = [], [], [], []
for _ in range(repeat):
for b in batch.split(batch_size, merge_last=True):
self.optim.zero_grad()
dist = self(b).dist
v = self.critic(b.obs).flatten()
a = to_torch_as(b.act, v)
r = to_torch_as(b.returns, v)
log_prob = dist.log_prob(a).reshape(len(r), -1).transpose(0, 1)
a_loss = -(log_prob * (r - v).detach()).mean()
vf_loss = F.mse_loss(r, v) # type: ignore
ent_loss = dist.entropy().mean()
loss = a_loss + self._weight_vf * vf_loss - self._weight_ent * ent_loss
loss.backward()
if self._grad_norm is not None:
nn.utils.clip_grad_norm_(
list(self.actor.parameters()) + list(self.critic.parameters()),
max_norm=self._grad_norm,
)
self.optim.step()
actor_losses.append(a_loss.item())
vf_losses.append(vf_loss.item())
ent_losses.append(ent_loss.item())
losses.append(loss.item())
return {
"loss": losses,
"loss/actor": actor_losses,
"loss/vf": vf_losses,
"loss/ent": ent_losses,
}
| [
"torch.no_grad",
"torch.nn.functional.mse_loss"
] | 1.4.0 | Lanxiaozhi/tianshou | 0fa3f4b7a256780448b7dcdbdbeb9daf7944f1d5 |
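For reference, a minimal sketch (not part of the row above) of how an `A2CPolicy` with this constructor signature might be instantiated. The toy actor/critic modules, dimensions, and hyperparameter values are illustrative assumptions; only the constructor arguments and the (s -> logits) / (s -> V(s)) contracts come from the code above, and it presumes a tianshou release matching that code is installed.

```python
# Illustrative sketch only -- assumes a tianshou version providing the A2CPolicy shown above.
# The Actor/Critic modules and all hyperparameter values are made up for the example.
import torch
from torch import nn
from torch.distributions import Categorical
from tianshou.policy import A2CPolicy

obs_dim, act_dim = 4, 2

class Actor(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(obs_dim, 64), nn.ReLU(), nn.Linear(64, act_dim))

    def forward(self, obs, state=None, info=None):
        # matches the (s -> logits) contract used by A2CPolicy.forward above
        logits = self.net(torch.as_tensor(obs, dtype=torch.float32))
        return logits, state

class Critic(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(obs_dim, 64), nn.ReLU(), nn.Linear(64, 1))

    def forward(self, obs, **kwargs):
        # matches the (s -> V(s)) contract used in process_fn/learn above
        return self.net(torch.as_tensor(obs, dtype=torch.float32))

actor, critic = Actor(), Critic()
optim = torch.optim.Adam(list(actor.parameters()) + list(critic.parameters()), lr=3e-4)
policy = A2CPolicy(actor, critic, optim,
                   dist_fn=lambda logits: Categorical(logits=logits),
                   discount_factor=0.99, vf_coef=0.5, ent_coef=0.01,
                   gae_lambda=0.95, max_grad_norm=0.5)
```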
1.6 | import torch
from dexpression_pytorch.cnn_model.dexpression import Dexpression
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def initialize():
"""
Instantiates the model and moves it to the available device (CUDA if available, otherwise CPU).
Returns
-------
model : object
The convolutional neural network to be trained.
"""
model = Dexpression()
model = model.to(device)
return model
| [
"torch.cuda.is_available"
] | 1.6.0 | rdgozum/dexpression-pytorch | 6aac1fffee31062afda5fb1403f328d9c2502137 |
1.2 |
# Adopted from
# https://github.com/pytorch/fairseq/blob/master/fairseq/distributed_utils.py
import pickle
import torch
MAX_SIZE_LIMIT = 65533
BYTE_SIZE = 256
def enc_obj2bytes(obj, max_size=4094):
"""
Encode Python objects to PyTorch byte tensors
"""
assert max_size <= MAX_SIZE_LIMIT
byte_tensor = torch.zeros(max_size, dtype=torch.uint8)
obj_enc = pickle.dumps(obj)
obj_size = len(obj_enc)
if obj_size > max_size:
raise Exception(
'objects too large: object size {}, max size {}'.format(
obj_size, max_size
)
)
byte_tensor[0] = obj_size // 256
byte_tensor[1] = obj_size % 256
byte_tensor[2:2+obj_size] = torch.ByteTensor(list(obj_enc))
return byte_tensor
def dec_bytes2obj(byte_tensor, max_size=4094):
"""
Decode PyTorch byte tensors to Python objects
"""
assert max_size <= MAX_SIZE_LIMIT
obj_size = byte_tensor[0].item() * 256 + byte_tensor[1].item()
obj_enc = bytes(byte_tensor[2:2+obj_size].tolist())
obj = pickle.loads(obj_enc)
return obj
if __name__ == '__main__':
test_obj = [1, '2', {3: 4}, [5]]
test_obj_bytes = enc_obj2bytes(test_obj)
test_obj_dec = dec_bytes2obj(test_obj_bytes)
print(test_obj_dec == test_obj)
| [
"torch.zeros"
] | 1.2 | caodoanh2001/uit-mmf | 60359f6083b89b442c383dc7eee888e7fbf0c65f |
1.0 | # Copyright 2019 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of building XLNet language model for sample generation.
"""
import argparse
import torch
import texar.torch as tx
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', type=str, default=None,
help="Checkpoint to load model weights from.")
parser.add_argument("--pretrained-model-name", type=str,
default="xlnet-large-cased",
help="The pre-trained model to load selected in the list "
"of: `xlnet-base-cased`, `xlnet-large-cased`.")
parser.add_argument('--seed', type=int, default=None, help="Random seed.")
parser.add_argument('--nsamples', type=int, default=1,
help="Total number of samples to generate. Used in "
"non-interactive mode.")
parser.add_argument('--batch-size', type=int, default=1,
help="The batch size of input.")
parser.add_argument('--max-decoding-length', type=int, default=100,
help="The maximun length of generated text.")
parser.add_argument('--temperature', type=float, default=0.7,
help="Softmax temperature for top-k sample decoding. Must "
"be strictly greater than 0. Defaults to 0.7.")
parser.add_argument('--top-k', type=int, default=40,
help="The number of top most likely candidates to choose "
"from at each step. This is use "
"TopKSampleEmbeddingHelper for decoding. Ignored if "
"'p' is given.")
parser.add_argument('--top-p', type=float, default=None,
help="Select tokens with cumulative probability of at most "
"'top-p' when arranged in decreasing order. This "
"will use TopPSampleEmbeddingHelper for decoding.")
parser.add_argument('--interactive', action='store_true',
help="Interactive mode or not.")
args = parser.parse_args()
def main() -> None:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = tx.modules.XLNetDecoder(
pretrained_model_name=args.pretrained_model_name)
if args.checkpoint is not None:
model.load_state_dict(torch.load(args.checkpoint, map_location=device))
print(f"Loaded checkpoint from {args.checkpoint}")
model = model.to(device)
tokenizer = tx.data.XLNetTokenizer(
pretrained_model_name=args.pretrained_model_name)
# A lengthy padding text used to work around the lack of context for short
# prompts. Refer to https://github.com/rusiaaman/XLNet-gen for the rationale
# behind this.
pad_txt = """
Texar-PyTorch is an open-source toolkit based on PyTorch, aiming to
support a broad set of machine learning, especially text generation
tasks, such as machine translation, dialog, summarization, content
manipulation, language modeling, and so on. Texar is designed for both
researchers and practitioners for fast prototyping and
experimentation.
With the design goals of modularity, versatility, and extensibility in
mind, Texar extracts the common patterns underlying the diverse tasks
and methodologies, creates a library of highly reusable modules and
functionalities, and facilitates arbitrary model architectures and
algorithmic paradigms. """
pad_ids = tokenizer.map_text_to_id(pad_txt)
eod_id = tokenizer.map_token_to_id("<eod>")
pad_ids.append(eod_id)
def split_by(xs, y):
p = 0
for idx, x in enumerate(xs):
if x == y:
if idx - p > 0:
yield xs[p:idx]
p = idx + 1
if len(xs) - p > 0:
yield xs[p:]
@torch.no_grad()
def sample(text: str, length: int = 100, n_samples=3, **kwargs):
model.eval()
text = text.replace("\n", "<eop>")
tokens = pad_ids + tokenizer.map_text_to_id(text)
tokens = torch.tensor(tokens, device=device).expand(n_samples, -1)
if args.top_p:
kwargs["p"] = args.top_p
decode_output, _ = model(
start_tokens=tokens,
end_token=eod_id,
max_decoding_length=length,
print_steps=True,
helper_type=tx.modules.TopPSampleEmbeddingHelper,
**kwargs)
else:
kwargs["top_k"] = args.top_k
decode_output, _ = model(
start_tokens=tokens,
end_token=eod_id,
max_decoding_length=length,
print_steps=True,
helper_type=tx.modules.TopKSampleEmbeddingHelper,
**kwargs)
decode_samples = decode_output.sample_id.tolist()
for idx, sample_tokens in enumerate(decode_samples):
print(f"=== Sample {idx} ===")
output = "\n".join(tokenizer.map_id_to_text(xs) for xs in split_by(
sample_tokens, tokenizer.map_token_to_id("<eop>")))
print(output)
nsamples = args.nsamples
batch_size = args.batch_size
max_decoding_length = args.max_decoding_length
assert nsamples % batch_size == 0, (
"nsamples must be dividable by batch_size")
if args.interactive:
while True:
try:
raw_text = input("Model input >>> ")
while not raw_text:
print('Input should not be empty!')
raw_text = input("Model input >>> ")
sample(text=raw_text, length=max_decoding_length,
n_samples=batch_size)
except EOFError:
print("EOF entered, quitting.")
exit(0)
else:
# Generate samples from scratch
for _ in range(nsamples // batch_size):
for _ in range(args.batch_size):
sample(text="<BOS>", length=max_decoding_length,
n_samples=batch_size)
if __name__ == '__main__':
main()
| [
"torch.no_grad",
"torch.cuda.is_available",
"torch.tensor",
"torch.load"
] | 1.0.0 | ZhitingHu/texar-pytorch | 72ea115013ced8a5a2b004eacf6271184d3572a8 |
1.5 | # -*- coding: utf-8 -*-
import unittest
from nose.tools import raises
import torch
from kraken.lib import layers
class TestLayers(unittest.TestCase):
"""
Testing custom layer implementations.
"""
def setUp(self):
torch.set_grad_enabled(False)
def test_maxpool(self):
"""
Test maximum pooling layer.
"""
mp = layers.MaxPool((3, 3), (2, 2))
o = mp(torch.randn(1, 2, 32, 64))
self.assertEqual(o.shape, (1, 2, 15, 31))
def test_1d_dropout(self):
"""
Test 1d dropout layer.
"""
do = layers.Dropout(0.2, 1)
o = do(torch.randn(1, 2, 32, 64))
self.assertEqual(o.shape, (1, 2, 32, 64))
def test_2d_dropout(self):
"""
Test 2d dropout layer.
"""
do = layers.Dropout(0.2, 2)
o = do(torch.randn(1, 2, 32, 64))
self.assertEqual(o.shape, (1, 2, 32, 64))
def test_forward_rnn_layer_x(self):
"""
Test unidirectional RNN layer in x-dimension.
"""
rnn = layers.TransposedSummarizingRNN(10, 2, 'f', False, False)
o = rnn(torch.randn(1, 10, 32, 64))
self.assertEqual(o.shape, (1, 2, 32, 64))
def test_forward_rnn_layer_y(self):
"""
Test unidirectional RNN layer in y-dimension.
"""
rnn = layers.TransposedSummarizingRNN(10, 2, 'f', True, False)
o = rnn(torch.randn(1, 10, 32, 64))
self.assertEqual(o.shape, (1, 2, 32, 64))
def test_forward_rnn_layer_x_summarize(self):
"""
Test unidirectional summarizing RNN layer in x-dimension.
"""
rnn = layers.TransposedSummarizingRNN(10, 2, 'f', False, True)
o = rnn(torch.randn(1, 10, 32, 64))
self.assertEqual(o.shape, (1, 2, 32, 1))
def test_forward_rnn_layer_y_summarize(self):
"""
Test unidirectional summarizing RNN layer in y-dimension.
"""
rnn = layers.TransposedSummarizingRNN(10, 2, 'f', True, True)
o = rnn(torch.randn(1, 10, 32, 64))
self.assertEqual(o.shape, (1, 2, 1, 64))
def test_bidi_rnn_layer_x(self):
"""
Test bidirectional RNN layer in x-dimension.
"""
rnn = layers.TransposedSummarizingRNN(10, 2, 'b', False, False)
o = rnn(torch.randn(1, 10, 32, 64))
self.assertEqual(o.shape, (1, 4, 32, 64))
def test_bidi_rnn_layer_y(self):
"""
Test bidirectional RNN layer in y-dimension.
"""
rnn = layers.TransposedSummarizingRNN(10, 2, 'b', True, False)
o = rnn(torch.randn(1, 10, 32, 64))
self.assertEqual(o.shape, (1, 4, 32, 64))
def test_bidi_rnn_layer_x_summarize(self):
"""
Test bidirectional summarizing RNN layer in x-dimension.
"""
rnn = layers.TransposedSummarizingRNN(10, 2, 'b', False, True)
o = rnn(torch.randn(1, 10, 32, 64))
self.assertEqual(o.shape, (1, 4, 32, 1))
def test_bidi_rnn_layer_y_summarize(self):
"""
Test bidirectional summarizing RNN layer in y-dimension.
"""
rnn = layers.TransposedSummarizingRNN(10, 2, 'b', True, True)
o = rnn(torch.randn(1, 10, 32, 64))
self.assertEqual(o.shape, (1, 4, 1, 64))
def test_linsoftmax(self):
"""
Test basic function of linear layer.
"""
lin = layers.LinSoftmax(20, 10)
o = lin(torch.randn(1, 20, 12, 24))
self.assertEqual(o.shape, (1, 10, 12, 24))
def test_linsoftmax_train(self):
"""
Test function of linear layer in training mode (log_softmax)
"""
lin = layers.LinSoftmax(20, 10).train()
o = lin(torch.randn(1, 20, 12, 24))
self.assertLess(o.max(), 0)
def test_linsoftmax_test(self):
"""
Test function of linear layer in eval mode (softmax)
"""
lin = layers.LinSoftmax(20, 10).eval()
o = lin(torch.randn(1, 20, 12, 24))
self.assertGreaterEqual(o.min(), 0)
def test_linsoftmax_aug(self):
"""
Test basic function of linear layer with 1-augmentation.
"""
lin = layers.LinSoftmax(20, 10, True)
o = lin(torch.randn(1, 20, 12, 24))
self.assertEqual(o.shape, (1, 10, 12, 24))
def test_linsoftmax_aug_train(self):
"""
Test function of linear layer in training mode (log_softmax) with 1-augmentation
"""
lin = layers.LinSoftmax(20, 10, True).train()
o = lin(torch.randn(1, 20, 12, 24))
self.assertLess(o.max(), 0)
def test_linsoftmax_aug_test(self):
"""
Test function of linear layer in eval mode (softmax) with 1-augmentation
"""
lin = layers.LinSoftmax(20, 10, True).eval()
o = lin(torch.randn(1, 20, 12, 24))
self.assertGreaterEqual(o.min(), 0)
def test_actconv2d_lin(self):
"""
Test convolutional layer without activation.
"""
conv = layers.ActConv2D(5, 12, (3, 3), (1, 1), 'l')
o = conv(torch.randn(1, 5, 24, 12))
self.assertEqual(o.shape, (1, 12, 24, 12))
def test_actconv2d_sigmoid(self):
"""
Test convolutional layer with sigmoid activation.
"""
conv = layers.ActConv2D(5, 12, (3, 3), (1, 1), 's')
o = conv(torch.randn(1, 5, 24, 12))
self.assertTrue(0 <= o.min() <= 1)
self.assertTrue(0 <= o.max() <= 1)
def test_actconv2d_tanh(self):
"""
Test convolutional layer with tanh activation.
"""
conv = layers.ActConv2D(5, 12, (3, 3), (1, 1), 't')
o = conv(torch.randn(1, 5, 24, 12))
self.assertTrue(-1 <= o.min() <= 1)
self.assertTrue(-1 <= o.max() <= 1)
def test_actconv2d_softmax(self):
"""
Test convolutional layer with softmax activation.
"""
conv = layers.ActConv2D(5, 12, (3, 3), (1, 1), 'm')
o = conv(torch.randn(1, 5, 24, 12))
self.assertTrue(0 <= o.min() <= 1)
self.assertTrue(0 <= o.max() <= 1)
def test_actconv2d_relu(self):
"""
Test convolutional layer with relu activation.
"""
conv = layers.ActConv2D(5, 12, (3, 3), (1, 1), 'r')
o = conv(torch.randn(1, 5, 24, 12))
self.assertLessEqual(0, o.min())
self.assertLessEqual(0, o.max())
| [
"torch.randn",
"torch.set_grad_enabled"
] | 1.5.0 | eighttails/kraken | 6e3b7d6e86d673acf5633e6e23292cb82f1a114e |
1.0 | import torch
from os.path import join as oj
import os
def train_epoch(model, device, train_loader, optimizer, epoch, criterion):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
outputs = model(data)
loss = criterion(outputs, target)
loss.backward()
optimizer.step()
if batch_idx % 50 == 0:
print('\rTrain Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()), end='')
def test_epoch(model, device, test_loader, criterion):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += criterion(output, target).item()  # accumulate loss over batches
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
return test_loss
def train(model, device, train_loader, test_loader, optimizer, num_epochs, criterion, save_dir=None):
print('training...')
if save_dir is not None:
os.makedirs(save_dir, exist_ok=True)
best_loss = 1e10
test_losses = []
for epoch in range(num_epochs):
train_epoch(model, device, train_loader, optimizer, epoch+1, criterion)
test_loss = test_epoch(model, device, test_loader, criterion)
test_losses.append(test_loss)
# saving
if test_loss < best_loss:
best_loss = test_loss
if save_dir is not None:
torch.save(model.state_dict(),
oj(save_dir, f'checkpoint_{epoch}.pth'))
| [
"torch.no_grad"
] | 1.0 | Yu-Group/adaptive-wavelets | e67f726e741d83c94c3aee3ed97a772db4ce0bb3 |
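A minimal, self-contained sketch of how the `train` / `test_epoch` helpers above could be driven; the toy model, synthetic tensors, and hyperparameters are assumptions for illustration, and only the function signatures come from the code above (which is presumed to be defined in scope).

```python
# Illustrative sketch only -- assumes the train() helper above is defined in the same module.
# The model, synthetic data, and hyperparameters are made up for the example.
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# synthetic 3-class classification data
X, y = torch.randn(256, 20), torch.randint(0, 3, (256,))
train_loader = DataLoader(TensorDataset(X[:200], y[:200]), batch_size=32, shuffle=True)
test_loader = DataLoader(TensorDataset(X[200:], y[200:]), batch_size=32)

model = nn.Sequential(nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 3)).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.CrossEntropyLoss()

# trains for 3 epochs, evaluates after each one, and checkpoints improved models
train(model, device, train_loader, test_loader, optimizer,
      num_epochs=3, criterion=criterion, save_dir="checkpoints")
```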
1.1 | import copy
import pickle
import numpy as np
from skimage import io
from . import kitti_utils
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import box_utils, calibration_kitti, common_utils, object3d_kitti
from ..dataset import DatasetTemplate
import struct
from pathlib import Path  # Path is used in create_groundtruth_database()
class KittiDataset(DatasetTemplate):
def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
)
self.split = self.dataset_cfg.DATA_SPLIT[self.mode]
self.root_split_path = self.root_path / ('training' if self.split != 'test' else 'testing')
split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None
self.kitti_infos = []
self.include_kitti_data(self.mode)
def include_kitti_data(self, mode):
if self.logger is not None:
self.logger.info('Loading KITTI dataset')
kitti_infos = []
for info_path in self.dataset_cfg.INFO_PATH[mode]:
info_path = self.root_path / info_path
if not info_path.exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
kitti_infos.extend(infos)
self.kitti_infos.extend(kitti_infos)
if self.logger is not None:
self.logger.info('Total samples for KITTI dataset: %d' % (len(kitti_infos)))
def set_split(self, split):
super().__init__(
dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training, root_path=self.root_path, logger=self.logger
)
self.split = split
self.root_split_path = self.root_path / ('training' if self.split != 'test' else 'testing')
split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None
def get_lidar(self, idx):
lidar_file = self.root_split_path / 'velodyne' / ('%s.bin' % idx)
assert lidar_file.exists()
return np.fromfile(str(lidar_file), dtype=np.float32).reshape(-1, 4)
def get_image(self, idx):
"""
Loads image for a sample
Args:
idx: int, Sample index
Returns:
image: (H, W, 3), RGB Image
"""
img_file = self.root_split_path / 'image_2' / ('%s.png' % idx)
assert img_file.exists()
image = io.imread(img_file)
image = image.astype(np.float32)
image /= 255.0
return image
def get_image_shape(self, idx):
print(self.root_split_path / 'image_2' / ('%s.png' % idx))
# input()
img_file = self.root_split_path / 'image_2' / ('%s.png' % idx)
# print(img_file.exists())
assert img_file.exists()
return np.array(io.imread(img_file).shape[:2], dtype=np.int32)
def get_label(self, idx):
label_file = self.root_split_path / 'label_2' / ('%s.txt' % idx)
assert label_file.exists()
return object3d_kitti.get_objects_from_label(label_file)
def get_depth_map(self, idx):
"""
Loads depth map for a sample
Args:
idx: str, Sample index
Returns:
depth: (H, W), Depth map
"""
depth_file = self.root_split_path / 'depth_2' / ('%s.png' % idx)
assert depth_file.exists()
depth = io.imread(depth_file)
depth = depth.astype(np.float32)
depth /= 256.0
return depth
def get_calib(self, idx):
calib_file = self.root_split_path / 'calib' / ('%s.txt' % idx)
assert calib_file.exists()
return calibration_kitti.Calibration(calib_file)
def get_road_plane(self, idx):
plane_file = self.root_split_path / 'planes' / ('%s.txt' % idx)
if not plane_file.exists():
return None
with open(plane_file, 'r') as f:
lines = f.readlines()
lines = [float(i) for i in lines[3].split()]
plane = np.asarray(lines)
# Ensure normal is always facing up, this is in the rectified camera coordinate
if plane[1] > 0:
plane = -plane
norm = np.linalg.norm(plane[0:3])
plane = plane / norm
return plane
@staticmethod
def get_fov_flag(pts_rect, img_shape, calib):
"""
Args:
pts_rect:
img_shape:
calib:
Returns:
"""
pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)
val_flag_1 = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1])
val_flag_2 = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0])
val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
return pts_valid_flag
def get_infos(self, num_workers=4, has_label=True, count_inside_pts=True, sample_id_list=None):
import concurrent.futures as futures
def process_single_scene(sample_idx):
print('%s sample_idx: %s' % (self.split, sample_idx))
info = {}
pc_info = {'num_features': 4, 'lidar_idx': sample_idx}
info['point_cloud'] = pc_info
image_info = {'image_idx': sample_idx, 'image_shape': self.get_image_shape(sample_idx)}
info['image'] = image_info
calib = self.get_calib(sample_idx)
P2 = np.concatenate([calib.P2, np.array([[0., 0., 0., 1.]])], axis=0)
R0_4x4 = np.zeros([4, 4], dtype=calib.R0.dtype)
R0_4x4[3, 3] = 1.
R0_4x4[:3, :3] = calib.R0
V2C_4x4 = np.concatenate([calib.V2C, np.array([[0., 0., 0., 1.]])], axis=0)
calib_info = {'P2': P2, 'R0_rect': R0_4x4, 'Tr_velo_to_cam': V2C_4x4}
info['calib'] = calib_info
if has_label:
obj_list = self.get_label(sample_idx)
annotations = {}
annotations['name'] = np.array([obj.cls_type for obj in obj_list])
annotations['truncated'] = np.array([obj.truncation for obj in obj_list])
annotations['occluded'] = np.array([obj.occlusion for obj in obj_list])
annotations['alpha'] = np.array([obj.alpha for obj in obj_list])
annotations['bbox'] = np.concatenate([obj.box2d.reshape(1, 4) for obj in obj_list], axis=0)
annotations['dimensions'] = np.array([[obj.l, obj.h, obj.w] for obj in obj_list]) # lhw(camera) format
annotations['location'] = np.concatenate([obj.loc.reshape(1, 3) for obj in obj_list], axis=0)
annotations['rotation_y'] = np.array([obj.ry for obj in obj_list])
annotations['score'] = np.array([obj.score for obj in obj_list])
annotations['difficulty'] = np.array([obj.level for obj in obj_list], np.int32)
num_objects = len([obj.cls_type for obj in obj_list if obj.cls_type != 'DontCare'])
num_gt = len(annotations['name'])
index = list(range(num_objects)) + [-1] * (num_gt - num_objects)
annotations['index'] = np.array(index, dtype=np.int32)
loc = annotations['location'][:num_objects]
dims = annotations['dimensions'][:num_objects]
rots = annotations['rotation_y'][:num_objects]
loc_lidar = calib.rect_to_lidar(loc)
l, h, w = dims[:, 0:1], dims[:, 1:2], dims[:, 2:3]
loc_lidar[:, 2] += h[:, 0] / 2
gt_boxes_lidar = np.concatenate([loc_lidar, l, w, h, -(np.pi / 2 + rots[..., np.newaxis])], axis=1)
annotations['gt_boxes_lidar'] = gt_boxes_lidar
info['annos'] = annotations
if count_inside_pts:
points = self.get_lidar(sample_idx)
calib = self.get_calib(sample_idx)
pts_rect = calib.lidar_to_rect(points[:, 0:3])
fov_flag = self.get_fov_flag(pts_rect, info['image']['image_shape'], calib)
pts_fov = points[fov_flag]
corners_lidar = box_utils.boxes_to_corners_3d(gt_boxes_lidar)
num_points_in_gt = -np.ones(num_gt, dtype=np.int32)
for k in range(num_objects):
flag = box_utils.in_hull(pts_fov[:, 0:3], corners_lidar[k])
num_points_in_gt[k] = flag.sum()
annotations['num_points_in_gt'] = num_points_in_gt
return info
sample_id_list = sample_id_list if sample_id_list is not None else self.sample_id_list
with futures.ThreadPoolExecutor(num_workers) as executor:
infos = executor.map(process_single_scene, sample_id_list)
return list(infos)
def create_groundtruth_database(self, info_path=None, used_classes=None, split='train'):
import torch
database_save_path = Path(self.root_path) / ('gt_database' if split == 'train' else ('gt_database_%s' % split))
db_info_save_path = Path(self.root_path) / ('kitti_dbinfos_%s.pkl' % split)
database_save_path.mkdir(parents=True, exist_ok=True)
all_db_infos = {}
with open(info_path, 'rb') as f:
infos = pickle.load(f)
for k in range(len(infos)):
print('gt_database sample: %d/%d' % (k + 1, len(infos)))
info = infos[k]
sample_idx = info['point_cloud']['lidar_idx']
points = self.get_lidar(sample_idx)
annos = info['annos']
names = annos['name']
difficulty = annos['difficulty']
bbox = annos['bbox']
gt_boxes = annos['gt_boxes_lidar']
num_obj = gt_boxes.shape[0]
point_indices = roiaware_pool3d_utils.points_in_boxes_cpu(
torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)
).numpy() # (nboxes, npoints)
for i in range(num_obj):
filename = '%s_%s_%d.bin' % (sample_idx, names[i], i)
filepath = database_save_path / filename
gt_points = points[point_indices[i] > 0]
gt_points[:, :3] -= gt_boxes[i, :3]
with open(filepath, 'w') as f:
gt_points.tofile(f)
if (used_classes is None) or names[i] in used_classes:
db_path = str(filepath.relative_to(self.root_path)) # gt_database/xxxxx.bin
db_info = {'name': names[i], 'path': db_path, 'image_idx': sample_idx, 'gt_idx': i,
'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0],
'difficulty': difficulty[i], 'bbox': bbox[i], 'score': annos['score'][i]}
if names[i] in all_db_infos:
all_db_infos[names[i]].append(db_info)
else:
all_db_infos[names[i]] = [db_info]
for k, v in all_db_infos.items():
print('Database %s: %d' % (k, len(v)))
with open(db_info_save_path, 'wb') as f:
pickle.dump(all_db_infos, f)
@staticmethod
def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
"""
Args:
batch_dict:
frame_id:
pred_dicts: list of pred_dicts
pred_boxes: (N, 7), Tensor
pred_scores: (N), Tensor
pred_labels: (N), Tensor
class_names:
output_path:
Returns:
"""
def get_template_prediction(num_samples):
ret_dict = {
'name': np.zeros(num_samples), 'truncated': np.zeros(num_samples),
'occluded': np.zeros(num_samples), 'alpha': np.zeros(num_samples),
'bbox': np.zeros([num_samples, 4]), 'dimensions': np.zeros([num_samples, 3]),
'location': np.zeros([num_samples, 3]), 'rotation_y': np.zeros(num_samples),
'score': np.zeros(num_samples), 'boxes_lidar': np.zeros([num_samples, 7])
}
return ret_dict
def generate_single_sample_dict(batch_index, box_dict):
pred_scores = box_dict['pred_scores'].cpu().numpy()
pred_boxes = box_dict['pred_boxes'].cpu().numpy()
pred_labels = box_dict['pred_labels'].cpu().numpy()
pred_dict = get_template_prediction(pred_scores.shape[0])
if pred_scores.shape[0] == 0:
return pred_dict
calib = batch_dict['calib'][batch_index]
image_shape = batch_dict['image_shape'][batch_index].cpu().numpy()
pred_boxes_camera = box_utils.boxes3d_lidar_to_kitti_camera(pred_boxes, calib)
pred_boxes_img = box_utils.boxes3d_kitti_camera_to_imageboxes(
pred_boxes_camera, calib, image_shape=image_shape
)
pred_dict['name'] = np.array(class_names)[pred_labels - 1]
pred_dict['alpha'] = -np.arctan2(-pred_boxes[:, 1], pred_boxes[:, 0]) + pred_boxes_camera[:, 6]
pred_dict['bbox'] = pred_boxes_img
pred_dict['dimensions'] = pred_boxes_camera[:, 3:6]
pred_dict['location'] = pred_boxes_camera[:, 0:3]
pred_dict['rotation_y'] = pred_boxes_camera[:, 6]
pred_dict['score'] = pred_scores
pred_dict['boxes_lidar'] = pred_boxes
return pred_dict
annos = []
for index, box_dict in enumerate(pred_dicts):
frame_id = batch_dict['frame_id'][index]
single_pred_dict = generate_single_sample_dict(index, box_dict)
single_pred_dict['frame_id'] = frame_id
annos.append(single_pred_dict)
if output_path is not None:
cur_det_file = output_path / ('%s.txt' % frame_id)
with open(cur_det_file, 'w') as f:
bbox = single_pred_dict['bbox']
loc = single_pred_dict['location']
dims = single_pred_dict['dimensions'] # lhw -> hwl
for idx in range(len(bbox)):
print('%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f'
% (single_pred_dict['name'][idx], single_pred_dict['alpha'][idx],
bbox[idx][0], bbox[idx][1], bbox[idx][2], bbox[idx][3],
dims[idx][1], dims[idx][2], dims[idx][0], loc[idx][0],
loc[idx][1], loc[idx][2], single_pred_dict['rotation_y'][idx],
single_pred_dict['score'][idx]), file=f)
return annos
def evaluation(self, det_annos, class_names, **kwargs):
if 'annos' not in self.kitti_infos[0].keys():
return None, {}
from .kitti_object_eval_python import eval as kitti_eval
eval_det_annos = copy.deepcopy(det_annos)
eval_gt_annos = [copy.deepcopy(info['annos']) for info in self.kitti_infos]
ap_result_str, ap_dict = kitti_eval.get_official_eval_result(eval_gt_annos, eval_det_annos, class_names)
return ap_result_str, ap_dict
def __len__(self):
if self._merge_all_iters_to_one_epoch:
return len(self.kitti_infos) * self.total_epochs
return len(self.kitti_infos)
def __getitem__(self, index):
# index = 4
if self._merge_all_iters_to_one_epoch:
index = index % len(self.kitti_infos)
info = copy.deepcopy(self.kitti_infos[index])
sample_idx = info['point_cloud']['lidar_idx']
img_shape = info['image']['image_shape']
calib = self.get_calib(sample_idx)
get_item_list = self.dataset_cfg.get('GET_ITEM_LIST', ['points'])
input_dict = {
'frame_id': sample_idx,
'calib': calib,
}
if 'annos' in info:
annos = info['annos']
annos = common_utils.drop_info_with_name(annos, name='DontCare')
loc, dims, rots = annos['location'], annos['dimensions'], annos['rotation_y']
gt_names = annos['name']
gt_boxes_camera = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1).astype(np.float32)
gt_boxes_lidar = box_utils.boxes3d_kitti_camera_to_lidar(gt_boxes_camera, calib)
input_dict.update({
'gt_names': gt_names,
'gt_boxes': gt_boxes_lidar
})
if "gt_boxes2d" in get_item_list:
input_dict['gt_boxes2d'] = annos["bbox"]
road_plane = self.get_road_plane(sample_idx)
if road_plane is not None:
input_dict['road_plane'] = road_plane
if "points" in get_item_list:
points = self.get_lidar(sample_idx)
if self.dataset_cfg.FOV_POINTS_ONLY:
pts_rect = calib.lidar_to_rect(points[:, 0:3])
fov_flag = self.get_fov_flag(pts_rect, img_shape, calib)
points = points[fov_flag]
'''
filename = str(sample_idx)+'.bin'
binfile = open(filename, 'wb')
binfile.write(points)
binfile.close()
input()
'''
# print(points.shape)
input_dict['points'] = points
if "images" in get_item_list:
input_dict['images'] = self.get_image(sample_idx)
if "depth_maps" in get_item_list:
input_dict['depth_maps'] = self.get_depth_map(sample_idx)
if "calib_matricies" in get_item_list:
input_dict["trans_lidar_to_cam"], input_dict["trans_cam_to_img"] = kitti_utils.calib_to_matricies(calib)
data_dict = self.prepare_data(data_dict=input_dict)
data_dict['image_shape'] = img_shape
return data_dict
def create_kitti_infos(dataset_cfg, class_names, data_path, save_path, workers=4):
dataset = KittiDataset(dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, training=False)
train_split, val_split = 'train', 'val'
train_filename = save_path / ('kitti_infos_%s.pkl' % train_split)
val_filename = save_path / ('kitti_infos_%s.pkl' % val_split)
trainval_filename = save_path / 'kitti_infos_trainval.pkl'
test_filename = save_path / 'kitti_infos_test.pkl'
print('---------------Start to generate data infos---------------')
dataset.set_split(train_split)
kitti_infos_train = dataset.get_infos(num_workers=workers, has_label=True, count_inside_pts=True)
with open(train_filename, 'wb') as f:
pickle.dump(kitti_infos_train, f)
print('Kitti info train file is saved to %s' % train_filename)
dataset.set_split(val_split)
kitti_infos_val = dataset.get_infos(num_workers=workers, has_label=True, count_inside_pts=True)
with open(val_filename, 'wb') as f:
pickle.dump(kitti_infos_val, f)
print('Kitti info val file is saved to %s' % val_filename)
with open(trainval_filename, 'wb') as f:
pickle.dump(kitti_infos_train + kitti_infos_val, f)
print('Kitti info trainval file is saved to %s' % trainval_filename)
dataset.set_split('test')
kitti_infos_test = dataset.get_infos(num_workers=workers, has_label=False, count_inside_pts=False)
with open(test_filename, 'wb') as f:
pickle.dump(kitti_infos_test, f)
print('Kitti info test file is saved to %s' % test_filename)
print('---------------Start create groundtruth database for data augmentation---------------')
dataset.set_split(train_split)
dataset.create_groundtruth_database(train_filename, split=train_split)
print('---------------Data preparation Done---------------')
if __name__ == '__main__':
import sys
if sys.argv.__len__() > 1 and sys.argv[1] == 'create_kitti_infos':
import yaml
from pathlib import Path
from easydict import EasyDict
dataset_cfg = EasyDict(yaml.load(open(sys.argv[2]),Loader=yaml.FullLoader))
ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
create_kitti_infos(
dataset_cfg=dataset_cfg,
class_names=['Car', 'Pedestrian', 'Cyclist'],
data_path=ROOT_DIR / 'data' / 'kitti',
save_path=ROOT_DIR / 'data' / 'kitti'
)
| [
"torch.from_numpy"
] | 1.1 | SH-Tan/voxel-rangenet | f2050cd30a8684fd09e561aba004adea978d3d35 |
1.0 | # standard libraries
import numpy as np
import random
import time
from collections import namedtuple, Counter
import operator
import os
from copy import deepcopy
import heapq
# pytorch
import torch
import torch.nn as nn
import torch.optim as optim
# import from other files
from .toric_model import Toric_code
from .toric_model import Action
from .toric_model import Perspective
from .Replay_memory import Replay_memory_uniform, Replay_memory_prioritized
# import networks
from NN import NN_11, NN_17
from ResNet import ResNet18, ResNet34, ResNet50, ResNet101, ResNet152
from .util import incremental_mean, convert_from_np_to_tensor, Transition
class RL():
def __init__(self, Network, Network_name, system_size=int, p_error=0.1, replay_memory_capacity=int, learning_rate=0.00025,
discount_factor=0.95, number_of_actions=3, max_nbr_actions_per_episode=50, device='cpu', replay_memory='uniform'):
# device
self.device = device
# Toric code
if system_size%2 > 0:
self.toric = Toric_code(system_size)
else:
raise ValueError('Invalid system_size, please use only odd system sizes.')
self.grid_shift = int(system_size/2)
self.max_nbr_actions_per_episode = max_nbr_actions_per_episode
self.system_size = system_size
self.p_error = p_error
# Replay Memory
self.replay_memory_capacity = replay_memory_capacity
self.replay_memory = replay_memory
if self.replay_memory == 'proportional':
self.memory = Replay_memory_prioritized(replay_memory_capacity, 0.6) # alpha
elif self.replay_memory == 'uniform':
self.memory = Replay_memory_uniform(replay_memory_capacity)
else:
raise ValueError('Invalid memory type, please use only proportional or uniform.')
# Network
self.network_name = Network_name
self.network = Network
if Network == ResNet18 or Network == ResNet34 or Network == ResNet50 or Network == ResNet101 or Network == ResNet152:
self.policy_net = self.network()
else:
self.policy_net = self.network(system_size, number_of_actions, device)
self.target_net = deepcopy(self.policy_net)
self.policy_net = self.policy_net.to(self.device)
self.target_net = self.target_net.to(self.device)
self.learning_rate = learning_rate
# hyperparameters RL
self.discount_factor = discount_factor
self.number_of_actions = number_of_actions
def save_network(self, PATH):
torch.save(self.policy_net, PATH)
def load_network(self, PATH):
self.policy_net = torch.load(PATH, map_location='cpu')
self.target_net = deepcopy(self.policy_net)
self.policy_net = self.policy_net.to(self.device)
self.target_net = self.target_net.to(self.device)
def experience_replay(self, criterion, optimizer, batch_size):
self.policy_net.train()
self.target_net.eval()
# get transitions and unpack them to minibatch
transitions, weights, indices = self.memory.sample(batch_size, 0.4) # beta parameter
mini_batch = Transition(*zip(*transitions))
# unpack action batch
batch_actions = Action(*zip(*mini_batch.action))
batch_actions = np.array(batch_actions.action) - 1
batch_actions = torch.Tensor(batch_actions).long()
batch_actions = batch_actions.to(self.device)
# preprocess batch_input and batch_target_input for the network
batch_state = self.get_batch_input(mini_batch.state)
batch_next_state = self.get_batch_input(mini_batch.next_state)
# preprocess batch_terminal and batch reward
batch_terminal = convert_from_np_to_tensor(np.array(mini_batch.terminal))
batch_terminal = batch_terminal.to(self.device)
batch_reward = convert_from_np_to_tensor(np.array(mini_batch.reward))
batch_reward = batch_reward.to(self.device)
# compute policy net output
output = self.policy_net(batch_state)
output = output.gather(1, batch_actions.view(-1, 1)).squeeze(1)
# compute target network output
target_output = self.get_target_network_output(batch_next_state, batch_size)
target_output = target_output.to(self.device)
y = batch_reward + (batch_terminal * self.discount_factor * target_output)
# compute loss and update replay memory
loss = self.get_loss(criterion, optimizer, y, output, weights, indices)
# backpropagate loss
loss.backward()
optimizer.step()
def get_loss(self, criterion, optimizer, y, output, weights, indices):
loss = criterion(y, output)
optimizer.zero_grad()
# for prioritized experience replay
if self.replay_memory == 'proportional':
loss = convert_from_np_to_tensor(np.array(weights)) * loss.cpu()
priorities = loss
priorities = np.absolute(priorities.detach().numpy())
self.memory.priority_update(indices, priorities)
return loss.mean()
def get_network_output_next_state(self, batch_next_state=float, batch_size=int, action_index=None):
self.target_net.eval()
self.policy_net.eval()
# init matrices
batch_network_output = np.zeros(batch_size)
batch_perspectives = np.zeros(shape=(batch_size, 2, self.system_size, self.system_size))
batch_actions = np.zeros(batch_size)
for i in range(batch_size):
if (batch_next_state[i].cpu().sum().item() == 0):
batch_perspectives[i,:,:,:] = np.zeros(shape=(2, self.system_size, self.system_size))
else:
perspectives = self.toric.generate_perspective(self.grid_shift, batch_next_state[i].cpu())
perspectives = Perspective(*zip(*perspectives))
perspectives = np.array(perspectives.perspective)
perspectives = convert_from_np_to_tensor(perspectives)
perspectives = perspectives.to(self.device)
# select greedy action
with torch.no_grad():
net_output = self.target_net(perspectives)
q_values_table = np.array(net_output.cpu())
row, col = np.where(q_values_table == np.max(q_values_table))
if action_index[i] == None:
batch_network_output[i] = q_values_table[row[0], col[0]]
elif action_index[i] != None:
action_from_policy_net = int(action_index[i])
batch_network_output[i] = q_values_table[row[0], action_from_policy_net]
perspective = perspectives[row[0]]
perspective = np.array(perspective.cpu())
batch_perspectives[i,:,:,:] = perspective
batch_actions[i] = col[0]
batch_network_output[i] = q_values_table[row[0], col[0]]
batch_network_output = convert_from_np_to_tensor(batch_network_output)
batch_perspectives = convert_from_np_to_tensor(batch_perspectives)
return batch_network_output, batch_perspectives, batch_actions
def get_target_network_output(self, batch_next_state, batch_size):
with torch.no_grad():
action_index = np.full(shape=(batch_size), fill_value=None)
target_output,_,_ = self.get_network_output_next_state(batch_next_state=batch_next_state,
batch_size=batch_size,
action_index=action_index)
return target_output
def get_batch_input(self, state_batch):
batch_input = np.stack(state_batch, axis=0)
batch_input = convert_from_np_to_tensor(batch_input)
return batch_input.to(self.device)
def train(self, training_steps=int, target_update=int, epsilon_start=1.0, num_of_epsilon_steps=10,
epsilon_end=0.1, reach_final_epsilon=0.5, optimizer=str,
batch_size=int, replay_start_size=int, minimum_nbr_of_qubit_errors=0):
# set network to train mode
self.policy_net.train()
# define criterion and optimizer
criterion = nn.MSELoss(reduction='none')
if optimizer == 'RMSprop':
optimizer = optim.RMSprop(self.policy_net.parameters(), lr=self.learning_rate)
elif optimizer == 'Adam':
optimizer = optim.Adam(self.policy_net.parameters(), lr=self.learning_rate)
# init counters
steps_counter = 0
update_counter = 1
iteration = 0
# define epsilon steps
epsilon = epsilon_start
num_of_steps = np.round(training_steps/num_of_epsilon_steps)
epsilon_decay = np.round((epsilon_start-epsilon_end)/num_of_epsilon_steps, 5)
epsilon_update = num_of_steps * reach_final_epsilon
# main loop over training steps
while iteration < training_steps: # include tqdm here?
num_of_steps_per_episode = 0
# initialize syndrom
self.toric = Toric_code(self.system_size)
terminal_state = 0
# generate syndroms
while terminal_state == 0:
if minimum_nbr_of_qubit_errors == 0:
self.toric.generate_random_error(self.p_error)
else:
self.toric.generate_n_random_errors(minimum_nbr_of_qubit_errors)
terminal_state = self.toric.terminal_state(self.toric.current_state)
# solve one episode
while terminal_state == 1 and num_of_steps_per_episode < self.max_nbr_actions_per_episode and iteration < training_steps:
num_of_steps_per_episode += 1
num_of_epsilon_steps += 1
steps_counter += 1
iteration += 1
# select action using epsilon greedy policy
action = self.select_action(number_of_actions=self.number_of_actions,
epsilon=epsilon,
grid_shift=self.grid_shift)
self.toric.step(action)
reward = self.get_reward()
# generate memory entry
perspective, action_memory, reward, next_perspective, terminal = self.toric.generate_memory_entry(
action, reward, self.grid_shift)
# save transition in memory
self.memory.save(Transition(perspective, action_memory, reward, next_perspective, terminal), 10000) # max priority
# experience replay
if steps_counter > replay_start_size:
update_counter += 1
self.experience_replay(criterion,
optimizer,
batch_size)
# set target_net to policy_net
if update_counter % target_update == 0:
self.target_net = deepcopy(self.policy_net)
# update epsilon
if (update_counter % epsilon_update == 0):
epsilon = np.round(np.maximum(epsilon - epsilon_decay, epsilon_end), 3)
# set next_state to new state and update terminal state
self.toric.current_state = self.toric.next_state
terminal_state = self.toric.terminal_state(self.toric.current_state)
def get_reward(self):
terminal = np.all(self.toric.next_state==0)
if terminal == True:
reward = 100
else:
defects_state = np.sum(self.toric.current_state)
defects_next_state = np.sum(self.toric.next_state)
reward = defects_state - defects_next_state
return reward
def select_action(self, number_of_actions=int, epsilon=float, grid_shift=int):
# set network in evluation mode
self.policy_net.eval()
# generate perspectives
perspectives = self.toric.generate_perspective(grid_shift, self.toric.current_state)
number_of_perspectives = len(perspectives)
# preprocess batch of perspectives and actions
perspectives = Perspective(*zip(*perspectives))
batch_perspectives = np.array(perspectives.perspective)
batch_perspectives = convert_from_np_to_tensor(batch_perspectives)
batch_perspectives = batch_perspectives.to(self.device)
batch_position_actions = perspectives.position
#choose action using epsilon greedy approach
rand = random.random()
if(1 - epsilon > rand):
# select greedy action
with torch.no_grad():
policy_net_output = self.policy_net(batch_perspectives)
q_values_table = np.array(policy_net_output.cpu())
row, col = np.where(q_values_table == np.max(q_values_table))
perspective = row[0]
max_q_action = col[0] + 1
step = Action(batch_position_actions[perspective], max_q_action)
# select random action
else:
random_perspective = random.randint(0, number_of_perspectives-1)
random_action = random.randint(1, number_of_actions)
step = Action(batch_position_actions[random_perspective], random_action)
return step
def select_action_prediction(self, number_of_actions=int, epsilon=float, grid_shift=int, prev_action=float):
# set network in eval mode
self.policy_net.eval()
# generate perspectives
perspectives = self.toric.generate_perspective(grid_shift, self.toric.current_state)
number_of_perspectives = len(perspectives)
# preprocess batch of perspectives and actions
perspectives = Perspective(*zip(*perspectives))
batch_perspectives = np.array(perspectives.perspective)
batch_perspectives = convert_from_np_to_tensor(batch_perspectives)
batch_perspectives = batch_perspectives.to(self.device)
batch_position_actions = perspectives.position
# generate action value for different perspectives
with torch.no_grad():
policy_net_output = self.policy_net(batch_perspectives)
q_values_table = np.array(policy_net_output.cpu())
#choose action using epsilon greedy approach
rand = random.random()
if(1 - epsilon > rand):
# select greedy action
row, col = np.where(q_values_table == np.max(q_values_table))
perspective = row[0]
max_q_action = col[0] + 1
step = Action(batch_position_actions[perspective], max_q_action)
if prev_action == step:
res = heapq.nlargest(2, q_values_table.flatten())
row, col = np.where(q_values_table == res[1])
perspective = row[0]
max_q_action = col[0] + 1
step = Action(batch_position_actions[perspective], max_q_action)
q_value = q_values_table[row[0], col[0]]
# select random action
else:
random_perspective = random.randint(0, number_of_perspectives-1)
random_action = random.randint(1, number_of_actions)
q_value = q_values_table[random_perspective, random_action-1]
step = Action(batch_position_actions[random_perspective], random_action)
return step, q_value
def prediction(self, num_of_predictions=1, epsilon=0.0, num_of_steps=50, PATH=None, plot_one_episode=False,
show_network=False, show_plot=False, prediction_list_p_error=float, minimum_nbr_of_qubit_errors=0, print_Q_values=False, save_prediction=True):
# load network for prediction and set eval mode
if PATH != None:
self.load_network(PATH)
self.policy_net.eval()
# init matrices
ground_state_list = np.zeros(len(prediction_list_p_error))
error_corrected_list = np.zeros(len(prediction_list_p_error))
average_number_of_steps_list = np.zeros(len(prediction_list_p_error))
mean_q_list = np.zeros(len(prediction_list_p_error))
failed_syndroms = []
failure_rate = 0
# loop through different p_error
for i, p_error in enumerate(prediction_list_p_error):
ground_state = np.ones(num_of_predictions, dtype=bool)
error_corrected = np.zeros(num_of_predictions)
mean_steps_per_p_error = 0
mean_q_per_p_error = 0
steps_counter = 0
for j in range(num_of_predictions):
num_of_steps_per_episode = 0
prev_action = 0
terminal_state = 0
# generate random syndrom
self.toric = Toric_code(self.system_size)
if minimum_nbr_of_qubit_errors == 0:
self.toric.generate_random_error(p_error)
else:
self.toric.generate_n_random_errors(minimum_nbr_of_qubit_errors)
terminal_state = self.toric.terminal_state(self.toric.current_state)
# plot one episode
if plot_one_episode == True and j == 0 and i == 0:
self.toric.plot_toric_code(self.toric.current_state, 'initial_syndrom')
init_qubit_state = deepcopy(self.toric.qubit_matrix)
# solve syndrome
while terminal_state == 1 and num_of_steps_per_episode < num_of_steps:
steps_counter += 1
num_of_steps_per_episode += 1
# choose greedy action
action, q_value = self.select_action_prediction(number_of_actions=self.number_of_actions,
epsilon=epsilon,
grid_shift=self.grid_shift,
prev_action=prev_action)
prev_action = action
self.toric.step(action)
self.toric.current_state = self.toric.next_state
terminal_state = self.toric.terminal_state(self.toric.current_state)
mean_q_per_p_error = incremental_mean(q_value, mean_q_per_p_error, steps_counter)
if plot_one_episode == True and j == 0 and i == 0:
self.toric.plot_toric_code(self.toric.current_state, 'step_'+str(num_of_steps_per_episode))
# compute mean steps
mean_steps_per_p_error = incremental_mean(num_of_steps_per_episode, mean_steps_per_p_error, j+1)
# save error corrected
error_corrected[j] = self.toric.terminal_state(self.toric.current_state) # 0: error corrected # 1: error not corrected
# update groundstate
self.toric.eval_ground_state()
ground_state[j] = self.toric.ground_state # False non trivial loops
if terminal_state == 1 or self.toric.ground_state == False:
failed_syndroms.append(init_qubit_state)
failed_syndroms.append(self.toric.qubit_matrix)
success_rate = (num_of_predictions - np.sum(error_corrected)) / num_of_predictions
error_corrected_list[i] = success_rate
ground_state_change = (num_of_predictions - np.sum(ground_state)) / num_of_predictions
ground_state_list[i] = 1 - ground_state_change
average_number_of_steps_list[i] = np.round(mean_steps_per_p_error, 1)
mean_q_list[i] = np.round(mean_q_per_p_error, 3)
return error_corrected_list, ground_state_list, average_number_of_steps_list, mean_q_list, failed_syndroms, ground_state_list, prediction_list_p_error, failure_rate
def train_for_n_epochs(self, training_steps=int, epochs=int, num_of_predictions=100, num_of_steps_prediction=50, target_update=100,
optimizer=str, save=True, directory_path='network', prediction_list_p_error=[0.1],
batch_size=32, replay_start_size=32, minimum_nbr_of_qubit_errors=0):
data_all = []
data_all = np.zeros((1, 19))
for i in range(epochs):
self.train(training_steps=training_steps,
target_update=target_update,
optimizer=optimizer,
batch_size=batch_size,
replay_start_size=replay_start_size,
minimum_nbr_of_qubit_errors=minimum_nbr_of_qubit_errors)
print('training done, epoch: ', i+1)
# evaluate network
error_corrected_list, ground_state_list, average_number_of_steps_list, mean_q_list, failed_syndroms, ground_state_list, prediction_list_p_error, failure_rate = self.prediction(num_of_predictions=num_of_predictions,
prediction_list_p_error=prediction_list_p_error,
minimum_nbr_of_qubit_errors=0,#int(self.system_size/2)+1, # why is this not taken from args? evaluation bad?
save_prediction=True,
num_of_steps=num_of_steps_prediction)
# Print evaluation lists after each epoch
print(error_corrected_list, 'error corrected')
print(ground_state_list, 'ground state conserved')
print(average_number_of_steps_list, 'average number of steps')
print(mean_q_list, 'mean q value')
data_all = np.append(data_all, np.array([[self.system_size, self.network_name, i+1, self.replay_memory, self.device, self.learning_rate, target_update, optimizer,
self.discount_factor, training_steps * (i+1), mean_q_list[0], prediction_list_p_error[0], num_of_predictions, len(failed_syndroms)/2, error_corrected_list[0], ground_state_list[0], average_number_of_steps_list[0],failure_rate, self.p_error]]), axis=0)
# save training settings in txt file
np.savetxt(directory_path + '/data_all.txt', data_all,
header='system_size, network_name, epoch, replay_memory, device, learning_rate, target_update, optimizer, discount_factor, total_training_steps, mean_q_list, prediction_list_p_error, number_of_predictions, number_of_failed_syndroms, error_corrected_list, ground_state_list, average_number_of_steps_list, failure_rate, p_error_train', delimiter=',', fmt="%s")
# save network
step = (i + 1) * training_steps
PATH = directory_path + '/network_epoch/size_{3}_{2}_epoch_{0}_memory_{7}_target_update_{5}_optimizer_{6}__steps_{4}_q_{1}_discount_{8}_learning_rate_{9}.pt'.format(
i+1, np.round(mean_q_list[0], 4), self.network_name, self.system_size, step, target_update, optimizer, self.replay_memory, self.discount_factor, self.learning_rate)
self.save_network(PATH)
return error_corrected_list
| [
"torch.nn.MSELoss",
"torch.save",
"torch.no_grad",
"torch.load",
"torch.Tensor"
] | 1.0.0 | KarlHammar/High-threshold-QEC-toric-RL | 22b14010321ea0e4298aa2640ad7816a7d89f747 |
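A heavily hedged sketch of how this `RL` class might be driven end to end. The import paths are hypothetical (they depend on this repository's package layout) and every value below is made up; only the argument names mirror the constructor and `train_for_n_epochs` signature shown above.

```python
# Illustrative sketch only -- import paths are hypothetical and depend on the repo layout;
# argument names mirror the RL constructor / train_for_n_epochs() signature above.
from src.RL import RL   # hypothetical module path
from NN import NN_11

rl = RL(Network=NN_11, Network_name='NN_11', system_size=5, p_error=0.1,
        replay_memory_capacity=20000, learning_rate=0.00025,
        discount_factor=0.95, device='cpu', replay_memory='uniform')

rl.train_for_n_epochs(training_steps=1000, epochs=2, num_of_predictions=50,
                      target_update=100, optimizer='Adam',
                      directory_path='network', prediction_list_p_error=[0.1],
                      batch_size=32, replay_start_size=32)
```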
1.3 | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
https://github.com/facebookresearch/fastMRI/blob/master/data/transforms.py
"""
import numpy as np
import torch
def to_tensor(data):
"""
Convert numpy array to PyTorch tensor. For complex arrays, the real and imaginary parts
are stacked along the last dimension.
Args:
data (np.array): Input numpy array
Returns:
torch.Tensor: PyTorch version of data
"""
if np.iscomplexobj(data):
data = np.stack((data.real, data.imag), axis=-1)
return torch.from_numpy(data)
def apply_mask(data, mask_func, seed=None):
"""
Subsample given k-space by multiplying with a mask.
Args:
data (torch.Tensor): The input k-space data. This should have at least 3 dimensions, where
dimensions -3 and -2 are the spatial dimensions, and the final dimension has size
2 (for complex values).
mask_func (callable): A function that takes a shape (tuple of ints) and a random
number seed and returns a mask.
seed (int or 1-d array_like, optional): Seed for the random number generator.
Returns:
(tuple): tuple containing:
masked data (torch.Tensor): Subsampled k-space data
mask (torch.Tensor): The generated mask
"""
shape = np.array(data.shape)
shape[:-3] = 1
mask = mask_func(shape, seed)
return torch.where(mask == 0, torch.Tensor([0]), data), mask
def fft2(data):
"""
Apply centered 2 dimensional Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The FFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = torch.fft(data, 2, normalized=True)
data = fftshift(data, dim=(-3, -2))
return data
def ifft2(data):
"""
Apply centered 2-dimensional Inverse Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The IFFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = torch.ifft(data, 2, normalized=True)
data = fftshift(data, dim=(-3, -2))
return data
def rfft2(data):
"""
Apply centered 2-dimensional Real-to-Complex Fast Fourier Transform.
Args:
data (torch.Tensor): Real valued input data containing at least 2 dimensions: dimensions
-2 & -1 are spatial dimensions. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The FFT of the input where dimensions -3 & -2 are now spatial dimensions
and dimension -1 has size 2 for real & complex values.
"""
assert data.size(-1) != 1
data = ifftshift(data, dim=(-2, -1))
data = torch.rfft(data, 2, onesided=False)
data = fftshift(data, dim=(-3, -2))
return data
def irfft2(data):
"""
Apply centered 2-dimensional Complex-to-Real Inverse Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The IFFT of the input where dimensions -2 & -1 are now spatial dimensions.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3,-2))
data = torch.irfft(data, 2, onesided=False)
data = fftshift(data, dim=(-2,-1))
return data
def complex_abs(data):
"""
Compute the absolute value of a complex valued input tensor.
Args:
data (torch.Tensor): A complex valued tensor, where the size of the final dimension
should be 2.
Returns:
torch.Tensor: Absolute value of data
"""
assert data.size(-1) == 2
return (data ** 2).sum(dim=-1).sqrt()
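# For each complex entry stored as (real, imag) in the last dimension, this is
# sqrt(real**2 + imag**2).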
def root_sum_of_squares(data, dim=0):
"""
Compute the Root Sum of Squares (RSS) transform along a given dimension of a tensor.
Args:
data (torch.Tensor): The input tensor
dim (int): The dimensions along which to apply the RSS transform
Returns:
torch.Tensor: The RSS value
"""
return torch.sqrt((data ** 2).sum(dim))
def center_crop(data, shape):
"""
Apply a center crop to the input real image or batch of real images.
Args:
data (torch.Tensor): The input tensor to be center cropped. It should have at
least 2 dimensions and the cropping is applied along the last two dimensions.
shape (int, int): The output shape. The shape should be smaller than the
corresponding dimensions of data.
Returns:
torch.Tensor: The center cropped image
"""
assert 0 < shape[0] <= data.shape[-2]
assert 0 < shape[1] <= data.shape[-1]
w_from = (data.shape[-2] - shape[0]) // 2
h_from = (data.shape[-1] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
return data[..., w_from:w_to, h_from:h_to]
def complex_center_crop(data, shape):
"""
Apply a center crop to the input image or batch of complex images.
Args:
data (torch.Tensor): The complex input tensor to be center cropped. It should
have at least 3 dimensions and the cropping is applied along dimensions
-3 and -2 and the last dimensions should have a size of 2.
shape (int, int): The output shape. The shape should be smaller than the
corresponding dimensions of data.
Returns:
torch.Tensor: The center cropped image
"""
assert 0 < shape[0] <= data.shape[-3]
assert 0 < shape[1] <= data.shape[-2]
w_from = (data.shape[-3] - shape[0]) // 2
h_from = (data.shape[-2] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
return data[..., w_from:w_to, h_from:h_to, :]
def normalize(data, mean, stddev, eps=0.):
"""
Normalize the given tensor using:
(data - mean) / (stddev + eps)
Args:
data (torch.Tensor): Input data to be normalized
mean (float): Mean value
stddev (float): Standard deviation
eps (float): Added to stddev to prevent dividing by zero
Returns:
torch.Tensor: Normalized tensor
"""
return (data - mean) / (stddev + eps)
def normalize_instance(data, eps=0.):
"""
Normalize the given tensor using:
(data - mean) / (stddev + eps)
where mean and stddev are computed from the data itself.
Args:
data (torch.Tensor): Input data to be normalized
eps (float): Added to stddev to prevent dividing by zero
Returns:
torch.Tensor: Normalized tensor
"""
mean = data.mean()
std = data.std()
return normalize(data, mean, std, eps), mean, std
# Helper functions
def roll(x, shift, dim):
"""
Similar to np.roll but applies to PyTorch Tensors
"""
if isinstance(shift, (tuple, list)):
assert len(shift) == len(dim)
for s, d in zip(shift, dim):
x = roll(x, s, d)
return x
shift = shift % x.size(dim)
if shift == 0:
return x
left = x.narrow(dim, 0, x.size(dim) - shift)
right = x.narrow(dim, x.size(dim) - shift, shift)
return torch.cat((right, left), dim=dim)
def fftshift(x, dim=None):
"""
Similar to np.fft.fftshift but applies to PyTorch Tensors
"""
if dim is None:
dim = tuple(range(x.dim()))
shift = [dim // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = x.shape[dim] // 2
else:
shift = [x.shape[i] // 2 for i in dim]
return roll(x, shift, dim)
def ifftshift(x, dim=None):
"""
Similar to np.fft.ifftshift but applies to PyTorch Tensors
"""
if dim is None:
dim = tuple(range(x.dim()))
shift = [(dim + 1) // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = (x.shape[dim] + 1) // 2
else:
shift = [(x.shape[i] + 1) // 2 for i in dim]
return roll(x, shift, dim)
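# Minimal round-trip sketch (illustrative only; assumes the legacy torch.fft/torch.ifft
# API that these helpers target, available in PyTorch <= 1.7):
#   x = to_tensor(np.random.randn(8, 8) + 1j * np.random.randn(8, 8))   # shape (8, 8, 2)
#   k = fft2(x)
#   x_rec = ifft2(k)   # equal to x up to floating-point error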
| [
"torch.rfft",
"torch.cat",
"torch.irfft",
"torch.from_numpy",
"torch.ifft",
"torch.Tensor",
"torch.fft"
] | 1.3 | jnjaby/DISCNet | 63b1859519091f8790afcc47e8c726cbefdcd0fe |
1.7 | import pyforest
import os
import logging
from dataclasses import dataclass, field
from typing import Dict, List, Optional
import sys
import torch
import nlp
from transformers import T5Tokenizer, BartTokenizer, HfArgumentParser
from datasets import list_datasets, load_dataset, list_metrics, load_metric, Dataset
import tqdm
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
task: str = field(
metadata={"help": "Which task 'qa', 'qg', 'e2e_qg', 'ans_ext', 'multi'. 'multi' means 'qa', 'qg', 'ans_ext' tasks"},
)
model_type: str = field(metadata={"help": "One of 't5', 'bart'"})
dataset_path: Optional[str] = field(
default="data/squad_multitask",
metadata={"help": "Path for dataset directory"},
)
train_file_name: Optional[str] = field(
default=None,
metadata={"help": "name for cached train dataset"},
)
valid_file_name: Optional[str] = field(
default=None,
metadata={"help": "name for cached valid dataset"},
)
valid_for_qg_only: bool = field(
default=False,
metadata={"help": "For multitask dataset valid split should contain only qg task or all tasks."}
)
qg_format: Optional[str] = field(
default='highlight_qg_format',
metadata={"help": "How to format inputs for que generation, 'highlight_qg_format' or 'prepend_qg_format'"},
)
max_source_length: Optional[int] = field(
default=512,
metadata={"help": "Max input length for the source text"},
)
max_target_length: Optional[int] = field(
default=32,
metadata={"help": "Max input length for the target text"},
)
class DataProcessor:
def __init__(self, tokenizer, model_type="t5", max_source_length=512, max_target_length=32):
self.tokenizer = tokenizer
self.max_source_length = max_source_length
self.max_target_length = max_target_length
self.model_type = model_type
self.hl_token = "<hl>"
if model_type == "t5":
self.sep_token = "<sep>"
elif model_type == "bart":
self.sep_token = "<sep>"
else:
self.sep_token = "[SEP]"
def process(self, dataset):
if self.model_type == "t5":
dataset = dataset.map(self._add_eos_examples)
dataset = dataset.map(self._add_special_tokens)
dataset = dataset.map(self._convert_to_features, batched=True)
return dataset
def _add_eos_examples(self, example):
example['source_text'] = example['source_text'] + " </s>"
example['target_text'] = example['target_text'] + " </s>"
return example
def _add_special_tokens(self, example):
example['source_text'] = example['source_text'].replace("{hl_token}", self.hl_token)
example['target_text'] = example['target_text'].replace("{sep_token}", self.sep_token)
return example
# tokenize the examples
def _convert_to_features(self, example_batch):
source_encoding = self.tokenizer.batch_encode_plus(
example_batch['source_text'],
max_length=self.max_source_length,
padding='max_length',
pad_to_max_length=True,
truncation=True,
)
target_encoding = self.tokenizer.batch_encode_plus(
example_batch['target_text'],
max_length=self.max_target_length,
padding='max_length',
pad_to_max_length=True,
truncation=True,
)
encodings = {
'source_ids': source_encoding['input_ids'],
'target_ids': target_encoding['input_ids'],
'attention_mask': source_encoding['attention_mask'],
}
return encodings
def filter_qa(example):
return example['task'] == 'qa'
def filter_qg(example):
return example['task'] == 'qg'
def filter_e2e_qg(example):
return example['task'] == 'e2e_qg'
def filter_ans_ext(example):
return example['task'] == 'ans_ext'
def filter_multi(example):
return example['task'] != 'e2e_qg'
TASK_TO_FILTER_FN = {
'qa': filter_qa,
'qg': filter_qg,
'e2e_qg': filter_e2e_qg,
'ans_ext': filter_ans_ext,
'multi': filter_multi
}
def main():
parser = HfArgumentParser((DataTrainingArguments,))
data_args = parser.parse_args_into_dataclasses()[0]
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO
)
if data_args.model_type == 't5':
tokenizer = T5Tokenizer.from_pretrained("t5-base")
else:
tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
tokenizer.add_tokens(['<sep>', '<hl>'])
train_dataset = load_dataset('eli5', split='train_eli5')
valid_dataset = load_dataset('eli5', split='validation_eli5')
processor = DataProcessor(
tokenizer,
model_type=data_args.model_type,
max_source_length=data_args.max_source_length,
max_target_length=data_args.max_target_length
)
print("Pre-processing datasets")
train_dataset = preprocess_data(train_dataset)
valid_dataset = preprocess_data(valid_dataset)
print("Tokenizing datasets")
train_dataset = processor.process(train_dataset)
valid_dataset = processor.process(valid_dataset)
columns = ["source_ids", "target_ids", "attention_mask"]
valid_dataset.set_format(type='torch', columns=columns)
train_dataset.set_format(type='torch', columns=columns)
torch.save(train_dataset, data_args.train_file_name)
logger.info(f"saved train dataset at {data_args.train_file_name}")
torch.save(valid_dataset, data_args.valid_file_name)
logger.info(f"saved validation dataset at {data_args.valid_file_name}")
tokenizer_path = f"{data_args.model_type}_qg_tokenizer"
if not os.path.exists(tokenizer_path):
os.mkdir(tokenizer_path)
tokenizer.save_pretrained(tokenizer_path)
logger.info(f"saved tokenizer at {tokenizer_path}")
def preprocess_data(data):
answers = [sub["answers"]["text"] for sub in data]
ans_num = [len(ans) for ans in answers]
questions = [sub["title"] for sub in data]
questions = [[questions[i]] * ans_num[i] for i in range(len(ans_num))]
answers = [item for sublist in answers for item in sublist]
questions = [item for sublist in questions for item in sublist]
data_dict= []
for i in tqdm.tqdm(range(len(answers))):
current={}
current["question"] = questions[i]
current["context"]=answers[i]
current= process_e2e_qg(current)
data_dict.append(current)
source_text=[sub["source_text"] for sub in data_dict]
target_text = [sub["target_text"] for sub in data_dict]
task_text = [sub["task"] for sub in data_dict]
data_dict={"source_text":source_text, "target_text": target_text, "task": task_text}
data_dict = nlp.Dataset.from_dict(data_dict)
return data_dict
def process_e2e_qg(paragraph):
source_text = f"generate questions: {paragraph['context'].strip()}"
questions = [paragraph['question'].strip()]
target_text = " {sep_token} ".join(questions)
target_text = f"{target_text} {{sep_token}}"
return {"source_text":source_text, "target_text": target_text,"task":"e2e_qg"}
if __name__ == "__main__":
main()
| [
"torch.save"
] | 1.7.1 | Zino-chata/question_gen_v2 | 440fbb4eaccb86232a54287d0890c79a4935e418 |
1.7 | #!/usr/bin/env python3
#PYTHON_ARGCOMPLETE_OK
import enum
import logging
from dataclasses import dataclass, replace
from simple_parsing import Serializable
from typing import Dict, Any
from tqdm.auto import tqdm
import torch as th
from torchvision.transforms import Compose
from torch.utils.tensorboard import SummaryWriter
import torch.autograd.profiler as profiler
from top.train.saver import Saver
from top.train.trainer import Trainer
from top.train.event.hub import Hub
from top.train.event.topics import Topic
from top.train.event.helpers import (Collect, Periodic, Evaluator)
from top.model.keypoint import KeypointNetwork2D
from top.model.loss import (
ObjectHeatmapLoss, KeypointDisplacementLoss,
KeypointScaleLoss)
from top.data.transforms.augment import PhotometricAugment
from top.data.transforms import (
DenseMapsMobilePose,
Normalize,
InstancePadding,
DrawKeypointMap,
PhotometricAugment
)
from top.data.schema import Schema
from top.data.load import (DatasetSettings, get_loaders)
from top.run.app_util import update_settings
from top.run.path_util import RunPath
from top.run.torch_util import resolve_device
@dataclass
class AppSettings(Serializable):
model: KeypointNetwork2D.Settings = KeypointNetwork2D.Settings()
# Dataset selection options.
dataset: DatasetSettings = DatasetSettings()
# NOTE(ycho): root run path is set to tmp dir by default.
path: RunPath.Settings = RunPath.Settings(root='/tmp/ai604-kpt')
train: Trainer.Settings = Trainer.Settings()
batch_size: int = 8
device: str = ''
# Logging interval / every N train steps
log_period: int = int(100)
# Checkpointing interval / every N train steps
save_period: int = int(1e3)
# Evaluation interval / every N train steps
eval_period: int = int(1e3)
# Auxiliary transform settings ...
padding: InstancePadding.Settings = InstancePadding.Settings()
maps: DenseMapsMobilePose.Settings = DenseMapsMobilePose.Settings()
photo_aug: PhotometricAugment.Settings = PhotometricAugment.Settings()
profile: bool = False
load_ckpt: str = ''
class TrainLogger:
"""
Logging during training - specifically, tqdm-based logging to the shell and tensorboard.
"""
def __init__(self, hub: Hub, writer: th.utils.tensorboard.SummaryWriter,
period: int):
self.step = None
self.hub = hub
self.writer = writer
self.tqdm = tqdm()
self.period = period
self._subscribe()
self.draw_kpt_map = DrawKeypointMap(
DrawKeypointMap.Settings(
as_displacement=False))
def _on_losses(self, losses: Dict[str, th.Tensor]):
"""Log individual training losses."""
for k, v in losses.items():
name = k
loss = v.detach().cpu()
self.writer.add_scalar(name, loss,
global_step=self.step)
def _on_loss(self, loss):
"""log training loss."""
loss = loss.detach().cpu()
# Update tensorboard ...
self.writer.add_scalar('train_loss', loss,
global_step=self.step)
# Update tqdm logger bar.
self.tqdm.set_postfix(loss=loss)
self.tqdm.update(self.period)
def _on_train_out(self, inputs, outputs):
"""log training outputs."""
# Fetch inputs ...
with th.no_grad():
input_image = inputs[Schema.IMAGE].detach()
out_heatmap = outputs[Schema.HEATMAP].detach()
target_heatmap = inputs[Schema.HEATMAP].detach()
# NOTE(ycho): Only show for first image
# feels a bit wasteful? consider better alternatives...
out_kpt_map = self.draw_kpt_map(
outputs[Schema.KEYPOINT_HEATMAP][0]).detach()
target_kpt_map = self.draw_kpt_map(
inputs[Schema.KEYPOINT_HEATMAP][0]).detach()
# NOTE(ycho): denormalize input image.
image = th.clip(0.5 + (input_image[0] * 0.25), 0.0, 1.0)
self.writer.add_image(
'train_images',
image.cpu(),
global_step=self.step)
for i_cls in range(out_heatmap.shape[1]):
self.writer.add_image(F'out_heatmap/{i_cls}',
out_heatmap[0, i_cls, None].cpu(),
global_step=self.step)
self.writer.add_image(F'target_heatmap/{i_cls}',
target_heatmap[0, i_cls, None].cpu(),
global_step=self.step)
self.writer.add_image('out_kpt_map',
out_kpt_map.cpu(),
global_step=self.step)
self.writer.add_image('target_kpt_map',
target_kpt_map.cpu(),
global_step=self.step)
def _on_step(self, step):
"""save current step."""
self.step = step
def _subscribe(self):
self.hub.subscribe(Topic.STEP, self._on_step)
# NOTE(ycho): Log loss only periodically.
self.hub.subscribe(Topic.TRAIN_LOSS,
Periodic(self.period, self._on_loss))
self.hub.subscribe(Topic.TRAIN_LOSSES,
Periodic(self.period, self._on_losses))
self.hub.subscribe(Topic.TRAIN_OUT,
Periodic(self.period, self._on_train_out))
def __del__(self):
self.tqdm.close()
class ModelAsTuple(th.nn.Module):
"""Workaround to avoid tracing bugs in add_graph from rejecting outputs of
form Dict[Schema,Any]."""
def __init__(self, model: th.nn.Module):
super().__init__()
self.model = model
def forward(self, inputs):
return tuple(v for (k, v) in self.model(inputs).items())
def main():
logging.basicConfig(level=logging.WARN)
opts = AppSettings()
opts = update_settings(opts)
path = RunPath(opts.path)
device = resolve_device(opts.device)
model = KeypointNetwork2D(opts.model).to(device)
# FIXME(ycho): Hardcoded lr == 1e-3
optimizer = th.optim.Adam(model.parameters(), lr=1e-3)
writer = th.utils.tensorboard.SummaryWriter(path.log)
# NOTE(ycho): Force data loading on the CPU.
data_device = th.device('cpu')
# TODO(ycho): Consider scripted compositions?
# If a series of transforms can be fused and compiled,
# it would probably make it a lot faster to train...
transform = Compose([
DenseMapsMobilePose(opts.maps, data_device),
PhotometricAugment(opts.photo_aug, False),
Normalize(Normalize.Settings()),
InstancePadding(opts.padding)
])
train_loader, test_loader = get_loaders(opts.dataset,
device=data_device,
batch_size=opts.batch_size,
transform=transform)
# NOTE(ycho): Synchronous event hub.
hub = Hub()
def _on_train_begin():
# Save meta-parameters.
opts.save(path.dir / 'opts.yaml')
# NOTE(ycho): Currently `load` only works with a modified version of the
# main SimpleParsing repository.
# opts.load(path.dir / 'opts.yaml')
# Generate tensorboard graph.
data = next(iter(test_loader))
dummy = data[Schema.IMAGE].to(device).detach()
# NOTE(ycho): No need to set model to `eval`,
# eval mode is set internally within add_graph().
writer.add_graph(ModelAsTuple(model), dummy)
hub.subscribe(
Topic.TRAIN_BEGIN, _on_train_begin)
# Periodically log training statistics.
# FIXME(ycho): hardcoded logging period.
# NOTE(ycho): Currently only plots `loss`.
collect = Collect(hub, Topic.METRICS, [])
train_logger = TrainLogger(hub, writer, opts.log_period)
# Periodically save model, per epoch.
# TODO(ycho): Consider folding this callback inside Trainer().
hub.subscribe(
Topic.EPOCH,
lambda epoch: Saver(
model,
optimizer).save(
path.ckpt /
F'epoch-{epoch}.zip'))
# Periodically save model, per N training steps.
# TODO(ycho): Consider folding this callback inside Trainer()
# and adding {save_period} args to Trainer instead.
hub.subscribe(
Topic.STEP,
Periodic(opts.save_period, lambda step: Saver(
model,
optimizer).save(
path.ckpt /
F'step-{step}.zip')))
# Periodically evaluate model, per N training steps.
# NOTE(ycho): Load and process test data ...
# TODO(ycho): Consider folding this callback inside Trainer()
# and adding {test_loader, eval_fn} args to Trainer instead.
def _eval_fn(model, data):
# TODO(ycho): Actually implement evaluation function.
# return model(data[Schema.IMAGE].to(device))
return None
evaluator = Evaluator(
Evaluator.Settings(period=opts.eval_period),
hub, model, test_loader, _eval_fn)
# TODO(ycho):
# All metrics evaluation should reset stats at eval_begin(),
# aggregate stats at eval_step(),
# and output stats at eval_end(). These signals are all implemented.
# What are the appropriate metrics to implement for keypoint regression?
# - keypoint matching F1 score(?)
# - loss_fn() but for the evaluation datasets
def _on_eval_step(inputs, outputs):
pass
hub.subscribe(Topic.EVAL_STEP, _on_eval_step)
collect = Collect(hub, Topic.METRICS, [])
def _log_all(metrics: Dict[Topic, Any]):
pass
hub.subscribe(Topic.METRICS, _log_all)
# TODO(ycho): weight the losses with some constant ??
losses = {
Schema.HEATMAP: ObjectHeatmapLoss(key=Schema.HEATMAP),
# Schema.DISPLACEMENT_MAP: KeypointDisplacementLoss(),
Schema.KEYPOINT_HEATMAP: ObjectHeatmapLoss(
key=Schema.KEYPOINT_HEATMAP),
Schema.SCALE: KeypointScaleLoss()
}
def _loss_fn(model: th.nn.Module, data):
# Now that we're here, convert all inputs to the device.
data = {k: (v.to(device) if isinstance(v, th.Tensor) else v)
for (k, v) in data.items()}
image = data[Schema.IMAGE]
outputs = model(image)
# Also make input/output pair from training
# iterations available to the event bus.
hub.publish(Topic.TRAIN_OUT,
inputs=data,
outputs=outputs)
kpt_heatmap_loss = losses[Schema.KEYPOINT_HEATMAP](outputs, data)
heatmap_loss = losses[Schema.HEATMAP](outputs, data)
scale_loss = losses[Schema.SCALE](outputs, data)
# Independently log stuff
hub.publish(Topic.TRAIN_LOSSES, {
'keypoint': kpt_heatmap_loss,
'center': heatmap_loss,
'scale': scale_loss})
return (kpt_heatmap_loss + heatmap_loss + scale_loss)
## Load from checkpoint
if opts.load_ckpt:
logging.info(F'Loading checkpoint {opts.load_ckpt} ...')
Saver(model, optimizer).load(opts.load_ckpt)
## Trainer
trainer = Trainer(
opts.train,
model,
optimizer,
_loss_fn,
hub,
train_loader)
# Train, optionally profile
if opts.profile:
try:
with profiler.profile(record_shapes=True, use_cuda=True) as prof:
trainer.train()
finally:
print(
prof.key_averages().table(
sort_by='cpu_time_total',
row_limit=16))
prof.export_chrome_trace("/tmp/trace.json")
else:
trainer.train()
if __name__ == '__main__':
main()
| [
"torch.device",
"torch.no_grad",
"torch.autograd.profiler.profile",
"torch.utils.tensorboard.SummaryWriter",
"torch.clip"
] | 1.7.1 | yycho0108/ai604-video-object-pose | 7067f36281038272b0e39166d8f9718076bb6e75 |
1.7 | #!/usr/bin/env python3
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from typing import Dict
from top.data.schema import Schema
from top.model.loss_util import FocalLoss
class ObjectHeatmapLoss(nn.Module):
def __init__(self, key: Schema = Schema.HEATMAP):
super().__init__()
self.focal_loss = FocalLoss()
self.key = key
def forward(
self, output: Dict[str, th.Tensor],
targets: Dict[str, th.Tensor]) -> float:
# Extract relevant tensors from arguments.
pred = output[self.key]
target = targets[self.key]
# FIXME(ycho): Hardcoded batch_size inference
batch_size = target.shape[0]
# NOTE(ycho): deprecated for now ...
if False:
diff = pred - target
mask = th.ones_like(diff, dtype=th.bool)
# Ignore padded labels after `num_instance`.
#inums = target[Schema.INSTANCE_NUM]
#for batch_i in range(batch_size):
# num_instance = inums[batch_i]
# mask[batch_i, num_instance:] = False
diff[~mask] = 0.0
numer = th.sum(th.square(diff))
denom = th.sum(mask)
return numer / denom
out = self.focal_loss(pred, target)
return out
class KeypointDisplacementLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, output: Dict[str, th.Tensor],
target: Dict[str, th.Tensor]) -> float:
pred = output[Schema.DISPLACEMENT_MAP]
mask = th.isfinite(target[Schema.DISPLACEMENT_MAP])
diff = pred - target[Schema.DISPLACEMENT_MAP]
# NOTE(ycho): during inference, this mask is approximated
# by the heatmaps.
# NOTE(ycho): We MUST use this form since inf * 0 = NaN.
diff[~mask] = 0.0
# NOTE(ycho): Using abs here, which results in L1 loss.
numer = th.sum(th.abs(diff))
denom = th.sum(mask)
return numer / denom
class KeypointHeatmapLoss(nn.Module):
def __init__(self):
raise NotImplementedError  # placeholder: this loss is not implemented yet
def forward(
self, output: Dict[str, th.Tensor],
target: Dict[str, th.Tensor]) -> float:
raise NotImplementedError
class KeypointCrossEntropyLoss(nn.Module):
"""Given a keypoint heatmap of logits, compute the loss against integer-
valued target map of keypoints.
TODO(ycho): Perhaps not the best idea, especially if the number of keypoints are very sparse.
"""
def __init__(self):
super().__init__()
# self.loss = nn.MSELoss()
self.loss = nn.CrossEntropyLoss()
def forward(self, output: th.Tensor, target: th.Tensor) -> float:
return self.loss(output, target)
class KeypointScaleLoss(nn.Module):
def __init__(self):
super().__init__()
self.loss = nn.L1Loss()
def forward(self, output: Dict[str, th.Tensor],
target: Dict[str, th.Tensor]) -> float:
# We extract the center index from the input.
# TODO(ycho): Consider adding a data-processing `transform` instead.
# H, W = inputs[Schema.IMAGE].shape[-2:]
h, w = output[Schema.SCALE_MAP].shape[-2:]
# FIXME(ycho): `visibility` mask should ultimately account for
# out-of-range behavior ... (fingers crossed)
visibility = target[Schema.VISIBILITY].to(dtype=th.bool)[..., 0]
keypoints_2d_uv = target[Schema.KEYPOINT_2D]
center_uv = keypoints_2d_uv[..., 0, :2]
scale_xy = th.as_tensor(
[w, h], dtype=th.int32, device=center_uv.device)
center_xy = th.round(center_uv * scale_xy).to(dtype=th.int64)
# NOTE(ycho): Explicitly writing out (i,j) since the `Objectron`
# keypoint order is # unconventional.
j = center_xy[..., 0] # (B, O)
i = center_xy[..., 1] # (B, O)
flat_index = (i * w + j)
in_bound = th.all(th.logical_and(center_xy >= 0,
center_xy < scale_xy), dim=-1)
visibility = th.logical_and(visibility, in_bound)
# NOTE(ycho): Overwrite invalid(invisible) index with 0
# in order to prevent errors during gather().
# Here, we explicitly check for not only the dataset visibility,
# but also the validity of the resulting indexes within image bounds as
# well.
flat_index[~visibility] = 0
shape = output[Schema.SCALE_MAP].shape
X = output[Schema.SCALE_MAP].reshape(shape[:-2] + (-1,))
I = flat_index[:, None]
I = I.expand(*((-1, shape[1]) + tuple(flat_index.shape[1:])))
V = visibility
# NOTE(ycho): permute required for (B,3,O) -> (B,O,3)
scale_output = X.gather(-1, I).permute(0, 2, 1)
scale_target = target[Schema.SCALE]
return self.loss(scale_output[V], scale_target[V])
| [
"torch.round",
"torch.square",
"torch.isfinite",
"torch.nn.L1Loss",
"torch.abs",
"torch.logical_and",
"torch.ones_like",
"torch.as_tensor",
"torch.nn.CrossEntropyLoss",
"torch.sum"
] | 1.7.1 | yycho0108/ai604-video-object-pose | 7067f36281038272b0e39166d8f9718076bb6e75 |
1.8 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
from transformers import pipeline
from torchvision import models
from fairseq.optim.adafactor import Adafactor
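# The sinusoidal encoding below follows Vaswani et al. (2017):
#   PE(pos, 2i)   = sin(pos / 10000^(2i / d_model))
#   PE(pos, 2i+1) = cos(pos / 10000^(2i / d_model))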
class PositionalEncoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=52):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
class TimeDistributed(nn.Module):
# Takes any module and stacks the time dimension with the batch dimension of inputs before applying the module
# Inspired by https://keras.io/api/layers/recurrent_layers/time_distributed/
# https://discuss.pytorch.org/t/any-pytorch-function-can-work-as-keras-timedistributed/1346/4
def __init__(self, module, batch_first=True):
super(TimeDistributed, self).__init__()
self.module = module # Can be any layer we wish to apply like Linear, Conv etc
self.batch_first = batch_first
def forward(self, x):
if len(x.size()) <= 2:
return self.module(x)
# Squash samples and timesteps into a single axis
x_reshape = x.contiguous().view(-1, x.size(-1))
y = self.module(x_reshape)
# We have to reshape Y
if self.batch_first:
y = y.contiguous().view(x.size(0), -1, y.size(-1)) # (samples, timesteps, output_size)
else:
y = y.view(-1, x.size(1), y.size(-1)) # (timesteps, samples, output_size)
return y
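# Illustrative usage (not part of the original model): wrapping a Linear layer applies it
# independently at every timestep of a (batch, time, features) tensor, e.g.
#   td = TimeDistributed(nn.Linear(16, 8), batch_first=True)
#   td(torch.randn(4, 10, 16)).shape   # torch.Size([4, 10, 8])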
class FusionNetwork(nn.Module):
def __init__(self, embedding_dim, hidden_dim, use_img, use_text, dropout=0.2):
super(FusionNetwork, self).__init__()
self.img_pool = nn.AdaptiveAvgPool2d((1,1))
self.img_linear = nn.Linear(2048, embedding_dim)
self.use_img = use_img
self.use_text = use_text
input_dim = embedding_dim + (embedding_dim*use_img) + (embedding_dim*use_text)
self.feature_fusion = nn.Sequential(
nn.BatchNorm1d(input_dim),
nn.Linear(input_dim, input_dim, bias=False),
nn.ReLU(),
nn.Dropout(dropout),
nn.Linear(input_dim, hidden_dim)
)
def forward(self, img_encoding, text_encoding, dummy_encoding):
# Fuse static features together
pooled_img = self.img_pool(img_encoding)
condensed_img = self.img_linear(pooled_img.flatten(1))
# Build input
decoder_inputs = []
if self.use_img == 1:
decoder_inputs.append(condensed_img)
if self.use_text == 1:
decoder_inputs.append(text_encoding)
decoder_inputs.append(dummy_encoding)
concat_features = torch.cat(decoder_inputs, dim=1)
final = self.feature_fusion(concat_features)
# final = self.feature_fusion(dummy_encoding)
return final
class GTrendEmbedder(nn.Module):
def __init__(self, forecast_horizon, embedding_dim, use_mask, trend_len, num_trends, gpu_num):
super().__init__()
self.forecast_horizon = forecast_horizon
self.input_linear = TimeDistributed(nn.Linear(num_trends, embedding_dim))
self.pos_embedding = PositionalEncoding(embedding_dim, max_len=trend_len)
encoder_layer = nn.TransformerEncoderLayer(d_model=embedding_dim, nhead=4, dropout=0.2)
self.encoder = nn.TransformerEncoder(encoder_layer, num_layers=2)
self.use_mask = use_mask
self.gpu_num = gpu_num
def _generate_encoder_mask(self, size, forecast_horizon):
mask = torch.zeros((size, size))
split = math.gcd(size, forecast_horizon)
for i in range(0, size, split):
mask[i:i+split, i:i+split] = 1
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)).to('cuda:'+str(self.gpu_num))
return mask
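# Example (illustrative): for size=4 and forecast_horizon=2, split = gcd(4, 2) = 2, so the
# additive mask is block-diagonal with 2x2 blocks: 0.0 inside a block (attention allowed)
# and -inf outside it (attention suppressed).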
def _generate_square_subsequent_mask(self, size):
mask = (torch.triu(torch.ones(size, size)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)).to('cuda:'+str(self.gpu_num))
return mask
def forward(self, gtrends):
gtrend_emb = self.input_linear(gtrends.permute(0,2,1))
gtrend_emb = self.pos_embedding(gtrend_emb.permute(1,0,2))
input_mask = self._generate_encoder_mask(gtrend_emb.shape[0], self.forecast_horizon)
if self.use_mask == 1:
gtrend_emb = self.encoder(gtrend_emb, input_mask)
else:
gtrend_emb = self.encoder(gtrend_emb)
return gtrend_emb
class TextEmbedder(nn.Module):
def __init__(self, embedding_dim, cat_dict, col_dict, fab_dict, gpu_num):
super().__init__()
self.embedding_dim = embedding_dim
self.cat_dict = {v: k for k, v in cat_dict.items()}
self.col_dict = {v: k for k, v in col_dict.items()}
self.fab_dict = {v: k for k, v in fab_dict.items()}
self.word_embedder = pipeline('feature-extraction', model='bert-base-uncased')
self.fc = nn.Linear(768, embedding_dim)
self.dropout = nn.Dropout(0.1)
self.gpu_num = gpu_num
def forward(self, category, color, fabric):
textual_description = [self.col_dict[color.detach().cpu().numpy().tolist()[i]] + ' ' \
+ self.fab_dict[fabric.detach().cpu().numpy().tolist()[i]] + ' ' \
+ self.cat_dict[category.detach().cpu().numpy().tolist()[i]] for i in range(len(category))]
# Use BERT to extract features
word_embeddings = self.word_embedder(textual_description)
# BERT gives us embeddings for [CLS] .. [EOS], which is why we only average the embeddings in the range [1:-1]
# We're not fine tuning BERT and we don't want the noise coming from [CLS] or [EOS]
word_embeddings = [torch.FloatTensor(x[0][1:-1]).mean(axis=0) for x in word_embeddings]
word_embeddings = torch.stack(word_embeddings).to('cuda:'+str(self.gpu_num))
# Embed to our embedding space
word_embeddings = self.dropout(self.fc(word_embeddings))
return word_embeddings
class ImageEmbedder(nn.Module):
def __init__(self):
super().__init__()
# Img feature extraction
resnet = models.resnet50(pretrained=True)
modules = list(resnet.children())[:-2]
self.resnet = nn.Sequential(*modules)
for p in self.resnet.parameters():
p.requires_grad = False
# Fine tune resnet
# for c in list(self.resnet.children())[6:]:
# for p in c.parameters():
# p.requires_grad = True
def forward(self, images):
img_embeddings = self.resnet(images)
size = img_embeddings.size()
out = img_embeddings.view(*size[:2],-1)
return out.view(*size).contiguous() # batch_size, 2048, image_size/32, image_size/32
class DummyEmbedder(nn.Module):
def __init__(self, embedding_dim):
super().__init__()
self.embedding_dim = embedding_dim
self.day_embedding = nn.Linear(1, embedding_dim)
self.week_embedding = nn.Linear(1, embedding_dim)
self.month_embedding = nn.Linear(1, embedding_dim)
self.year_embedding = nn.Linear(1, embedding_dim)
self.dummy_fusion = nn.Linear(embedding_dim*4, embedding_dim)
self.dropout = nn.Dropout(0.2)
def forward(self, temporal_features):
# Temporal dummy variables (day, week, month, year)
d, w, m, y = temporal_features[:, 0].unsqueeze(1), temporal_features[:, 1].unsqueeze(1), \
temporal_features[:, 2].unsqueeze(1), temporal_features[:, 3].unsqueeze(1)
d_emb, w_emb, m_emb, y_emb = self.day_embedding(d), self.week_embedding(w), self.month_embedding(m), self.year_embedding(y)
temporal_embeddings = self.dummy_fusion(torch.cat([d_emb, w_emb, m_emb, y_emb], dim=1))
temporal_embeddings = self.dropout(temporal_embeddings)
return temporal_embeddings
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu"):
super(TransformerDecoderLayer, self).__init__()
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = F.relu
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super(TransformerDecoderLayer, self).__setstate__(state)
def forward(self, tgt, memory, tgt_mask = None, memory_mask = None, tgt_key_padding_mask = None,
memory_key_padding_mask = None):
tgt2, attn_weights = self.multihead_attn(tgt, memory, memory)
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt, attn_weights
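# Note: unlike nn.TransformerDecoderLayer, this custom layer has no self-attention block;
# it only cross-attends from the target to the memory and additionally returns the
# attention weights for later inspection.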
class GTM(pl.LightningModule):
def __init__(self, embedding_dim, hidden_dim, output_dim, num_heads, num_layers, use_text, use_img, \
cat_dict, col_dict, fab_dict, trend_len, num_trends, gpu_num, use_encoder_mask=1, autoregressive=False):
super().__init__()
self.hidden_dim = hidden_dim
self.embedding_dim = embedding_dim
self.output_len = output_dim
self.use_encoder_mask = use_encoder_mask
self.autoregressive = autoregressive
self.gpu_num = gpu_num
self.save_hyperparameters()
# Encoder
self.dummy_encoder = DummyEmbedder(embedding_dim)
self.image_encoder = ImageEmbedder()
self.text_encoder = TextEmbedder(embedding_dim, cat_dict, col_dict, fab_dict, gpu_num)
self.gtrend_encoder = GTrendEmbedder(output_dim, hidden_dim, use_encoder_mask, trend_len, num_trends, gpu_num)
self.static_feature_encoder = FusionNetwork(embedding_dim, hidden_dim, use_img, use_text)
# Decoder
self.decoder_linear = TimeDistributed(nn.Linear(1, hidden_dim))
decoder_layer = TransformerDecoderLayer(d_model=self.hidden_dim, nhead=num_heads, \
dim_feedforward=self.hidden_dim * 4, dropout=0.1)
if self.autoregressive: self.pos_encoder = PositionalEncoding(hidden_dim, max_len=12)
self.decoder = nn.TransformerDecoder(decoder_layer, num_layers)
self.decoder_fc = nn.Sequential(
nn.Linear(hidden_dim, self.output_len if not self.autoregressive else 1),
nn.Dropout(0.2)
)
def _generate_square_subsequent_mask(self, size):
mask = (torch.triu(torch.ones(size, size)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)).to('cuda:'+str(self.gpu_num))
return mask
def forward(self, category, color, fabric, temporal_features, gtrends, images):
# Encode features and get inputs
img_encoding = self.image_encoder(images)
dummy_encoding = self.dummy_encoder(temporal_features)
text_encoding = self.text_encoder(category, color, fabric)
gtrend_encoding = self.gtrend_encoder(gtrends)
# Fuse static features together
static_feature_fusion = self.static_feature_encoder(img_encoding, text_encoding, dummy_encoding)
if self.autoregressive == 1:
# Decode
tgt = torch.zeros(self.output_len, gtrend_encoding.shape[1], gtrend_encoding.shape[-1]).to('cuda:'+str(self.gpu_num))
tgt[0] = static_feature_fusion
tgt = self.pos_encoder(tgt)
tgt_mask = self._generate_square_subsequent_mask(self.output_len)
memory = gtrend_encoding
decoder_out, attn_weights = self.decoder(tgt, memory, tgt_mask)
forecast = self.decoder_fc(decoder_out)
else:
# Decode (generatively/non-autoregressively)
tgt = static_feature_fusion.unsqueeze(0)
memory = gtrend_encoding
decoder_out, attn_weights = self.decoder(tgt, memory)
forecast = self.decoder_fc(decoder_out)
return forecast.view(-1, self.output_len), attn_weights
def configure_optimizers(self):
optimizer = Adafactor(self.parameters(),scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
return [optimizer]
def training_step(self, train_batch, batch_idx):
item_sales, category, color, fabric, temporal_features, gtrends, images = train_batch
forecasted_sales, _ = self.forward(category, color, fabric, temporal_features, gtrends, images)
loss = F.mse_loss(item_sales, forecasted_sales.squeeze())
self.log('train_loss', loss)
return loss
def validation_step(self, test_batch, batch_idx):
item_sales, category, color, fabric, temporal_features, gtrends, images = test_batch
forecasted_sales, _ = self.forward(category, color, fabric, temporal_features, gtrends, images)
return item_sales.squeeze(), forecasted_sales.squeeze()
def validation_epoch_end(self, val_step_outputs):
item_sales, forecasted_sales = [x[0] for x in val_step_outputs], [x[1] for x in val_step_outputs]
item_sales, forecasted_sales = torch.stack(item_sales), torch.stack(forecasted_sales)
rescaled_item_sales, rescaled_forecasted_sales = item_sales*1065, forecasted_sales*1065 # 1065 is the normalization factor (max of the sales of the training set)
loss = F.mse_loss(item_sales, forecasted_sales.squeeze())
mae = F.l1_loss(rescaled_item_sales, rescaled_forecasted_sales)
self.log('val_mae', mae)
self.log('val_loss', loss)
print('Validation MAE:', mae.detach().cpu().numpy(), 'LR:', self.optimizers().param_groups[0]['lr'])
| [
"torch.nn.Linear",
"torch.cat",
"torch.stack",
"torch.ones",
"torch.nn.MultiheadAttention",
"torch.nn.LayerNorm",
"torch.FloatTensor",
"torch.zeros",
"torch.cos",
"torch.nn.Sequential",
"torch.nn.functional.l1_loss",
"torch.nn.ReLU",
"torch.nn.TransformerDecoder",
"torch.nn.TransformerEncoder",
"torch.nn.Dropout",
"torch.sin",
"torch.arange",
"torch.nn.BatchNorm1d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.TransformerEncoderLayer"
] | 1.8.2 | HumaticsLAB/GTM-Transformer | 94124d3246c7c22d8b952beeda53639a9ad170e3 |
1.10 | from typing import Callable, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
from sklearn.cluster import KMeans
from torch.utils.data.dataloader import DataLoader, default_collate
from tqdm import tqdm
from ptdec.utils import cluster_accuracy, target_distribution
def train(
dataset: torch.utils.data.Dataset,
model: torch.nn.Module,
epochs: int,
batch_size: int,
optimizer: torch.optim.Optimizer,
stopping_delta: Optional[float] = None,
collate_fn=default_collate,
cuda: bool = True,
sampler: Optional[torch.utils.data.sampler.Sampler] = None,
silent: bool = False,
update_freq: int = 10,
evaluate_batch_size: int = 1024,
update_callback: Optional[Callable[..., None]] = None,
epoch_callback: Optional[Callable[[int, torch.nn.Module], None]] = None,
) -> None:
"""
Train the DEC model given a dataset, a model instance and various configuration parameters.
:param dataset: instance of Dataset to use for training
:param model: instance of DEC model to train
:param epochs: number of training epochs
:param batch_size: size of the batch to train with
:param optimizer: instance of optimizer to use
:param stopping_delta: label delta as a proportion to use for stopping, None to disable, default None
:param collate_fn: function to merge a list of samples into mini-batch
:param cuda: whether to use CUDA, defaults to True
:param sampler: optional sampler to use in the DataLoader, defaults to None
:param silent: set to True to prevent printing out summary statistics, defaults to False
:param update_freq: frequency of batches with which to update counter, None disables, default 10
:param evaluate_batch_size: batch size for evaluation stage, default 1024
:param update_callback: optional function of accuracy and loss to update, default None
:param epoch_callback: optional function of epoch and model, default None
:return: None
"""
static_dataloader = DataLoader(
dataset,
batch_size=batch_size,
collate_fn=collate_fn,
pin_memory=False,
sampler=sampler,
shuffle=False,
)
train_dataloader = DataLoader(
dataset,
batch_size=batch_size,
collate_fn=collate_fn,
sampler=sampler,
shuffle=True,
)
data_iterator = tqdm(
static_dataloader,
leave=True,
unit="batch",
postfix={
"epo": -1,
"acc": "%.4f" % 0.0,
"lss": "%.8f" % 0.0,
"dlb": "%.4f" % -1,
},
disable=silent,
)
kmeans = KMeans(n_clusters=model.cluster_number, n_init=20)
model.train()
features = []
actual = []
# form initial cluster centres
for index, batch in enumerate(data_iterator):
if (isinstance(batch, tuple) or isinstance(batch, list)) and len(batch) == 2:
batch, value = batch # if we have a prediction label, separate it to actual
actual.append(value)
if cuda:
batch = batch.cuda(non_blocking=True)
features.append(model.encoder(batch).detach().cpu())
actual = torch.cat(actual).long()
predicted = kmeans.fit_predict(torch.cat(features).numpy())
predicted_previous = torch.tensor(np.copy(predicted), dtype=torch.long)
_, accuracy = cluster_accuracy(predicted, actual.cpu().numpy())
cluster_centers = torch.tensor(
kmeans.cluster_centers_, dtype=torch.float, requires_grad=True
)
if cuda:
cluster_centers = cluster_centers.cuda(non_blocking=True)
with torch.no_grad():
# initialise the cluster centers
model.state_dict()["assignment.cluster_centers"].copy_(cluster_centers)
loss_function = nn.KLDivLoss(reduction="sum")  # equivalent to the deprecated size_average=False
delta_label = None
for epoch in range(epochs):
features = []
data_iterator = tqdm(
train_dataloader,
leave=True,
unit="batch",
postfix={
"epo": epoch,
"acc": "%.4f" % (accuracy or 0.0),
"lss": "%.8f" % 0.0,
"dlb": "%.4f" % (delta_label or 0.0),
},
disable=silent,
)
model.train()
for index, batch in enumerate(data_iterator):
if (isinstance(batch, tuple) or isinstance(batch, list)) and len(
batch
) == 2:
batch, _ = batch # if we have a prediction label, strip it away
if cuda:
batch = batch.cuda(non_blocking=True)
output = model(batch)
target = target_distribution(output).detach()
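# (Assumption, since ptdec.utils is not shown here) target_distribution is expected to
# implement the DEC auxiliary target p_ij = (q_ij**2 / f_j) / sum_j'(q_ij'**2 / f_j'),
# with f_j = sum_i q_ij, which sharpens the soft assignments q produced by the model.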
loss = loss_function(output.log(), target) / output.shape[0]
data_iterator.set_postfix(
epo=epoch,
acc="%.4f" % (accuracy or 0.0),
lss="%.8f" % float(loss.item()),
dlb="%.4f" % (delta_label or 0.0),
)
optimizer.zero_grad()
loss.backward()
optimizer.step(closure=None)
features.append(model.encoder(batch).detach().cpu())
if update_freq is not None and index % update_freq == 0:
loss_value = float(loss.item())
data_iterator.set_postfix(
epo=epoch,
acc="%.4f" % (accuracy or 0.0),
lss="%.8f" % loss_value,
dlb="%.4f" % (delta_label or 0.0),
)
if update_callback is not None:
update_callback(accuracy, loss_value, delta_label)
predicted, actual = predict(
dataset,
model,
batch_size=evaluate_batch_size,
collate_fn=collate_fn,
silent=True,
return_actual=True,
cuda=cuda,
)
delta_label = (
float((predicted != predicted_previous).float().sum().item())
/ predicted_previous.shape[0]
)
if stopping_delta is not None and delta_label < stopping_delta:
print(
'Early stopping as label delta "%1.5f" less than "%1.5f".'
% (delta_label, stopping_delta)
)
break
predicted_previous = predicted
_, accuracy = cluster_accuracy(predicted.cpu().numpy(), actual.cpu().numpy())
data_iterator.set_postfix(
epo=epoch,
acc="%.4f" % (accuracy or 0.0),
lss="%.8f" % 0.0,
dlb="%.4f" % (delta_label or 0.0),
)
if epoch_callback is not None:
epoch_callback(epoch, model)
def predict(
dataset: torch.utils.data.Dataset,
model: torch.nn.Module,
batch_size: int = 1024,
collate_fn=default_collate,
cuda: bool = True,
silent: bool = False,
return_actual: bool = False,
) -> Union[Tuple[torch.Tensor, torch.Tensor], torch.Tensor]:
"""
Predict clusters for a dataset given a DEC model instance and various configuration parameters.
:param dataset: instance of Dataset to use for training
:param model: instance of DEC model to predict
:param batch_size: size of the batch to predict with, default 1024
:param collate_fn: function to merge a list of samples into mini-batch
:param cuda: whether CUDA is used, defaults to True
:param silent: set to True to prevent printing out summary statistics, defaults to False
:param return_actual: return actual values, if present in the Dataset
:return: tuple of prediction and actual if return_actual is True otherwise prediction
"""
dataloader = DataLoader(
dataset, batch_size=batch_size, collate_fn=collate_fn, shuffle=False
)
data_iterator = tqdm(
dataloader,
leave=True,
unit="batch",
disable=silent,
)
features = []
actual = []
model.eval()
for batch in data_iterator:
if (isinstance(batch, tuple) or isinstance(batch, list)) and len(batch) == 2:
batch, value = batch # unpack if we have a prediction label
if return_actual:
actual.append(value)
elif return_actual:
raise ValueError(
"Dataset has no actual value to unpack, but return_actual is set."
)
if cuda:
batch = batch.cuda(non_blocking=True)
features.append(
model(batch).detach().cpu()
) # move to the CPU to prevent out of memory on the GPU
if return_actual:
return torch.cat(features).max(1)[1], torch.cat(actual).long()
else:
return torch.cat(features).max(1)[1]
| [
"torch.cat",
"torch.utils.data.dataloader.DataLoader",
"torch.no_grad",
"torch.tensor",
"torch.nn.KLDivLoss"
] | 1.10.1 | wingkitlee0/pt-dec | 087b6231ea52422d827bf446b2ecf755ae9a6679 |
1.0 | import argparse
import itertools
import os
from abc import ABCMeta, abstractmethod
import torch
import torch.nn as nn
from six import add_metaclass
from torch.nn import functional
from torchvision.utils import save_image
import pyro
from pyro.contrib.examples import util
from pyro.distributions import Bernoulli, Normal
from pyro.infer import SVI, JitTrace_ELBO, Trace_ELBO
from pyro.optim import Adam
from utils.mnist_cached import DATA_DIR, RESULTS_DIR
"""
Comparison of VAE implementation in PyTorch and Pyro. This example can be
used for profiling purposes.
The PyTorch VAE example is taken (with minor modification) from pytorch/examples.
Source: https://github.com/pytorch/examples/tree/master/vae
"""
TRAIN = 'train'
TEST = 'test'
OUTPUT_DIR = RESULTS_DIR
# VAE encoder network
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
self.fc1 = nn.Linear(784, 400)
self.fc21 = nn.Linear(400, 20)
self.fc22 = nn.Linear(400, 20)
self.relu = nn.ReLU()
def forward(self, x):
x = x.reshape(-1, 784)
h1 = self.relu(self.fc1(x))
return self.fc21(h1), torch.exp(self.fc22(h1))
# VAE Decoder network
class Decoder(nn.Module):
def __init__(self):
super(Decoder, self).__init__()
self.fc3 = nn.Linear(20, 400)
self.fc4 = nn.Linear(400, 784)
self.relu = nn.ReLU()
def forward(self, z):
h3 = self.relu(self.fc3(z))
return torch.sigmoid(self.fc4(h3))
@add_metaclass(ABCMeta)
class VAE(object):
"""
Abstract class for the variational auto-encoder. The abstract method
for training the network is implemented by subclasses.
"""
def __init__(self, args, train_loader, test_loader):
self.args = args
self.vae_encoder = Encoder()
self.vae_decoder = Decoder()
self.train_loader = train_loader
self.test_loader = test_loader
self.mode = TRAIN
def set_train(self, is_train=True):
if is_train:
self.mode = TRAIN
self.vae_encoder.train()
self.vae_decoder.train()
else:
self.mode = TEST
self.vae_encoder.eval()
self.vae_decoder.eval()
@abstractmethod
def compute_loss_and_gradient(self, x):
"""
Given a batch of data `x`, run the optimizer (backpropagate the gradient),
and return the computed loss.
:param x: batch of data or a single datum (MNIST image).
:return: loss computed on the data batch.
"""
return
def model_eval(self, x):
"""
Given a batch of data `x`, run it through the trained VAE network to get
the reconstructed image.
:param x: batch of data or a single datum (MNIST image).
:return: reconstructed image, and the latent z's mean and variance.
"""
z_mean, z_var = self.vae_encoder(x)
if self.mode == TRAIN:
z = Normal(z_mean, z_var.sqrt()).sample()
else:
z = z_mean
return self.vae_decoder(z), z_mean, z_var
def train(self, epoch):
self.set_train(is_train=True)
train_loss = 0
for batch_idx, (x, _) in enumerate(self.train_loader):
loss = self.compute_loss_and_gradient(x)
train_loss += loss
print('====> Epoch: {} \nTraining loss: {:.4f}'.format(
epoch, train_loss / len(self.train_loader.dataset)))
def test(self, epoch):
self.set_train(is_train=False)
test_loss = 0
for i, (x, _) in enumerate(self.test_loader):
with torch.no_grad():
recon_x = self.model_eval(x)[0]
test_loss += self.compute_loss_and_gradient(x)
if i == 0:
n = min(x.size(0), 8)
comparison = torch.cat([x[:n],
recon_x.reshape(self.args.batch_size, 1, 28, 28)[:n]])
save_image(comparison.detach().cpu(),
os.path.join(OUTPUT_DIR, 'reconstruction_' + str(epoch) + '.png'),
nrow=n)
test_loss /= len(self.test_loader.dataset)
print('Test set loss: {:.4f}'.format(test_loss))
class PyTorchVAEImpl(VAE):
"""
Adapted from pytorch/examples.
Source: https://github.com/pytorch/examples/tree/master/vae
"""
def __init__(self, *args, **kwargs):
super(PyTorchVAEImpl, self).__init__(*args, **kwargs)
self.optimizer = self.initialize_optimizer(lr=1e-3)
def compute_loss_and_gradient(self, x):
self.optimizer.zero_grad()
recon_x, z_mean, z_var = self.model_eval(x)
binary_cross_entropy = functional.binary_cross_entropy(recon_x, x.reshape(-1, 784))
# Uses analytical KL divergence expression for D_kl(q(z|x) || p(z))
# Refer to Appendix B from VAE paper:
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# (https://arxiv.org/abs/1312.6114)
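# Closed form for a diagonal Gaussian posterior against a standard normal prior:
#   D_KL(q(z|x) || p(z)) = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
# where z_mean is mu and z_var is sigma^2 below.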
kl_div = -0.5 * torch.sum(1 + z_var.log() - z_mean.pow(2) - z_var)
kl_div /= self.args.batch_size * 784
loss = binary_cross_entropy + kl_div
if self.mode == TRAIN:
loss.backward()
self.optimizer.step()
return loss.item()
def initialize_optimizer(self, lr=1e-3):
model_params = itertools.chain(self.vae_encoder.parameters(), self.vae_decoder.parameters())
return torch.optim.Adam(model_params, lr)
class PyroVAEImpl(VAE):
"""
Implementation of VAE using Pyro. Only the model and the guide specifications
are needed to run the optimizer (the objective function does not need to be
specified, unlike in the PyTorch implementation).
"""
def __init__(self, *args, **kwargs):
super(PyroVAEImpl, self).__init__(*args, **kwargs)
self.optimizer = self.initialize_optimizer(lr=1e-3)
def model(self, data):
decoder = pyro.module('decoder', self.vae_decoder)
z_mean, z_std = torch.zeros([data.size(0), 20]), torch.ones([data.size(0), 20])
with pyro.plate('data', data.size(0)):
z = pyro.sample('latent', Normal(z_mean, z_std).to_event(1))
img = decoder.forward(z)
pyro.sample('obs',
Bernoulli(img).to_event(1),
obs=data.reshape(-1, 784))
def guide(self, data):
encoder = pyro.module('encoder', self.vae_encoder)
with pyro.plate('data', data.size(0)):
z_mean, z_var = encoder.forward(data)
pyro.sample('latent', Normal(z_mean, z_var.sqrt()).to_event(1))
def compute_loss_and_gradient(self, x):
if self.mode == TRAIN:
loss = self.optimizer.step(x)
else:
loss = self.optimizer.evaluate_loss(x)
loss /= self.args.batch_size * 784
return loss
def initialize_optimizer(self, lr):
optimizer = Adam({'lr': lr})
elbo = JitTrace_ELBO() if self.args.jit else Trace_ELBO()
return SVI(self.model, self.guide, optimizer, loss=elbo)
def setup(args):
pyro.set_rng_seed(args.rng_seed)
train_loader = util.get_data_loader(dataset_name='MNIST',
data_dir=DATA_DIR,
batch_size=args.batch_size,
is_training_set=True,
shuffle=True)
test_loader = util.get_data_loader(dataset_name='MNIST',
data_dir=DATA_DIR,
batch_size=args.batch_size,
is_training_set=False,
shuffle=True)
global OUTPUT_DIR
OUTPUT_DIR = os.path.join(RESULTS_DIR, args.impl)
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
pyro.clear_param_store()
return train_loader, test_loader
def main(args):
train_loader, test_loader = setup(args)
if args.impl == 'pyro':
vae = PyroVAEImpl(args, train_loader, test_loader)
print('Running Pyro VAE implementation')
elif args.impl == 'pytorch':
vae = PyTorchVAEImpl(args, train_loader, test_loader)
print('Running PyTorch VAE implementation')
else:
raise ValueError('Incorrect implementation specified: {}'.format(args.impl))
for i in range(args.num_epochs):
vae.train(i)
if not args.skip_eval:
vae.test(i)
if __name__ == '__main__':
assert pyro.__version__.startswith('0.3.0')
parser = argparse.ArgumentParser(description='VAE using MNIST dataset')
parser.add_argument('-n', '--num-epochs', nargs='?', default=10, type=int)
parser.add_argument('--batch_size', nargs='?', default=128, type=int)
parser.add_argument('--rng_seed', nargs='?', default=0, type=int)
parser.add_argument('--impl', nargs='?', default='pyro', type=str)
parser.add_argument('--skip_eval', action='store_true')
parser.add_argument('--jit', action='store_true')
parser.set_defaults(skip_eval=False)
args = parser.parse_args()
main(args)
| [
"torch.nn.Linear",
"torch.no_grad",
"torch.optim.Adam",
"torch.nn.ReLU"
] | 1.0.0 | hesenp/pyro | 0c49858ab8c5f263d1ece7f212180c8ccd8da370 |
1.4 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
__version__ = "0.1.0"
import copy
import warnings
import crypten.common
import crypten.communicator as comm
import crypten.mpc # noqa: F401
import crypten.nn # noqa: F401
import torch
# other imports:
from . import debug
from .cryptensor import CrypTensor
# functions controlling autograd:
no_grad = CrypTensor.no_grad
enable_grad = CrypTensor.enable_grad
set_grad_enabled = CrypTensor.set_grad_enabled
def init(party_name=None, device=None):
"""
Initialize CrypTen. It will initialize the communicator, set up the party
name for file save / load, and set up seeds for Random Number Generation.
By default the function will initialize a set of RNG generators on CPU.
If torch.cuda.is_available() returns True, it will initialize an additional
set of RNG generators on GPU. Users can specify the GPU device on which these
generators are initialized via the ``device`` argument.
Args:
party_name (str): party_name for file save and load, default is None
device (int, str, torch.device): Specify device for RNG generators on
GPU. Must be a GPU device.
"""
# Return and raise warning if initialized
if comm.is_initialized():
warnings.warn("CrypTen is already initialized.", RuntimeWarning)
return
# Initialize communicator
comm._init(use_threads=False, init_ttp=crypten.mpc.ttp_required())
# Setup party name for file save / load
if party_name is not None:
comm.get().set_name(party_name)
# Setup seeds for Random Number Generation
if comm.get().get_rank() < comm.get().get_world_size():
_setup_przs(device=device)
if crypten.mpc.ttp_required():
crypten.mpc.provider.ttp_provider.TTPClient._init()
def init_thread(rank, world_size):
comm._init(use_threads=True, rank=rank, world_size=world_size)
_setup_przs()
def uninit():
return comm.uninit()
def is_initialized():
return comm.is_initialized()
def print_communication_stats():
comm.get().print_communication_stats()
def reset_communication_stats():
comm.get().reset_communication_stats()
# set tensor type to be used for CrypTensors:
__CRYPTENSOR_TYPES__ = {"mpc": crypten.mpc.MPCTensor}
__DEFAULT_CRYPTENSOR_TYPE__ = "mpc"
def register_cryptensor(name):
"""Registers a custom :class:`CrypTensor` subclass.
This decorator allows the user to instantiate a subclass of `CrypTensor`
from Python code, even if the class itself is not part of CrypTen. To use
it, apply this decorator to a `CrypTensor` subclass, like this:
.. code-block:: python
@crypten.register_cryptensor('my_cryptensor')
class MyCrypTensor(crypten.CrypTensor):
...
"""
def register_cryptensor_cls(cls):
if name in __CRYPTENSOR_TYPES__:
raise ValueError(
"Cannot register duplicate CrypTensor type: \
tensor type {} already exists.".format(
name
)
)
if not issubclass(cls, CrypTensor):
raise ValueError(
"Registered tensor ({}: {}) must extend \
CrypTensor".format(
name, cls.__name__
)
)
__CRYPTENSOR_TYPES__[name] = cls
return cls
return register_cryptensor_cls
def set_default_cryptensor_type(cryptensor_type):
"""Sets the default type used to create `CrypTensor`s."""
global __DEFAULT_CRYPTENSOR_TYPE__
if cryptensor_type not in __CRYPTENSOR_TYPES__:
raise ValueError("CrypTensor type %s does not exist." % cryptensor_type)
__DEFAULT_CRYPTENSOR_TYPE__ = cryptensor_type
def get_default_cryptensor_type():
"""Gets the default type used to create `CrypTensor`s."""
return __DEFAULT_CRYPTENSOR_TYPE__
def get_cryptensor_type(tensor):
"""Gets the type name of the specified `tensor` `CrypTensor`."""
if not isinstance(tensor, CrypTensor):
raise ValueError(
"Specified tensor is not a CrypTensor: {}".format(type(tensor))
)
for name, cls in __CRYPTENSOR_TYPES__.items():
if isinstance(tensor, cls):
return name
raise ValueError("Unregistered CrypTensor type: {}".format(type(tensor)))
def cryptensor(*args, cryptensor_type=None, **kwargs):
"""
Factory function to return encrypted tensor of given `cryptensor_type`. If no
`cryptensor_type` is specified, the default type is used.
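    A minimal sketch (assumes `crypten.init()` has already been called):
    .. code-block:: python
        x = torch.tensor([1.0, 2.0, 3.0])
        x_enc = crypten.cryptensor(x)                        # default type ("mpc")
        y_enc = crypten.cryptensor(x, cryptensor_type="mpc")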
"""
# determine CrypTensor type to use:
if cryptensor_type is None:
cryptensor_type = get_default_cryptensor_type()
if cryptensor_type not in __CRYPTENSOR_TYPES__:
raise ValueError("CrypTensor type %s does not exist." % cryptensor_type)
# create CrypTensor:
return __CRYPTENSOR_TYPES__[cryptensor_type](*args, **kwargs)
def is_encrypted_tensor(obj):
"""
Returns True if obj is an encrypted tensor.
"""
return isinstance(obj, CrypTensor)
def _setup_przs(device=None):
"""
Generate shared random seeds to generate pseudo-random sharings of
zero. The random seeds are shared such that each process shares
one seed with the previous rank process and one with the next rank.
This allows for the generation of `n` random values, each known to
exactly two of the `n` parties.
For arithmetic sharing, one of these parties will add the number
while the other subtracts it, allowing for the generation of a
pseudo-random sharing of zero. (This can be done for binary
sharing using bitwise-xor rather than addition / subtraction)
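    A rough sketch of the resulting shares (illustration only, not the actual
    implementation; `randint` stands for drawing from a shared generator):
    .. code-block:: python
        # party i holds g0 (seed shared with party i+1) and
        # g1 (seed shared with party i-1); each party computes
        #     share_i = randint(g0) - randint(g1)
        # summing share_i over all parties telescopes to zero.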
"""
# Initialize RNG Generators
comm.get().g0 = torch.Generator()
comm.get().g1 = torch.Generator()
device = "cuda" if device is None else device
device = torch.device(device)
assert device.type == "cuda", "Must be a GPU device"
if torch.cuda.is_available():
comm.get().g0_cuda = torch.Generator(device=device)
comm.get().g1_cuda = torch.Generator(device=device)
# Generate random seeds for Generators
    # NOTE: the chosen seed can be any number, but we choose a random 64-bit
    # integer here so other parties cannot guess its value.
# We sometimes get here from a forked process, which causes all parties
# to have the same RNG state. Reset the seed to make sure RNG streams
# are different in all the parties. We use numpy's random here since
# setting its seed to None will produce different seeds even from
# forked processes.
import numpy
numpy.random.seed(seed=None)
next_seed = torch.tensor(numpy.random.randint(-(2 ** 63), 2 ** 63 - 1, (1,)))
prev_seed = torch.LongTensor([0]) # placeholder
# Send random seed to next party, receive random seed from prev party
world_size = comm.get().get_world_size()
rank = comm.get().get_rank()
if world_size >= 2: # Otherwise sending seeds will segfault.
next_rank = (rank + 1) % world_size
prev_rank = (next_rank - 2) % world_size
req0 = comm.get().isend(tensor=next_seed, dst=next_rank)
req1 = comm.get().irecv(tensor=prev_seed, src=prev_rank)
req0.wait()
req1.wait()
else:
prev_seed = next_seed
# Seed Generators
comm.get().g0.manual_seed(next_seed.item())
comm.get().g1.manual_seed(prev_seed.item())
# Create global generator
global_seed = torch.tensor(numpy.random.randint(-(2 ** 63), 2 ** 63 - 1, (1,)))
global_seed = comm.get().broadcast(global_seed, 0)
comm.get().global_generator = torch.Generator()
comm.get().global_generator.manual_seed(global_seed.item())
def load_from_party(
f=None,
preloaded=None,
encrypted=False,
dummy_model=None,
src=0,
load_closure=torch.load,
**kwargs
):
"""
Loads an object saved with `torch.save()` or `crypten.save_from_party()`.
Args:
f: a file-like object (has to implement `read()`, `readline()`,
`tell()`, and `seek()`), or a string containing a file name
preloaded: Use the preloaded value instead of loading a tensor/model from f.
encrypted: Determines whether crypten should load an encrypted tensor
or a plaintext torch tensor.
dummy_model: Takes a model architecture to fill with the loaded model
(on the `src` party only). Non-source parties will return the
`dummy_model` input (with data unchanged). Loading a model will
assert the correctness of the model architecture provided against
the model loaded. This argument is ignored if the file loaded is
a tensor. (deprecated)
src: Determines the source of the tensor. If `src` is None, each
party will attempt to read in the specified file. If `src` is
            specified, the source party will read the tensor from `f` and
            broadcast it to the other parties.
load_closure: Custom load function that matches the interface of `torch.load`,
to be used when the tensor is saved with a custom save function in
`crypten.save_from_party`. Additional kwargs are passed on to the closure.
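    Example (hypothetical file name; only the `src` party reads the file,
    while the other parties receive shares of the same tensor):
    .. code-block:: python
        x_enc = crypten.load_from_party("tensor.pt", src=0)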
"""
if dummy_model is not None:
warnings.warn(
"dummy_model is deprecated and no longer required", DeprecationWarning
)
if encrypted:
raise NotImplementedError("Loading encrypted tensors is not yet supported")
else:
assert isinstance(src, int), "Load failed: src argument must be an integer"
assert (
src >= 0 and src < comm.get().get_world_size()
), "Load failed: src must be in [0, world_size)"
# source party
if comm.get().get_rank() == src:
assert (f is None and (preloaded is not None)) or (
(f is not None) and preloaded is None
), "Exactly one of f and preloaded must not be None"
if f is None:
result = preloaded
if preloaded is None:
result = load_closure(f, **kwargs)
# Zero out the tensors / modules to hide loaded data from broadcast
if torch.is_tensor(result):
result_zeros = result.new_zeros(result.size())
elif isinstance(result, torch.nn.Module):
result_zeros = copy.deepcopy(result)
result_zeros.set_all_parameters(0)
else:
result = comm.get().broadcast_obj(-1, src)
raise TypeError("Unrecognized load type %s" % type(result))
comm.get().broadcast_obj(result_zeros, src)
# Non-source party
else:
result = comm.get().broadcast_obj(None, src)
if isinstance(result, int) and result == -1:
raise TypeError("Unrecognized load type from src party")
if torch.is_tensor(result):
result = crypten.cryptensor(result, src=src)
# TODO: Encrypt modules before returning them
# elif isinstance(result, torch.nn.Module):
# result = crypten.nn.from_pytorch(result, src=src)
result.src = src
return result
def load(
f,
preloaded=None,
encrypted=False,
dummy_model=None,
src=0,
load_closure=torch.load,
**kwargs
):
"""
Loads an object saved with `torch.save()` or `crypten.save_from_party()`.
Note: this function is deprecated; please use load_from_party instead.
"""
warnings.warn(
"The current 'load' function is deprecated, and will be removed soon. "
"To continue using current 'load' functionality, please use the "
"'load_from_party' function instead.",
DeprecationWarning,
)
return load_from_party(
f, preloaded, encrypted, dummy_model, src, load_closure, **kwargs
)
def save_from_party(obj, f, src=0, save_closure=torch.save, **kwargs):
"""
Saves a CrypTensor or PyTorch tensor to a file.
Args:
obj: The CrypTensor or PyTorch tensor to be saved
f: a file-like object (has to implement `read()`, `readline()`,
`tell()`, and `seek()`), or a string containing a file name
src: The source party that writes data to the specified file.
save_closure: Custom save function that matches the interface of `torch.save`,
            to be used when the tensor will later be loaded with a matching custom load function in
`crypten.load_from_party`. Additional kwargs are passed on to the closure.
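    Example (hypothetical file name; only the `src` party writes to disk):
    .. code-block:: python
        crypten.save_from_party(torch.tensor([1.0, 2.0]), "tensor.pt", src=0)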
"""
if is_encrypted_tensor(obj):
raise NotImplementedError("Saving encrypted tensors is not yet supported")
else:
assert isinstance(src, int), "Save failed: src must be an integer"
assert (
src >= 0 and src < comm.get().get_world_size()
), "Save failed: src must be an integer in [0, world_size)"
if comm.get().get_rank() == src:
save_closure(obj, f, **kwargs)
# Implement barrier to avoid race conditions that require file to exist
comm.get().barrier()
def save(obj, f, src=0, save_closure=torch.save, **kwargs):
"""
Saves a CrypTensor or PyTorch tensor to a file.
Note: this function is deprecated, please use save_from_party instead
"""
warnings.warn(
"The current 'save' function is deprecated, and will be removed soon. "
"To continue using current 'save' functionality, please use the "
"'save_from_party' function instead.",
DeprecationWarning,
)
save_from_party(obj, f, src, save_closure, **kwargs)
def where(condition, input, other):
"""
Return a tensor of elements selected from either `input` or `other`, depending
on `condition`.
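    A small sketch of the selection semantics (values are illustrative):
    .. code-block:: python
        cond = crypten.cryptensor(torch.tensor([1.0, 0.0]))
        a = crypten.cryptensor(torch.tensor([10.0, 20.0]))
        b = crypten.cryptensor(torch.tensor([30.0, 40.0]))
        crypten.where(cond, a, b).get_plain_text()  # tensor([10., 40.])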
"""
if is_encrypted_tensor(condition):
return condition * input + (1 - condition) * other
elif torch.is_tensor(condition):
condition = condition.float()
return input * condition + other * (1 - condition)
def cat(tensors, dim=0):
"""
Concatenates the specified CrypTen `tensors` along dimension `dim`.
"""
assert isinstance(tensors, list), "input to cat must be a list"
assert all(isinstance(t, CrypTensor) for t in tensors), "inputs must be CrypTensors"
tensor_types = [get_cryptensor_type(t) for t in tensors]
assert all(
ttype == tensor_types[0] for ttype in tensor_types
), "cannot concatenate CrypTensors with different underlying types"
if len(tensors) == 1:
return tensors[0]
return type(tensors[0]).cat(tensors, dim=dim)
def stack(tensors, dim=0):
"""
Stacks the specified CrypTen `tensors` along dimension `dim`. In contrast to
`crypten.cat`, this adds a dimension to the result tensor.
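    A shape sketch (illustrative; note the extra leading dimension versus `crypten.cat`):
    .. code-block:: python
        a = crypten.cryptensor(torch.zeros(3))
        b = crypten.cryptensor(torch.ones(3))
        crypten.cat([a, b]).size()    # torch.Size([6])
        crypten.stack([a, b]).size()  # torch.Size([2, 3])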
"""
assert isinstance(tensors, list), "input to stack must be a list"
assert all(isinstance(t, CrypTensor) for t in tensors), "inputs must be CrypTensors"
tensor_types = [get_cryptensor_type(t) for t in tensors]
assert all(
ttype == tensor_types[0] for ttype in tensor_types
), "cannot stack CrypTensors with different underlying types"
if len(tensors) == 1:
return tensors[0].unsqueeze(dim)
return type(tensors[0]).stack(tensors, dim=dim)
def rand(*sizes, cryptensor_type=None):
"""
Returns a tensor with elements uniformly sampled in [0, 1).
"""
if cryptensor_type is None:
cryptensor_type = get_default_cryptensor_type()
return __CRYPTENSOR_TYPES__[cryptensor_type].rand(*sizes)
def bernoulli(tensor, cryptensor_type=None):
"""
Returns a tensor with elements in {0, 1}. The i-th element of the
    output will be 1 with probability given by the i-th value of the
    input tensor.
"""
return rand(tensor.size(), cryptensor_type=cryptensor_type) < tensor
# expose classes and functions in package:
__all__ = [
"CrypTensor",
"no_grad",
"enable_grad",
"set_grad_enabled",
"debug",
"init",
"init_thread",
"mpc",
"nn",
"uninit",
]
| [
"torch.device",
"torch.is_tensor",
"torch.Generator",
"torch.cuda.is_available",
"torch.LongTensor"
] | 1.4.0 | marksibrahim/CrypTen | 4e5b13487d7f6ceaa4f06e86f0b260e0761960fd |
1.4 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import unittest
import crypten
import torch
from crypten.common.util import chebyshev_series
from crypten.encoder import FixedPointEncoder, nearest_integer_division
def get_test_tensor(max_value=10, float=False):
"""Create simple test tensor."""
tensor = torch.LongTensor(list(range(max_value)))
if float:
tensor = tensor.float()
return tensor
class TestCommon(unittest.TestCase):
"""
Test cases for common functionality.
"""
def _check(self, tensor, reference, msg):
test_passed = (tensor == reference).all().item() == 1
self.assertTrue(test_passed, msg=msg)
def test_encode_decode(self):
"""Tests tensor encoding and decoding."""
for float in [False, True]:
if float:
fpe = FixedPointEncoder(precision_bits=16)
else:
fpe = FixedPointEncoder(precision_bits=0)
tensor = get_test_tensor(float=float)
decoded = fpe.decode(fpe.encode(tensor))
self._check(
decoded,
tensor,
"Encoding/decoding a %s failed." % "float" if float else "long",
)
# Make sure encoding a subclass of CrypTensor is a no-op
crypten.mpc.set_default_provider(crypten.mpc.provider.TrustedFirstParty)
crypten.init()
tensor = get_test_tensor(float=True)
encrypted_tensor = crypten.cryptensor(tensor)
encrypted_tensor = fpe.encode(encrypted_tensor)
self._check(
encrypted_tensor.get_plain_text(),
tensor,
"Encoding an EncryptedTensor failed.",
)
# Try a few other types.
fpe = FixedPointEncoder(precision_bits=0)
for dtype in [torch.uint8, torch.int8, torch.int16]:
tensor = torch.zeros(5, dtype=dtype).random_()
decoded = fpe.decode(fpe.encode(tensor)).type(dtype)
self._check(decoded, tensor, "Encoding/decoding a %s failed." % dtype)
def test_nearest_integer_division(self):
# test without scaling:
scale = 1
reference = [[-26, -25, -7, -5, -4, -1, 0, 1, 3, 4, 5, 7, 25, 26]]
tensor = torch.LongTensor(reference)
result = nearest_integer_division(tensor, scale)
self._check(
torch.LongTensor(result.tolist()),
torch.LongTensor(reference),
"Nearest integer division failed.",
)
# test with scaling:
scale = 4
reference = [[-6, -6, -2, -1, -1, 0, 0, 0, 1, 1, 1, 2, 6, 6]]
result = nearest_integer_division(tensor, scale)
self._check(
torch.LongTensor(result.tolist()),
torch.LongTensor(reference),
"Nearest integer division failed.",
)
def test_chebyshev_series(self):
"""Checks coefficients returned by chebyshev_series are correct"""
for width, terms in [(6, 10), (6, 20)]:
result = chebyshev_series(torch.tanh, width, terms)
# check shape
self.assertTrue(result.shape == torch.Size([terms]))
# check terms
self.assertTrue(result[0] < 1e-4)
self.assertTrue(torch.isclose(result[-1], torch.tensor(3.5e-2), atol=1e-1))
def test_config_managers(self):
"""Checks setting configuartion with config manager works"""
# Set the config directly
crypten.mpc.config.exp_iterations = 8
self.assertTrue(crypten.mpc.config.exp_iterations == 8)
# Set with a context manager
with crypten.mpc.ConfigManager("exp_iterations", 3):
self.assertTrue(crypten.mpc.config.exp_iterations == 3)
self.assertTrue(crypten.mpc.config.exp_iterations == 8)
crypten.mpc.set_config(crypten.mpc.MPCConfig(exp_iterations=5))
self.assertTrue(crypten.mpc.config.exp_iterations == 5)
self.assertTrue(crypten.mpc.mpc.config.exp_iterations == 5)
if __name__ == "__main__":
    unittest.main(argv=[sys.argv[0]])
| [
"torch.zeros",
"torch.tensor",
"torch.LongTensor",
"torch.Size"
] | 1.4.0 | marksibrahim/CrypTen | 4e5b13487d7f6ceaa4f06e86f0b260e0761960fd |
1.2 | from typing import Tuple, Dict, Optional
from overrides import overrides
import torch
from torch.nn import LSTMCell
from allennlp.modules import Attention
from allennlp.modules.seq2seq_decoders.decoder_net import DecoderNet
from allennlp.nn import util
@DecoderNet.register("lstm_cell")
class LstmCellDecoderNet(DecoderNet):
"""
This decoder net implements simple decoding network with LSTMCell and Attention.
# Parameters
decoding_dim : ``int``, required
Defines dimensionality of output vectors.
target_embedding_dim : ``int``, required
        Defines dimensionality of input target embeddings. Since this model takes its output at the previous step
        as the input to the following step, this is also the input dimensionality.
attention : ``Attention``, optional (default = None)
If you want to use attention to get a dynamic summary of the encoder outputs at each step
of decoding, this is the function used to compute similarity between the decoder hidden
state and encoder outputs.
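    A minimal construction sketch (dimensions are made up for illustration):
    ```python
    from allennlp.modules.attention import DotProductAttention
    decoder_net = LstmCellDecoderNet(
        decoding_dim=128,
        target_embedding_dim=128,
        attention=DotProductAttention(),
    )
    ```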
"""
def __init__(
self,
decoding_dim: int,
target_embedding_dim: int,
attention: Optional[Attention] = None,
bidirectional_input: bool = False,
) -> None:
super().__init__(
decoding_dim=decoding_dim,
target_embedding_dim=target_embedding_dim,
decodes_parallel=False,
)
        # In this particular type of decoder, the output of the previous step passes directly to the input of the current step.
        # We also assume that the decoder output dimensionality is equal to the encoder output dimensionality.
decoder_input_dim = self.target_embedding_dim
# Attention mechanism applied to the encoder output for each step.
self._attention = attention
if self._attention:
# If using attention, a weighted average over encoder outputs will be concatenated
# to the previous target embedding to form the input to the decoder at each
            # time step. The encoder output dim will be the same as decoding_dim.
decoder_input_dim += decoding_dim
# We'll use an LSTM cell as the recurrent cell that produces a hidden state
# for the decoder at each time step.
self._decoder_cell = LSTMCell(decoder_input_dim, self.decoding_dim)
self._bidirectional_input = bidirectional_input
def _prepare_attended_input(
self,
decoder_hidden_state: torch.Tensor = None,
encoder_outputs: torch.Tensor = None,
encoder_outputs_mask: torch.Tensor = None,
) -> torch.Tensor:
"""Apply attention over encoder outputs and decoder state."""
        # Ensure the mask is also a FloatTensor, or else the multiplication within
        # attention will complain.
# shape: (batch_size, max_input_sequence_length, encoder_output_dim)
encoder_outputs_mask = encoder_outputs_mask.float()
# shape: (batch_size, max_input_sequence_length)
input_weights = self._attention(decoder_hidden_state, encoder_outputs, encoder_outputs_mask)
# shape: (batch_size, encoder_output_dim)
attended_input = util.weighted_sum(encoder_outputs, input_weights)
return attended_input
def init_decoder_state(
self, encoder_out: Dict[str, torch.LongTensor]
) -> Dict[str, torch.Tensor]:
batch_size, _ = encoder_out["source_mask"].size()
# Initialize the decoder hidden state with the final output of the encoder,
# and the decoder context with zeros.
# shape: (batch_size, encoder_output_dim)
final_encoder_output = util.get_final_encoder_states(
encoder_out["encoder_outputs"],
encoder_out["source_mask"],
bidirectional=self._bidirectional_input,
)
return {
"decoder_hidden": final_encoder_output, # shape: (batch_size, decoder_output_dim)
"decoder_context": final_encoder_output.new_zeros(batch_size, self.decoding_dim)
# shape: (batch_size, decoder_output_dim)
}
@overrides
def forward(
self,
previous_state: Dict[str, torch.Tensor],
encoder_outputs: torch.Tensor,
source_mask: torch.Tensor,
previous_steps_predictions: torch.Tensor,
previous_steps_mask: Optional[torch.Tensor] = None,
) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
decoder_hidden = previous_state["decoder_hidden"]
decoder_context = previous_state["decoder_context"]
# shape: (group_size, output_dim)
last_predictions_embedding = previous_steps_predictions[:, -1]
if self._attention:
# shape: (group_size, encoder_output_dim)
attended_input = self._prepare_attended_input(
decoder_hidden, encoder_outputs, source_mask
)
# shape: (group_size, decoder_output_dim + target_embedding_dim)
decoder_input = torch.cat((attended_input, last_predictions_embedding), -1)
else:
# shape: (group_size, target_embedding_dim)
decoder_input = last_predictions_embedding
# shape (decoder_hidden): (batch_size, decoder_output_dim)
# shape (decoder_context): (batch_size, decoder_output_dim)
decoder_hidden, decoder_context = self._decoder_cell(
decoder_input, (decoder_hidden, decoder_context)
)
return (
{"decoder_hidden": decoder_hidden, "decoder_context": decoder_context},
decoder_hidden,
)
| [
"torch.cat",
"torch.nn.LSTMCell"
] | 1.2.0 | nadgeri14/allennlp | 2eefffaf71612263a1c20e8ce4107849cfd5efe3 |
1.2 | from typing import Dict, Optional, List, Any
import torch
from allennlp.common.checks import check_dimensions_match
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward
from allennlp.modules import Seq2SeqEncoder, SimilarityFunction, TimeDistributed, TextFieldEmbedder
from allennlp.modules.matrix_attention.legacy_matrix_attention import LegacyMatrixAttention
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import get_text_field_mask, masked_softmax, weighted_sum
from allennlp.training.metrics import CategoricalAccuracy
@Model.register("decomposable_attention")
class DecomposableAttention(Model):
"""
This ``Model`` implements the Decomposable Attention model described in [A Decomposable
Attention Model for Natural Language Inference](
https://www.semanticscholar.org/paper/A-Decomposable-Attention-Model-for-Natural-Languag-Parikh-T%C3%A4ckstr%C3%B6m/07a9478e87a8304fc3267fa16e83e9f3bbd98b27)
by Parikh et al., 2016, with some optional enhancements before the decomposable attention
actually happens. Parikh's original model allowed for computing an "intra-sentence" attention
before doing the decomposable entailment step. We generalize this to any
:class:`Seq2SeqEncoder` that can be applied to the premise and/or the hypothesis before
computing entailment.
The basic outline of this model is to get an embedded representation of each word in the
premise and hypothesis, align words between the two, compare the aligned phrases, and make a
final entailment decision based on this aggregated comparison. Each step in this process uses
a feedforward network to modify the representation.
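    A shape-level sketch of the attend/align step (comments only, not the actual code):
    ```python
    # premise:    (batch, p_len, dim)     hypothesis: (batch, h_len, dim)
    # similarity: (batch, p_len, h_len)   via the attend feedforward + similarity function
    # each premise word is compared to a softmax-weighted sum of hypothesis
    # words (and vice versa); the comparisons are summed over the sequence and
    # fed to the aggregate feedforward to produce label logits.
    ```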
# Parameters
vocab : ``Vocabulary``
text_field_embedder : ``TextFieldEmbedder``
Used to embed the ``premise`` and ``hypothesis`` ``TextFields`` we get as input to the
model.
attend_feedforward : ``FeedForward``
This feedforward network is applied to the encoded sentence representations before the
similarity matrix is computed between words in the premise and words in the hypothesis.
similarity_function : ``SimilarityFunction``
This is the similarity function used when computing the similarity matrix between words in
the premise and words in the hypothesis.
compare_feedforward : ``FeedForward``
This feedforward network is applied to the aligned premise and hypothesis representations,
individually.
aggregate_feedforward : ``FeedForward``
This final feedforward network is applied to the concatenated, summed result of the
``compare_feedforward`` network, and its output is used as the entailment class logits.
premise_encoder : ``Seq2SeqEncoder``, optional (default=``None``)
After embedding the premise, we can optionally apply an encoder. If this is ``None``, we
will do nothing.
hypothesis_encoder : ``Seq2SeqEncoder``, optional (default=``None``)
After embedding the hypothesis, we can optionally apply an encoder. If this is ``None``,
we will use the ``premise_encoder`` for the encoding (doing nothing if ``premise_encoder``
is also ``None``).
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
attend_feedforward: FeedForward,
similarity_function: SimilarityFunction,
compare_feedforward: FeedForward,
aggregate_feedforward: FeedForward,
premise_encoder: Optional[Seq2SeqEncoder] = None,
hypothesis_encoder: Optional[Seq2SeqEncoder] = None,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
) -> None:
super().__init__(vocab, regularizer)
self._text_field_embedder = text_field_embedder
self._attend_feedforward = TimeDistributed(attend_feedforward)
self._matrix_attention = LegacyMatrixAttention(similarity_function)
self._compare_feedforward = TimeDistributed(compare_feedforward)
self._aggregate_feedforward = aggregate_feedforward
self._premise_encoder = premise_encoder
self._hypothesis_encoder = hypothesis_encoder or premise_encoder
self._num_labels = vocab.get_vocab_size(namespace="labels")
check_dimensions_match(
text_field_embedder.get_output_dim(),
attend_feedforward.get_input_dim(),
"text field embedding dim",
"attend feedforward input dim",
)
check_dimensions_match(
aggregate_feedforward.get_output_dim(),
self._num_labels,
"final output dimension",
"number of labels",
)
self._accuracy = CategoricalAccuracy()
self._loss = torch.nn.CrossEntropyLoss()
initializer(self)
def forward( # type: ignore
self,
premise: TextFieldTensors,
hypothesis: TextFieldTensors,
label: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
# Parameters
premise : TextFieldTensors
From a ``TextField``
hypothesis : TextFieldTensors
From a ``TextField``
label : torch.IntTensor, optional, (default = None)
From a ``LabelField``
metadata : ``List[Dict[str, Any]]``, optional, (default = None)
Metadata containing the original tokenization of the premise and
hypothesis with 'premise_tokens' and 'hypothesis_tokens' keys respectively.
# Returns
An output dictionary consisting of:
label_logits : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing unnormalised log
probabilities of the entailment label.
label_probs : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing probabilities of the
entailment label.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
"""
embedded_premise = self._text_field_embedder(premise)
embedded_hypothesis = self._text_field_embedder(hypothesis)
premise_mask = get_text_field_mask(premise).float()
hypothesis_mask = get_text_field_mask(hypothesis).float()
if self._premise_encoder:
embedded_premise = self._premise_encoder(embedded_premise, premise_mask)
if self._hypothesis_encoder:
embedded_hypothesis = self._hypothesis_encoder(embedded_hypothesis, hypothesis_mask)
projected_premise = self._attend_feedforward(embedded_premise)
projected_hypothesis = self._attend_feedforward(embedded_hypothesis)
# Shape: (batch_size, premise_length, hypothesis_length)
similarity_matrix = self._matrix_attention(projected_premise, projected_hypothesis)
# Shape: (batch_size, premise_length, hypothesis_length)
p2h_attention = masked_softmax(similarity_matrix, hypothesis_mask)
# Shape: (batch_size, premise_length, embedding_dim)
attended_hypothesis = weighted_sum(embedded_hypothesis, p2h_attention)
# Shape: (batch_size, hypothesis_length, premise_length)
h2p_attention = masked_softmax(similarity_matrix.transpose(1, 2).contiguous(), premise_mask)
# Shape: (batch_size, hypothesis_length, embedding_dim)
attended_premise = weighted_sum(embedded_premise, h2p_attention)
premise_compare_input = torch.cat([embedded_premise, attended_hypothesis], dim=-1)
hypothesis_compare_input = torch.cat([embedded_hypothesis, attended_premise], dim=-1)
compared_premise = self._compare_feedforward(premise_compare_input)
compared_premise = compared_premise * premise_mask.unsqueeze(-1)
# Shape: (batch_size, compare_dim)
compared_premise = compared_premise.sum(dim=1)
compared_hypothesis = self._compare_feedforward(hypothesis_compare_input)
compared_hypothesis = compared_hypothesis * hypothesis_mask.unsqueeze(-1)
# Shape: (batch_size, compare_dim)
compared_hypothesis = compared_hypothesis.sum(dim=1)
aggregate_input = torch.cat([compared_premise, compared_hypothesis], dim=-1)
label_logits = self._aggregate_feedforward(aggregate_input)
label_probs = torch.nn.functional.softmax(label_logits, dim=-1)
output_dict = {
"label_logits": label_logits,
"label_probs": label_probs,
"h2p_attention": h2p_attention,
"p2h_attention": p2h_attention,
}
if label is not None:
loss = self._loss(label_logits, label.long().view(-1))
self._accuracy(label_logits, label)
output_dict["loss"] = loss
if metadata is not None:
output_dict["premise_tokens"] = [x["premise_tokens"] for x in metadata]
output_dict["hypothesis_tokens"] = [x["hypothesis_tokens"] for x in metadata]
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {"accuracy": self._accuracy.get_metric(reset)}
| [
"torch.cat",
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.softmax"
] | 1.2.0 | nadgeri14/allennlp | 2eefffaf71612263a1c20e8ce4107849cfd5efe3 |
1.2 | import os
import sys
import argparse
import torch
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))
from allennlp.common.tqdm import Tqdm
from allennlp.common import Params
from allennlp.models.archival import load_archive
from allennlp.data.iterators import BasicIterator
from allennlp.data import DatasetReader
from allennlp.models.semantic_role_labeler import write_to_conll_eval_file
from allennlp.nn.util import move_to_device
def main(serialization_directory: str, device: int, data: str, prefix: str, domain: str = None):
"""
serialization_directory : str, required.
The directory containing the serialized weights.
device: int, default = -1
The device to run the evaluation on.
data: str, default = None
The data to evaluate on. By default, we use the validation data from
the original experiment.
prefix: str, default=""
The prefix to prepend to the generated gold and prediction files, to distinguish
different models/data.
domain: str, optional (default = None)
If passed, filters the ontonotes evaluation/test dataset to only contain the
specified domain. This overwrites the domain in the config file from the model,
to allow evaluation on domains other than the one the model was trained on.
"""
config = Params.from_file(os.path.join(serialization_directory, "config.json"))
if domain is not None:
# Hack to allow evaluation on different domains than the
# model was trained on.
config["dataset_reader"]["domain_identifier"] = domain
prefix = f"{domain}_{prefix}"
else:
config["dataset_reader"].pop("domain_identifier", None)
dataset_reader = DatasetReader.from_params(config["dataset_reader"])
evaluation_data_path = data if data else config["validation_data_path"]
archive = load_archive(
os.path.join(serialization_directory, "model.tar.gz"), cuda_device=device
)
model = archive.model
model.eval()
prediction_file_path = os.path.join(serialization_directory, prefix + "_predictions.txt")
gold_file_path = os.path.join(serialization_directory, prefix + "_gold.txt")
prediction_file = open(prediction_file_path, "w+")
gold_file = open(gold_file_path, "w+")
# Load the evaluation data and index it.
print("reading evaluation data from {}".format(evaluation_data_path))
instances = dataset_reader.read(evaluation_data_path)
with torch.autograd.no_grad():
iterator = BasicIterator(batch_size=32)
iterator.index_with(model.vocab)
model_predictions = []
batches = iterator(instances, num_epochs=1, shuffle=False)
for batch in Tqdm.tqdm(batches):
batch = move_to_device(batch, device)
result = model(**batch)
predictions = model.decode(result)
model_predictions.extend(predictions["tags"])
for instance, prediction in zip(instances, model_predictions):
fields = instance.fields
verb_index = fields["metadata"]["verb_index"]
gold_tags = fields["metadata"]["gold_tags"]
sentence = fields["metadata"]["words"]
write_to_conll_eval_file(
prediction_file, gold_file, verb_index, sentence, prediction, gold_tags
)
prediction_file.close()
gold_file.close()
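# Example invocation (script name and paths here are hypothetical):
#   python write_srl_predictions.py --path /path/to/serialization_dir \
#       --device 0 --data /path/to/eval_data --prefix dev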
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="write conll format srl predictions to file from a pretrained model."
)
parser.add_argument("--path", type=str, help="the serialization directory.")
parser.add_argument("--device", type=int, default=-1, help="the device to load the model onto.")
parser.add_argument(
"--data", type=str, default=None, help="A directory containing a dataset to evaluate on."
)
parser.add_argument(
"--prefix", type=str, default="", help="A prefix to distinguish model outputs."
)
parser.add_argument(
"--domain",
type=str,
default=None,
help="An optional domain to filter by for producing results.",
)
args = parser.parse_args()
main(args.path, args.device, args.data, args.prefix, args.domain)
| [
"torch.autograd.no_grad"
] | 1.2.0 | nadgeri14/allennlp | 2eefffaf71612263a1c20e8ce4107849cfd5efe3 |
1.2 | from typing import Dict, Optional, List, Set, Tuple, Union
import pytest
import torch
from allennlp.common import Params
from allennlp.common.from_params import FromParams, takes_arg, remove_optional, create_kwargs
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import DatasetReader, Tokenizer
from allennlp.models import Model
from allennlp.models.archival import load_archive
from allennlp.common.checks import ConfigurationError
class MyClass(FromParams):
def __init__(self, my_int: int, my_bool: bool = False) -> None:
self.my_int = my_int
self.my_bool = my_bool
class TestFromParams(AllenNlpTestCase):
def test_takes_arg(self):
def bare_function(some_input: int) -> int:
return some_input + 1
assert takes_arg(bare_function, "some_input")
assert not takes_arg(bare_function, "some_other_input")
class SomeClass:
total = 0
def __init__(self, constructor_param: str) -> None:
self.constructor_param = constructor_param
def check_param(self, check: str) -> bool:
return self.constructor_param == check
@classmethod
def set_total(cls, new_total: int) -> None:
cls.total = new_total
assert takes_arg(SomeClass, "self")
assert takes_arg(SomeClass, "constructor_param")
assert not takes_arg(SomeClass, "check")
assert takes_arg(SomeClass.check_param, "check")
assert not takes_arg(SomeClass.check_param, "other_check")
assert takes_arg(SomeClass.set_total, "new_total")
assert not takes_arg(SomeClass.set_total, "total")
def test_remove_optional(self):
optional_type = Optional[Dict[str, str]]
bare_type = remove_optional(optional_type) # type: ignore
bare_bare_type = remove_optional(bare_type)
assert bare_type == Dict[str, str]
assert bare_bare_type == Dict[str, str]
assert remove_optional(Optional[str]) == str
assert remove_optional(str) == str
def test_from_params(self):
my_class = MyClass.from_params(Params({"my_int": 10}), my_bool=True)
assert isinstance(my_class, MyClass)
assert my_class.my_int == 10
assert my_class.my_bool
def test_create_kwargs(self):
kwargs = create_kwargs(MyClass, MyClass, Params({"my_int": 5}), my_bool=True, my_float=4.4)
# my_float should not be included because it's not a param of the MyClass constructor
assert kwargs == {"my_int": 5, "my_bool": True}
def test_extras(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
pass
@A.register("b")
class B(A):
def __init__(self, size: int, name: str) -> None:
self.size = size
self.name = name
@A.register("c")
class C(A):
def __init__(self, size: int, name: str) -> None:
self.size = size
self.name = name
# custom from params
@classmethod
def from_params(cls, params: Params, size: int, **extras) -> "C": # type: ignore
name = params.pop("name")
return cls(size=size, name=name)
# Check that extras get passed, even though A doesn't need them.
params = Params({"type": "b", "size": 10})
b = A.from_params(params, name="extra")
assert b.name == "extra"
assert b.size == 10
# Check that extra extras don't get passed.
params = Params({"type": "b", "size": 10})
b = A.from_params(params, name="extra", unwanted=True)
assert b.name == "extra"
assert b.size == 10
# Now the same with a custom from_params.
params = Params({"type": "c", "name": "extra_c"})
c = A.from_params(params, size=20)
assert c.name == "extra_c"
assert c.size == 20
# Check that extra extras don't get passed.
params = Params({"type": "c", "name": "extra_c"})
c = A.from_params(params, size=20, unwanted=True)
assert c.name == "extra_c"
assert c.size == 20
def test_extras_for_custom_classes(self):
from allennlp.common.registrable import Registrable
class BaseClass(Registrable):
pass
class BaseClass2(Registrable):
pass
@BaseClass.register("A")
class A(BaseClass):
def __init__(self, a: int, b: int, val: str) -> None:
self.a = a
self.b = b
self.val = val
def __hash__(self):
return self.b
def __eq__(self, other):
return self.b == other.b
@classmethod
def from_params(cls, params: Params, a: int, **extras) -> "A": # type: ignore
# A custom from params
b = params.pop_int("b")
val = params.pop("val", "C")
params.assert_empty(cls.__name__)
return cls(a=a, b=b, val=val)
@BaseClass2.register("B")
class B(BaseClass2):
def __init__(self, c: int, b: int) -> None:
self.c = c
self.b = b
@classmethod
def from_params(cls, params: Params, c: int, **extras) -> "B": # type: ignore
b = params.pop_int("b")
params.assert_empty(cls.__name__)
return cls(c=c, b=b)
@BaseClass.register("E")
class E(BaseClass):
def __init__(self, m: int, n: int) -> None:
self.m = m
self.n = n
@classmethod
def from_params(cls, params: Params, **extras2) -> "E": # type: ignore
m = params.pop_int("m")
params.assert_empty(cls.__name__)
n = extras2["n"]
return cls(m=m, n=n)
class C:
pass
@BaseClass.register("D")
class D(BaseClass):
def __init__(
self,
arg1: List[BaseClass],
arg2: Tuple[BaseClass, BaseClass2],
arg3: Dict[str, BaseClass],
arg4: Set[BaseClass],
arg5: List[BaseClass],
) -> None:
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
self.arg4 = arg4
self.arg5 = arg5
vals = [1, 2, 3]
params = Params(
{
"type": "D",
"arg1": [
{"type": "A", "b": vals[0]},
{"type": "A", "b": vals[1]},
{"type": "A", "b": vals[2]},
],
"arg2": [{"type": "A", "b": vals[0]}, {"type": "B", "b": vals[0]}],
"arg3": {
"class_1": {"type": "A", "b": vals[0]},
"class_2": {"type": "A", "b": vals[1]},
},
"arg4": [
{"type": "A", "b": vals[0], "val": "M"},
{"type": "A", "b": vals[1], "val": "N"},
{"type": "A", "b": vals[1], "val": "N"},
],
"arg5": [{"type": "E", "m": 9}],
}
)
extra = C()
tval1 = 5
tval2 = 6
d = BaseClass.from_params(params=params, extra=extra, a=tval1, c=tval2, n=10)
        # Tests for List
assert len(d.arg1) == len(vals)
assert isinstance(d.arg1, list)
assert isinstance(d.arg1[0], A)
assert all([x.b == y for x, y in zip(d.arg1, vals)])
assert all([x.a == tval1 for x in d.arg1])
# Tests for Tuple
assert isinstance(d.arg2, tuple)
assert isinstance(d.arg2[0], A)
assert isinstance(d.arg2[1], B)
assert d.arg2[0].a == tval1
assert d.arg2[1].c == tval2
assert d.arg2[0].b == d.arg2[1].b == vals[0]
# Tests for Dict
assert isinstance(d.arg3, dict)
assert isinstance(d.arg3["class_1"], A)
assert d.arg3["class_1"].a == d.arg3["class_2"].a == tval1
assert d.arg3["class_1"].b == vals[0]
assert d.arg3["class_2"].b == vals[1]
# Tests for Set
assert isinstance(d.arg4, set)
assert len(d.arg4) == 2
assert any(x.val == "M" for x in d.arg4)
assert any(x.val == "N" for x in d.arg4)
# Tests for custom extras parameters
assert isinstance(d.arg5, list)
assert isinstance(d.arg5[0], E)
assert d.arg5[0].m == 9
assert d.arg5[0].n == 10
def test_no_constructor(self):
params = Params({"type": "just_spaces"})
Tokenizer.from_params(params)
def test_union(self):
class A(FromParams):
def __init__(self, a: Union[int, List[int]]) -> None:
self.a = a
class B(FromParams):
def __init__(self, b: Union[A, List[A]]) -> None:
# Really you would want to be sure that `self.b` has a consistent type, but for
# this test we'll ignore that.
self.b = b
class C(FromParams):
def __init__(self, c: Union[A, B, Dict[str, A]]) -> None:
# Really you would want to be sure that `self.c` has a consistent type, but for
# this test we'll ignore that.
self.c = c
params = Params({"a": 3})
a = A.from_params(params)
assert a.a == 3
params = Params({"a": [3, 4, 5]})
a = A.from_params(params)
assert a.a == [3, 4, 5]
params = Params({"b": {"a": 3}})
b = B.from_params(params)
assert isinstance(b.b, A)
assert b.b.a == 3
params = Params({"b": [{"a": 3}, {"a": [4, 5]}]})
b = B.from_params(params)
assert isinstance(b.b, list)
assert b.b[0].a == 3
assert b.b[1].a == [4, 5]
# This is a contrived, ugly example (why would you want to duplicate names in a nested
        # structure like this??), but it demonstrates a potential bug when dealing with mutable
# parameters. If you're not careful about keeping the parameters un-mutated in two
# separate places, you'll end up with a B, or with a dict that's missing the 'b' key.
params = Params({"c": {"a": {"a": 3}, "b": {"a": [4, 5]}}})
c = C.from_params(params)
assert isinstance(c.c, dict)
assert c.c["a"].a == 3
assert c.c["b"].a == [4, 5]
def test_dict(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
pass
@A.register("b")
class B(A):
def __init__(self, size: int) -> None:
self.size = size
class C(Registrable):
pass
@C.register("d")
class D(C):
def __init__(self, items: Dict[str, A]) -> None:
self.items = items
params = Params(
{
"type": "d",
"items": {"first": {"type": "b", "size": 1}, "second": {"type": "b", "size": 2}},
}
)
d = C.from_params(params)
assert isinstance(d.items, dict)
assert len(d.items) == 2
assert all(isinstance(key, str) for key in d.items.keys())
assert all(isinstance(value, B) for value in d.items.values())
assert d.items["first"].size == 1
assert d.items["second"].size == 2
def test_dict_not_params(self):
class A(FromParams):
def __init__(self, counts: Dict[str, int]) -> None:
self.counts = counts
params = Params({"counts": {"a": 10, "b": 20}})
a = A.from_params(params)
assert isinstance(a.counts, dict)
assert not isinstance(a.counts, Params)
def test_list(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
pass
@A.register("b")
class B(A):
def __init__(self, size: int) -> None:
self.size = size
class C(Registrable):
pass
@C.register("d")
class D(C):
def __init__(self, items: List[A]) -> None:
self.items = items
params = Params(
{"type": "d", "items": [{"type": "b", "size": 1}, {"type": "b", "size": 2}]}
)
d = C.from_params(params)
assert isinstance(d.items, list)
assert len(d.items) == 2
assert all(isinstance(item, B) for item in d.items)
assert d.items[0].size == 1
assert d.items[1].size == 2
def test_tuple(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
pass
@A.register("b")
class B(A):
def __init__(self, size: int) -> None:
self.size = size
class C(Registrable):
pass
@C.register("d")
class D(C):
def __init__(self, name: str) -> None:
self.name = name
class E(Registrable):
pass
@E.register("f")
class F(E):
def __init__(self, items: Tuple[A, C]) -> None:
self.items = items
params = Params(
{"type": "f", "items": [{"type": "b", "size": 1}, {"type": "d", "name": "item2"}]}
)
f = E.from_params(params)
assert isinstance(f.items, tuple)
assert len(f.items) == 2
assert isinstance(f.items[0], B)
assert isinstance(f.items[1], D)
assert f.items[0].size == 1
assert f.items[1].name == "item2"
def test_set(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
def __init__(self, name: str) -> None:
self.name = name
def __eq__(self, other):
return self.name == other.name
def __hash__(self):
return hash(self.name)
@A.register("b")
class B(A):
pass
class C(Registrable):
pass
@C.register("d")
class D(C):
def __init__(self, items: Set[A]) -> None:
self.items = items
params = Params(
{
"type": "d",
"items": [
{"type": "b", "name": "item1"},
{"type": "b", "name": "item2"},
{"type": "b", "name": "item2"},
],
}
)
d = C.from_params(params)
assert isinstance(d.items, set)
assert len(d.items) == 2
assert all(isinstance(item, B) for item in d.items)
assert any(item.name == "item1" for item in d.items)
assert any(item.name == "item2" for item in d.items)
def test_transferring_of_modules(self):
model_archive = str(
self.FIXTURES_ROOT / "decomposable_attention" / "serialization" / "model.tar.gz"
)
trained_model = load_archive(model_archive).model
config_file = str(self.FIXTURES_ROOT / "decomposable_attention" / "experiment.json")
model_params = Params.from_file(config_file).pop("model").as_dict(quiet=True)
# Override only text_field_embedder (freeze) and attend_feedforward params (tunable)
model_params["text_field_embedder"] = {
"_pretrained": {
"archive_file": model_archive,
"module_path": "_text_field_embedder",
"freeze": True,
}
}
model_params["attend_feedforward"] = {
"_pretrained": {
"archive_file": model_archive,
"module_path": "_attend_feedforward._module",
"freeze": False,
}
}
transfer_model = Model.from_params(vocab=trained_model.vocab, params=Params(model_params))
# TextFieldEmbedder and AttendFeedforward parameters should be transferred
for trained_parameter, transfer_parameter in zip(
trained_model._text_field_embedder.parameters(),
transfer_model._text_field_embedder.parameters(),
):
assert torch.all(trained_parameter == transfer_parameter)
for trained_parameter, transfer_parameter in zip(
trained_model._attend_feedforward.parameters(),
transfer_model._attend_feedforward.parameters(),
):
assert torch.all(trained_parameter == transfer_parameter)
# Any other module's parameters shouldn't be same (eg. compare_feedforward)
for trained_parameter, transfer_parameter in zip(
trained_model._compare_feedforward.parameters(),
transfer_model._compare_feedforward.parameters(),
):
assert torch.all(trained_parameter != transfer_parameter)
# TextFieldEmbedder should have requires_grad Off
for parameter in transfer_model._text_field_embedder.parameters():
assert not parameter.requires_grad
        # AttendFeedforward should have requires_grad On
for parameter in transfer_model._attend_feedforward.parameters():
assert parameter.requires_grad
def test_transferring_of_modules_ensures_type_consistency(self):
model_archive = str(
self.FIXTURES_ROOT / "decomposable_attention" / "serialization" / "model.tar.gz"
)
trained_model = load_archive(model_archive).model
config_file = str(self.FIXTURES_ROOT / "decomposable_attention" / "experiment.json")
model_params = Params.from_file(config_file).pop("model").as_dict(quiet=True)
# Override only text_field_embedder and make it load AttendFeedForward
model_params["text_field_embedder"] = {
"_pretrained": {
"archive_file": model_archive,
"module_path": "_attend_feedforward._module",
}
}
with pytest.raises(ConfigurationError):
Model.from_params(vocab=trained_model.vocab, params=Params(model_params))
def test_kwargs_are_passed_to_superclass(self):
params = Params(
{"type": "text_classification_json", "lazy": True, "cache_directory": "tmp"}
)
reader = DatasetReader.from_params(params)
assert reader.lazy is True
assert str(reader._cache_directory) == "tmp"
| [
"torch.all"
] | 1.2.0 | nadgeri14/allennlp | 2eefffaf71612263a1c20e8ce4107849cfd5efe3 |
1.4 | from typing import Any, Dict, Optional, Sequence, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from tianshou.data import Batch, to_torch
from tianshou.utils.net.common import MLP
class Actor(nn.Module):
"""Simple actor network.
    It creates an actor operating in a discrete action space with the structure
    preprocess_net ---> action_shape.
    :param preprocess_net: a self-defined preprocess_net which outputs a
flattened hidden state.
:param action_shape: a sequence of int for the shape of action.
:param hidden_sizes: a sequence of int for constructing the MLP after
preprocess_net. Default to empty sequence (where the MLP now contains
only a single linear layer).
:param bool softmax_output: whether to apply a softmax layer over the last
layer's output.
:param int preprocess_net_output_dim: the output dimension of
preprocess_net.
For advanced usage (how to customize the network), please refer to
:ref:`build_the_network`.
.. seealso::
Please refer to :class:`~tianshou.utils.net.common.Net` as an instance
of how preprocess_net is suggested to be defined.
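    A minimal construction sketch (shapes are illustrative):
    .. code-block:: python
        from tianshou.utils.net.common import Net
        net = Net(state_shape=4, hidden_sizes=[64, 64])
        actor = Actor(net, action_shape=2)
        probs, hidden = actor(np.zeros((1, 4)))  # probs: (1, 2) action distribution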
"""
def __init__(
self,
preprocess_net: nn.Module,
action_shape: Sequence[int],
hidden_sizes: Sequence[int] = (),
softmax_output: bool = True,
preprocess_net_output_dim: Optional[int] = None,
device: Union[str, int, torch.device] = "cpu",
) -> None:
super().__init__()
self.device = device
self.preprocess = preprocess_net
self.output_dim = int(np.prod(action_shape))
input_dim = getattr(preprocess_net, "output_dim", preprocess_net_output_dim)
self.last = MLP(
input_dim, # type: ignore
self.output_dim,
hidden_sizes,
device=self.device
)
self.softmax_output = softmax_output
def forward(
self,
s: Union[np.ndarray, torch.Tensor],
state: Any = None,
info: Dict[str, Any] = {},
) -> Tuple[torch.Tensor, Any]:
r"""Mapping: s -> Q(s, \*)."""
logits, h = self.preprocess(s, state)
logits = self.last(logits)
if self.softmax_output:
logits = F.softmax(logits, dim=-1)
return logits, h
class Critic(nn.Module):
"""Simple critic network. Will create an actor operated in discrete \
action space with structure of preprocess_net ---> 1(q value).
:param preprocess_net: a self-defined preprocess_net which output a
flattened hidden state.
:param hidden_sizes: a sequence of int for constructing the MLP after
preprocess_net. Default to empty sequence (where the MLP now contains
only a single linear layer).
:param int last_size: the output dimension of Critic network. Default to 1.
:param int preprocess_net_output_dim: the output dimension of
preprocess_net.
For advanced usage (how to customize the network), please refer to
:ref:`build_the_network`.
.. seealso::
Please refer to :class:`~tianshou.utils.net.common.Net` as an instance
of how preprocess_net is suggested to be defined.
"""
def __init__(
self,
preprocess_net: nn.Module,
hidden_sizes: Sequence[int] = (),
last_size: int = 1,
preprocess_net_output_dim: Optional[int] = None,
device: Union[str, int, torch.device] = "cpu",
) -> None:
super().__init__()
self.device = device
self.preprocess = preprocess_net
self.output_dim = last_size
input_dim = getattr(preprocess_net, "output_dim", preprocess_net_output_dim)
self.last = MLP(
input_dim, # type: ignore
last_size,
hidden_sizes,
device=self.device
)
def forward(
self, s: Union[np.ndarray, torch.Tensor], **kwargs: Any
) -> torch.Tensor:
"""Mapping: s -> V(s)."""
logits, _ = self.preprocess(s, state=kwargs.get("state", None))
return self.last(logits)
class CosineEmbeddingNetwork(nn.Module):
"""Cosine embedding network for IQN. Convert a scalar in [0, 1] to a list \
of n-dim vectors.
:param num_cosines: the number of cosines used for the embedding.
:param embedding_dim: the dimension of the embedding/output.
.. note::
From https://github.com/ku2482/fqf-iqn-qrdqn.pytorch/blob/master
/fqf_iqn_qrdqn/network.py .
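    A sketch of the mapping for a single sample `tau` (comments only):
    .. code-block:: python
        # features_i = cos(i * pi * tau),  i = 1, ..., num_cosines
        # embedding  = ReLU(Linear(features))   # shape: (embedding_dim,)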
"""
def __init__(self, num_cosines: int, embedding_dim: int) -> None:
super().__init__()
self.net = nn.Sequential(nn.Linear(num_cosines, embedding_dim), nn.ReLU())
self.num_cosines = num_cosines
self.embedding_dim = embedding_dim
def forward(self, taus: torch.Tensor) -> torch.Tensor:
batch_size = taus.shape[0]
N = taus.shape[1]
# Calculate i * \pi (i=1,...,N).
i_pi = np.pi * torch.arange(
start=1, end=self.num_cosines + 1, dtype=taus.dtype, device=taus.device
).view(1, 1, self.num_cosines)
# Calculate cos(i * \pi * \tau).
cosines = torch.cos(taus.view(batch_size, N, 1) * i_pi
).view(batch_size * N, self.num_cosines)
# Calculate embeddings of taus.
tau_embeddings = self.net(cosines).view(batch_size, N, self.embedding_dim)
return tau_embeddings
class ImplicitQuantileNetwork(Critic):
"""Implicit Quantile Network.
    :param preprocess_net: a self-defined preprocess_net which outputs a
        flattened hidden state.
    :param action_shape: a sequence of int for the shape of action.
:param hidden_sizes: a sequence of int for constructing the MLP after
preprocess_net. Default to empty sequence (where the MLP now contains
only a single linear layer).
:param int num_cosines: the number of cosines to use for cosine embedding.
Default to 64.
:param int preprocess_net_output_dim: the output dimension of
preprocess_net.
.. note::
Although this class inherits Critic, it is actually a quantile Q-Network
with output shape (batch_size, action_dim, sample_size).
        The second item of the first return value is the tau vector.
"""
def __init__(
self,
preprocess_net: nn.Module,
action_shape: Sequence[int],
hidden_sizes: Sequence[int] = (),
num_cosines: int = 64,
preprocess_net_output_dim: Optional[int] = None,
device: Union[str, int, torch.device] = "cpu"
) -> None:
last_size = np.prod(action_shape)
super().__init__(
preprocess_net, hidden_sizes, last_size, preprocess_net_output_dim, device
)
self.input_dim = getattr(
preprocess_net, "output_dim", preprocess_net_output_dim
)
self.embed_model = CosineEmbeddingNetwork(
num_cosines,
self.input_dim # type: ignore
).to(device)
def forward( # type: ignore
self, s: Union[np.ndarray, torch.Tensor], sample_size: int, **kwargs: Any
) -> Tuple[Any, torch.Tensor]:
r"""Mapping: s -> Q(s, \*)."""
logits, h = self.preprocess(s, state=kwargs.get("state", None))
# Sample fractions.
batch_size = logits.size(0)
taus = torch.rand(
batch_size, sample_size, dtype=logits.dtype, device=logits.device
)
embedding = (logits.unsqueeze(1) *
self.embed_model(taus)).view(batch_size * sample_size, -1)
out = self.last(embedding).view(batch_size, sample_size, -1).transpose(1, 2)
return (out, taus), h
class FractionProposalNetwork(nn.Module):
"""Fraction proposal network for FQF.
    :param num_fractions: the number of fractions to propose.
:param embedding_dim: the dimension of the embedding/input.
.. note::
Adapted from https://github.com/ku2482/fqf-iqn-qrdqn.pytorch/blob/master
/fqf_iqn_qrdqn/network.py .
"""
def __init__(self, num_fractions: int, embedding_dim: int) -> None:
super().__init__()
self.net = nn.Linear(embedding_dim, num_fractions)
torch.nn.init.xavier_uniform_(self.net.weight, gain=0.01)
torch.nn.init.constant_(self.net.bias, 0)
self.num_fractions = num_fractions
self.embedding_dim = embedding_dim
def forward(
self, state_embeddings: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
# Calculate (log of) probabilities q_i in the paper.
m = torch.distributions.Categorical(logits=self.net(state_embeddings))
taus_1_N = torch.cumsum(m.probs, dim=1)
# Calculate \tau_i (i=0,...,N).
taus = F.pad(taus_1_N, (1, 0))
# Calculate \hat \tau_i (i=0,...,N-1).
tau_hats = (taus[:, :-1] + taus[:, 1:]).detach() / 2.0
# Calculate entropies of value distributions.
entropies = m.entropy()
return taus, tau_hats, entropies
class FullQuantileFunction(ImplicitQuantileNetwork):
"""Full(y parameterized) Quantile Function.
    :param preprocess_net: a self-defined preprocess_net which outputs a
        flattened hidden state.
    :param action_shape: a sequence of int for the shape of action.
:param hidden_sizes: a sequence of int for constructing the MLP after
preprocess_net. Default to empty sequence (where the MLP now contains
only a single linear layer).
:param int num_cosines: the number of cosines to use for cosine embedding.
Default to 64.
:param int preprocess_net_output_dim: the output dimension of
preprocess_net.
.. note::
The first return value is a tuple of (quantiles, fractions, quantiles_tau),
where fractions is a Batch(taus, tau_hats, entropies).
"""
def __init__(
self,
preprocess_net: nn.Module,
action_shape: Sequence[int],
hidden_sizes: Sequence[int] = (),
num_cosines: int = 64,
preprocess_net_output_dim: Optional[int] = None,
device: Union[str, int, torch.device] = "cpu",
) -> None:
super().__init__(
preprocess_net, action_shape, hidden_sizes, num_cosines,
preprocess_net_output_dim, device
)
def _compute_quantiles(
self, obs: torch.Tensor, taus: torch.Tensor
) -> torch.Tensor:
batch_size, sample_size = taus.shape
embedding = (obs.unsqueeze(1) *
self.embed_model(taus)).view(batch_size * sample_size, -1)
quantiles = self.last(embedding).view(batch_size, sample_size,
-1).transpose(1, 2)
return quantiles
def forward( # type: ignore
self, s: Union[np.ndarray, torch.Tensor],
propose_model: FractionProposalNetwork,
fractions: Optional[Batch] = None,
**kwargs: Any
) -> Tuple[Any, torch.Tensor]:
r"""Mapping: s -> Q(s, \*)."""
logits, h = self.preprocess(s, state=kwargs.get("state", None))
# Propose fractions
if fractions is None:
taus, tau_hats, entropies = propose_model(logits.detach())
fractions = Batch(taus=taus, tau_hats=tau_hats, entropies=entropies)
else:
taus, tau_hats = fractions.taus, fractions.tau_hats
quantiles = self._compute_quantiles(logits, tau_hats)
# Calculate quantiles_tau for computing fraction grad
quantiles_tau = None
if self.training:
with torch.no_grad():
quantiles_tau = self._compute_quantiles(logits, taus[:, 1:-1])
return (quantiles, fractions, quantiles_tau), h
class NoisyLinear(nn.Module):
"""Implementation of Noisy Networks. arXiv:1706.10295.
:param int in_features: the number of input features.
:param int out_features: the number of output features.
:param float noisy_std: initial standard deviation of noisy linear layers.
.. note::
Adapted from https://github.com/ku2482/fqf-iqn-qrdqn.pytorch/blob/master
/fqf_iqn_qrdqn/network.py .
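    A minimal usage sketch (sizes are illustrative):
    .. code-block:: python
        layer = NoisyLinear(128, 64)
        layer.sample()                       # draw fresh factorized noise
        out = layer(torch.zeros(1, 128))     # noisy in training mode, deterministic in eval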
"""
def __init__(
self, in_features: int, out_features: int, noisy_std: float = 0.5
) -> None:
super().__init__()
# Learnable parameters.
self.mu_W = nn.Parameter(torch.FloatTensor(out_features, in_features))
self.sigma_W = nn.Parameter(torch.FloatTensor(out_features, in_features))
self.mu_bias = nn.Parameter(torch.FloatTensor(out_features))
self.sigma_bias = nn.Parameter(torch.FloatTensor(out_features))
# Factorized noise parameters.
self.register_buffer('eps_p', torch.FloatTensor(in_features))
self.register_buffer('eps_q', torch.FloatTensor(out_features))
self.in_features = in_features
self.out_features = out_features
self.sigma = noisy_std
self.reset()
self.sample()
def reset(self) -> None:
bound = 1 / np.sqrt(self.in_features)
self.mu_W.data.uniform_(-bound, bound)
self.mu_bias.data.uniform_(-bound, bound)
self.sigma_W.data.fill_(self.sigma / np.sqrt(self.in_features))
self.sigma_bias.data.fill_(self.sigma / np.sqrt(self.in_features))
def f(self, x: torch.Tensor) -> torch.Tensor:
x = torch.randn(x.size(0), device=x.device)
return x.sign().mul_(x.abs().sqrt_())
def sample(self) -> None:
self.eps_p.copy_(self.f(self.eps_p)) # type: ignore
self.eps_q.copy_(self.f(self.eps_q)) # type: ignore
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.training:
weight = self.mu_W + self.sigma_W * (
self.eps_q.ger(self.eps_p) # type: ignore
)
bias = self.mu_bias + self.sigma_bias * self.eps_q.clone() # type: ignore
else:
weight = self.mu_W
bias = self.mu_bias
return F.linear(x, weight, bias)
def sample_noise(model: nn.Module) -> bool:
"""Sample the random noises of NoisyLinear modules in the model.
:param model: a PyTorch module which may have NoisyLinear submodules.
:returns: True if model has at least one NoisyLinear submodule;
otherwise, False.
"""
done = False
for m in model.modules():
if isinstance(m, NoisyLinear):
m.sample()
done = True
return done
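# A minimal sketch of the intended call pattern (the toy network below is
# hypothetical): resample the factorized noise of every NoisyLinear before a
# forward pass, e.g. once per training step; the return value tells whether the
# model contains any NoisyLinear at all.
def _sample_noise_usage_sketch() -> None:
    net = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), NoisyLinear(16, 4))
    has_noise = sample_noise(net)  # True: net contains a NoisyLinear
    out = net(torch.randn(2, 8))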
class IntrinsicCuriosityModule(nn.Module):
"""Implementation of Intrinsic Curiosity Module. arXiv:1705.05363.
    :param torch.nn.Module feature_net: a self-defined feature_net which outputs a
        flattened hidden state.
    :param int feature_dim: output dimension of the feature net, i.e. the
        dimension of the flattened hidden state.
:param int action_dim: dimension of the action space.
:param hidden_sizes: hidden layer sizes for forward and inverse models.
:param device: device for the module.
"""
def __init__(
self,
feature_net: nn.Module,
feature_dim: int,
action_dim: int,
hidden_sizes: Sequence[int] = (),
device: Union[str, torch.device] = "cpu"
) -> None:
super().__init__()
self.feature_net = feature_net
self.forward_model = MLP(
feature_dim + action_dim,
output_dim=feature_dim,
hidden_sizes=hidden_sizes,
device=device
)
self.inverse_model = MLP(
feature_dim * 2,
output_dim=action_dim,
hidden_sizes=hidden_sizes,
device=device
)
self.feature_dim = feature_dim
self.action_dim = action_dim
self.device = device
def forward(
self, s1: Union[np.ndarray, torch.Tensor],
act: Union[np.ndarray, torch.Tensor], s2: Union[np.ndarray,
torch.Tensor], **kwargs: Any
) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Mapping: s1, act, s2 -> mse_loss, act_hat."""
s1 = to_torch(s1, dtype=torch.float32, device=self.device)
s2 = to_torch(s2, dtype=torch.float32, device=self.device)
phi1, phi2 = self.feature_net(s1), self.feature_net(s2)
act = to_torch(act, dtype=torch.long, device=self.device)
phi2_hat = self.forward_model(
torch.cat([phi1, F.one_hot(act, num_classes=self.action_dim)], dim=1)
)
mse_loss = 0.5 * F.mse_loss(phi2_hat, phi2, reduction="none").sum(1)
act_hat = self.inverse_model(torch.cat([phi1, phi2], dim=1))
return mse_loss, act_hat
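# A minimal usage sketch (the toy feature_net and shapes below are illustrative,
# not part of the library): the forward-model error can serve as an intrinsic
# reward, while the inverse-model logits give an auxiliary action-prediction
# loss.
def _icm_usage_sketch() -> None:
    obs_dim, feature_dim, action_dim, batch = 10, 16, 4, 8
    feature_net = nn.Sequential(nn.Linear(obs_dim, feature_dim), nn.ReLU())
    icm = IntrinsicCuriosityModule(feature_net, feature_dim, action_dim)
    s1 = np.random.rand(batch, obs_dim).astype(np.float32)
    s2 = np.random.rand(batch, obs_dim).astype(np.float32)
    act = np.random.randint(action_dim, size=batch)
    intrinsic_reward, act_logits = icm(s1, act, s2)  # (batch,), (batch, action_dim)
    inverse_loss = F.cross_entropy(act_logits, torch.as_tensor(act))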
| [
"torch.nn.Linear",
"torch.rand",
"torch.cat",
"torch.nn.functional.one_hot",
"torch.arange",
"torch.nn.init.constant_",
"torch.FloatTensor",
"torch.no_grad",
"torch.nn.init.xavier_uniform_",
"torch.nn.ReLU",
"torch.nn.functional.mse_loss",
"torch.nn.functional.linear",
"torch.nn.functional.softmax",
"torch.nn.functional.pad",
"torch.cumsum"
] | 1.4.0 | dumpmemory/tianshou | bc53ead273f6f9d3788a78ecc739249eeb96b8c6 |
1.2 | import pytest
import torch
from capreolus.reranker.POSITDRMM import POSITDRMM
from capreolus.reranker.KNRM import KNRM
def test_validate_params_for_knrm():
with pytest.raises(ValueError):
KNRM.validate_params({"foo": "bar"})
with pytest.raises(ValueError):
KNRM.validate_params({"pad_token": 0})
config = {"pad_token": 0, "gradkernels": True, "singlefc": False, "scoretanh": True}
KNRM.validate_params(config)
def test_positdrmm_get_exact_match_count():
query = torch.tensor([1, 2, 3])
doc = torch.tensor([1, 5, 3, 2, 1, 1, 9])
query_idf = [0.5, 0.5, 0.5]
exact_count, exact_count_idf = POSITDRMM.get_exact_match_count(query, doc, query_idf)
assert exact_count == 5 / len(doc)
assert exact_count_idf == (3 * 0.5 + 0.5 + 0.5) / len(doc)
def test_positdrmm_get_bigrams():
# Each number in the doc represents an index into the vocabulary
doc = torch.tensor([1, 2, 3, 4])
doc_bigrams = POSITDRMM.get_bigrams(doc)
expected_doc_bigrams = torch.tensor([[1, 2], [2, 3], [3, 4]])
assert torch.all(torch.eq(doc_bigrams, expected_doc_bigrams))
def test_positdrmm_get_bigram_match_count():
doc = torch.tensor([1, 2, 3, 4, 1, 2])
query = torch.tensor([1, 5, 9, 3, 4])
bigram_match_count = POSITDRMM.get_bigram_match_count(query, doc)
expected_count = 1 / 5 # The only matching bigram is [3, 4], and length of doc bigrams is 5
assert bigram_match_count == expected_count
def test_positdrmm_get_exact_match_stats():
# 3 docs, zero padded at the end
docs = torch.tensor([[1, 2, 3, 4, 0], [2, 3, 1, 5, 0], [3, 4, 5, 6, 0]])
# 1 query repeated 3 times (i.e, batch size = 3), zero padded at the end
queries = torch.tensor([[3, 1, 5, 7, 0], [3, 1, 5, 7, 0], [3, 1, 5, 7, 0]])
query_idf = torch.tensor([[0.5, 0.5, 0.5, 0.5, 0], [0.5, 0.5, 0.5, 0.5, 0], [0.5, 0.5, 0.5, 0.5, 0]])
exact_matches, exact_match_idf, bigram_matches = POSITDRMM.get_exact_match_stats(query_idf, queries, docs)
assert torch.all(torch.eq(exact_matches.reshape(3), torch.tensor([2 / 4, 3 / 4, 2 / 4])))
assert torch.all(torch.eq(exact_match_idf.reshape(3), torch.tensor([2 * 0.5 / 4, 3 * 0.5 / 4, 2 * 0.5 / 4])))
# The query bigrams are:
# [[3, 1], [1, 5], [5, 7], [7, 0]] - we don't clean the query
# The doc bigrams are:
# [[1, 2], [2, 3], [3, 4]]
# [[2, 3], [3, 1], [1, 5]]
# [[3, 4], [4, 5], [5, 6]]
assert torch.all(torch.eq(bigram_matches.reshape(3), torch.tensor([0, 2 / 3, 0])))
| [
"torch.tensor",
"torch.eq"
] | 1.2.0 | bpiwowar/capreolus-xpm | 5374eb48df96b54d51365fc32441ae50a3e634c2 |
1.0 |
import json
from overrides import overrides
import torch
import random
from claf.config.factory.data_loader import make_data_loader
from claf.data.dataset.base import DatasetBase
class MultiTaskBertDataset(DatasetBase):
"""
Dataset for Multi-Task GLUE using BERT
* Args:
        batches: list of Batch DTOs (claf.data.batch), one per task
* Kwargs:
helper: helper from data_reader
"""
def __init__(self, batches, vocab, helper=None):
super(MultiTaskBertDataset, self).__init__()
self.name = "multitask_bert"
self.vocab = vocab
task_helpers = helper["task_helpers"]
self.multi_dataset_size = 0
self.batch_sizes = []
self.task_datasets = []
for b, h in zip(batches, task_helpers):
batch_size = h["batch_size"]
self.batch_sizes.append(batch_size)
dataset_cls = h["dataset"]
dataset = dataset_cls(b, vocab, helper=h)
self.task_datasets.append(dataset)
task_dataset_size, remain = divmod(len(dataset), batch_size)
if remain > 0:
task_dataset_size += 1
self.multi_dataset_size += task_dataset_size
self.init_iterators()
def init_iterators(self):
cuda_device_id = None
if torch.cuda.is_available():
cuda_device_id = 0 # TODO: Hard-code
self.iterators = []
for batch_size, dataset in zip(self.batch_sizes, self.task_datasets):
data_loader = make_data_loader(dataset, batch_size=batch_size, cuda_device_id=cuda_device_id) # TODO: cuda_device_id
self.iterators.append(iter(data_loader))
self.available_iterators = list(range(len(self.iterators)))
@overrides
def collate_fn(self, cuda_device_id=None):
def pass_tensor(data):
task_idx, tensor_datas = zip(*data)
tensor_batch = tensor_datas[0]
task_id_tensor = torch.LongTensor(list(task_idx))
if torch.cuda.is_available():
                task_id_tensor = task_id_tensor.cuda(cuda_device_id)
tensor_batch.features["task_index"] = task_id_tensor
return tensor_batch
return pass_tensor
@overrides
def __getitem__(self, index):
# self.lazy_evaluation(index)
if len(self.available_iterators) == 0:
self.init_iterators()
random_index = random.choice(self.available_iterators)
task_iterator = self.iterators[random_index]
try:
return random_index, next(task_iterator)
except StopIteration:
self.available_iterators.remove(random_index)
return self.__getitem__(index)
def __len__(self):
return self.multi_dataset_size
def __repr__(self):
dataset_properties = {
"name": self.name,
"total_count": self.__len__(),
"dataset_count": len(self.iterators),
"task_dataset_sizes": [len(dataset) for dataset in self.task_datasets],
}
return json.dumps(dataset_properties, indent=4)
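# Note on iteration: __getitem__ draws a random task whose loader is not yet
# exhausted and returns (task_index, batch); when every task loader is
# exhausted, all iterators are re-created on the next access. collate_fn
# attaches the drawn task indices to tensor_batch.features["task_index"] so
# downstream components can tell which task produced the batch.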
| [
"torch.cuda.is_available"
] | 1.0.1 | GMDennis/claf | d1e064e593127e5d654f000f5506c5ae1caab5ce |
3 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import random
import unittest
import numpy as np
import torch
from common_testing import TestCaseMixin
from pytorch3d.structures.meshes import Meshes
class TestMeshes(TestCaseMixin, unittest.TestCase):
def setUp(self) -> None:
np.random.seed(42)
torch.manual_seed(42)
@staticmethod
def init_mesh(
num_meshes: int = 10,
max_v: int = 100,
max_f: int = 300,
lists_to_tensors: bool = False,
device: str = "cpu",
requires_grad: bool = False,
):
"""
Function to generate a Meshes object of N meshes with
random numbers of vertices and faces.
Args:
num_meshes: Number of meshes to generate.
max_v: Max number of vertices per mesh.
max_f: Max number of faces per mesh.
lists_to_tensors: Determines whether the generated meshes should be
constructed from lists (=False) or
a tensor (=True) of faces/verts.
Returns:
Meshes object.
"""
device = torch.device(device)
verts_list = []
faces_list = []
# Randomly generate numbers of faces and vertices in each mesh.
if lists_to_tensors:
# If we define faces/verts with tensors, f/v has to be the
# same for each mesh in the batch.
f = torch.randint(max_f, size=(1,), dtype=torch.int32)
v = torch.randint(3, high=max_v, size=(1,), dtype=torch.int32)
f = f.repeat(num_meshes)
v = v.repeat(num_meshes)
else:
# For lists of faces and vertices, we can sample different v/f
# per mesh.
f = torch.randint(max_f, size=(num_meshes,), dtype=torch.int32)
v = torch.randint(3, high=max_v, size=(num_meshes,), dtype=torch.int32)
# Generate the actual vertices and faces.
for i in range(num_meshes):
verts = torch.rand(
(v[i], 3),
dtype=torch.float32,
device=device,
requires_grad=requires_grad,
)
faces = torch.randint(
v[i], size=(f[i], 3), dtype=torch.int64, device=device
)
verts_list.append(verts)
faces_list.append(faces)
if lists_to_tensors:
verts_list = torch.stack(verts_list)
faces_list = torch.stack(faces_list)
return Meshes(verts=verts_list, faces=faces_list)
@staticmethod
def init_simple_mesh(device: str = "cpu"):
"""
Returns a Meshes data structure of simple mesh examples.
Returns:
Meshes object.
"""
device = torch.device(device)
verts = [
torch.tensor(
[[0.1, 0.3, 0.5], [0.5, 0.2, 0.1], [0.6, 0.8, 0.7]],
dtype=torch.float32,
device=device,
),
torch.tensor(
[[0.1, 0.3, 0.3], [0.6, 0.7, 0.8], [0.2, 0.3, 0.4], [0.1, 0.5, 0.3]],
dtype=torch.float32,
device=device,
),
torch.tensor(
[
[0.7, 0.3, 0.6],
[0.2, 0.4, 0.8],
[0.9, 0.5, 0.2],
[0.2, 0.3, 0.4],
[0.9, 0.3, 0.8],
],
dtype=torch.float32,
device=device,
),
]
faces = [
torch.tensor([[0, 1, 2]], dtype=torch.int64, device=device),
torch.tensor([[0, 1, 2], [1, 2, 3]], dtype=torch.int64, device=device),
torch.tensor(
[
[1, 2, 0],
[0, 1, 3],
[2, 3, 1],
[4, 3, 2],
[4, 0, 1],
[4, 3, 1],
[4, 2, 1],
],
dtype=torch.int64,
device=device,
),
]
return Meshes(verts=verts, faces=faces)
def test_simple(self):
mesh = TestMeshes.init_simple_mesh("cuda:0")
# Check that faces/verts per mesh are set in init:
self.assertClose(mesh._num_faces_per_mesh.cpu(), torch.tensor([1, 2, 7]))
self.assertClose(mesh._num_verts_per_mesh.cpu(), torch.tensor([3, 4, 5]))
# Check computed tensors
self.assertClose(
mesh.verts_packed_to_mesh_idx().cpu(),
torch.tensor([0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2]),
)
self.assertClose(
mesh.mesh_to_verts_packed_first_idx().cpu(), torch.tensor([0, 3, 7])
)
self.assertClose(
mesh.verts_padded_to_packed_idx().cpu(),
torch.tensor([0, 1, 2, 5, 6, 7, 8, 10, 11, 12, 13, 14]),
)
self.assertClose(
mesh.faces_packed_to_mesh_idx().cpu(),
torch.tensor([0, 1, 1, 2, 2, 2, 2, 2, 2, 2]),
)
self.assertClose(
mesh.mesh_to_faces_packed_first_idx().cpu(), torch.tensor([0, 1, 3])
)
self.assertClose(
mesh.num_edges_per_mesh().cpu(), torch.tensor([3, 5, 10], dtype=torch.int32)
)
self.assertClose(
mesh.mesh_to_edges_packed_first_idx().cpu(),
torch.tensor([0, 3, 8], dtype=torch.int64),
)
def test_init_error(self):
# Check if correct errors are raised when verts/faces are on
# different devices
mesh = TestMeshes.init_mesh(10, 10, 100)
verts_list = mesh.verts_list() # all tensors on cpu
verts_list = [
v.to("cuda:0") if random.uniform(0, 1) > 0.5 else v for v in verts_list
]
faces_list = mesh.faces_list()
with self.assertRaises(ValueError) as cm:
Meshes(verts=verts_list, faces=faces_list)
self.assertTrue("same device" in cm.msg)
verts_padded = mesh.verts_padded() # on cpu
verts_padded = verts_padded.to("cuda:0")
faces_padded = mesh.faces_padded()
with self.assertRaises(ValueError) as cm:
Meshes(verts=verts_padded, faces=faces_padded)
self.assertTrue("same device" in cm.msg)
def test_simple_random_meshes(self):
# Define the test mesh object either as a list or tensor of faces/verts.
for lists_to_tensors in (False, True):
N = 10
mesh = TestMeshes.init_mesh(N, 100, 300, lists_to_tensors=lists_to_tensors)
verts_list = mesh.verts_list()
faces_list = mesh.faces_list()
# Check batch calculations.
verts_padded = mesh.verts_padded()
faces_padded = mesh.faces_padded()
verts_per_mesh = mesh.num_verts_per_mesh()
faces_per_mesh = mesh.num_faces_per_mesh()
for n in range(N):
v = verts_list[n].shape[0]
f = faces_list[n].shape[0]
self.assertClose(verts_padded[n, :v, :], verts_list[n])
if verts_padded.shape[1] > v:
self.assertTrue(verts_padded[n, v:, :].eq(0).all())
self.assertClose(faces_padded[n, :f, :], faces_list[n])
if faces_padded.shape[1] > f:
self.assertTrue(faces_padded[n, f:, :].eq(-1).all())
self.assertEqual(verts_per_mesh[n], v)
self.assertEqual(faces_per_mesh[n], f)
# Check compute packed.
verts_packed = mesh.verts_packed()
vert_to_mesh = mesh.verts_packed_to_mesh_idx()
mesh_to_vert = mesh.mesh_to_verts_packed_first_idx()
faces_packed = mesh.faces_packed()
face_to_mesh = mesh.faces_packed_to_mesh_idx()
mesh_to_face = mesh.mesh_to_faces_packed_first_idx()
curv, curf = 0, 0
for n in range(N):
v = verts_list[n].shape[0]
f = faces_list[n].shape[0]
self.assertClose(verts_packed[curv : curv + v, :], verts_list[n])
self.assertClose(faces_packed[curf : curf + f, :] - curv, faces_list[n])
self.assertTrue(vert_to_mesh[curv : curv + v].eq(n).all())
self.assertTrue(face_to_mesh[curf : curf + f].eq(n).all())
self.assertTrue(mesh_to_vert[n] == curv)
self.assertTrue(mesh_to_face[n] == curf)
curv += v
curf += f
# Check compute edges and compare with numpy unique.
edges = mesh.edges_packed().cpu().numpy()
edge_to_mesh_idx = mesh.edges_packed_to_mesh_idx().cpu().numpy()
num_edges_per_mesh = mesh.num_edges_per_mesh().cpu().numpy()
npfaces_packed = mesh.faces_packed().cpu().numpy()
e01 = npfaces_packed[:, [0, 1]]
e12 = npfaces_packed[:, [1, 2]]
e20 = npfaces_packed[:, [2, 0]]
npedges = np.concatenate((e12, e20, e01), axis=0)
npedges = np.sort(npedges, axis=1)
unique_edges, unique_idx = np.unique(npedges, return_index=True, axis=0)
self.assertTrue(np.allclose(edges, unique_edges))
temp = face_to_mesh.cpu().numpy()
temp = np.concatenate((temp, temp, temp), axis=0)
edge_to_mesh = temp[unique_idx]
self.assertTrue(np.allclose(edge_to_mesh_idx, edge_to_mesh))
num_edges = np.bincount(edge_to_mesh, minlength=N)
self.assertTrue(np.allclose(num_edges_per_mesh, num_edges))
mesh_to_edges_packed_first_idx = (
mesh.mesh_to_edges_packed_first_idx().cpu().numpy()
)
self.assertTrue(
np.allclose(mesh_to_edges_packed_first_idx[1:], num_edges.cumsum()[:-1])
)
self.assertTrue(mesh_to_edges_packed_first_idx[0] == 0)
def test_allempty(self):
verts_list = []
faces_list = []
mesh = Meshes(verts=verts_list, faces=faces_list)
self.assertEqual(len(mesh), 0)
self.assertEqual(mesh.verts_padded().shape[0], 0)
self.assertEqual(mesh.faces_padded().shape[0], 0)
self.assertEqual(mesh.verts_packed().shape[0], 0)
self.assertEqual(mesh.faces_packed().shape[0], 0)
self.assertEqual(mesh.num_faces_per_mesh().shape[0], 0)
self.assertEqual(mesh.num_verts_per_mesh().shape[0], 0)
def test_empty(self):
N, V, F = 10, 100, 300
device = torch.device("cuda:0")
verts_list = []
faces_list = []
valid = torch.randint(2, size=(N,), dtype=torch.uint8, device=device)
for n in range(N):
if valid[n]:
v = torch.randint(
3, high=V, size=(1,), dtype=torch.int32, device=device
)[0]
f = torch.randint(F, size=(1,), dtype=torch.int32, device=device)[0]
verts = torch.rand((v, 3), dtype=torch.float32, device=device)
faces = torch.randint(v, size=(f, 3), dtype=torch.int64, device=device)
else:
verts = torch.tensor([], dtype=torch.float32, device=device)
faces = torch.tensor([], dtype=torch.int64, device=device)
verts_list.append(verts)
faces_list.append(faces)
mesh = Meshes(verts=verts_list, faces=faces_list)
verts_padded = mesh.verts_padded()
faces_padded = mesh.faces_padded()
verts_per_mesh = mesh.num_verts_per_mesh()
faces_per_mesh = mesh.num_faces_per_mesh()
for n in range(N):
v = len(verts_list[n])
f = len(faces_list[n])
if v > 0:
self.assertClose(verts_padded[n, :v, :], verts_list[n])
if verts_padded.shape[1] > v:
self.assertTrue(verts_padded[n, v:, :].eq(0).all())
if f > 0:
self.assertClose(faces_padded[n, :f, :], faces_list[n])
if faces_padded.shape[1] > f:
self.assertTrue(faces_padded[n, f:, :].eq(-1).all())
self.assertTrue(verts_per_mesh[n] == v)
self.assertTrue(faces_per_mesh[n] == f)
def test_padding(self):
N, V, F = 10, 100, 300
device = torch.device("cuda:0")
verts, faces = [], []
valid = torch.randint(2, size=(N,), dtype=torch.uint8, device=device)
num_verts, num_faces = (
torch.zeros(N, dtype=torch.int32),
torch.zeros(N, dtype=torch.int32),
)
for n in range(N):
verts.append(torch.rand((V, 3), dtype=torch.float32, device=device))
this_faces = torch.full((F, 3), -1, dtype=torch.int64, device=device)
if valid[n]:
v = torch.randint(
3, high=V, size=(1,), dtype=torch.int32, device=device
)[0]
f = torch.randint(F, size=(1,), dtype=torch.int32, device=device)[0]
this_faces[:f, :] = torch.randint(
v, size=(f, 3), dtype=torch.int64, device=device
)
num_verts[n] = v
num_faces[n] = f
faces.append(this_faces)
mesh = Meshes(verts=torch.stack(verts), faces=torch.stack(faces))
# Check verts/faces per mesh are set correctly in init.
self.assertListEqual(mesh._num_faces_per_mesh.tolist(), num_faces.tolist())
self.assertListEqual(mesh._num_verts_per_mesh.tolist(), [V] * N)
for n, (vv, ff) in enumerate(zip(mesh.verts_list(), mesh.faces_list())):
self.assertClose(ff, faces[n][: num_faces[n]])
self.assertClose(vv, verts[n])
new_faces = [ff.clone() for ff in faces]
v = torch.randint(3, high=V, size=(1,), dtype=torch.int32, device=device)[0]
f = torch.randint(F - 10, size=(1,), dtype=torch.int32, device=device)[0]
this_faces = torch.full((F, 3), -1, dtype=torch.int64, device=device)
this_faces[10 : f + 10, :] = torch.randint(
v, size=(f, 3), dtype=torch.int64, device=device
)
new_faces[3] = this_faces
with self.assertRaisesRegex(ValueError, "Padding of faces"):
Meshes(verts=torch.stack(verts), faces=torch.stack(new_faces))
def test_clone(self):
N = 5
mesh = TestMeshes.init_mesh(N, 10, 100)
for force in [0, 1]:
if force:
# force mesh to have computed attributes
mesh.verts_packed()
mesh.edges_packed()
mesh.verts_padded()
new_mesh = mesh.clone()
# Modify tensors in both meshes.
new_mesh._verts_list[0] = new_mesh._verts_list[0] * 5
# Check cloned and original Meshes objects do not share tensors.
self.assertFalse(
torch.allclose(new_mesh._verts_list[0], mesh._verts_list[0])
)
self.assertSeparate(new_mesh.verts_packed(), mesh.verts_packed())
self.assertSeparate(new_mesh.verts_padded(), mesh.verts_padded())
self.assertSeparate(new_mesh.faces_packed(), mesh.faces_packed())
self.assertSeparate(new_mesh.faces_padded(), mesh.faces_padded())
self.assertSeparate(new_mesh.edges_packed(), mesh.edges_packed())
def test_detach(self):
N = 5
mesh = TestMeshes.init_mesh(N, 10, 100, requires_grad=True)
for force in [0, 1]:
if force:
# force mesh to have computed attributes
mesh.verts_packed()
mesh.edges_packed()
mesh.verts_padded()
new_mesh = mesh.detach()
self.assertFalse(new_mesh.verts_packed().requires_grad)
self.assertClose(new_mesh.verts_packed(), mesh.verts_packed())
self.assertFalse(new_mesh.verts_padded().requires_grad)
self.assertClose(new_mesh.verts_padded(), mesh.verts_padded())
for v, newv in zip(mesh.verts_list(), new_mesh.verts_list()):
self.assertFalse(newv.requires_grad)
self.assertClose(newv, v)
def test_laplacian_packed(self):
def naive_laplacian_packed(meshes):
verts_packed = meshes.verts_packed()
edges_packed = meshes.edges_packed()
V = verts_packed.shape[0]
L = torch.zeros((V, V), dtype=torch.float32, device=meshes.device)
for e in edges_packed:
L[e[0], e[1]] = 1
                # symmetric
L[e[1], e[0]] = 1
deg = L.sum(1).view(-1, 1)
deg[deg > 0] = 1.0 / deg[deg > 0]
L = L * deg
diag = torch.eye(V, dtype=torch.float32, device=meshes.device)
L.masked_fill_(diag > 0, -1)
return L
        # Note that we don't test with random meshes for this case, as the
        # Laplacian is only defined for simple graphs (i.e. valid meshes).
meshes = TestMeshes.init_simple_mesh("cuda:0")
lapl_naive = naive_laplacian_packed(meshes)
lapl = meshes.laplacian_packed().to_dense()
# check with naive
self.assertClose(lapl, lapl_naive)
def test_offset_verts(self):
def naive_offset_verts(mesh, vert_offsets_packed):
# new Meshes class
new_verts_packed = mesh.verts_packed() + vert_offsets_packed
new_verts_list = list(
new_verts_packed.split(mesh.num_verts_per_mesh().tolist(), 0)
)
new_faces_list = [f.clone() for f in mesh.faces_list()]
return Meshes(verts=new_verts_list, faces=new_faces_list)
N = 5
mesh = TestMeshes.init_mesh(N, 10, 100)
all_v = mesh.verts_packed().size(0)
verts_per_mesh = mesh.num_verts_per_mesh()
for force in [0, 1]:
if force:
# force mesh to have computed attributes
mesh._compute_packed(refresh=True)
mesh._compute_padded()
mesh._compute_edges_packed()
mesh.verts_padded_to_packed_idx()
mesh._compute_face_areas_normals(refresh=True)
mesh._compute_vertex_normals(refresh=True)
deform = torch.rand((all_v, 3), dtype=torch.float32, device=mesh.device)
# new meshes class to hold the deformed mesh
new_mesh_naive = naive_offset_verts(mesh, deform)
new_mesh = mesh.offset_verts(deform)
# check verts_list & faces_list
verts_cumsum = torch.cumsum(verts_per_mesh, 0).tolist()
verts_cumsum.insert(0, 0)
for i in range(N):
self.assertClose(
new_mesh.verts_list()[i],
mesh.verts_list()[i]
+ deform[verts_cumsum[i] : verts_cumsum[i + 1]],
)
self.assertClose(
new_mesh.verts_list()[i], new_mesh_naive.verts_list()[i]
)
self.assertClose(mesh.faces_list()[i], new_mesh_naive.faces_list()[i])
self.assertClose(
new_mesh.faces_list()[i], new_mesh_naive.faces_list()[i]
)
# check faces and vertex normals
self.assertClose(
new_mesh.verts_normals_list()[i],
new_mesh_naive.verts_normals_list()[i],
)
self.assertClose(
new_mesh.faces_normals_list()[i],
new_mesh_naive.faces_normals_list()[i],
)
# check padded & packed
self.assertClose(new_mesh.faces_padded(), new_mesh_naive.faces_padded())
self.assertClose(new_mesh.verts_padded(), new_mesh_naive.verts_padded())
self.assertClose(new_mesh.faces_packed(), new_mesh_naive.faces_packed())
self.assertClose(new_mesh.verts_packed(), new_mesh_naive.verts_packed())
self.assertClose(new_mesh.edges_packed(), new_mesh_naive.edges_packed())
self.assertClose(
new_mesh.verts_packed_to_mesh_idx(),
new_mesh_naive.verts_packed_to_mesh_idx(),
)
self.assertClose(
new_mesh.mesh_to_verts_packed_first_idx(),
new_mesh_naive.mesh_to_verts_packed_first_idx(),
)
self.assertClose(
new_mesh.num_verts_per_mesh(), new_mesh_naive.num_verts_per_mesh()
)
self.assertClose(
new_mesh.faces_packed_to_mesh_idx(),
new_mesh_naive.faces_packed_to_mesh_idx(),
)
self.assertClose(
new_mesh.mesh_to_faces_packed_first_idx(),
new_mesh_naive.mesh_to_faces_packed_first_idx(),
)
self.assertClose(
new_mesh.num_faces_per_mesh(), new_mesh_naive.num_faces_per_mesh()
)
self.assertClose(
new_mesh.edges_packed_to_mesh_idx(),
new_mesh_naive.edges_packed_to_mesh_idx(),
)
self.assertClose(
new_mesh.verts_padded_to_packed_idx(),
new_mesh_naive.verts_padded_to_packed_idx(),
)
self.assertTrue(all(new_mesh.valid == new_mesh_naive.valid))
self.assertTrue(new_mesh.equisized == new_mesh_naive.equisized)
# check face areas, normals and vertex normals
self.assertClose(
new_mesh.verts_normals_packed(), new_mesh_naive.verts_normals_packed()
)
self.assertClose(
new_mesh.verts_normals_padded(), new_mesh_naive.verts_normals_padded()
)
self.assertClose(
new_mesh.faces_normals_packed(), new_mesh_naive.faces_normals_packed()
)
self.assertClose(
new_mesh.faces_normals_padded(), new_mesh_naive.faces_normals_padded()
)
self.assertClose(
new_mesh.faces_areas_packed(), new_mesh_naive.faces_areas_packed()
)
self.assertClose(
new_mesh.mesh_to_edges_packed_first_idx(),
new_mesh_naive.mesh_to_edges_packed_first_idx(),
)
def test_scale_verts(self):
def naive_scale_verts(mesh, scale):
if not torch.is_tensor(scale):
scale = torch.ones(len(mesh)).mul_(scale)
# new Meshes class
new_verts_list = [
scale[i] * v.clone() for (i, v) in enumerate(mesh.verts_list())
]
new_faces_list = [f.clone() for f in mesh.faces_list()]
return Meshes(verts=new_verts_list, faces=new_faces_list)
N = 5
for test in ["tensor", "scalar"]:
for force in (False, True):
mesh = TestMeshes.init_mesh(N, 10, 100)
if force:
# force mesh to have computed attributes
mesh.verts_packed()
mesh.edges_packed()
mesh.verts_padded()
mesh._compute_face_areas_normals(refresh=True)
mesh._compute_vertex_normals(refresh=True)
if test == "tensor":
scales = torch.rand(N)
elif test == "scalar":
scales = torch.rand(1)[0].item()
new_mesh_naive = naive_scale_verts(mesh, scales)
new_mesh = mesh.scale_verts(scales)
for i in range(N):
if test == "tensor":
self.assertClose(
scales[i] * mesh.verts_list()[i], new_mesh.verts_list()[i]
)
else:
self.assertClose(
scales * mesh.verts_list()[i], new_mesh.verts_list()[i]
)
self.assertClose(
new_mesh.verts_list()[i], new_mesh_naive.verts_list()[i]
)
self.assertClose(
mesh.faces_list()[i], new_mesh_naive.faces_list()[i]
)
self.assertClose(
new_mesh.faces_list()[i], new_mesh_naive.faces_list()[i]
)
# check face and vertex normals
self.assertClose(
new_mesh.verts_normals_list()[i],
new_mesh_naive.verts_normals_list()[i],
)
self.assertClose(
new_mesh.faces_normals_list()[i],
new_mesh_naive.faces_normals_list()[i],
)
# check padded & packed
self.assertClose(new_mesh.faces_padded(), new_mesh_naive.faces_padded())
self.assertClose(new_mesh.verts_padded(), new_mesh_naive.verts_padded())
self.assertClose(new_mesh.faces_packed(), new_mesh_naive.faces_packed())
self.assertClose(new_mesh.verts_packed(), new_mesh_naive.verts_packed())
self.assertClose(new_mesh.edges_packed(), new_mesh_naive.edges_packed())
self.assertClose(
new_mesh.verts_packed_to_mesh_idx(),
new_mesh_naive.verts_packed_to_mesh_idx(),
)
self.assertClose(
new_mesh.mesh_to_verts_packed_first_idx(),
new_mesh_naive.mesh_to_verts_packed_first_idx(),
)
self.assertClose(
new_mesh.num_verts_per_mesh(), new_mesh_naive.num_verts_per_mesh()
)
self.assertClose(
new_mesh.faces_packed_to_mesh_idx(),
new_mesh_naive.faces_packed_to_mesh_idx(),
)
self.assertClose(
new_mesh.mesh_to_faces_packed_first_idx(),
new_mesh_naive.mesh_to_faces_packed_first_idx(),
)
self.assertClose(
new_mesh.num_faces_per_mesh(), new_mesh_naive.num_faces_per_mesh()
)
self.assertClose(
new_mesh.edges_packed_to_mesh_idx(),
new_mesh_naive.edges_packed_to_mesh_idx(),
)
self.assertClose(
new_mesh.verts_padded_to_packed_idx(),
new_mesh_naive.verts_padded_to_packed_idx(),
)
self.assertTrue(all(new_mesh.valid == new_mesh_naive.valid))
self.assertTrue(new_mesh.equisized == new_mesh_naive.equisized)
# check face areas, normals and vertex normals
self.assertClose(
new_mesh.verts_normals_packed(),
new_mesh_naive.verts_normals_packed(),
)
self.assertClose(
new_mesh.verts_normals_padded(),
new_mesh_naive.verts_normals_padded(),
)
self.assertClose(
new_mesh.faces_normals_packed(),
new_mesh_naive.faces_normals_packed(),
)
self.assertClose(
new_mesh.faces_normals_padded(),
new_mesh_naive.faces_normals_padded(),
)
self.assertClose(
new_mesh.faces_areas_packed(), new_mesh_naive.faces_areas_packed()
)
self.assertClose(
new_mesh.mesh_to_edges_packed_first_idx(),
new_mesh_naive.mesh_to_edges_packed_first_idx(),
)
def test_extend_list(self):
N = 10
mesh = TestMeshes.init_mesh(5, 10, 100)
for force in [0, 1]:
if force:
# force some computes to happen
mesh._compute_packed(refresh=True)
mesh._compute_padded()
mesh._compute_edges_packed()
mesh.verts_padded_to_packed_idx()
new_mesh = mesh.extend(N)
self.assertEqual(len(mesh) * 10, len(new_mesh))
for i in range(len(mesh)):
for n in range(N):
self.assertClose(
mesh.verts_list()[i], new_mesh.verts_list()[i * N + n]
)
self.assertClose(
mesh.faces_list()[i], new_mesh.faces_list()[i * N + n]
)
self.assertTrue(mesh.valid[i] == new_mesh.valid[i * N + n])
self.assertAllSeparate(
mesh.verts_list()
+ new_mesh.verts_list()
+ mesh.faces_list()
+ new_mesh.faces_list()
)
self.assertTrue(new_mesh._verts_packed is None)
self.assertTrue(new_mesh._faces_packed is None)
self.assertTrue(new_mesh._verts_padded is None)
self.assertTrue(new_mesh._faces_padded is None)
self.assertTrue(new_mesh._edges_packed is None)
with self.assertRaises(ValueError):
mesh.extend(N=-1)
def test_to(self):
mesh = TestMeshes.init_mesh(5, 10, 100, device=torch.device("cuda:0"))
device = torch.device("cuda:1")
new_mesh = mesh.to(device)
self.assertTrue(new_mesh.device == device)
self.assertTrue(mesh.device == torch.device("cuda:0"))
def test_split_mesh(self):
mesh = TestMeshes.init_mesh(5, 10, 100)
split_sizes = [2, 3]
split_meshes = mesh.split(split_sizes)
self.assertTrue(len(split_meshes[0]) == 2)
self.assertTrue(
split_meshes[0].verts_list()
== [mesh.get_mesh_verts_faces(0)[0], mesh.get_mesh_verts_faces(1)[0]]
)
self.assertTrue(len(split_meshes[1]) == 3)
self.assertTrue(
split_meshes[1].verts_list()
== [
mesh.get_mesh_verts_faces(2)[0],
mesh.get_mesh_verts_faces(3)[0],
mesh.get_mesh_verts_faces(4)[0],
]
)
split_sizes = [2, 0.3]
with self.assertRaises(ValueError):
mesh.split(split_sizes)
def test_update_padded(self):
# Define the test mesh object either as a list or tensor of faces/verts.
N = 10
for lists_to_tensors in (False, True):
for force in (True, False):
mesh = TestMeshes.init_mesh(
N, 100, 300, lists_to_tensors=lists_to_tensors
)
num_verts_per_mesh = mesh.num_verts_per_mesh()
if force:
# force mesh to have computed attributes
mesh.verts_packed()
mesh.edges_packed()
mesh.laplacian_packed()
mesh.faces_areas_packed()
new_verts = torch.rand((mesh._N, mesh._V, 3), device=mesh.device)
new_verts_list = [
new_verts[i, : num_verts_per_mesh[i]] for i in range(N)
]
new_mesh = mesh.update_padded(new_verts)
# check the attributes assigned at construction time
self.assertEqual(new_mesh._N, mesh._N)
self.assertEqual(new_mesh._F, mesh._F)
self.assertEqual(new_mesh._V, mesh._V)
self.assertEqual(new_mesh.equisized, mesh.equisized)
self.assertTrue(all(new_mesh.valid == mesh.valid))
self.assertNotSeparate(
new_mesh.num_verts_per_mesh(), mesh.num_verts_per_mesh()
)
self.assertClose(
new_mesh.num_verts_per_mesh(), mesh.num_verts_per_mesh()
)
self.assertNotSeparate(
new_mesh.num_faces_per_mesh(), mesh.num_faces_per_mesh()
)
self.assertClose(
new_mesh.num_faces_per_mesh(), mesh.num_faces_per_mesh()
)
# check that the following attributes are not assigned
self.assertIsNone(new_mesh._verts_list)
self.assertIsNone(new_mesh._faces_areas_packed)
self.assertIsNone(new_mesh._faces_normals_packed)
self.assertIsNone(new_mesh._verts_normals_packed)
check_tensors = [
"_faces_packed",
"_verts_packed_to_mesh_idx",
"_faces_packed_to_mesh_idx",
"_mesh_to_verts_packed_first_idx",
"_mesh_to_faces_packed_first_idx",
"_edges_packed",
"_edges_packed_to_mesh_idx",
"_mesh_to_edges_packed_first_idx",
"_faces_packed_to_edges_packed",
"_num_edges_per_mesh",
]
for k in check_tensors:
v = getattr(new_mesh, k)
if not force:
self.assertIsNone(v)
else:
v_old = getattr(mesh, k)
self.assertNotSeparate(v, v_old)
self.assertClose(v, v_old)
# check verts/faces padded
self.assertClose(new_mesh.verts_padded(), new_verts)
self.assertNotSeparate(new_mesh.verts_padded(), new_verts)
self.assertClose(new_mesh.faces_padded(), mesh.faces_padded())
self.assertNotSeparate(new_mesh.faces_padded(), mesh.faces_padded())
# check verts/faces list
for i in range(N):
self.assertNotSeparate(
new_mesh.faces_list()[i], mesh.faces_list()[i]
)
self.assertClose(new_mesh.faces_list()[i], mesh.faces_list()[i])
self.assertSeparate(new_mesh.verts_list()[i], mesh.verts_list()[i])
self.assertClose(new_mesh.verts_list()[i], new_verts_list[i])
# check verts/faces packed
self.assertClose(new_mesh.verts_packed(), torch.cat(new_verts_list))
self.assertSeparate(new_mesh.verts_packed(), mesh.verts_packed())
self.assertClose(new_mesh.faces_packed(), mesh.faces_packed())
# check pad_to_packed
self.assertClose(
new_mesh.verts_padded_to_packed_idx(),
mesh.verts_padded_to_packed_idx(),
)
# check edges
self.assertClose(new_mesh.edges_packed(), mesh.edges_packed())
def test_get_mesh_verts_faces(self):
device = torch.device("cuda:0")
verts_list = []
faces_list = []
verts_faces = [(10, 100), (20, 200)]
for (V, F) in verts_faces:
verts = torch.rand((V, 3), dtype=torch.float32, device=device)
faces = torch.randint(V, size=(F, 3), dtype=torch.int64, device=device)
verts_list.append(verts)
faces_list.append(faces)
mesh = Meshes(verts=verts_list, faces=faces_list)
for i, (V, F) in enumerate(verts_faces):
verts, faces = mesh.get_mesh_verts_faces(i)
self.assertTrue(len(verts) == V)
self.assertClose(verts, verts_list[i])
self.assertTrue(len(faces) == F)
self.assertClose(faces, faces_list[i])
with self.assertRaises(ValueError):
mesh.get_mesh_verts_faces(5)
with self.assertRaises(ValueError):
mesh.get_mesh_verts_faces(0.2)
def test_get_bounding_boxes(self):
device = torch.device("cuda:0")
verts_list = []
faces_list = []
for (V, F) in [(10, 100)]:
verts = torch.rand((V, 3), dtype=torch.float32, device=device)
faces = torch.randint(V, size=(F, 3), dtype=torch.int64, device=device)
verts_list.append(verts)
faces_list.append(faces)
mins = torch.min(verts, dim=0)[0]
maxs = torch.max(verts, dim=0)[0]
bboxes_gt = torch.stack([mins, maxs], dim=1).unsqueeze(0)
mesh = Meshes(verts=verts_list, faces=faces_list)
bboxes = mesh.get_bounding_boxes()
self.assertClose(bboxes_gt, bboxes)
def test_padded_to_packed_idx(self):
device = torch.device("cuda:0")
verts_list = []
faces_list = []
verts_faces = [(10, 100), (20, 200), (30, 300)]
for (V, F) in verts_faces:
verts = torch.rand((V, 3), dtype=torch.float32, device=device)
faces = torch.randint(V, size=(F, 3), dtype=torch.int64, device=device)
verts_list.append(verts)
faces_list.append(faces)
mesh = Meshes(verts=verts_list, faces=faces_list)
verts_padded_to_packed_idx = mesh.verts_padded_to_packed_idx()
verts_packed = mesh.verts_packed()
verts_padded = mesh.verts_padded()
verts_padded_flat = verts_padded.view(-1, 3)
self.assertClose(verts_padded_flat[verts_padded_to_packed_idx], verts_packed)
idx = verts_padded_to_packed_idx.view(-1, 1).expand(-1, 3)
self.assertClose(verts_padded_flat.gather(0, idx), verts_packed)
def test_getitem(self):
device = torch.device("cuda:0")
verts_list = []
faces_list = []
verts_faces = [(10, 100), (20, 200), (30, 300)]
for (V, F) in verts_faces:
verts = torch.rand((V, 3), dtype=torch.float32, device=device)
faces = torch.randint(V, size=(F, 3), dtype=torch.int64, device=device)
verts_list.append(verts)
faces_list.append(faces)
mesh = Meshes(verts=verts_list, faces=faces_list)
def check_equal(selected, indices):
for selectedIdx, index in enumerate(indices):
self.assertClose(
selected.verts_list()[selectedIdx], mesh.verts_list()[index]
)
self.assertClose(
selected.faces_list()[selectedIdx], mesh.faces_list()[index]
)
# int index
index = 1
mesh_selected = mesh[index]
self.assertTrue(len(mesh_selected) == 1)
check_equal(mesh_selected, [index])
# list index
index = [1, 2]
mesh_selected = mesh[index]
self.assertTrue(len(mesh_selected) == len(index))
check_equal(mesh_selected, index)
# slice index
index = slice(0, 2, 1)
mesh_selected = mesh[index]
check_equal(mesh_selected, [0, 1])
# bool tensor
index = torch.tensor([1, 0, 1], dtype=torch.bool, device=device)
mesh_selected = mesh[index]
self.assertTrue(len(mesh_selected) == index.sum())
check_equal(mesh_selected, [0, 2])
# int tensor
index = torch.tensor([1, 2], dtype=torch.int64, device=device)
mesh_selected = mesh[index]
self.assertTrue(len(mesh_selected) == index.numel())
check_equal(mesh_selected, index.tolist())
# invalid index
index = torch.tensor([1, 0, 1], dtype=torch.float32, device=device)
with self.assertRaises(IndexError):
mesh_selected = mesh[index]
index = 1.2
with self.assertRaises(IndexError):
mesh_selected = mesh[index]
def test_compute_faces_areas(self):
verts = torch.tensor(
[
[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.25, 0.8, 0.0],
],
dtype=torch.float32,
)
faces = torch.tensor([[0, 1, 2], [0, 3, 4]], dtype=torch.int64)
mesh = Meshes(verts=[verts], faces=[faces])
face_areas = mesh.faces_areas_packed()
expected_areas = torch.tensor([0.125, 0.2])
self.assertClose(face_areas, expected_areas)
def test_compute_normals(self):
# Simple case with one mesh where normals point in either +/- ijk
verts = torch.tensor(
[
[0.1, 0.3, 0.0],
[0.5, 0.2, 0.0],
[0.6, 0.8, 0.0],
[0.0, 0.3, 0.2],
[0.0, 0.2, 0.5],
[0.0, 0.8, 0.7],
[0.5, 0.0, 0.2],
[0.6, 0.0, 0.5],
[0.8, 0.0, 0.7],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
],
dtype=torch.float32,
)
faces = torch.tensor(
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], dtype=torch.int64
)
mesh = Meshes(verts=[verts], faces=[faces])
verts_normals_expected = torch.tensor(
[
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[-1.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
]
)
faces_normals_expected = verts_normals_expected[[0, 3, 6, 9], :]
self.assertTrue(
torch.allclose(mesh.verts_normals_list()[0], verts_normals_expected)
)
self.assertTrue(
torch.allclose(mesh.faces_normals_list()[0], faces_normals_expected)
)
self.assertTrue(
torch.allclose(mesh.verts_normals_packed(), verts_normals_expected)
)
self.assertTrue(
torch.allclose(mesh.faces_normals_packed(), faces_normals_expected)
)
# Multiple meshes in the batch with equal sized meshes
meshes_extended = mesh.extend(3)
for m in meshes_extended.verts_normals_list():
self.assertClose(m, verts_normals_expected)
for f in meshes_extended.faces_normals_list():
self.assertClose(f, faces_normals_expected)
# Multiple meshes in the batch with different sized meshes
# Check padded and packed normals are the correct sizes.
verts2 = torch.tensor(
[
[0.1, 0.3, 0.0],
[0.5, 0.2, 0.0],
[0.6, 0.8, 0.0],
[0.0, 0.3, 0.2],
[0.0, 0.2, 0.5],
[0.0, 0.8, 0.7],
],
dtype=torch.float32,
)
faces2 = torch.tensor([[0, 1, 2], [3, 4, 5]], dtype=torch.int64)
verts_list = [verts, verts2]
faces_list = [faces, faces2]
meshes = Meshes(verts=verts_list, faces=faces_list)
verts_normals_padded = meshes.verts_normals_padded()
faces_normals_padded = meshes.faces_normals_padded()
for n in range(len(meshes)):
v = verts_list[n].shape[0]
f = faces_list[n].shape[0]
if verts_normals_padded.shape[1] > v:
self.assertTrue(verts_normals_padded[n, v:, :].eq(0).all())
self.assertTrue(
torch.allclose(
verts_normals_padded[n, :v, :].view(-1, 3),
verts_normals_expected[:v, :],
)
)
if faces_normals_padded.shape[1] > f:
self.assertTrue(faces_normals_padded[n, f:, :].eq(0).all())
self.assertTrue(
torch.allclose(
faces_normals_padded[n, :f, :].view(-1, 3),
faces_normals_expected[:f, :],
)
)
verts_normals_packed = meshes.verts_normals_packed()
faces_normals_packed = meshes.faces_normals_packed()
self.assertTrue(
list(verts_normals_packed.shape) == [verts.shape[0] + verts2.shape[0], 3]
)
self.assertTrue(
list(faces_normals_packed.shape) == [faces.shape[0] + faces2.shape[0], 3]
)
# Single mesh where two faces share one vertex so the normal is
# the weighted sum of the two face normals.
verts = torch.tensor(
[
[0.1, 0.3, 0.0],
[0.5, 0.2, 0.0],
[0.0, 0.3, 0.2], # vertex is shared between two faces
[0.0, 0.2, 0.5],
[0.0, 0.8, 0.7],
],
dtype=torch.float32,
)
faces = torch.tensor([[0, 1, 2], [2, 3, 4]], dtype=torch.int64)
mesh = Meshes(verts=[verts], faces=[faces])
verts_normals_expected = torch.tensor(
[
[-0.2408, -0.9631, -0.1204],
[-0.2408, -0.9631, -0.1204],
[-0.9389, -0.3414, -0.0427],
[-1.0000, 0.0000, 0.0000],
[-1.0000, 0.0000, 0.0000],
]
)
faces_normals_expected = torch.tensor(
[[-0.2408, -0.9631, -0.1204], [-1.0000, 0.0000, 0.0000]]
)
self.assertTrue(
torch.allclose(
mesh.verts_normals_list()[0], verts_normals_expected, atol=4e-5
)
)
self.assertTrue(
torch.allclose(
mesh.faces_normals_list()[0], faces_normals_expected, atol=4e-5
)
)
# Check empty mesh has empty normals
meshes = Meshes(verts=[], faces=[])
self.assertEqual(meshes.verts_normals_packed().shape[0], 0)
self.assertEqual(meshes.verts_normals_padded().shape[0], 0)
self.assertEqual(meshes.verts_normals_list(), [])
self.assertEqual(meshes.faces_normals_packed().shape[0], 0)
self.assertEqual(meshes.faces_normals_padded().shape[0], 0)
self.assertEqual(meshes.faces_normals_list(), [])
def test_compute_faces_areas_cpu_cuda(self):
num_meshes = 10
max_v = 100
max_f = 300
mesh_cpu = TestMeshes.init_mesh(num_meshes, max_v, max_f, device="cpu")
device = torch.device("cuda:0")
mesh_cuda = mesh_cpu.to(device)
face_areas_cpu = mesh_cpu.faces_areas_packed()
face_normals_cpu = mesh_cpu.faces_normals_packed()
face_areas_cuda = mesh_cuda.faces_areas_packed()
face_normals_cuda = mesh_cuda.faces_normals_packed()
self.assertClose(face_areas_cpu, face_areas_cuda.cpu(), atol=1e-6)
        # Because normals are normalized by arbitrarily small face areas, they
        # can become numerically unstable. Thus only compare normals for faces
        # with areas > eps=1e-6.
nonzero = face_areas_cpu > 1e-6
self.assertClose(
face_normals_cpu[nonzero], face_normals_cuda.cpu()[nonzero], atol=1e-6
)
@staticmethod
def compute_packed_with_init(
num_meshes: int = 10, max_v: int = 100, max_f: int = 300, device: str = "cpu"
):
mesh = TestMeshes.init_mesh(num_meshes, max_v, max_f, device=device)
torch.cuda.synchronize()
def compute_packed():
mesh._compute_packed(refresh=True)
torch.cuda.synchronize()
return compute_packed
@staticmethod
def compute_padded_with_init(
num_meshes: int = 10, max_v: int = 100, max_f: int = 300, device: str = "cpu"
):
mesh = TestMeshes.init_mesh(num_meshes, max_v, max_f, device=device)
torch.cuda.synchronize()
def compute_padded():
mesh._compute_padded(refresh=True)
torch.cuda.synchronize()
return compute_padded
| [
"torch.cat",
"torch.stack",
"torch.eye",
"torch.allclose",
"torch.is_tensor",
"torch.manual_seed",
"torch.randint",
"torch.tensor",
"torch.zeros",
"torch.device",
"torch.min",
"torch.max",
"torch.full",
"torch.cumsum",
"torch.rand",
"torch.cuda.synchronize"
] | 3 | rahulvenkk/pytorch3d | 68bfac3394f9a87fb268165d1c9dd264e1d9316b |
3 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import math
import warnings
from typing import List, Optional, Union
import torch
from .rotation_conversions import _axis_angle_rotation
class Transform3d:
"""
A Transform3d object encapsulates a batch of N 3D transformations, and knows
how to transform points and normal vectors. Suppose that t is a Transform3d;
then we can do the following:
.. code-block:: python
N = len(t)
points = torch.randn(N, P, 3)
normals = torch.randn(N, P, 3)
points_transformed = t.transform_points(points) # => (N, P, 3)
normals_transformed = t.transform_normals(normals) # => (N, P, 3)
BROADCASTING
    Transform3d objects support broadcasting. Suppose that t1 and tN are
    Transform3d objects with len(t1) == 1 and len(tN) == N respectively. Then we
can broadcast transforms like this:
.. code-block:: python
t1.transform_points(torch.randn(P, 3)) # => (P, 3)
t1.transform_points(torch.randn(1, P, 3)) # => (1, P, 3)
t1.transform_points(torch.randn(M, P, 3)) # => (M, P, 3)
tN.transform_points(torch.randn(P, 3)) # => (N, P, 3)
tN.transform_points(torch.randn(1, P, 3)) # => (N, P, 3)
COMBINING TRANSFORMS
Transform3d objects can be combined in two ways: composing and stacking.
Composing is function composition. Given Transform3d objects t1, t2, t3,
the following all compute the same thing:
.. code-block:: python
y1 = t3.transform_points(t2.transform_points(t1.transform_points(x)))
y2 = t1.compose(t2).compose(t3).transform_points(x)
y3 = t1.compose(t2, t3).transform_points(x)
Composing transforms should broadcast.
.. code-block:: python
if len(t1) == 1 and len(t2) == N, then len(t1.compose(t2)) == N.
    We can also stack a sequence of Transform3d objects, which concatenates them
    along the batch dimension; then the following should compute the
    same thing.
.. code-block:: python
N, M = len(tN), len(tM)
xN = torch.randn(N, P, 3)
xM = torch.randn(M, P, 3)
y1 = torch.cat([tN.transform_points(xN), tM.transform_points(xM)], dim=0)
y2 = tN.stack(tM).transform_points(torch.cat([xN, xM], dim=0))
BUILDING TRANSFORMS
We provide convenience methods for easily building Transform3d objects
as compositions of basic transforms.
.. code-block:: python
# Scale by 0.5, then translate by (1, 2, 3)
t1 = Transform3d().scale(0.5).translate(1, 2, 3)
# Scale each axis by a different amount, then translate, then scale
t2 = Transform3d().scale(1, 3, 3).translate(2, 3, 1).scale(2.0)
t3 = t1.compose(t2)
tN = t1.stack(t3, t3)
BACKPROP THROUGH TRANSFORMS
When building transforms, we can also parameterize them by Torch tensors;
in this case we can backprop through the construction and application of
Transform objects, so they could be learned via gradient descent or
predicted by a neural network.
.. code-block:: python
s1_params = torch.randn(N, requires_grad=True)
t_params = torch.randn(N, 3, requires_grad=True)
s2_params = torch.randn(N, 3, requires_grad=True)
t = Transform3d().scale(s1_params).translate(t_params).scale(s2_params)
x = torch.randn(N, 3)
y = t.transform_points(x)
loss = compute_loss(y)
loss.backward()
with torch.no_grad():
s1_params -= lr * s1_params.grad
t_params -= lr * t_params.grad
s2_params -= lr * s2_params.grad
CONVENTIONS
We adopt a right-hand coordinate system, meaning that rotation about an axis
with a positive angle results in a counter clockwise rotation.
This class assumes that transformations are applied on inputs which
are row vectors. The internal representation of the Nx4x4 transformation
matrix is of the form:
.. code-block:: python
M = [
[Rxx, Ryx, Rzx, 0],
[Rxy, Ryy, Rzy, 0],
[Rxz, Ryz, Rzz, 0],
[Tx, Ty, Tz, 1],
]
To apply the transformation to points which are row vectors, the M matrix
can be pre multiplied by the points:
.. code-block:: python
points = [[0, 1, 2]] # (1 x 3) xyz coordinates of a point
transformed_points = points * M
"""
def __init__(
self,
dtype: torch.dtype = torch.float32,
device="cpu",
matrix: Optional[torch.Tensor] = None,
):
"""
Args:
dtype: The data type of the transformation matrix.
to be used if `matrix = None`.
device: The device for storing the implemented transformation.
If `matrix != None`, uses the device of input `matrix`.
matrix: A tensor of shape (4, 4) or of shape (minibatch, 4, 4)
representing the 4x4 3D transformation matrix.
If `None`, initializes with identity using
the specified `device` and `dtype`.
"""
if matrix is None:
self._matrix = torch.eye(4, dtype=dtype, device=device).view(1, 4, 4)
else:
if matrix.ndim not in (2, 3):
raise ValueError('"matrix" has to be a 2- or a 3-dimensional tensor.')
if matrix.shape[-2] != 4 or matrix.shape[-1] != 4:
raise ValueError(
'"matrix" has to be a tensor of shape (minibatch, 4, 4)'
)
# set the device from matrix
device = matrix.device
self._matrix = matrix.view(-1, 4, 4)
self._transforms = [] # store transforms to compose
self._lu = None
self.device = device
def __len__(self):
return self.get_matrix().shape[0]
def __getitem__(
self, index: Union[int, List[int], slice, torch.Tensor]
) -> "Transform3d":
"""
Args:
index: Specifying the index of the transform to retrieve.
Can be an int, slice, list of ints, boolean, long tensor.
Supports negative indices.
Returns:
Transform3d object with selected transforms. The tensors are not cloned.
"""
if isinstance(index, int):
index = [index]
return self.__class__(matrix=self.get_matrix()[index])
def compose(self, *others):
"""
        Return a new Transform3d with the transforms to compose stored as
an internal list.
Args:
*others: Any number of Transform3d objects
Returns:
A new Transform3d with the stored transforms
"""
out = Transform3d(device=self.device)
out._matrix = self._matrix.clone()
for other in others:
if not isinstance(other, Transform3d):
msg = "Only possible to compose Transform3d objects; got %s"
raise ValueError(msg % type(other))
out._transforms = self._transforms + list(others)
return out
def get_matrix(self):
"""
        Return a matrix which is the result of composing this transform
        with others stored in self._transforms. Where necessary transforms
        are broadcast against each other.
        For example, if self._transforms contains transforms t1, t2, and t3, and
        given a set of points x, the following should be true:
        .. code-block:: python
            y1 = t1.compose(t2, t3).transform_points(x)
            y2 = t3.transform_points(t2.transform_points(t1.transform_points(x)))
            # y1 and y2 are the same
Returns:
A transformation matrix representing the composed inputs.
"""
composed_matrix = self._matrix.clone()
if len(self._transforms) > 0:
for other in self._transforms:
other_matrix = other.get_matrix()
composed_matrix = _broadcast_bmm(composed_matrix, other_matrix)
return composed_matrix
def _get_matrix_inverse(self):
"""
Return the inverse of self._matrix.
"""
return torch.inverse(self._matrix)
def inverse(self, invert_composed: bool = False):
"""
Returns a new Transform3D object that represents an inverse of the
current transformation.
Args:
invert_composed:
- True: First compose the list of stored transformations
and then apply inverse to the result. This is
potentially slower for classes of transformations
with inverses that can be computed efficiently
(e.g. rotations and translations).
- False: Invert the individual stored transformations
independently without composing them.
Returns:
            A new Transform3d object containing the inverse of the original
transformation.
"""
tinv = Transform3d(device=self.device)
if invert_composed:
# first compose then invert
tinv._matrix = torch.inverse(self.get_matrix())
else:
# self._get_matrix_inverse() implements efficient inverse
# of self._matrix
i_matrix = self._get_matrix_inverse()
# 2 cases:
if len(self._transforms) > 0:
# a) Either we have a non-empty list of transforms:
# Here we take self._matrix and append its inverse at the
# end of the reverted _transforms list. After composing
# the transformations with get_matrix(), this correctly
# right-multiplies by the inverse of self._matrix
# at the end of the composition.
tinv._transforms = [t.inverse() for t in reversed(self._transforms)]
last = Transform3d(device=self.device)
last._matrix = i_matrix
tinv._transforms.append(last)
else:
# b) Or there are no stored transformations
# we just set inverted matrix
tinv._matrix = i_matrix
return tinv
def stack(self, *others):
transforms = [self] + list(others)
matrix = torch.cat([t._matrix for t in transforms], dim=0)
out = Transform3d()
out._matrix = matrix
return out
def transform_points(self, points, eps: Optional[float] = None):
"""
Use this transform to transform a set of 3D points. Assumes row major
ordering of the input points.
Args:
points: Tensor of shape (P, 3) or (N, P, 3)
eps: If eps!=None, the argument is used to clamp the
                last coordinate before performing the final division.
The clamping corresponds to:
last_coord := (last_coord.sign() + (last_coord==0)) *
torch.clamp(last_coord.abs(), eps),
i.e. the last coordinates that are exactly 0 will
be clamped to +eps.
Returns:
points_out: points of shape (N, P, 3) or (P, 3) depending
on the dimensions of the transform
"""
points_batch = points.clone()
if points_batch.dim() == 2:
points_batch = points_batch[None] # (P, 3) -> (1, P, 3)
if points_batch.dim() != 3:
msg = "Expected points to have dim = 2 or dim = 3: got shape %r"
raise ValueError(msg % repr(points.shape))
N, P, _3 = points_batch.shape
ones = torch.ones(N, P, 1, dtype=points.dtype, device=points.device)
points_batch = torch.cat([points_batch, ones], dim=2)
composed_matrix = self.get_matrix()
points_out = _broadcast_bmm(points_batch, composed_matrix)
denom = points_out[..., 3:] # denominator
if eps is not None:
denom_sign = denom.sign() + (denom == 0.0).type_as(denom)
denom = denom_sign * torch.clamp(denom.abs(), eps)
points_out = points_out[..., :3] / denom
# When transform is (1, 4, 4) and points is (P, 3) return
# points_out of shape (P, 3)
if points_out.shape[0] == 1 and points.dim() == 2:
points_out = points_out.reshape(points.shape)
return points_out
def transform_normals(self, normals):
"""
Use this transform to transform a set of normal vectors.
Args:
normals: Tensor of shape (P, 3) or (N, P, 3)
Returns:
normals_out: Tensor of shape (P, 3) or (N, P, 3) depending
on the dimensions of the transform
"""
if normals.dim() not in [2, 3]:
msg = "Expected normals to have dim = 2 or dim = 3: got shape %r"
raise ValueError(msg % (normals.shape,))
composed_matrix = self.get_matrix()
# TODO: inverse is bad! Solve a linear system instead
mat = composed_matrix[:, :3, :3]
normals_out = _broadcast_bmm(normals, mat.transpose(1, 2).inverse())
# This doesn't pass unit tests. TODO investigate further
# if self._lu is None:
# self._lu = self._matrix[:, :3, :3].transpose(1, 2).lu()
# normals_out = normals.lu_solve(*self._lu)
# When transform is (1, 4, 4) and normals is (P, 3) return
# normals_out of shape (P, 3)
if normals_out.shape[0] == 1 and normals.dim() == 2:
normals_out = normals_out.reshape(normals.shape)
return normals_out
def translate(self, *args, **kwargs):
return self.compose(Translate(device=self.device, *args, **kwargs))
def scale(self, *args, **kwargs):
return self.compose(Scale(device=self.device, *args, **kwargs))
def rotate(self, *args, **kwargs):
return self.compose(Rotate(device=self.device, *args, **kwargs))
def rotate_axis_angle(self, *args, **kwargs):
return self.compose(RotateAxisAngle(device=self.device, *args, **kwargs))
def clone(self):
"""
Deep copy of Transforms object. All internal tensors are cloned
individually.
Returns:
new Transforms object.
"""
other = Transform3d(device=self.device)
if self._lu is not None:
other._lu = [elem.clone() for elem in self._lu]
other._matrix = self._matrix.clone()
other._transforms = [t.clone() for t in self._transforms]
return other
def to(self, device, copy: bool = False, dtype=None):
"""
Match functionality of torch.Tensor.to()
If copy = True or the self Tensor is on a different device, the
returned tensor is a copy of self with the desired torch.device.
If copy = False and the self Tensor already has the correct torch.device,
then self is returned.
Args:
device: Device id for the new tensor.
copy: Boolean indicator whether or not to clone self. Default False.
dtype: If not None, casts the internal tensor variables
to a given torch.dtype.
Returns:
Transform3d object.
"""
if not copy and self.device == device:
return self
other = self.clone()
if self.device != device:
other.device = device
other._matrix = self._matrix.to(device=device, dtype=dtype)
for t in other._transforms:
t.to(device, copy=copy, dtype=dtype)
return other
def cpu(self):
return self.to(torch.device("cpu"))
def cuda(self):
return self.to(torch.device("cuda"))
class Translate(Transform3d):
def __init__(self, x, y=None, z=None, dtype=torch.float32, device="cpu"):
"""
Create a new Transform3d representing 3D translations.
Option I: Translate(xyz, dtype=torch.float32, device='cpu')
xyz should be a tensor of shape (N, 3)
Option II: Translate(x, y, z, dtype=torch.float32, device='cpu')
Here x, y, and z will be broadcast against each other and
concatenated to form the translation. Each can be:
- A python scalar
- A torch scalar
- A 1D torch tensor
"""
super().__init__(device=device)
xyz = _handle_input(x, y, z, dtype, device, "Translate")
N = xyz.shape[0]
mat = torch.eye(4, dtype=dtype, device=device)
mat = mat.view(1, 4, 4).repeat(N, 1, 1)
mat[:, 3, :3] = xyz
self._matrix = mat
def _get_matrix_inverse(self):
"""
Return the inverse of self._matrix.
"""
inv_mask = self._matrix.new_ones([1, 4, 4])
inv_mask[0, 3, :3] = -1.0
i_matrix = self._matrix * inv_mask
return i_matrix
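# A minimal sketch: a batch of two translations built from an (N, 3) tensor;
# the inverse simply negates the offsets.
def _translate_usage_sketch() -> None:
    offsets = torch.tensor([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0]])
    t = Translate(offsets)                              # len(t) == 2
    moved = t.transform_points(torch.zeros(2, 1, 3))    # [[[1, 0, 0]], [[0, 2, 0]]]
    restored = t.inverse().transform_points(moved)      # back to zeros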
class Scale(Transform3d):
def __init__(self, x, y=None, z=None, dtype=torch.float32, device="cpu"):
"""
A Transform3d representing a scaling operation, with different scale
factors along each coordinate axis.
Option I: Scale(s, dtype=torch.float32, device='cpu')
s can be one of
- Python scalar or torch scalar: Single uniform scale
- 1D torch tensor of shape (N,): A batch of uniform scale
- 2D torch tensor of shape (N, 3): Scale differently along each axis
Option II: Scale(x, y, z, dtype=torch.float32, device='cpu')
Each of x, y, and z can be one of
- python scalar
- torch scalar
- 1D torch tensor
"""
super().__init__(device=device)
xyz = _handle_input(x, y, z, dtype, device, "scale", allow_singleton=True)
N = xyz.shape[0]
# TODO: Can we do this all in one go somehow?
mat = torch.eye(4, dtype=dtype, device=device)
mat = mat.view(1, 4, 4).repeat(N, 1, 1)
mat[:, 0, 0] = xyz[:, 0]
mat[:, 1, 1] = xyz[:, 1]
mat[:, 2, 2] = xyz[:, 2]
self._matrix = mat
def _get_matrix_inverse(self):
"""
Return the inverse of self._matrix.
"""
xyz = torch.stack([self._matrix[:, i, i] for i in range(4)], dim=1)
ixyz = 1.0 / xyz
imat = torch.diag_embed(ixyz, dim1=1, dim2=2)
return imat
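# A minimal sketch: uniform scaling from a scalar and per-axis scaling from
# three values; both produce (N, 4, 4) matrices with the factors on the
# diagonal.
def _scale_usage_sketch() -> None:
    s_uniform = Scale(2.0)             # scales x, y and z by 2
    s_per_axis = Scale(1.0, 2.0, 3.0)  # different factor per axis
    p = torch.tensor([[[1.0, 1.0, 1.0]]])
    out = s_per_axis.transform_points(p)  # [[[1, 2, 3]]]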
class Rotate(Transform3d):
def __init__(
self, R, dtype=torch.float32, device="cpu", orthogonal_tol: float = 1e-5
):
"""
Create a new Transform3d representing 3D rotation using a rotation
matrix as the input.
Args:
R: a tensor of shape (3, 3) or (N, 3, 3)
orthogonal_tol: tolerance for the test of the orthogonality of R
"""
super().__init__(device=device)
if R.dim() == 2:
R = R[None]
if R.shape[-2:] != (3, 3):
msg = "R must have shape (3, 3) or (N, 3, 3); got %s"
raise ValueError(msg % repr(R.shape))
R = R.to(dtype=dtype).to(device=device)
_check_valid_rotation_matrix(R, tol=orthogonal_tol)
N = R.shape[0]
mat = torch.eye(4, dtype=dtype, device=device)
mat = mat.view(1, 4, 4).repeat(N, 1, 1)
mat[:, :3, :3] = R
self._matrix = mat
def _get_matrix_inverse(self):
"""
Return the inverse of self._matrix.
"""
return self._matrix.permute(0, 2, 1).contiguous()
class RotateAxisAngle(Rotate):
def __init__(
self,
angle,
axis: str = "X",
degrees: bool = True,
dtype=torch.float64,
device="cpu",
):
"""
Create a new Transform3d representing 3D rotation about an axis
by an angle.
Assuming a right-hand coordinate system, positive rotation angles result
in a counter clockwise rotation.
Args:
angle:
- A torch tensor of shape (N,)
- A python scalar
- A torch scalar
axis:
string: one of ["X", "Y", "Z"] indicating the axis about which
to rotate.
NOTE: All batch elements are rotated about the same axis.
"""
axis = axis.upper()
if axis not in ["X", "Y", "Z"]:
msg = "Expected axis to be one of ['X', 'Y', 'Z']; got %s"
raise ValueError(msg % axis)
angle = _handle_angle_input(angle, dtype, device, "RotateAxisAngle")
angle = (angle / 180.0 * math.pi) if degrees else angle
# We assume the points on which this transformation will be applied
# are row vectors. The rotation matrix returned from _axis_angle_rotation
# is for transforming column vectors. Therefore we transpose this matrix.
# R will always be of shape (N, 3, 3)
R = _axis_angle_rotation(axis, angle).transpose(1, 2)
super().__init__(device=device, R=R)
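# Usage sketch (comments only; the 90-degree value is an illustrative choice,
# and `math` is assumed to be imported near the top of this file, as it already
# is for math.pi in the constructor above):
#   r_deg = RotateAxisAngle(angle=90.0, axis="Z")                        # degrees by default
#   r_rad = RotateAxisAngle(angle=math.pi / 2, axis="Z", degrees=False)  # same rotation in radians
#   # Both produce the same (1, 4, 4) matrix for row-vector points.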
def _handle_coord(c, dtype, device):
"""
Helper function for _handle_input.
Args:
c: Python scalar, torch scalar, or 1D torch tensor
Returns:
c_vec: 1D torch tensor
"""
if not torch.is_tensor(c):
c = torch.tensor(c, dtype=dtype, device=device)
if c.dim() == 0:
c = c.view(1)
return c
def _handle_input(x, y, z, dtype, device, name: str, allow_singleton: bool = False):
"""
Helper function to handle parsing logic for building transforms. The output
is always a tensor of shape (N, 3), but there are several types of allowed
input.
Case I: Single Matrix
In this case x is a tensor of shape (N, 3), and y and z are None. Here just
return x.
Case II: Vectors and Scalars
In this case each of x, y, and z can be one of the following
- Python scalar
- Torch scalar
- Torch tensor of shape (N, 1) or (1, 1)
In this case x, y and z are broadcast to tensors of shape (N, 1)
and concatenated to a tensor of shape (N, 3)
Case III: Singleton (only if allow_singleton=True)
In this case y and z are None, and x can be one of the following:
- Python scalar
- Torch scalar
- Torch tensor of shape (N, 1) or (1, 1)
Here x will be duplicated 3 times, and we return a tensor of shape (N, 3)
Returns:
xyz: Tensor of shape (N, 3)
"""
# If x is actually a tensor of shape (N, 3) then just return it
if torch.is_tensor(x) and x.dim() == 2:
if x.shape[1] != 3:
msg = "Expected tensor of shape (N, 3); got %r (in %s)"
raise ValueError(msg % (x.shape, name))
if y is not None or z is not None:
msg = "Expected y and z to be None (in %s)" % name
raise ValueError(msg)
return x
if allow_singleton and y is None and z is None:
y = x
z = x
# Convert all to 1D tensors
xyz = [_handle_coord(c, dtype, device) for c in [x, y, z]]
# Broadcast and concatenate
sizes = [c.shape[0] for c in xyz]
N = max(sizes)
for c in xyz:
if c.shape[0] != 1 and c.shape[0] != N:
msg = "Got non-broadcastable sizes %r (in %s)" % (sizes, name)
raise ValueError(msg)
xyz = [c.expand(N) for c in xyz]
xyz = torch.stack(xyz, dim=1)
return xyz
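# Shape sketch for the three cases above (comments only; shapes illustrative):
#   _handle_input(torch.rand(4, 3), None, None, torch.float32, "cpu", "t")
#       # Case I: returned unchanged, shape (4, 3)
#   _handle_input(torch.rand(4), 1.0, 0.0, torch.float32, "cpu", "t")
#       # Case II: the scalars broadcast against the length-4 tensor -> (4, 3)
#   _handle_input(2.0, None, None, torch.float32, "cpu", "s", allow_singleton=True)
#       # Case III: the single value is copied to x, y and z -> (1, 3)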
def _handle_angle_input(x, dtype, device, name: str):
"""
Helper function for building a rotation function using angles.
The output is always of shape (N,).
The input can be one of:
- Torch tensor of shape (N,)
- Python scalar
- Torch scalar
"""
if torch.is_tensor(x) and x.dim() > 1:
msg = "Expected tensor of shape (N,); got %r (in %s)"
raise ValueError(msg % (x.shape, name))
else:
return _handle_coord(x, dtype, device)
def _broadcast_bmm(a, b):
"""
Batch multiply two matrices and broadcast if necessary.
Args:
a: torch tensor of shape (P, K) or (M, P, K)
b: torch tensor of shape (N, K, K)
Returns:
        a and b broadcast multiplied. The output batch dimension is max(N, M).
To broadcast transforms across a batch dimension if M != N then
expect that either M = 1 or N = 1. The tensor with batch dimension 1 is
expanded to have shape N or M.
"""
if a.dim() == 2:
a = a[None]
if len(a) != len(b):
if not ((len(a) == 1) or (len(b) == 1)):
msg = "Expected batch dim for bmm to be equal or 1; got %r, %r"
raise ValueError(msg % (a.shape, b.shape))
if len(a) == 1:
a = a.expand(len(b), -1, -1)
if len(b) == 1:
b = b.expand(len(a), -1, -1)
return a.bmm(b)
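# Broadcast sketch (comments only; shapes are illustrative): one point set
# multiplied against a batch of three matrices, so `a` is expanded along the
# batch dimension before bmm.
#   a = torch.rand(1, 5, 4)        # one set of 5 homogeneous points
#   b = torch.rand(3, 4, 4)        # three transform matrices
#   out = _broadcast_bmm(a, b)     # shape (3, 5, 4)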
def _check_valid_rotation_matrix(R, tol: float = 1e-7):
"""
Determine if R is a valid rotation matrix by checking it satisfies the
following conditions:
``RR^T = I and det(R) = 1``
Args:
R: an (N, 3, 3) matrix
Returns:
None
Emits a warning if R is an invalid rotation matrix.
"""
N = R.shape[0]
eye = torch.eye(3, dtype=R.dtype, device=R.device)
eye = eye.view(1, 3, 3).expand(N, -1, -1)
orthogonal = torch.allclose(R.bmm(R.transpose(1, 2)), eye, atol=tol)
det_R = torch.det(R)
no_distortion = torch.allclose(det_R, torch.ones_like(det_R))
if not (orthogonal and no_distortion):
msg = "R is not a valid rotation matrix"
warnings.warn(msg)
return
| [
"torch.diag_embed",
"torch.device",
"torch.cat",
"torch.stack",
"torch.is_tensor",
"torch.inverse",
"torch.ones",
"torch.det",
"torch.eye",
"torch.tensor",
"torch.ones_like"
] | 3 | rahulvenkk/pytorch3d | 68bfac3394f9a87fb268165d1c9dd264e1d9316b |
1.8 | import torch
import torch.nn as nn
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding
Args:
in_planes:
out_planes:
stride: (Default value = 1)
groups: (Default value = 1)
dilation: (Default value = 1)
Returns:
Raises:
"""
return nn.Conv3d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution
Args:
in_planes:
out_planes:
stride: (Default value = 1)
Returns:
Raises:
"""
return nn.Conv3d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
""" """
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
"""
Parameters
----------
inplanes :
planes :
stride :
(Default value = 1)
downsample :
(Default value = None)
groups :
(Default value = 1)
base_width :
(Default value = 64)
dilation :
(Default value = 1)
norm_layer :
(Default value = None)
Returns
-------
"""
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm3d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
"""
Args:
x:
Returns:
Raises:
"""
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
""" """
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition" https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
"""
Parameters
----------
inplanes :
planes :
stride :
(Default value = 1)
downsample :
(Default value = None)
groups :
(Default value = 1)
base_width :
(Default value = 64)
dilation :
(Default value = 1)
norm_layer :
(Default value = None)
Returns
-------
"""
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm3d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
"""
Args:
x:
Returns:
Raises:
"""
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
""" """
    def __init__(self, block, layers, num_channels=1, num_classes=3, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None, **kwargs):  # **kwargs is a dummy that absorbs any extra, unused arguments
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm3d
self._norm_layer = norm_layer
self.inplanes = 32
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv3d(num_channels, self.inplanes, kernel_size=3, stride=1, padding=1,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool3d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 32, layers[0])
self.layer2 = self._make_layer(block, 64, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 128, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 256, layers[3])
self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
self.fc = nn.Linear(256 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm3d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
"""
Args:
block:
planes:
blocks:
stride: (Default value = 1)
dilate: (Default value = False)
Returns:
Raises:
"""
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
"""
Args:
x:
Returns:
Raises:
"""
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
#print(x.shape)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x):
"""
Args:
x:
Returns:
Raises:
"""
return self._forward_impl(x)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
"""
Args:
arch:
block:
layers:
pretrained:
progress:
**kwargs:
Returns:
Raises:
"""
model = ResNet(block, layers, **kwargs)
return model
def resnet18_brew2(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained(bool, optional): If True, returns a model pre-trained on ImageNet (Default value = False)
progress(bool, optional): If True, displays a progress bar of the download to stderr (Default value = True)
**kwargs:
Returns:
Raises:
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
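# Usage sketch (comments only): the input volume shape below is an illustrative
# example matching the defaults num_channels=1 and num_classes=3.
#   model = resnet18_brew2()
#   volume = torch.rand(2, 1, 32, 32, 32)   # (batch, channels, D, H, W)
#   logits = model(volume)                  # shape (2, 3)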
def resnet34_brew2(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained(bool, optional): If True, returns a model pre-trained on ImageNet (Default value = False)
progress(bool, optional): If True, displays a progress bar of the download to stderr (Default value = True)
**kwargs:
Returns:
Raises:
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50_brew2(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained(bool, optional): If True, returns a model pre-trained on ImageNet (Default value = False)
progress(bool, optional): If True, displays a progress bar of the download to stderr (Default value = True)
**kwargs:
Returns:
Raises:
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, #2,4,2
**kwargs)
def resnet101_brew2(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained(bool, optional): If True, returns a model pre-trained on ImageNet (Default value = False)
progress(bool, optional): If True, displays a progress bar of the download to stderr (Default value = True)
**kwargs:
Returns:
Raises:
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152_brew2(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained(bool, optional): If True, returns a model pre-trained on ImageNet (Default value = False)
progress(bool, optional): If True, displays a progress bar of the download to stderr (Default value = True)
**kwargs:
Returns:
Raises:
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d_brew2(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained(bool, optional): If True, returns a model pre-trained on ImageNet (Default value = False)
progress(bool, optional): If True, displays a progress bar of the download to stderr (Default value = True)
**kwargs:
Returns:
Raises:
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d_brew2(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained(bool, optional): If True, returns a model pre-trained on ImageNet (Default value = False)
progress(bool, optional): If True, displays a progress bar of the download to stderr (Default value = True)
**kwargs:
Returns:
Raises:
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2_brew2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained(bool, optional): If True, returns a model pre-trained on ImageNet (Default value = False)
progress(bool, optional): If True, displays a progress bar of the download to stderr (Default value = True)
**kwargs:
Returns:
Raises:
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2_brew2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained(bool, optional): If True, returns a model pre-trained on ImageNet (Default value = False)
progress(bool, optional): If True, displays a progress bar of the download to stderr (Default value = True)
**kwargs:
Returns:
Raises:
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
| [
"torch.nn.Linear",
"torch.nn.init.constant_",
"torch.nn.Sequential",
"torch.nn.MaxPool3d",
"torch.nn.init.kaiming_normal_",
"torch.nn.ReLU",
"torch.nn.Conv3d",
"torch.flatten",
"torch.nn.AdaptiveAvgPool3d"
] | 1.8.0 | FredrikM97/Medical-ROI | 54246341460c04caeced2ef6dcab984f6c260c9d |
1.3 | from datetime import datetime as dt, timedelta
import numpy as np
import os
import os.path as osp  # used for path handling in save_session and __main__
import warnings  # used when serializing the model in save_session
import gym  # used in __main__ to construct the environment
import torch
from torch.nn import MSELoss
from torch.optim import LBFGS, Adam
from adabelief_pytorch import AdaBelief
from torch_cpo_utils import *
# from cpo_torch import CPO
from buffer_torch import *
from models_torch import MLP_DiagGaussianPolicy, MLP
from utils import *
from neural_nets import *
import wandb
wandb.login()
PROJECT_NAME = 'cpo_500e_8hz_cost1_rew1_lim25'
wandb.init(project="cpo-agent-test", name=PROJECT_NAME)
# The Safety Gym paper recommends a protocol for evaluating constrained RL
# algorithms on Safety Gym environments based on three metrics:
# 1. task performance of the final policy,
# 2. constraint satisfaction of the final policy, and
# 3. average regret with respect to safety costs throughout training.
# In all Safety Gym benchmark environments, the layout of environment elements is randomized at the start of each episode. Each distribution over layouts is continuous and minimally
# restricted, allowing for essentially infinite variations within each environment. This prevents
# RL algorithms from learning trivial solutions that memorize particular layouts.
def discount(vals, discount_term):
n = vals.size(0)
disc_pows = torch.pow(discount_term, torch.arange(n).float())
# Reverse indexes
reverse_ix = torch.arange(n - 1, -1, -1)
discounted = torch.cumsum((vals * disc_pows)[reverse_ix], dim=-1)[reverse_ix] / disc_pows
return discounted
def compute_advs(actual_vals, exp_vals, discount_term):
    # Advantage estimate: discounted sum of TD residuals (equivalent to GAE with lambda = 1)
exp_vals_next = torch.cat([exp_vals[1:], torch.tensor([0.0])])
td_res = actual_vals + discount_term * exp_vals_next - exp_vals
advs = discount(td_res, discount_term)
return advs
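# Worked sketch (comments only; the numbers are illustrative):
#   discount(torch.tensor([1., 1., 1.]), 0.5) -> tensor([1.75, 1.50, 1.00]),
#   i.e. each entry is the sum of its own and later values scaled by 0.5**k.
# compute_advs then discounts the TD residuals r_t + gamma * V(s_{t+1}) - V(s_t),
# which yields a GAE-style advantage with lambda = 1.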
class CPO:
@autoassign
def __init__(self,
policy,
value_fun,
cost_fun,
simulator,
target_kl=1e-2,
vf_lr=1e-2,
cf_lr=1e-2,
cost_lim=0.1,
train_v_iters=5,
train_c_iters=5,
val_l2_reg=1e-3,
cost_l2_reg=1e-3,
gamma=0.995,
cost_gamma=0.995,
cg_damping=1e-3,
cg_max_iters=10,
line_search_coef=0.9,
line_search_max_iter=10,
line_search_accept_ratio=0.1,
optim_mode = "adam",
optim_max_iter=25,
model_name=None,
continue_from_file=False,
save_every=10,
save_dir='trained-models-dir',
print_updates=True):
# Special function to avoid certain slowdowns from PyTorch + MPI combo.
setup_pytorch_for_mpi()
self.save_dir = save_dir
self.mse_loss = MSELoss(reduction='mean')
# Set policy and functions if starting from scratch
# if continue_from_file == False:
# Different Optimizer Modes (Think LBFGS, Adam and AdaBelief)
if optim_mode == "adam":
self.value_fun_optimizer = Adam(self.value_fun.parameters(), lr=vf_lr)
self.cost_fun_optimizer = Adam(self.cost_fun.parameters(), lr=vf_lr)
elif optim_mode == "adabelief":
self.value_fun_optimizer = AdaBelief(self.value_fun.parameters(), betas=(0.9, 0.999), eps=1e-8)
self.cost_fun_optimizer = AdaBelief(self.cost_fun.parameters(), betas=(0.9, 0.999), eps=1e-8)
else:
self.value_fun_optimizer = LBFGS(self.value_fun.parameters(), lr=vf_lr, max_iter=optim_max_iter)
self.cost_fun_optimizer = LBFGS(self.cost_fun.parameters(), lr=cf_lr, max_iter=optim_max_iter)
self.epoch_num = 0
self.elapsed_time = timedelta(0)
self.device = get_device()
self.mean_rewards = []
self.mean_costs = []
self.session_cum_avg_rewards = 0
self.session_cum_avg_costs = 0
if not model_name and continue_from_file:
raise Exception('Argument continue_from_file to __init__ method of ' \
'CPO case was set to True but model_name was not ' \
'specified.')
if not model_name and save_every:
raise Exception('Argument save_every to __init__ method of CPO ' \
'was set to a value greater than 0 but model_name ' \
'was not specified.')
if continue_from_file:
print("about to continue")
self.load_session()
def train(self, n_epochs, logger_kwargs):
# Set up logger and save configuration
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
# Set up model saving
        logger.setup_pytorch_saver(self.policy)
states_w_time_old = None
disc_rewards_old = None
disc_costs_old = None
# Main loop: collect experience in env and update/log each epoch
for epoch in range(n_epochs):
start_time = dt.now()
self.epoch_num += 1
# Run the simulator and collect experiences in the buffer
buffer = self.simulator.run_sim()
# Sample buffer experiences
observations, actions, rewards, costs = buffer.sample()
# print("reward sample:", rewards)
episode_lengths = torch.tensor([len(episode) for episode in buffer])
print("episode lengths: ", episode_lengths)
episode_limits = torch.cat([torch.tensor([0]), torch.cumsum(episode_lengths, dim=-1)])
N = np.sum([len(episode) for episode in buffer])
T = self.simulator.max_ep_len
time = torch.cat([torch.arange(size).float() for size in episode_lengths])
time = torch.unsqueeze(time, dim=1) / T
states_w_time = torch.cat([observations, time], dim=1)
# print("states with time: ", states_w_time)
disc_rewards = torch.zeros(N)
disc_costs = torch.zeros(N)
reward_advs = torch.zeros(N)
cost_advs = torch.zeros(N)
with torch.no_grad():
state_vals = self.value_fun(states_w_time).view(-1)
state_costs = self.cost_fun(states_w_time).view(-1)
print("state vals: ", state_vals)
print("state costs: ", state_costs)
for start, end in zip(episode_limits[:-1], episode_limits[1:]):
disc_rewards[start:end] = discount(rewards[start:end], self.gamma)
disc_costs[start:end] = discount(costs[start:end], self.cost_gamma)
reward_advs[start:end] = compute_advs(rewards[start:end],
state_vals[start:end],
self.gamma)
cost_advs[start:end] = compute_advs(costs[start:end],
state_costs[start:end],
self.cost_gamma)
# Tyna note: think about bias reduction
# Advantage normalizing trick for policy gradient
reward_advs -= reward_advs.mean()
reward_advs /= reward_advs.std()
# Center, but do NOT rescale advantages for cost gradient # Tyna to ask Josh about this
            cost_advs -= cost_advs.mean()  # center the cost advantages (reward_advs is already zero-mean here)
# cost_advs /= cost_advs.std()
if states_w_time_old is not None:
states_w_time_train = torch.cat([states_w_time, states_w_time_old])
disc_rewards_train = torch.cat([disc_rewards, disc_rewards_old])
disc_costs_train = torch.cat([disc_costs, disc_costs_old])
else:
states_w_time_train = states_w_time
disc_rewards_train = disc_rewards
disc_costs_train = disc_costs
states_w_time_old = states_w_time
disc_rewards_old = disc_rewards
disc_costs_old = disc_costs
# constraint_cost = torch.mean(torch.tensor([disc_costs[start] for start in episode_limits[:-1]]))
constraint_cost = torch.mean(torch.tensor([torch.sum(torch.tensor(episode.costs))
for episode in buffer]))
self.update_policy(observations, actions, reward_advs, cost_advs, constraint_cost)
self.update_nn_regressor(self.value_fun, self.value_fun_optimizer, states_w_time_train,
disc_rewards_train, self.val_l2_reg, self.train_v_iters)
self.update_nn_regressor(self.cost_fun, self.cost_fun_optimizer, states_w_time_train,
disc_costs_train, self.cost_l2_reg, self.train_c_iters)
reward_sums = [np.sum(episode.rewards) for episode in buffer]
cost_sums = [np.sum(episode.costs) for episode in buffer]
# print("all episode rewards for each episode: ", [episode.rewards for episode in buffer])
print("sum episode rewards: ", reward_sums)
print("mean of sum episode rewards: ", np.mean(reward_sums))
self.mean_rewards.append(np.mean(reward_sums))
self.mean_costs.append(np.mean(cost_sums))
self.elapsed_time += dt.now() - start_time
if self.print_updates:
self.print_update(logger)
# Save model and save last trajectory
            if (epoch % self.save_every == 0) or (epoch == n_epochs - 1):
logger.save_state({'env': env}, None)
if self.save_every and not self.epoch_num % self.save_every:
self.save_session(logger)
def update_policy(self, observations, actions, reward_advs, constraint_advs, J_c):
# J_c is constraint cost
self.policy.train()
action_dists = self.policy(observations)
log_action_probs = action_dists.log_prob(actions)
imp_sampling = torch.exp(log_action_probs - log_action_probs.detach())
# Change to torch.matmul
reward_loss = -torch.mean(imp_sampling * reward_advs)
reward_grad = flat_grad(reward_loss, self.policy.parameters(), retain_graph=True)
# Change to torch.matmul
constraint_loss = torch.sum(imp_sampling * constraint_advs) / self.simulator.n_episodes
constraint_grad = flat_grad(constraint_loss, self.policy.parameters(), retain_graph=True)
loss_metrics = {'reward loss': reward_loss,
'constraint loss': constraint_loss
}
wandb.log(loss_metrics)
mean_kl = mean_kl_first_fixed(action_dists, action_dists)
Fvp_fun = get_Hvp_fun(mean_kl, self.policy.parameters())
F_inv_g = cg_solver(Fvp_fun, reward_grad)
F_inv_b = cg_solver(Fvp_fun, constraint_grad)
q = torch.matmul(reward_grad, F_inv_g)
r = torch.matmul(reward_grad, F_inv_b)
s = torch.matmul(constraint_grad, F_inv_b)
c = (J_c - self.cost_lim)
# .to(self.device)
# Is the policy feasible (within the kl constraints?)
is_feasible = False if c > 0 and c ** 2 / s - 2 * self.target_kl > 0 else True
if is_feasible:
lam, nu = self.calc_dual_vars(q, r, s, c)
cur_penalty = nu
search_dir = -lam ** -1 * (F_inv_g + nu * F_inv_b)
# if not feasible, perform infeasible recovery: step to purely decrease cost
else:
search_dir = -torch.sqrt(2 * self.target_kl / s) * F_inv_b
# Should be positive, calculate improvement over loss
exp_loss_improv = torch.matmul(reward_grad, search_dir)
current_policy = get_flat_params(self.policy)
def line_search_criterion(search_dir, step_len):
test_policy = current_policy + step_len * search_dir
set_params(self.policy, test_policy)
with torch.no_grad():
# Test if conditions are satisfied
test_dists = self.policy(observations)
test_probs = test_dists.log_prob(actions)
imp_sampling = torch.exp(test_probs - log_action_probs.detach())
test_loss = -torch.mean(imp_sampling * reward_advs)
test_cost = torch.sum(imp_sampling * constraint_advs) / self.simulator.n_episodes
test_kl = mean_kl_first_fixed(action_dists, test_dists)
loss_improv_cond = (test_loss - reward_loss) / (step_len * exp_loss_improv) >= self.line_search_accept_ratio
cost_cond = step_len * torch.matmul(constraint_grad, search_dir) <= max(-c, 0.0)
kl_cond = test_kl <= self.target_kl
set_params(self.policy, current_policy)
if is_feasible:
return loss_improv_cond and cost_cond and kl_cond
return cost_cond and kl_cond
step_len = line_search(search_dir, 1.0, line_search_criterion, self.line_search_coef)
# print('Step Len.:', step_len, '\n')
step_metrics = {'step length': step_len}
wandb.log(step_metrics)
# improved policy
new_policy = current_policy + step_len * search_dir
set_params(self.policy, new_policy)
def update_nn_regressor(self, nn_regressor, optimizer, states, targets, l2_reg_coef, n_iters=1):
nn_regressor.train()
# states = states.to(self.device)
# targets = targets.to(self.device)
for _ in range(n_iters):
def mse():
optimizer.zero_grad()
predictions = nn_regressor(states).view(-1)
loss = self.mse_loss(predictions, targets)
flat_params = get_flat_params(nn_regressor)
l2_loss = l2_reg_coef * torch.sum(torch.pow(flat_params, 2))
loss += l2_loss
loss.backward()
return loss
optimizer.step(mse)
def calc_dual_vars(self, q, r, s, c):
        A = q - r ** 2 / s  # should always be positive (Cauchy-Schwarz)
B = 2 * self.target_kl - c ** 2 / s # does safety boundary intersect trust region? (positive = yes)
# optim_case in [3,4]
if c < 0.0 and c ** 2 / s - 2 * self.target_kl > 0.0:
lam = torch.sqrt(q / (2 * self.target_kl))
nu = 0.0
return lam, nu
# w = tro.cg(Hx, b)
# r = np.dot(w, approx_g) # b^T H^{-1} g
# s = np.dot(w, Hx(w)) # b^T H^{-1} b
lam_mid = r / c
lam_a = torch.sqrt(A / B)
lam_b = torch.sqrt(q / (2 * self.target_kl))
f_mid = -0.5 * (q / lam_mid + 2 * lam_mid * self.target_kl)
f_a = -torch.sqrt(A * B) - r * c / s
f_b = -torch.sqrt(2 * q * self.target_kl)
if lam_mid > 0:
if c < 0:
if lam_a > lam_mid:
lam_a = lam_mid
f_a = f_mid
if lam_b < lam_mid:
lam_b = lam_mid
f_b = f_mid
else:
if lam_a < lam_mid:
lam_a = lam_mid
f_a = f_mid
if lam_b > lam_mid:
lam_b = lam_mid
f_b = f_mid
else:
if c < 0:
lam = lam_b
else:
lam = lam_a
lam = lam_a if f_a >= f_b else lam_b
nu = max(0.0, (lam * c - r) / s)
return lam, nu
def save_session(self, logger):
# Where experiment outputs are saved by default:
DEFAULT_DATA_DIR = osp.join(osp.abspath(osp.dirname(osp.dirname(__file__))), 'data')
self.output_dir = DEFAULT_DATA_DIR
fpath = 'pyt_save'
fpath = osp.join(self.output_dir, self.model_name , fpath)
itr = None
fname = 'model' + ('%d' % itr if itr is not None else '') + '.pt'
fname = osp.join(fpath, fname)
os.makedirs(fpath, exist_ok=True)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# We are using a non-recommended way of saving PyTorch models,
# by pickling whole objects (which are dependent on the exact
# directory structure at the time of saving) as opposed to
# just saving network weights. This works sufficiently well
# for the purposes of Spinning Up, but you may want to do
# something different for your personal PyTorch project.
# We use a catch_warnings() context to avoid the warnings about
# not being able to save the source code.
torch.save(logger.pytorch_saver_elements, fname)
save_path = os.path.join(fpath, self.model_name + '.pt')
ckpt = dict(policy_state_dict=self.policy.state_dict(),
value_state_dict=self.value_fun.state_dict(),
cost_state_dict=self.cost_fun.state_dict(),
mean_rewards=self.mean_rewards,
mean_costs=self.mean_costs,
epoch_num=self.epoch_num,
elapsed_time=self.elapsed_time)
if self.simulator.obs_filter:
ckpt['obs_filter'] = self.simulator.obs_filter
torch.save(ckpt, save_path)
def load_session(self, load_path=None):
if load_path is None:
load_path = os.path.join(self.save_dir, self.model_name + '.pt')
print("load path:", load_path)
ckpt = torch.load(load_path)
self.policy.load_state_dict(ckpt['policy_state_dict'])
self.value_fun.load_state_dict(ckpt['value_state_dict'])
self.cost_fun.load_state_dict(ckpt['cost_state_dict'])
self.mean_rewards = ckpt['mean_rewards']
self.mean_costs = ckpt['mean_costs']
self.epoch_num = ckpt['epoch_num']
self.elapsed_time = ckpt['elapsed_time']
try:
self.simulator.obs_filter = ckpt['obs_filter']
except KeyError:
pass
def print_update(self, logger):
update_message = '[Epoch]: {0} | [Avg. Reward]: {1} | [Avg. Cost]: {2} | [Elapsed Time]: {3}'
elapsed_time_str = ''.join(str(self.elapsed_time)).split('.')[0]
format_args = (self.epoch_num, self.mean_rewards[-1], self.mean_costs[-1], elapsed_time_str)
self.session_cum_avg_rewards += (self.mean_rewards[-1]/(self.epoch_num+1))
self.session_cum_avg_costs += (self.mean_costs[-1]/(self.epoch_num+1))
logger.store(EpRet=self.mean_rewards[-1],
EpCost=self.mean_costs[-1])
# logger.store()
logger.log_tabular('Epoch', self.epoch_num)
logger.log_tabular('EpRet', with_min_and_max=False)
logger.log_tabular('EpCost', with_min_and_max=False)
logger.dump_tabular()
update_metrics = {'mean rewards': self.mean_rewards[-1],
'mean costs': self.mean_costs[-1],
'cum average rewards': self.session_cum_avg_rewards,
'cum average costs': self.session_cum_avg_costs
}
wandb.log(update_metrics)
print(update_message.format(*format_args))
if __name__ == '__main__':
import argparse
from utils import setup_logger_kwargs
parser = argparse.ArgumentParser()
parser.add_argument('--env_name', type=str, default='Safexp-PointGoal1-v0')
# parser.add_argument('--env_name', type=str, default='Safexp-PointGoal0-v0')
parser.add_argument('--target_kl', type=float, default=0.01)
parser.add_argument('--vf_lr', type=float, default=0.01)
parser.add_argument('--cf_lr', type=float, default=0.01)
parser.add_argument('--cost_lim', type=int, default=10)
parser.add_argument('--train_v_iters', type=int, default=5)
parser.add_argument('--train_c_iters', type=int, default=5)
parser.add_argument('--val_l2_reg', type=float, default=0.001)
parser.add_argument('--cost_l2_reg', type=float, default=0.001)
parser.add_argument('--gamma', type=float, default=0.995)
parser.add_argument('--cost_gamma', type=float, default=0.995)
parser.add_argument('--cg_damping', type=float, default=0.001)
parser.add_argument('--cg_max_iters', type=int, default=5)
parser.add_argument('--line_search_coef', type=float, default=0.9)
parser.add_argument('--line_search_max_iter', type=int, default=10)
parser.add_argument('--line_search_accept_ratio', type=float, default=0.1)
parser.add_argument('--optim_max_iter', type=int, default=25)
parser.add_argument('--model-name', type=str, dest='model_name', default='Safe-model',
# required=True,
                        help='The entry in config.yaml from which settings ' \
                             'should be loaded.')
parser.add_argument('--continue_from_file', action='store_true')
parser.add_argument('--save_every', type=int, default=5)
parser.add_argument('--print_updates', action='store_false')
parser.add_argument('--cpu', type=int, default=1)
parser.add_argument('--seed', type=int, default=0)
args = parser.parse_args()
DEFAULT_DATA_DIR = osp.join(osp.abspath(osp.dirname(osp.dirname(__file__))), 'data')
logger_kwargs = setup_logger_kwargs(PROJECT_NAME, args.seed, data_dir = DEFAULT_DATA_DIR)
# mpi_fork(args.cpu) # run parallel code with mpi
# Set environment and arguments
env = gym.make(args.env_name)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
epochs = 500
n_episodes = 5
# n_episodes = 10000
max_ep_len = 16
policy_dims = [64, 64]
vf_dims = [64, 64]
cf_dims = [64, 64]
cost_lim = 10
# Gaussian policy
policy = MLP_DiagGaussianPolicy(state_dim, policy_dims, action_dim)
value_fun = MLP(state_dim + 1, vf_dims, 1)
cost_fun = MLP(state_dim + 1, cf_dims, 1)
simulator = SinglePathSimulator(args.env_name, policy, n_episodes, max_ep_len)
cpo = CPO(policy,
value_fun,
cost_fun,
simulator,
model_name='cpo-run-500e',
cost_lim=args.cost_lim)
model_name = 'cpo'
print(f'Training policy {model_name} on {args.env_name} environment...\n')
cpo.train(epochs, logger_kwargs)
wandb.config.update(args)
wandb.finish()
| [
"torch.zeros",
"torch.cat",
"torch.sqrt",
"torch.nn.MSELoss",
"torch.arange",
"torch.save",
"torch.no_grad",
"torch.unsqueeze",
"torch.sum",
"torch.pow",
"torch.tensor",
"torch.load",
"torch.matmul",
"torch.mean",
"torch.cumsum"
] | 1.3.0 | feloundou/research-project | fe7f5414901f02ae24ef33af31e65782d8511da1 |
1.0 | # The code here is based on the code at
# https://github.com/gpleiss/temperature_scaling/blob/master/temperature_scaling.py
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.autograd import Variable
import numpy as np
def logits_from_probs(prob_arr):
return np.log(prob_arr)
def optimal_temp_scale(probs_arr, labels_arr, lr=0.01, max_iter=50):
probs = torch.from_numpy(probs_arr).float()
labels = torch.from_numpy(labels_arr.astype(int))
logits = torch.log(probs + 1e-12)
nll_criterion = nn.CrossEntropyLoss()
before_temperature_nll = nll_criterion(logits, labels).item()
print('Before temperature - NLL: %.3f' % (before_temperature_nll))
T = Variable(torch.ones(1,), requires_grad=True)
optimizer = optim.LBFGS([T], lr=lr, max_iter=max_iter)
def eval():
loss = nll_criterion(logits / T, labels)
loss.backward(retain_graph=True)
return loss
optimizer.step(eval)
after_temperature_nll = nll_criterion(logits / T, labels).item()
print('After temperature - NLL: %.3f' % (after_temperature_nll), ", Temperature:", T)
    return T.item(), F.softmax(logits / T, dim=1).data.numpy()
def rescale_temp(probs_arr, temp):
logits = np.log(probs_arr)
logits /= temp
probs = np.exp(logits)
probs /= np.sum(probs, axis=1)[:, None]
return probs
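# Usage sketch (comments only; the array names and shapes are illustrative):
#   T, calibrated_val = optimal_temp_scale(val_probs, val_labels)   # (N, C) probs, (N,) int labels
#   calibrated_test = rescale_temp(test_probs, T)                   # reuse the fitted T on held-out data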
| [
"torch.ones",
"torch.from_numpy",
"torch.nn.functional.softmax",
"torch.log",
"torch.optim.LBFGS",
"torch.nn.CrossEntropyLoss"
] | 1.0.0 | probabilisticdeeplearning/swa_gaussian | 033f2b956e98f7050793a0d8a4155feb98931a3d |
1.5 | import argparse
from collections import Counter
import os
from loguru import logger
import torch
from torch import nn
from torch.utils.data import DataLoader, DistributedSampler
from torch.utils.tensorboard import SummaryWriter
from virtex.config import Config
from virtex.factories import (
DownstreamDatasetFactory,
PretrainingModelFactory,
OptimizerFactory,
LRSchedulerFactory,
)
from virtex.utils.checkpointing import CheckpointManager
from virtex.utils.common import common_parser, common_setup, cycle
import virtex.utils.distributed as dist
from virtex.utils.metrics import TopkAccuracy
from virtex.utils.timer import Timer
# fmt: off
parser = common_parser(
description="""Do image classification with linear models and frozen
feature extractor, or fine-tune the feature extractor end-to-end."""
)
group = parser.add_argument_group("Downstream config arguments.")
group.add_argument(
"--down-config", metavar="FILE", help="Path to a downstream config file."
)
group.add_argument(
"--down-config-override", nargs="*", default=[],
help="A list of key-value pairs to modify downstream config params.",
)
parser.add_argument_group("Checkpointing and Logging")
parser.add_argument(
"--weight-init", choices=["random", "imagenet", "torchvision", "virtex"],
default="virtex", help="""How to initialize weights:
1. 'random' initializes all weights randomly
2. 'imagenet' initializes backbone weights from torchvision model zoo
3. {'torchvision', 'virtex'} load state dict from --checkpoint-path
- with 'torchvision', state dict would be from PyTorch's training
script.
- with 'virtex' it should be for our full pretrained model."""
)
parser.add_argument(
"--log-every", type=int, default=50,
help="""Log training curves to tensorboard after every these many iterations
only master process logs averaged loss values across processes.""",
)
parser.add_argument(
"--checkpoint-path",
help="""Path to load checkpoint and run downstream task evaluation. The
name of checkpoint file is required to be `model_*.pth`, where * is
iteration number from which the checkpoint was serialized."""
)
parser.add_argument(
"--checkpoint-every", type=int, default=5000,
help="""Serialize model to a checkpoint after every these many iterations.
For ImageNet, (5005 iterations = 1 epoch); for iNaturalist (1710 iterations
= 1 epoch).""",
)
# fmt: on
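# Example invocation (illustrative only: the script name, config files and
# checkpoint path below are placeholders, not taken from this snippet):
#   python clf_linear.py --config configs/pretrain_config.yaml \
#       --down-config configs/downstream_imagenet.yaml \
#       --weight-init virtex --checkpoint-path /path/to/model_500000.pth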
def main(_A: argparse.Namespace):
if _A.num_gpus_per_machine == 0:
# Set device as CPU if num_gpus_per_machine = 0.
device = torch.device("cpu")
else:
# Get the current device as set for current distributed process.
# Check `launch` function in `virtex.utils.distributed` module.
device = torch.cuda.current_device()
# Create a downstream config object (this will be immutable) and perform
# common setup such as logging and setting up serialization directory.
_DOWNC = Config(_A.down_config, _A.down_config_override)
common_setup(_DOWNC, _A, job_type="downstream")
# Create a (pretraining) config object and backup in serializaion directory.
_C = Config(_A.config, _A.config_override)
_C.dump(os.path.join(_A.serialization_dir, "pretrain_config.yaml"))
# Get dataset name for tensorboard logging.
DATASET = _DOWNC.DATA.ROOT.split("/")[-1]
# Set number of output classes according to dataset:
NUM_CLASSES_MAPPING = {"imagenet": 1000, "inaturalist": 8142}
NUM_CLASSES = NUM_CLASSES_MAPPING[DATASET]
# -------------------------------------------------------------------------
# INSTANTIATE DATALOADER, MODEL, OPTIMIZER, SCHEDULER
# -------------------------------------------------------------------------
train_dataset = DownstreamDatasetFactory.from_config(_DOWNC, split="train")
train_dataloader = DataLoader(
train_dataset,
batch_size=_DOWNC.OPTIM.BATCH_SIZE // dist.get_world_size(),
num_workers=_A.cpu_workers,
sampler=DistributedSampler(
train_dataset,
num_replicas=dist.get_world_size(),
rank=dist.get_rank(),
shuffle=True,
),
drop_last=False,
pin_memory=True,
collate_fn=train_dataset.collate_fn,
)
val_dataset = DownstreamDatasetFactory.from_config(_DOWNC, split="val")
val_dataloader = DataLoader(
val_dataset,
batch_size=_DOWNC.OPTIM.BATCH_SIZE // dist.get_world_size(),
num_workers=_A.cpu_workers,
sampler=DistributedSampler(
val_dataset,
num_replicas=dist.get_world_size(),
rank=dist.get_rank(),
shuffle=False,
),
pin_memory=True,
drop_last=False,
collate_fn=val_dataset.collate_fn,
)
# Initialize model using pretraining config.
pretrained_model = PretrainingModelFactory.from_config(_C)
# Load weights according to the init method, do nothing for `random`, and
# `imagenet` is already taken care of.
if _A.weight_init == "virtex":
CheckpointManager(model=pretrained_model).load(_A.checkpoint_path)
elif _A.weight_init == "torchvision":
# Keep strict=False because this state dict may have weights for
# last fc layer.
pretrained_model.visual.cnn.load_state_dict(
torch.load(_A.checkpoint_path, map_location="cpu")["state_dict"],
strict=False,
)
# Pull out the CNN (torchvision-like) from our pretrained model and add
    # back the FC layer - this exists in torchvision models, and is set to
# `nn.Identity()` during pretraining.
model = pretrained_model.visual.cnn # type: ignore
model.fc = nn.Linear(_DOWNC.MODEL.VISUAL.FEATURE_SIZE, NUM_CLASSES).to(device)
model = model.to(device)
# Re-initialize the FC layer.
torch.nn.init.normal_(model.fc.weight.data, mean=0.0, std=0.01)
torch.nn.init.constant_(model.fc.bias.data, 0.0)
# Freeze all layers except FC as per config param.
if _DOWNC.MODEL.VISUAL.FROZEN:
for name, param in model.named_parameters():
if "fc" not in name:
param.requires_grad = False
# Cross entropy loss and accuracy meter.
criterion = nn.CrossEntropyLoss()
top1 = TopkAccuracy(top_k=1)
optimizer = OptimizerFactory.from_config(_DOWNC, model.named_parameters())
scheduler = LRSchedulerFactory.from_config(_DOWNC, optimizer)
del pretrained_model
# -------------------------------------------------------------------------
# BEFORE TRAINING STARTS
# -------------------------------------------------------------------------
# Create an iterator from dataloader to sample batches perpetually.
train_dataloader_iter = cycle(train_dataloader, device)
# Wrap model and optimizer using NVIDIA Apex for mixed precision training.
# NOTE: Always do this before wrapping model with DistributedDataParallel.
if _DOWNC.FP16_OPT > 0:
from apex import amp
model, optimizer = amp.initialize(
model, optimizer, opt_level=f"O{_DOWNC.FP16_OPT}"
)
if dist.get_world_size() > 1:
dist.synchronize()
model = nn.parallel.DistributedDataParallel(
model, device_ids=[device], find_unused_parameters=True
)
if dist.is_master_process():
checkpoint_manager = CheckpointManager(
_A.serialization_dir,
model=model,
optimizer=optimizer,
scheduler=scheduler,
)
tensorboard_writer = SummaryWriter(log_dir=_A.serialization_dir)
# Keep track of time per iteration and ETA.
timer = Timer(start_from=1, total_iterations=_DOWNC.OPTIM.NUM_ITERATIONS)
# -------------------------------------------------------------------------
# TRAINING LOOP
# -------------------------------------------------------------------------
for iteration in range(1, _DOWNC.OPTIM.NUM_ITERATIONS + 1):
timer.tic()
optimizer.zero_grad()
batch = next(train_dataloader_iter)
logits = model(batch["image"])
loss = criterion(logits, batch["label"])
# Perform dynamic scaling of loss to adjust for mixed precision.
if _DOWNC.FP16_OPT > 0:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
scheduler.step(iteration)
timer.toc()
if iteration % _A.log_every == 0 and dist.is_master_process():
logger.info(
f"{timer.stats} | Loss: {loss:.3f} | GPU: {dist.gpu_mem_usage()} MB"
)
tensorboard_writer.add_scalar(f"{DATASET}/train_loss", loss, iteration)
tensorboard_writer.add_scalar(
f"{DATASET}/learning_rate",
optimizer.param_groups[0]["lr"],
iteration,
)
# ---------------------------------------------------------------------
# VALIDATION
# ---------------------------------------------------------------------
if iteration % _A.checkpoint_every == 0:
torch.set_grad_enabled(False)
model.eval()
total_val_loss = torch.tensor(0.0).to(device)
for val_iteration, batch in enumerate(val_dataloader, start=1):
for key in batch:
batch[key] = batch[key].to(device)
logits = model(batch["image"])
loss = criterion(logits, batch["label"])
top1(logits, batch["label"])
total_val_loss += loss
# Divide each loss component by number of val batches per GPU.
total_val_loss = total_val_loss / val_iteration
dist.average_across_processes(total_val_loss)
# Get accumulated Top-1 accuracy for logging across GPUs.
acc = top1.get_metric(reset=True)
dist.average_across_processes(acc)
torch.set_grad_enabled(True)
model.train()
# Save recent checkpoint and best checkpoint based on accuracy.
if dist.is_master_process():
checkpoint_manager.step(iteration)
if iteration % _A.checkpoint_every == 0 and dist.is_master_process():
logger.info(f"Iter: {iteration} | Top-1 accuracy: {acc})")
tensorboard_writer.add_scalar(
f"{DATASET}/val_loss", total_val_loss, iteration
)
# This name scoping will result in Tensorboard displaying all metrics
# (VOC07, caption, etc.) together.
tensorboard_writer.add_scalars(
f"metrics/{DATASET}", {"top1": acc}, iteration
)
# All processes will wait till master process is done logging.
dist.synchronize()
if __name__ == "__main__":
_A = parser.parse_args()
# Add an arg in config override if `--weight-init` is imagenet.
if _A.weight_init == "imagenet":
_A.config_override.extend(["MODEL.VISUAL.PRETRAINED", True])
if _A.num_gpus_per_machine == 0:
main(_A)
else:
# This will launch `main` and set appropriate CUDA device (GPU ID) as
# per process (accessed in the beginning of `main`).
dist.launch(
main,
num_machines=_A.num_machines,
num_gpus_per_machine=_A.num_gpus_per_machine,
machine_rank=_A.machine_rank,
dist_url=_A.dist_url,
args=(_A,),
)
| [
"torch.nn.Linear",
"torch.device",
"torch.utils.tensorboard.SummaryWriter",
"torch.nn.init.constant_",
"torch.nn.parallel.DistributedDataParallel",
"torch.set_grad_enabled",
"torch.cuda.current_device",
"torch.nn.init.normal_",
"torch.tensor",
"torch.load",
"torch.nn.CrossEntropyLoss"
] | 1.5.0 | tongyao-zhu/virtex | 43b33289ffc963b41b6b98affc5e94dfe25e29c8 |
0.4 | import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
from utils import to_var
class SentenceVAE(nn.Module):
def __init__(self, vocab_size, embedding_size, rnn_type, hidden_size, word_dropout, embedding_dropout, latent_size,
sos_idx, eos_idx, pad_idx, unk_idx, max_sequence_length, num_layers=1, bidirectional=False):
super().__init__()
self.tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.Tensor
self.max_sequence_length = max_sequence_length
self.sos_idx = sos_idx
self.eos_idx = eos_idx
self.pad_idx = pad_idx
self.unk_idx = unk_idx
self.latent_size = latent_size
self.rnn_type = rnn_type
self.bidirectional = bidirectional
self.num_layers = num_layers
self.hidden_size = hidden_size
self.embedding = nn.Embedding(vocab_size, embedding_size)
self.word_dropout_rate = word_dropout
self.embedding_dropout = nn.Dropout(p=embedding_dropout)
if rnn_type == 'rnn':
rnn = nn.RNN
elif rnn_type == 'gru':
rnn = nn.GRU
elif rnn_type == 'lstm':
rnn = nn.LSTM
else:
raise ValueError()
self.encoder_rnn = rnn(embedding_size, hidden_size, num_layers=num_layers, bidirectional=self.bidirectional,
batch_first=True)
self.decoder_rnn = rnn(embedding_size, hidden_size, num_layers=num_layers, bidirectional=self.bidirectional,
batch_first=True)
self.hidden_factor = (2 if bidirectional else 1) * num_layers
self.hidden2mean = nn.Linear(hidden_size * self.hidden_factor, latent_size)
self.hidden2logv = nn.Linear(hidden_size * self.hidden_factor, latent_size)
self.latent2hidden = nn.Linear(latent_size, hidden_size * self.hidden_factor)
self.outputs2vocab = nn.Linear(hidden_size * (2 if bidirectional else 1), vocab_size)
def forward(self, input_sequence, length):
batch_size = input_sequence.size(0)
sorted_lengths, sorted_idx = torch.sort(length, descending=True)
input_sequence = input_sequence[sorted_idx]
# ENCODER
input_embedding = self.embedding(input_sequence)
packed_input = rnn_utils.pack_padded_sequence(input_embedding, sorted_lengths.data.tolist(), batch_first=True)
_, hidden = self.encoder_rnn(packed_input)
if self.bidirectional or self.num_layers > 1:
# flatten hidden state
hidden = hidden.view(batch_size, self.hidden_size * self.hidden_factor)
else:
hidden = hidden.squeeze()
# REPARAMETERIZATION
mean = self.hidden2mean(hidden)
logv = self.hidden2logv(hidden)
std = torch.exp(0.5 * logv)
z = to_var(torch.randn([batch_size, self.latent_size]))
z = z * std + mean
# DECODER
hidden = self.latent2hidden(z)
if self.bidirectional or self.num_layers > 1:
# unflatten hidden state
hidden = hidden.view(self.hidden_factor, batch_size, self.hidden_size)
else:
hidden = hidden.unsqueeze(0)
# decoder input
if self.word_dropout_rate > 0:
# randomly replace decoder input with <unk>
prob = torch.rand(input_sequence.size())
if torch.cuda.is_available():
prob = prob.cuda()
prob[(input_sequence.data - self.sos_idx) * (input_sequence.data - self.pad_idx) == 0] = 1
decoder_input_sequence = input_sequence.clone()
decoder_input_sequence[prob < self.word_dropout_rate] = self.unk_idx
input_embedding = self.embedding(decoder_input_sequence)
input_embedding = self.embedding_dropout(input_embedding)
packed_input = rnn_utils.pack_padded_sequence(input_embedding, sorted_lengths.data.tolist(), batch_first=True)
# decoder forward pass
outputs, _ = self.decoder_rnn(packed_input, hidden)
# process outputs
padded_outputs = rnn_utils.pad_packed_sequence(outputs, batch_first=True)[0]
padded_outputs = padded_outputs.contiguous()
_, reversed_idx = torch.sort(sorted_idx)
padded_outputs = padded_outputs[reversed_idx]
b, s, _ = padded_outputs.size()
# project outputs to vocab
logp = nn.functional.log_softmax(self.outputs2vocab(padded_outputs.view(-1, padded_outputs.size(2))), dim=-1)
logp = logp.view(b, s, self.embedding.num_embeddings)
return logp, mean, logv, z
def inference(self, n=4, z=None):
if z is None:
batch_size = n
z = to_var(torch.randn([batch_size, self.latent_size]))
else:
batch_size = z.size(0)
hidden = self.latent2hidden(z)
        if self.bidirectional or self.num_layers > 1:
            # unflatten hidden state
            hidden = hidden.view(self.hidden_factor, batch_size, self.hidden_size)
        else:
            hidden = hidden.unsqueeze(0)
# required for dynamic stopping of sentence generation
sequence_idx = torch.arange(0, batch_size, out=self.tensor()).long() # all idx of batch
sequence_running = torch.arange(0, batch_size,
out=self.tensor()).long() # all idx of batch which are still generating
sequence_mask = torch.ones(batch_size, out=self.tensor()).byte()
running_seqs = torch.arange(0, batch_size,
out=self.tensor()).long() # idx of still generating sequences with respect to current loop
generations = self.tensor(batch_size, self.max_sequence_length).fill_(self.pad_idx).long()
t = 0
while (t < self.max_sequence_length and len(running_seqs) > 0):
if t == 0:
input_sequence = to_var(torch.Tensor(batch_size).fill_(self.sos_idx).long())
input_sequence = input_sequence.unsqueeze(1)
input_embedding = self.embedding(input_sequence)
output, hidden = self.decoder_rnn(input_embedding, hidden)
logits = self.outputs2vocab(output)
input_sequence = self._sample(logits)
# save next input
generations = self._save_sample(generations, input_sequence, sequence_running, t)
            # update global running sequence
sequence_mask[sequence_running] = (input_sequence != self.eos_idx).data
sequence_running = sequence_idx.masked_select(sequence_mask)
# update local running sequences
running_mask = (input_sequence != self.eos_idx).data
running_seqs = running_seqs.masked_select(running_mask)
# prune input and hidden state according to local update
if batch_size == 1 or len(input_sequence.size()) == 0:
input_sequence = input_sequence.unsqueeze(0)
if len(running_seqs) > 0:
input_sequence = input_sequence[running_seqs]
hidden = hidden[:, running_seqs]
running_seqs = torch.arange(0, len(running_seqs), out=self.tensor()).long()
t += 1
return generations, z
def _sample(self, dist, mode='greedy'):
if mode == 'greedy':
_, sample = torch.topk(dist, 1, dim=-1)
sample = sample.squeeze()
return sample
def _save_sample(self, save_to, sample, running_seqs, t):
# select only still running
running_latest = save_to[running_seqs]
# update token at position t
running_latest[:, t] = sample.data
# save back
save_to[running_seqs] = running_latest
return save_to
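# Usage sketch (comments only; the vocabulary size, special-token ids and the
# input batch below are illustrative, not from the source):
#   model = SentenceVAE(vocab_size=1000, embedding_size=300, rnn_type='gru',
#                       hidden_size=256, word_dropout=0.5, embedding_dropout=0.5,
#                       latent_size=16, sos_idx=1, eos_idx=2, pad_idx=0, unk_idx=3,
#                       max_sequence_length=20)
#   logp, mean, logv, z = model(token_ids, lengths)   # token_ids: (B, T), lengths: (B,)
#   samples, z = model.inference(n=4)                 # greedy decoding from prior samples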
| [
"torch.nn.Linear",
"torch.nn.Embedding",
"torch.nn.Dropout",
"torch.randn",
"torch.cuda.is_available",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.Tensor",
"torch.exp",
"torch.sort",
"torch.topk"
] | 0.4.1 | oscarvik/Language-Modelling-CSE291-AS2 | 18af16de61cbe8d820b1445207107b4ea4771680 |
1.7 | from typing import Tuple
import torch
from torch import nn
from torch.nn import functional as F
from utils import Tensor, assert_shape, build_grid, conv_transpose_out_shape
class SlotAttention(nn.Module):
"""Slot attention module that iteratively performs cross-attention.
Args:
slot_agnostic (bool): If True, all slots share trained embedding.
            If False, we train embeddings separately for each slot.
Defaults to True (as in the paper).
random_slot (bool): If True, we train mu and sigma for slot embedding,
and sample slot from the Gaussian when forward pass. If False, we
train slot embedding itself (similar to the learnable positional
embedding in DETR), so that we use the same embedding to interact
with input image features. Defaults to True (as in the paper).
"""
def __init__(self,
in_features,
num_iterations,
num_slots,
slot_size,
mlp_hidden_size,
learnable_slot=False,
slot_agnostic=True,
random_slot=True,
epsilon=1e-6):
super().__init__()
self.in_features = in_features
self.num_iterations = num_iterations
self.num_slots = num_slots
        self.slot_size = slot_size  # dimensionality of each slot vector
self.mlp_hidden_size = mlp_hidden_size
self.learnable_slot = learnable_slot
self.slot_agnostic = slot_agnostic
self.random_slot = random_slot
self.epsilon = epsilon
self.norm_inputs = nn.LayerNorm(self.in_features)
        # LayerNorm over the slot feature dimension, applied to each slot independently
self.norm_slots = nn.LayerNorm(self.slot_size)
self.norm_mlp = nn.LayerNorm(self.slot_size)
# Linear maps for the attention module.
self.project_q = nn.Linear(self.slot_size, self.slot_size, bias=False)
self.project_k = nn.Linear(in_features, self.slot_size, bias=False)
self.project_v = nn.Linear(in_features, self.slot_size, bias=False)
# Slot update functions.
self.gru = nn.GRUCell(self.slot_size, self.slot_size)
self.mlp = nn.Sequential(
nn.Linear(self.slot_size, self.mlp_hidden_size),
nn.ReLU(),
nn.Linear(self.mlp_hidden_size, self.slot_size),
)
trainable_slot_num = 1 if self.slot_agnostic else self.num_slots
slot_init_func = self.register_parameter if \
learnable_slot else self.register_buffer
if self.random_slot:
# train the mean and std of slot embedding
slot_init_func(
"slots_mu",
torch.nn.Parameter(
nn.init.xavier_uniform_(
torch.zeros((1, trainable_slot_num, self.slot_size)),
gain=nn.init.calculate_gain("linear"))),
)
slot_init_func(
"slots_log_sigma",
torch.nn.Parameter(
nn.init.xavier_uniform_(
torch.zeros((1, trainable_slot_num, self.slot_size)),
gain=nn.init.calculate_gain("linear"))),
)
else:
# train slot embedding itself
# should definitely be one trainable embedding for each slot
assert not slot_agnostic, 'cannot use the same emb for each slot!'
slot_init_func(
"slots_mu",
torch.nn.Parameter(
nn.init.xavier_normal_( # TODO: mind the init method here?
torch.zeros((1, self.num_slots, self.slot_size)),
gain=nn.init.calculate_gain("linear"))),
)
def forward(self, inputs: Tensor):
# `inputs` has shape [batch_size, num_inputs, inputs_size].
batch_size, num_inputs, inputs_size = inputs.shape
inputs = self.norm_inputs(inputs) # Apply layer norm to the input.
# Shape: [batch_size, num_inputs, slot_size].
k = self.project_k(inputs)
# Shape: [batch_size, num_inputs, slot_size].
v = self.project_v(inputs)
# Initialize the slots. Shape: [batch_size, num_slots, slot_size].
if self.random_slot:
# if in testing mode, fix random seed to get same slot embedding
if not self.training:
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
slots_init = torch.randn(
(1, self.num_slots,
self.slot_size)).repeat(batch_size, 1, 1)
# in training mode, sample from Gaussian with learned mean and std
else:
slots_init = torch.randn(
(batch_size, self.num_slots, self.slot_size))
slots_init = slots_init.type_as(inputs)
slots = self.slots_mu + self.slots_log_sigma.exp() * slots_init
else:
# use the learned embedding itself, no sampling, no randomness
slots = self.slots_mu.repeat(batch_size, 1, 1)
# Multiple rounds of attention.
for _ in range(self.num_iterations):
slots_prev = slots
slots = self.norm_slots(slots)
# Attention.
q = self.project_q(
slots) # Shape: [batch_size, num_slots, slot_size].
attn_norm_factor = self.slot_size**-0.5
attn_logits = attn_norm_factor * torch.matmul(k, q.transpose(2, 1))
attn = F.softmax(attn_logits, dim=-1)
# `attn` has shape: [batch_size, num_inputs, num_slots].
# Weighted mean.
attn = attn + self.epsilon
attn = attn / torch.sum(attn, dim=1, keepdim=True)
updates = torch.matmul(attn.transpose(1, 2), v)
# `updates` has shape: [batch_size, num_slots, slot_size].
# Slot update.
# GRU is expecting inputs of size (N,H)
# so flatten batch and slots dimension
slots = self.gru(
updates.view(batch_size * self.num_slots, self.slot_size),
slots_prev.view(batch_size * self.num_slots, self.slot_size),
)
slots = slots.view(batch_size, self.num_slots, self.slot_size)
slots = slots + self.mlp(self.norm_mlp(slots))
return slots
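
# --- Illustrative sketch (not part of the original repo) ---------------------
# The attention above is softmax-normalized over the *slot* axis (dim=-1 of a
# [batch, num_inputs, num_slots] tensor), so slots compete for each input
# location; the division over dim=1 then turns every slot's attention column
# into a weighted mean over the inputs. A shape check with made-up sizes:
def _demo_slot_attention_step():
    import torch
    import torch.nn.functional as F
    k = torch.randn(2, 16, 64)   # [batch, num_inputs, slot_size]
    q = torch.randn(2, 7, 64)    # [batch, num_slots, slot_size]
    v = torch.randn(2, 16, 64)
    attn = F.softmax((64 ** -0.5) * torch.matmul(k, q.transpose(2, 1)), dim=-1)
    attn = attn / torch.sum(attn, dim=1, keepdim=True)    # weighted-mean normalization
    return torch.matmul(attn.transpose(1, 2), v)          # [2, 7, 64]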
class SlotAttentionModel(nn.Module):
def __init__(
self,
resolution: Tuple[int, int],
num_slots: int,
num_iterations: int,
in_channels: int = 3,
kernel_size: int = 5,
slot_size: int = 64,
hidden_dims: Tuple[int, ...] = (64, 64, 64, 64),
decoder_resolution: Tuple[int, int] = (8, 8),
empty_cache: bool = False,
use_relu: bool = False, # TODO: official code use ReLU
slot_mlp_size: int = 128,
learnable_slot: bool = False,
slot_agnostic: bool = True,
random_slot: bool = True,
use_entropy_loss: bool = False,
):
super().__init__()
self.resolution = resolution
self.num_slots = num_slots
self.num_iterations = num_iterations
self.in_channels = in_channels
self.kernel_size = kernel_size
self.slot_size = slot_size
self.empty_cache = empty_cache
self.hidden_dims = hidden_dims
self.decoder_resolution = decoder_resolution
self.out_features = self.hidden_dims[-1]
modules = []
channels = self.in_channels
# Build Encoder
for h_dim in self.hidden_dims:
modules.append(
nn.Sequential(
nn.Conv2d(
channels,
out_channels=h_dim,
kernel_size=self.kernel_size,
stride=1,
padding=self.kernel_size // 2,
),
nn.ReLU() if use_relu else nn.LeakyReLU(),
))
channels = h_dim
self.encoder = nn.Sequential(*modules)
self.encoder_pos_embedding = SoftPositionEmbed(self.in_channels,
self.out_features,
resolution)
self.encoder_out_layer = nn.Sequential(
nn.Linear(self.out_features, self.out_features),
nn.ReLU() if use_relu else nn.LeakyReLU(),
nn.Linear(self.out_features, self.out_features),
)
# Build Decoder
modules = []
in_size = decoder_resolution[0]
out_size = in_size
for i in range(len(self.hidden_dims) - 1, -1, -1):
modules.append(
nn.Sequential(
nn.ConvTranspose2d(
self.hidden_dims[i],
self.hidden_dims[i - 1],
kernel_size=5,
stride=2,
padding=2,
output_padding=1,
),
nn.ReLU() if use_relu else nn.LeakyReLU(),
))
out_size = conv_transpose_out_shape(out_size, 2, 2, 5, 1)
assert_shape(
resolution,
(out_size, out_size),
message="Output shape of decoder did not match input resolution. "
"Try changing `decoder_resolution`.",
)
# same convolutions
modules.append(
nn.Sequential(
nn.ConvTranspose2d(
self.out_features,
self.out_features,
kernel_size=5,
stride=1,
padding=2,
output_padding=0,
),
nn.ReLU() if use_relu else nn.LeakyReLU(),
nn.ConvTranspose2d(
self.out_features,
4,
kernel_size=3,
stride=1,
padding=1,
output_padding=0,
),
))
self.decoder = nn.Sequential(*modules)
self.decoder_pos_embedding = SoftPositionEmbed(self.in_channels,
self.out_features,
self.decoder_resolution)
self.slot_attention = SlotAttention(
in_features=self.out_features,
num_iterations=self.num_iterations,
num_slots=self.num_slots,
slot_size=self.slot_size,
mlp_hidden_size=slot_mlp_size,
learnable_slot=learnable_slot,
slot_agnostic=slot_agnostic,
random_slot=random_slot,
)
self.use_entropy_loss = use_entropy_loss # -p*log(p)
def forward(self, x):
if self.empty_cache:
torch.cuda.empty_cache()
batch_size, num_channels, height, width = x.shape
encoder_out = self.encoder(x)
encoder_out = self.encoder_pos_embedding(encoder_out)
# `encoder_out` has shape: [batch_size, filter_size, height, width]
encoder_out = torch.flatten(encoder_out, start_dim=2, end_dim=3)
# `encoder_out` has shape: [batch_size, filter_size, height*width]
encoder_out = encoder_out.permute(0, 2, 1)
encoder_out = self.encoder_out_layer(encoder_out)
# `encoder_out` has shape: [batch_size, height*width, filter_size]
# (batch_size, self.num_slots, self.slot_size)
slots = self.slot_attention(encoder_out)
# `slots` has shape: [batch_size, num_slots, slot_size].
batch_size, num_slots, slot_size = slots.shape
# spatial broadcast
slots = slots.view(batch_size * num_slots, slot_size, 1, 1)
decoder_in = slots.repeat(1, 1, self.decoder_resolution[0],
self.decoder_resolution[1])
out = self.decoder_pos_embedding(decoder_in)
out = self.decoder(out)
# `out` has shape: [batch_size*num_slots, num_channels+1, height, width].
out = out.view(batch_size, num_slots, num_channels + 1, height, width)
recons = out[:, :, :num_channels, :, :]
masks = out[:, :, -1:, :, :]
masks = F.softmax(masks, dim=1)
recon_combined = torch.sum(recons * masks, dim=1)
return recon_combined, recons, masks, slots
def loss_function(self, input):
recon_combined, recons, masks, slots = self.forward(input)
loss = F.mse_loss(recon_combined, input)
loss_dict = {
'recon_loss': loss,
}
# masks: [B, num_slots, 1, H, W], apply entropy loss
if self.use_entropy_loss:
masks = masks[:, :, 0] # [B, num_slots, H, W]
entropy_loss = (-masks * torch.log(masks + 1e-6)).sum(1).mean()
loss_dict['entropy'] = entropy_loss
return loss_dict
class SoftPositionEmbed(nn.Module):
def __init__(self, num_channels: int, hidden_size: int,
resolution: Tuple[int, int]):
super().__init__()
self.dense = nn.Linear(
in_features=num_channels + 1, out_features=hidden_size)
self.register_buffer("grid", build_grid(resolution))
def forward(self, inputs: Tensor):
emb_proj = self.dense(self.grid).permute(0, 3, 1, 2)
return inputs + emb_proj
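
# --- Illustrative sketch (not part of the original repo) ---------------------
# End-to-end shape check for SlotAttentionModel. The 128x128 input size below is
# a hypothetical choice that satisfies the decoder assertion: four stride-2
# transposed convolutions upsample the default (8, 8) decoder grid back to 128.
def _demo_slot_attention_model():
    import torch
    model = SlotAttentionModel(resolution=(128, 128), num_slots=7, num_iterations=3)
    x = torch.randn(2, 3, 128, 128)
    recon_combined, recons, masks, slots = model(x)
    # recon_combined: [2, 3, 128, 128]; recons: [2, 7, 3, 128, 128];
    # masks: [2, 7, 1, 128, 128] (softmaxed over slots); slots: [14, 64, 1, 1]
    return recon_combined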
| [
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"torch.sum",
"torch.nn.LayerNorm",
"torch.nn.ConvTranspose2d",
"torch.manual_seed",
"torch.nn.init.calculate_gain",
"torch.zeros",
"torch.cuda.manual_seed_all",
"torch.nn.Sequential",
"torch.nn.ReLU",
"torch.cuda.empty_cache",
"torch.nn.Conv2d",
"torch.nn.functional.softmax",
"torch.log",
"torch.nn.GRUCell",
"torch.nn.functional.mse_loss",
"torch.flatten",
"torch.randn"
] | 1.7.1 | jiaqi-xi/slot_attention | 8420414eb261501e5b056e4d409c338d909397ef |
1.4 | import torch
import numpy as np
from copy import deepcopy
from typing import Any, Dict, Tuple, Optional
from tianshou.policy import DDPGPolicy
from tianshou.data import Batch, ReplayBuffer
from tianshou.exploration import BaseNoise, GaussianNoise
class TD3Policy(DDPGPolicy):
"""Implementation of TD3, arXiv:1802.09477.
:param torch.nn.Module actor: the actor network following the rules in
:class:`~tianshou.policy.BasePolicy`. (s -> logits)
:param torch.optim.Optimizer actor_optim: the optimizer for actor network.
:param torch.nn.Module critic1: the first critic network. (s, a -> Q(s,
a))
:param torch.optim.Optimizer critic1_optim: the optimizer for the first
critic network.
:param torch.nn.Module critic2: the second critic network. (s, a -> Q(s,
a))
:param torch.optim.Optimizer critic2_optim: the optimizer for the second
critic network.
:param action_range: the action range (minimum, maximum).
:type action_range: Tuple[float, float]
:param float tau: param for soft update of the target network, defaults to
0.005.
:param float gamma: discount factor, in [0, 1], defaults to 0.99.
:param float exploration_noise: the exploration noise, add to the action,
defaults to ``GaussianNoise(sigma=0.1)``
:param float policy_noise: the noise used in updating policy network,
default to 0.2.
:param int update_actor_freq: the update frequency of actor network,
default to 2.
:param float noise_clip: the clipping range used in updating policy
network, default to 0.5.
:param bool reward_normalization: normalize the reward to Normal(0, 1),
defaults to False.
:param bool ignore_done: ignore the done flag while training the policy,
defaults to False.
.. seealso::
Please refer to :class:`~tianshou.policy.BasePolicy` for more detailed
explanation.
"""
def __init__(
self,
actor: torch.nn.Module,
actor_optim: torch.optim.Optimizer,
critic1: torch.nn.Module,
critic1_optim: torch.optim.Optimizer,
critic2: torch.nn.Module,
critic2_optim: torch.optim.Optimizer,
action_range: Tuple[float, float],
tau: float = 0.005,
gamma: float = 0.99,
exploration_noise: Optional[BaseNoise] = GaussianNoise(sigma=0.1),
policy_noise: float = 0.2,
update_actor_freq: int = 2,
noise_clip: float = 0.5,
reward_normalization: bool = False,
ignore_done: bool = False,
estimation_step: int = 1,
**kwargs: Any,
) -> None:
super().__init__(actor, actor_optim, None, None, action_range,
tau, gamma, exploration_noise, reward_normalization,
ignore_done, estimation_step, **kwargs)
self.critic1, self.critic1_old = critic1, deepcopy(critic1)
self.critic1_old.eval()
self.critic1_optim = critic1_optim
self.critic2, self.critic2_old = critic2, deepcopy(critic2)
self.critic2_old.eval()
self.critic2_optim = critic2_optim
self._policy_noise = policy_noise
self._freq = update_actor_freq
self._noise_clip = noise_clip
self._cnt = 0
self._last = 0
def train(self, mode: bool = True) -> "TD3Policy":
self.training = mode
self.actor.train(mode)
self.critic1.train(mode)
self.critic2.train(mode)
return self
def sync_weight(self) -> None:
for o, n in zip(self.actor_old.parameters(), self.actor.parameters()):
o.data.copy_(o.data * (1.0 - self._tau) + n.data * self._tau)
for o, n in zip(
self.critic1_old.parameters(), self.critic1.parameters()
):
o.data.copy_(o.data * (1.0 - self._tau) + n.data * self._tau)
for o, n in zip(
self.critic2_old.parameters(), self.critic2.parameters()
):
o.data.copy_(o.data * (1.0 - self._tau) + n.data * self._tau)
def _target_q(
self, buffer: ReplayBuffer, indice: np.ndarray
) -> torch.Tensor:
batch = buffer[indice] # batch.obs: s_{t+n}
a_ = self(batch, model="actor_old", input="obs_next").act
dev = a_.device
noise = torch.randn(size=a_.shape, device=dev) * self._policy_noise
if self._noise_clip > 0.0:
noise = noise.clamp(-self._noise_clip, self._noise_clip)
a_ += noise
a_ = a_.clamp(self._range[0], self._range[1])
target_q = torch.min(
self.critic1_old(batch.obs_next, a_),
self.critic2_old(batch.obs_next, a_))
return target_q
def learn(self, batch: Batch, **kwargs: Any) -> Dict[str, float]:
weight = batch.pop("weight", 1.0)
# critic 1
current_q1 = self.critic1(batch.obs, batch.act).flatten()
target_q = batch.returns.flatten()
td1 = current_q1 - target_q
critic1_loss = (td1.pow(2) * weight).mean()
# critic1_loss = F.mse_loss(current_q1, target_q)
self.critic1_optim.zero_grad()
critic1_loss.backward()
self.critic1_optim.step()
# critic 2
current_q2 = self.critic2(batch.obs, batch.act).flatten()
td2 = current_q2 - target_q
critic2_loss = (td2.pow(2) * weight).mean()
# critic2_loss = F.mse_loss(current_q2, target_q)
self.critic2_optim.zero_grad()
critic2_loss.backward()
self.critic2_optim.step()
batch.weight = (td1 + td2) / 2.0 # prio-buffer
if self._cnt % self._freq == 0:
actor_loss = -self.critic1(
batch.obs, self(batch, eps=0.0).act).mean()
self.actor_optim.zero_grad()
actor_loss.backward()
self._last = actor_loss.item()
self.actor_optim.step()
self.sync_weight()
self._cnt += 1
return {
"loss/actor": self._last,
"loss/critic1": critic1_loss.item(),
"loss/critic2": critic2_loss.item(),
}
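
# --- Illustrative sketch (not part of tianshou) -------------------------------
# Two TD3 ingredients used above, shown in isolation: the polyak ("soft") target
# update from sync_weight, and the clipped target-policy noise from _target_q.
# The tensor sizes, tau and noise scales below are made up for illustration.
def _demo_td3_tricks(tau=0.005, policy_noise=0.2, noise_clip=0.5):
    import torch
    old, new = torch.zeros(3), torch.ones(3)
    old = old * (1.0 - tau) + new * tau             # soft target-network update
    a_ = torch.rand(4, 2) * 2 - 1                   # fake target actions in [-1, 1]
    noise = (torch.randn_like(a_) * policy_noise).clamp(-noise_clip, noise_clip)
    a_ = (a_ + noise).clamp(-1.0, 1.0)              # target policy smoothing
    return old, a_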
| [
"torch.randn"
] | 1.4.0 | cm107/tianshou | 0febf4bc1dc1366d837bab4574664f8116b66819 |
1.8 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Represents a model repository, including pre-trained models and bags of models.
A repo can either be the main remote repository stored in AWS, or a local repository
with your own models.
"""
from hashlib import sha256
from pathlib import Path
import typing as tp
import torch
import yaml
from .apply import BagOfModels, Model
from .states import load_model
AnyModel = tp.Union[Model, BagOfModels]
class ModelLoadingError(RuntimeError):
pass
def check_checksum(path: Path, checksum: str):
sha = sha256()
with open(path, 'rb') as file:
while True:
buf = file.read(2**20)
if not buf:
break
sha.update(buf)
actual_checksum = sha.hexdigest()[:len(checksum)]
if actual_checksum != checksum:
raise ModelLoadingError(f'Invalid checksum for file {path}, '
f'expected {checksum} but got {actual_checksum}')
class ModelOnlyRepo:
"""Base class for all model only repos.
"""
def has_model(self, sig: str) -> bool:
raise NotImplementedError()
def get_model(self, sig: str) -> Model:
raise NotImplementedError()
class RemoteRepo(ModelOnlyRepo):
def __init__(self, root_url: str, remote_files: tp.List[str]):
if not root_url.endswith('/'):
root_url += '/'
self._models: tp.Dict[str, str] = {}
for file in remote_files:
sig, checksum = file.split('.')[0].split('-')
assert sig not in self._models
self._models[sig] = root_url + file
def has_model(self, sig: str) -> bool:
return sig in self._models
def get_model(self, sig: str) -> Model:
try:
url = self._models[sig]
except KeyError:
raise ModelLoadingError(f'Could not find a pre-trained model with signature {sig}.')
pkg = torch.hub.load_state_dict_from_url(url, map_location='cpu', check_hash=True)
return load_model(pkg)
class LocalRepo(ModelOnlyRepo):
def __init__(self, root: Path):
self.root = root
self.scan()
def scan(self):
self._models = {}
self._checksums = {}
for file in self.root.iterdir():
if file.suffix == '.th':
if '-' in file.stem:
xp_sig, checksum = file.stem.split('-')
self._checksums[xp_sig] = checksum
else:
xp_sig = file.stem
if xp_sig in self._models:
raise ModelLoadingError(
                        f'Duplicate pre-trained models exist for signature {xp_sig}. '
'Please delete all but one.')
self._models[xp_sig] = file
def has_model(self, sig: str) -> bool:
return sig in self._models
def get_model(self, sig: str) -> Model:
try:
file = self._models[sig]
except KeyError:
raise ModelLoadingError(f'Could not find pre-trained model with signature {sig}.')
if sig in self._checksums:
check_checksum(file, self._checksums[sig])
return load_model(file)
class BagOnlyRepo:
"""Handles only YAML files containing bag of models, leaving the actual
model loading to some Repo.
"""
def __init__(self, root: Path, model_repo: ModelOnlyRepo):
self.root = root
self.model_repo = model_repo
self.scan()
def scan(self):
self._bags = {}
for file in self.root.iterdir():
if file.suffix == '.yaml':
self._bags[file.stem] = file
def has_model(self, name: str) -> bool:
return name in self._bags
def get_model(self, name: str) -> BagOfModels:
try:
yaml_file = self._bags[name]
except KeyError:
            raise ModelLoadingError(f'{name} is neither a single pre-trained model nor '
'a bag of models.')
bag = yaml.safe_load(open(yaml_file))
signatures = bag['models']
models = [self.model_repo.get_model(sig) for sig in signatures]
weights = bag.get('weights')
segment = bag.get('segment')
return BagOfModels(models, weights, segment)
class AnyModelRepo:
def __init__(self, model_repo: ModelOnlyRepo, bag_repo: BagOnlyRepo):
self.model_repo = model_repo
self.bag_repo = bag_repo
def has_model(self, name_or_sig: str) -> bool:
return self.model_repo.has_model(name_or_sig) or self.bag_repo.has_model(name_or_sig)
def get_model(self, name_or_sig: str) -> AnyModel:
if self.model_repo.has_model(name_or_sig):
return self.model_repo.get_model(name_or_sig)
else:
return self.bag_repo.get_model(name_or_sig)
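
# --- Illustrative sketch (not part of the original repo) ---------------------
# The three repos compose as follows: a ModelOnlyRepo resolves single-model
# signatures, BagOnlyRepo resolves YAML bags by delegating each listed signature
# back to the model repo, and AnyModelRepo tries one then the other. The path
# and name below are hypothetical placeholders.
def _demo_repo_lookup(root_dir="/path/to/checkpoints", name="some_sig_or_bag"):
    root = Path(root_dir)
    model_repo = LocalRepo(root)
    any_repo = AnyModelRepo(model_repo, BagOnlyRepo(root, model_repo))
    return any_repo.get_model(name) if any_repo.has_model(name) else None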
| [
"torch.hub.load_state_dict_from_url"
] | 1.8.1 | sparshpriyadarshi/demucs | 7c7f65401db654d750df2b6f4d5b82a0101500b1 |
1.4 | import torch
import torch.nn.functional as F
from torch import nn
from torch_geometric.nn import GCNConv
from . import graph_embed
class Net(torch.nn.Module):
"""docstring for Net"""
def __init__(self, cfg, config=None, PRINT_DEBUG=False):
super(Net, self).__init__()
input_size = cfg.SCENE_GRAPH.NODE_FEATURE_SIZE
middle_size = cfg.SCENE_GRAPH.NODE_MIDDEL_FEATURE_SIZE
output_size = cfg.SCENE_GRAPH.NODE_OUT_FEATURE_SIZE
# True => one of the variables needed for gradient computation has been modified by an inplace operation
normalize = cfg.SCENE_GRAPH.NORMALIZATION
self.cfg = cfg
self.conv1 = GCNConv(input_size, middle_size, cached=True,
normalize=normalize,
# add_self_loops=False
)
self.conv2 = GCNConv(middle_size, output_size, cached=True,
normalize=normalize,
# add_self_loops=False
)
graph_embed_model = getattr(graph_embed, cfg.SCENE_GRAPH.EMBED_TYPE)
NODE_FEATURE_SIZE = cfg.SCENE_GRAPH.NODE_OUT_FEATURE_SIZE
EMBED_FEATURE_SIZE = cfg.SCENE_GRAPH.EMBED_FEATURE_SIZE
self.final_mapping = graph_embed_model(
INPUT_FEATURE_SIZE=NODE_FEATURE_SIZE,
EMBED_FEATURE_SIZE=EMBED_FEATURE_SIZE
)
if cfg.SCENE_GRAPH.CHOSE_IMPORTENT_NODE:
# nn.linear bert_hidden_size -> NODE_FEATURE_SIZE
bert_hidden_size = config['general']['model']['block_hidden_dim']
NODE_FEATURE_SIZE = cfg.SCENE_GRAPH.NODE_OUT_FEATURE_SIZE + cfg.SCENE_GRAPH.ATTRIBUTE_FEATURE_SIZE
NUM_CHOSE_NODE = cfg.SCENE_GRAPH.NUM_CHOSE_NODE
self.chose_node_module = graph_embed.DotAttnChoseImportentNode(
bert_hidden_size,
NODE_FEATURE_SIZE,
NUM_CHOSE_NODE,
PRINT_DEBUG=PRINT_DEBUG
)
def forward(self, data, *args):
'''
data.x
tensor([[-0.0474, 0.0324, 0.1443, ..., 1.0000, 0.0000, 0.0000],
[ 0.0440, -0.0058, 0.0014, ..., 1.0000, 0.0000, 0.0000],
[ 0.0057, 0.0471, 0.0377, ..., 1.0000, 0.0000, 0.0000],
[ 0.0724, -0.0065, -0.0210, ..., 0.0000, 0.0000, 0.0000],
[-0.0474, 0.0324, 0.1443, ..., 1.0000, 0.0000, 0.0000]],
grad_fn=<CatBackward>)
data.edge_obj_to_obj
tensor([[3, 0],
[3, 1],
[3, 2],
[3, 4]])
data.obj_cls_to_ind
{64: [0, 4], 70: [1], 47: [2], 81: [3]}
data.obj_id_to_ind
{'Pillow|-02.89|+00.62|+00.82': 0, 'RemoteControl|-03.03|+00.56|+02.01': 1, 'Laptop|-02.81|+00.56|+01.81': 2, 'Sofa|-02.96|+00.08|+01.39': 3, 'Pillow|-02.89|+00.62|+01.19': 4}
'''
# import pdb; pdb.set_trace()
# x, edge_obj_to_obj, edge_weight = data.x, data.edge_obj_to_obj, data.edge_attr
x, edge_obj_to_obj, edge_weight = data.x, data.edge_obj_to_obj, data.edge_attr
if edge_obj_to_obj is not None:
x = x.clone().detach()
edge_obj_to_obj = edge_obj_to_obj.clone().detach()
x = F.relu(self.conv1(x, edge_obj_to_obj, edge_weight))
x = F.dropout(x, training=self.training)
x = F.relu(self.conv2(x, edge_obj_to_obj, edge_weight))
# x = self.conv2(x, edge_obj_to_obj, edge_weight)
if self.cfg.SCENE_GRAPH.CHOSE_IMPORTENT_NODE:
                chose_nodes = self.chose_node_module(x)
x = self.final_mapping(x)
x = torch.cat([x, chose_nodes], dim=1)
else:
x = torch.zeros((1, self.cfg.SCENE_GRAPH.RESULT_FEATURE))
if self.cfg.SCENE_GRAPH.GPU:
x = x.to('cuda')
return x
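
# --- Illustrative sketch (not part of the original repo) ---------------------
# The two-layer GCNConv pattern used in forward(), on a toy graph. Note that the
# sketch uses the standard torch_geometric edge_index layout ([2, num_edges]);
# the sizes and edges below are made up.
def _demo_two_layer_gcn():
    h = torch.randn(5, 16)                                    # 5 nodes, 16 features
    edge_index = torch.tensor([[3, 3, 3, 3], [0, 1, 2, 4]])   # source / target rows
    conv1, conv2 = GCNConv(16, 8), GCNConv(8, 4)
    h = F.relu(conv1(h, edge_index))
    h = F.dropout(h, training=False)
    return F.relu(conv2(h, edge_index))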
| [
"torch.zeros",
"torch.nn.functional.dropout",
"torch.cat"
] | 1.4.0 | roy860328/VSGM | 3ec19f9cf1401cecf45527687936b8fe4167f672 |
1.4 | import matplotlib
matplotlib.use('Agg')
import os
import sys
sys.path.append(os.path.join(os.environ['ALFRED_ROOT']))
sys.path.append(os.path.join(os.environ['ALFRED_ROOT'], 'gen'))
sys.path.append(os.path.join(os.environ['ALFRED_ROOT'], 'models'))
sys.path.append(os.path.join(os.environ['ALFWORLD_ROOT'], 'agents'))
import argparse
import torch.multiprocessing as mp
from eval_task import EvalTask
from eval_subgoals import EvalSubgoals
def load_config(args):
import yaml
import glob
assert os.path.exists(args.config_file), "Invalid config file "
with open(args.config_file) as reader:
config = yaml.safe_load(reader)
# Parse overriden params.
for param in args.params:
fqn_key, value = param.split("=")
entry_to_change = config
keys = fqn_key.split(".")
for k in keys[:-1]:
entry_to_change = entry_to_change[k]
entry_to_change[keys[-1]] = yaml.load(value)
### other ###
if args.semantic_config_file is not None:
sys.path.insert(0, os.path.join(os.environ['ALFWORLD_ROOT'], 'agents'))
from config import cfg
cfg.merge_from_file(args.semantic_config_file)
cfg.GENERAL.save_path = cfg.GENERAL.save_path + sys.argv[0].split("/")[-1] + "_"
config['semantic_cfg'] = cfg
config["general"]["save_path"] = cfg.GENERAL.save_path
config["vision_dagger"]["use_exploration_frame_feats"] = cfg.GENERAL.use_exploration_frame_feats
if args.sgg_config_file is not None:
sys.path.insert(0, os.environ['GRAPH_RCNN_ROOT'])
from lib.config import cfg
cfg.merge_from_file(args.sgg_config_file)
config['sgg_cfg'] = cfg
# print(config)
return config
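
# --- Illustrative sketch (not part of the original script) --------------------
# The "-p key.subkey=value" overrides above walk the nested config dict along the
# dotted key and replace the leaf, e.g. -p 'training.gamma=0.95'. A stand-alone
# version of that walk on a made-up dict:
def _demo_override(param="training.gamma=0.95"):
    import yaml
    config = {'training': {'gamma': 0.99}}
    fqn_key, value = param.split("=")
    entry = config
    keys = fqn_key.split(".")
    for k in keys[:-1]:
        entry = entry[k]
    entry[keys[-1]] = yaml.safe_load(value)   # parses "0.95" into a float
    return config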
if __name__ == '__main__':
# multiprocessing settings
mp.set_start_method('spawn')
manager = mp.Manager()
# parser
parser = argparse.ArgumentParser()
parser.add_argument("config_file", default="models/config/without_env_base.yaml", help="path to config file")
parser.add_argument("--semantic_config_file", default="models/config/mini_moca_graph_softmaxgcn.yaml", help="path to config file")
parser.add_argument("--sgg_config_file", default=None, help="path to config file $GRAPH_RCNN_ROOT/configs/attribute.yaml")
parser.add_argument("-p", "--params", nargs="+", metavar="my.setting=value", default=[],
help="override params of the config file,"
" e.g. -p 'training.gamma=0.95'")
# settings
parser.add_argument('--splits', type=str, default="data/splits/oct21.json")
parser.add_argument('--data', type=str, default="data/json_2.1.0")
parser.add_argument('--reward_config', default='models/config/rewards.json')
parser.add_argument('--eval_split', type=str, default='valid_seen', choices=['train', 'valid_seen', 'valid_unseen', 'tests_seen', 'tests_unseen'])
parser.add_argument('--model_path', type=str, default="model.pth")
parser.add_argument('--model', type=str, default='models.model.seq2seq_im_mask')
parser.add_argument('--preprocess', dest='preprocess', action='store_true')
parser.add_argument('--shuffle', dest='shuffle', action='store_true')
parser.add_argument('--gpu', dest='gpu', action='store_true')
parser.add_argument('--gpu_id', help='use gpu 0/1', default=1, type=int)
parser.add_argument('--num_threads', type=int, default=1)
parser.add_argument('--gcn_cat_visaul', help='use visual embedding to gcn', action='store_true')
# eval params
parser.add_argument('--max_steps', type=int, default=1000, help='max steps before episode termination')
parser.add_argument('--max_fails', type=int, default=10, help='max API execution failures before episode termination')
# eval settings
parser.add_argument('--subgoals', type=str, help="subgoals to evaluate independently, eg:all or GotoLocation,PickupObject...", default="")
parser.add_argument('--smooth_nav', dest='smooth_nav', action='store_true', help='smooth nav actions (might be required based on training data)')
parser.add_argument('--skip_model_unroll_with_expert', action='store_true', help='forward model with expert actions')
parser.add_argument('--no_teacher_force_unroll_with_expert', action='store_true', help='no teacher forcing with expert')
# debug
parser.add_argument('--debug', dest='debug', action='store_true')
parser.add_argument('--fast_epoch', dest='fast_epoch', action='store_true')
parser.add_argument('--task_types', type=str, help="task_types", default="1,2,3,4,5,6")
# parse arguments
args = parser.parse_args()
config = load_config(args)
args.config_file = config
# import torch
# device = torch.device("cuda:%d" % args.gpu_id if args.gpu else "cpu")
# if args.gpu and torch.cuda.is_available():
# torch.cuda.set_device(device)
# eval mode
if args.subgoals:
eval = EvalSubgoals(args, manager)
else:
eval = EvalTask(args, manager)
# start threads
eval.spawn_threads() | [
"torch.multiprocessing.Manager",
"torch.multiprocessing.set_start_method"
] | 1.4.0 | roy860328/VSGM | 3ec19f9cf1401cecf45527687936b8fe4167f672 |
1.10 | from typing import *
import torch.nn as nn
import torch.optim as optim
from models.LitBase import LitBase
from .models import AlexNet
class LitAlexNet(LitBase):
def __init__(self, args: Dict[str, Any]):
super().__init__()
self.save_hyperparameters(args)
self.model = AlexNet(
image_channels=self.hparams.image_channels,
num_classes=self.hparams.num_classes,
dropout_rate=self.hparams.dropout_rate,
)
self.loss = nn.CrossEntropyLoss()
def configure_optimizers(self) -> optim.Optimizer:
optimizer = optim.Adam(
self.parameters(),
lr=self.hparams.lr,
weight_decay=self.hparams.weight_decay,
)
scheduler_dict = {
"scheduler": optim.lr_scheduler.ReduceLROnPlateau(
optimizer,
mode=self.hparams.scheduler_mode,
factor=self.hparams.scheduler_factor,
patience=self.hparams.scheduler_patience,
verbose=True,
),
"monitor": self.hparams.scheduler_monitor,
}
return {"optimizer": optimizer, "lr_scheduler": scheduler_dict}
| [
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.nn.CrossEntropyLoss"
] | 1.10.1 | zkdlfrlwl2/Classification-For-Everyone | a99428080ef470a3270d3f4a6048df197216a050 |
0.4 | import os
import numpy as np
import mgplvm
import torch
from mgplvm import kernels, rdist
from mgplvm.manifolds import Torus, Euclid, So3
from mgplvm.models import Core
from mgplvm.training import train
import matplotlib.pyplot as plt
import pickle
from scipy.stats import ttest_1samp
torch.set_default_dtype(torch.float64)
def initialize(Y,
device,
d,
n,
m,
n_z,
fit_manif=Torus,
GPparams=None,
Ntrain=None,
Tfix=slice(0),
sig0=1.5,
ell0=2):
if fit_manif == So3:
sig0 = 0.4 # sqrt diagonal covariance matrix
elif fit_manif == Torus:
sig0 = np.pi / 2
else:
sig0 = 1
if GPparams is None:
gammas = None
mu = None
ell = np.ones(n) * ell0
alpha = np.mean(np.std(Y, axis=1), axis=1)
sigma = np.mean(np.std(Y, axis=1), axis=1) # initialize noise
z = None
else:
# get raw params since we don't have inverse map
mu = GPparams.manif.mu.data.cpu().numpy()
# get .prms since we initialize with inv_transform
gammas = GPparams.rdist.prms.data.cpu().numpy()
alpha, ell = [
prm.data.cpu().numpy()[Ntrain] for prm in GPparams.kernel.prms
]
sigma, z = [prm.data.cpu()[Ntrain, :, :] for prm in GPparams.sgp.prms]
sigma = sigma[:, 0, 0]
# construct model
manif = fit_manif(m, d, mu=mu, Tinds=Tfix)
ref_dist = mgplvm.rdist.MVN(m, d, sigma=sig0, gammas=gammas, Tinds=Tfix)
kernel = kernels.QuadExp(n, manif.distance, alpha=alpha, ell=ell)
mod = Core(manif, n, m, n_z, kernel, ref_dist, sigma=sigma, z=z).to(device)
return mod
def recover_model(fname, device):
params = pickle.load(open(fname + '.pickled', 'rb'))
manifdict = {'Torus': Torus, 'Euclid': Euclid, 'So3': So3}
kerneldict = {'QuadExp': kernels.QuadExp}
rdistdict = {'MVN': mgplvm.rdist.MVN}
moddict = {'Core': Core}
manif = params['manif'].split('(')[0]
manif = 'So3' if manif == 'So' else manif
m, n, d, n_z = [params[key] for key in ['m', 'n', 'd', 'n_z']]
manif = manifdict[manif](m, d)
kernel = kerneldict[params['kernel']](n, manif.distance)
ref_dist = rdistdict[params['rdist']](m, d)
mod = moddict[params['model']](manif, n, m, n_z, kernel, ref_dist)
mod_params = torch.load(fname + '.torch')
mod.load_state_dict(mod_params)
mod.to(device)
return mod, params
def train_cv(Y,
manifs,
n_z,
device,
callback=None,
max_steps=500,
n_b=128,
lrate=5e-2,
randN=True,
frac=2,
outname='test',
sig0=1.5,
ell0=2,
burnin='default'):
'''
given a dataset Y and a set of manifolds, fit each manifold to the data
manifs is a list of (manif, d)
frac is the inverse fraction of neurons used in the test set
'''
def trainfunc(Y, mod, burnin, trainGP=True, Tfix=slice(0), callback=None):
nbatch = 1
nbatch_max = 100
while nbatch < nbatch_max:
if nbatch > 1:
print('nbatch = ' + str(nbatch))
try:
return train(Y,
mod,
device,
trainGP=trainGP,
Tfix=Tfix,
max_steps=max_steps,
n_b=n_b,
callback=callback,
lrate=lrate,
burnin=burnin,
nbatch=nbatch)
except RuntimeError:
nbatch += 1
raise RuntimeError('maximum batch size exceeded')
try:
os.mkdir(outname)
except FileExistsError:
print(outname, 'already exists')
n, m = Y.shape[:2]
m1, n1 = int(m - m / frac), int(n - n / frac) # 'test'
# random shuffle of timepoints
Tshuff = np.random.permutation(np.arange(m))
T1, T2 = Tshuff[:m1], Tshuff[m1:]
# random shuffle of neurons
Nshuff = np.random.permutation(np.arange(n)) if randN else np.arange(n)
N1, N2 = Nshuff[:n1], Nshuff[n1:]
Y1, Y2, Y3 = Y[:, T1], Y[N1, :], Y[N2, :]
params = {'Y': Y, 'N1': N1, 'N2': N2, 'T1': T1, 'T2': T2}
for i, (fit_manif, d) in enumerate(manifs):
print('\nfitting manifold', fit_manif(m, d).name)
if (burnin != 'default'):
burn = int(round(burnin / 3))
else:
burn = burnin
# fit all neurons half timepoints
mod1 = initialize(Y1,
device,
d,
n,
m1,
n_z,
fit_manif=fit_manif,
sig0=sig0,
ell0=ell0)
trainfunc(Y1, mod1, burn)
mod1.store_model(outname + '/' + str(i) + '_mod1', extra_params=params)
# fit all timepoints half neurons
mod2 = initialize(Y2,
device,
d,
n1,
m,
n_z,
fit_manif=fit_manif,
GPparams=mod1,
Ntrain=N1,
Tfix=T1,
sig0=sig0,
ell0=ell0)
trainfunc(Y2, mod2, burn, trainGP=False, Tfix=T1, callback=callback)
mod2.store_model(outname + '/' + str(i) + '_mod2')
del mod2
torch.cuda.empty_cache()
# fit all timepoints half neurons reverse
mod3 = initialize(Y3,
device,
d,
n1,
m,
n_z,
fit_manif=fit_manif,
GPparams=mod1,
Ntrain=N2,
Tfix=T1,
sig0=sig0,
ell0=ell0)
if frac == 2:
trainfunc(Y3, mod3, burn, trainGP=False, Tfix=T1, callback=callback)
mod3.store_model(outname + '/' + str(i) + '_mod3')
del mod1
del mod3
torch.cuda.empty_cache()
return params
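
# --- Illustrative sketch (not part of the original package) -------------------
# train_cv above uses a two-way split: timepoints are shuffled into T1/T2 and
# neurons into N1/N2 (1/frac of each held out), giving Y1 (all neurons, train
# timepoints) and Y2/Y3 (all timepoints, each half of the neurons). A minimal
# version of that index bookkeeping with made-up sizes:
def _demo_cv_split(n=20, m=100, frac=2):
    m1, n1 = int(m - m / frac), int(n - n / frac)
    T = np.random.permutation(np.arange(m))
    N = np.random.permutation(np.arange(n))
    return T[:m1], T[m1:], N[:n1], N[n1:]   # T1, T2, N1, N2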
def gen_cvmodels(fbase, device, Type='MSE'):
mod1, params1 = recover_model(fbase + '1', device)
mod2, params2 = recover_model(fbase + '2', device)
mod3, params3 = recover_model(fbase + '3', device)
T1, T2 = [params1[key] for key in ['T1', 'T2']]
if Type == 'MSE':
mus2_T2 = mod2.manif.prms[T2, ...].detach()
mus3_T2 = mod3.manif.prms[T2, ...].detach()
for mod in [mod2, mod3]:
# change variational parameters mu, gamma to reference
mod.manif.mu = torch.nn.Parameter(mod1.manif.mu.detach())
mod.rdist.gamma = torch.nn.Parameter(mod1.rdist.gamma.detach())
mod.rdist.m, mod.manif.m, mod.m, mod.sgp.m = [
len(T1) for _ in range(4)
]
return mod1, mod2, mod3, params1, mus2_T2, mus3_T2
else:
mus = [mod.manif.mu[T2, ...].detach() for mod in [mod3, mod2]]
gammas = [mod.rdist.gamma[T2, ...].detach() for mod in [mod3, mod2]]
# swap variational distributions
for mod, mu, gamma in zip([mod2, mod3], mus, gammas):
mod.manif.mu = torch.nn.Parameter(mu)
mod.rdist.gamma = torch.nn.Parameter(gamma)
mod.rdist.m, mod.manif.m, mod.m, mod.sgp.m = [
len(T2) for _ in range(4)
]
return mod1, mod2, mod3, params1
def calc_NLLs(fname, device=None, itermax='none', itermin=0, twoway=True):
iterdirs = np.sort(os.listdir(fname))
iterdirs = iterdirs if itermax == 'none' else iterdirs[:itermax]
iterdirs = iterdirs[itermin:]
niter = len(iterdirs)
device = mgplvm.utils.get_device() if device is None else device
print('\ncomputing cross-validated log likelihoods')
for (i_iter, iterdir) in enumerate(iterdirs):
manifs = np.sort(os.listdir(fname + "/" + iterdir))
nmanif = np.amax([int(f[0]) for f in manifs]) + 1
if i_iter == 0:
NLLs = np.zeros((niter, nmanif))
print(niter, 'iterations &', nmanif, 'manifolds')
for i_manif in range(nmanif):
mod1, mod2, mod3, params = gen_cvmodels(fname + "/" + iterdir +
'/' + str(i_manif) + '_mod',
device,
Type='LL')
Y, N1, N2, T2 = [params[key] for key in ['Y', 'N1', 'N2', 'T2']]
Y2, Y3 = Y[N1, :, :], Y[N2, :, :]
data2, data3 = [
torch.tensor(Ytest[:, T2, :], dtype=torch.get_default_dtype())
for Ytest in [Y2, Y3]
]
# calc LL and MSE
LL2 = mod2.calc_LL(data2.to(device),
128).data.cpu().numpy() # trained on Y2
LL3 = mod3.calc_LL(data3.to(device),
128).data.cpu().numpy() # trained on Y3
if twoway:
NLL = -(LL2 + LL3) / 2
else:
NLL = -LL2
NLLs[i_iter, i_manif] = NLL
print(str(i_iter) + ':', mod1.manif.name, 'NLL=' + str(NLL))
return NLLs
def calc_MSEs(fname,
device=None,
itermax='none',
iterp=100,
itermin=0,
twoway=True):
print('\ncomputing cross-validated mean squared errors')
iterdirs = np.sort(os.listdir(fname))
iterdirs = iterdirs if itermax == 'none' else iterdirs[:itermax]
iterdirs = iterdirs[itermin:]
niter = len(iterdirs)
device = mgplvm.utils.get_device() if device is None else device
for (i_iter, iterdir) in enumerate(iterdirs):
manifs = np.sort(os.listdir(fname + "/" + iterdir))
nmanif = np.amax([int(f[0]) for f in manifs]) + 1
if i_iter == 0:
MSEs = np.zeros((niter, nmanif))
print(niter, 'iterations &', nmanif, 'manifolds')
for i_manif in range(nmanif):
mod1, mod2, mod3, params, mus2_T2, mus3_T2 = gen_cvmodels(
fname + "/" + iterdir + '/' + str(i_manif) + '_mod',
device,
Type='MSE')
Y, T1, T2, N1, N2 = [
params[key] for key in ['Y', 'T1', 'T2', 'N1', 'N2']
]
Y2, Y3 = Y[N1, :, :], Y[N2, :, :]
data2, data3 = [
torch.tensor(Ytrain[:, T1, :],
dtype=torch.get_default_dtype()).to(device)
for Ytrain in [Y2, Y3]
]
# trained on T1 (data), predict on T2 (manif.mu)
mus3_T2 = mus3_T2.to(device)
fmean2, _ = mod2.predict(data2, mus3_T2, niter=iterp)
fmean3, _ = mod3.predict(data3, mus2_T2, niter=iterp)
MSE2 = np.mean((fmean2.cpu().numpy() - Y2[:, T2, :])**2)
MSE3 = np.mean((fmean3.cpu().numpy() - Y3[:, T2, :])**2)
var2 = np.mean(np.var(Y2[:, T2, 0], axis=1))
var3 = np.mean(np.var(Y3[:, T2, 0], axis=1))
if twoway:
MSE = (MSE2 + MSE3) / 2
else:
MSE = MSE3
MSEs[i_iter, i_manif] = MSE
print(str(i_iter) + ':', mod1.manif.name, MSE, (var2 + var3) / 2)
for mod in [mod1, mod2, mod3]:
del mod
torch.cuda.empty_cache()
return MSEs
| [
"torch.get_default_dtype",
"torch.nn.Parameter",
"torch.cuda.empty_cache",
"torch.load",
"torch.set_default_dtype"
] | 0.4.1 | rkj26/mgplvm-pytorch | 7d082d92be4d82ae8ab978e774ce83429444c14b |
1.7 | import torch
import torch.nn as nn
from torch.nn import init
from torch.nn import utils
import torch.nn.functional as F
import functools
from torch.optim import lr_scheduler
###############################################################################
# Helper Functions
###############################################################################
class Identity(nn.Module):
def forward(self, x):
return x
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
norm_layer = lambda x: Identity()
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.niter> epochs
and linearly decay the rate to zero over the next <opt.niter_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == 'linear':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
return scheduler
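
# --- Worked example (not part of the original repo) ---------------------------
# For the 'linear' policy above, lambda_rule is the factor multiplied onto the
# base learning rate. With hypothetical opt.niter=100, opt.niter_decay=100 and
# opt.epoch_count=1, the factor is 1.0 for the first 100 epochs and then decays
# linearly to zero:
#   epoch 100 -> 1 - 1/101   ~ 0.990
#   epoch 150 -> 1 - 51/101  ~ 0.495
#   epoch 200 -> 1 - 101/101 = 0.0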
def init_weights(net, init_type='normal', init_gain=0.02):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func) # apply the initialization function <init_func>
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.cuda()
net = torch.nn.DataParallel(net) # multi-GPUs
# net.to(gpu_ids[0])
# net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs
init_weights(net, init_type, init_gain=init_gain)
return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a generator
Our current implementation provides two types of generators:
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
The original U-Net paper: https://arxiv.org/abs/1505.04597
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
The generator has been initialized by <init_net>. It uses RELU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'resnet_9blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
elif netG == 'resnet_6blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
elif netG == 'unet_128':
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_256':
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids)
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the first conv layer
netD (str) -- the architecture's name: basic | n_layers | pixel
n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
norm (str) -- the type of normalization layers used in the network.
init_type (str) -- the name of the initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a discriminator
Our current implementation provides three types of discriminators:
[basic]: 'PatchGAN' classifier described in the original pix2pix paper.
It can classify whether 70×70 overlapping patches are real or fake.
Such a patch-level discriminator architecture has fewer parameters
than a full-image discriminator and can work on arbitrarily-sized images
in a fully convolutional fashion.
        [n_layers]: With this mode, you can specify the number of conv layers in the discriminator
with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
[pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
It encourages greater color diversity but has no effect on spatial statistics.
    The discriminator has been initialized by <init_net>. It uses Leaky ReLU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netD == 'basic': # default PatchGAN classifier
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
elif netD == 'n_layers': # more options
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
elif netD == 'pixel': # classify if each pixel is real or fake
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
elif netD == 'n_layers_proj':
net = NLayerProjDiscriminator(input_nc, ndf, n_layers=n_layers_D, norm_layer=norm_layer)
elif netD == 'fc':
net = FCDiscriminator(input_nc, ndf)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
return init_net(net, init_type, init_gain, gpu_ids)
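
# --- Illustrative sketch (not part of the original repo) ---------------------
# Typical instantiation of the two factory functions above, with hypothetical
# hyper-parameters (3-channel images, 64 base filters, a 9-block ResNet generator
# and the default 70x70 PatchGAN discriminator), initialized on CPU (empty gpu_ids):
def _demo_define_networks():
    netG = define_G(input_nc=3, output_nc=3, ngf=64, netG='resnet_9blocks',
                    norm='instance', use_dropout=False, gpu_ids=[])
    netD = define_D(input_nc=3, ndf=64, netD='basic', norm='instance', gpu_ids=[])
    return netG, netD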
##############################################################################
# Classes
##############################################################################
# class GANLoss(nn.Module):
# """Define different GAN objectives.
#
# The GANLoss class abstracts away the need to create the target label tensor
# that has the same size as the input.
# """
#
# def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
# """ Initialize the GANLoss class.
#
# Parameters:
# gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
# target_real_label (bool) - - label for a real image
# target_fake_label (bool) - - label of a fake image
#
# Note: Do not use sigmoid as the last layer of Discriminator.
# LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
# """
# super(GANLoss, self).__init__()
# self.register_buffer('real_label', torch.tensor(target_real_label))
# self.register_buffer('fake_label', torch.tensor(target_fake_label))
# self.gan_mode = gan_mode
# if gan_mode == 'lsgan':
# self.loss = nn.MSELoss()
# elif gan_mode == 'vanilla':
# self.loss = nn.BCEWithLogitsLoss()
# elif gan_mode in ['wgangp']:
# self.loss = None
# else:
# raise NotImplementedError('gan mode %s not implemented' % gan_mode)
#
# def get_target_tensor(self, prediction, target_is_real):
# """Create label tensors with the same size as the input.
#
# Parameters:
# prediction (tensor) - - tpyically the prediction from a discriminator
# target_is_real (bool) - - if the ground truth label is for real images or fake images
#
# Returns:
# A label tensor filled with ground truth label, and with the size of the input
# """
#
# if target_is_real:
# target_tensor = self.real_label
# else:
# target_tensor = self.fake_label
# return target_tensor.expand_as(prediction)
#
# def __call__(self, prediction, target_is_real):
# """Calculate loss given Discriminator's output and grount truth labels.
#
# Parameters:
# prediction (tensor) - - tpyically the prediction output from a discriminator
# target_is_real (bool) - - if the ground truth label is for real images or fake images
#
# Returns:
# the calculated loss.
# """
# if self.gan_mode in ['lsgan', 'vanilla']:
# target_tensor = self.get_target_tensor(prediction, target_is_real)
# loss = self.loss(prediction, target_tensor)
# elif self.gan_mode == 'wgangp':
# if target_is_real:
# loss = -prediction.mean()
# else:
# loss = prediction.mean()
# return loss
# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically the same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input
class GANLoss(nn.Module):
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0,
tensor=torch.FloatTensor, opt=None):
super(GANLoss, self).__init__()
self.real_label = target_real_label
self.fake_label = target_fake_label
self.real_label_tensor = None
self.fake_label_tensor = None
self.zero_tensor = None
self.Tensor = tensor
self.gan_mode = gan_mode
self.opt = opt
if gan_mode == 'lsgan':
pass
elif gan_mode == 'vanilla':
pass
elif gan_mode == 'wgangp':
pass
elif gan_mode == 'hinge':
pass
else:
raise ValueError('Unexpected gan_mode {}'.format(gan_mode))
def get_target_tensor(self, input, target_is_real):
if target_is_real:
if self.real_label_tensor is None:
self.real_label_tensor = self.Tensor(1).fill_(self.real_label)
self.real_label_tensor.requires_grad_(False)
return self.real_label_tensor.expand_as(input).cuda()
else:
if self.fake_label_tensor is None:
self.fake_label_tensor = self.Tensor(1).fill_(self.fake_label)
self.fake_label_tensor.requires_grad_(False)
return self.fake_label_tensor.expand_as(input).cuda()
def get_zero_tensor(self, input):
if self.zero_tensor is None:
self.zero_tensor = self.Tensor(1).fill_(0)
self.zero_tensor.requires_grad_(False)
return self.zero_tensor.expand_as(input).cuda()
def loss(self, input, target_is_real, for_discriminator=True):
if self.gan_mode == 'vanilla': # cross entropy loss
target_tensor = self.get_target_tensor(input, target_is_real)
loss = F.binary_cross_entropy_with_logits(input, target_tensor)
return loss
elif self.gan_mode == 'lsgan':
target_tensor = self.get_target_tensor(input, target_is_real)
return F.mse_loss(input, target_tensor)
elif self.gan_mode == 'hinge':
if for_discriminator:
if target_is_real:
minval = torch.min(input - 1, self.get_zero_tensor(input))
loss = -torch.mean(minval)
else:
minval = torch.min(-input - 1, self.get_zero_tensor(input))
loss = -torch.mean(minval)
else:
assert target_is_real, "The generator's hinge loss must be aiming for real"
loss = -torch.mean(input)
return loss
else:
# wgan
if target_is_real:
return -input.mean()
else:
return input.mean()
def __call__(self, input, target_is_real, for_discriminator=True):
# computing loss is a bit complicated because |input| may not be
# a tensor, but list of tensors in case of multiscale discriminator
if isinstance(input, list):
loss = 0
for pred_i in input:
if isinstance(pred_i, list):
pred_i = pred_i[-1]
loss_tensor = self.loss(pred_i, target_is_real, for_discriminator)
bs = 1 if len(loss_tensor.size()) == 0 else loss_tensor.size(0)
new_loss = torch.mean(loss_tensor.view(bs, -1), dim=1)
loss += new_loss
return loss / len(input)
else:
return self.loss(input, target_is_real, for_discriminator)
class MMD_loss(nn.Module):
def __init__(self, kernel_mul = 2.0, kernel_num = 5):
super(MMD_loss, self).__init__()
self.kernel_num = kernel_num
self.kernel_mul = kernel_mul
self.fix_sigma = None
def guassian_kernel(self, source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
n_samples = int(source.size(0))+int(target.size(0))
total = torch.cat([source, target], dim=0)
total0 = total.unsqueeze(0).expand(int(total.size(0)), int(total.size(0)), int(total.size(1)))
total1 = total.unsqueeze(1).expand(int(total.size(0)), int(total.size(0)), int(total.size(1)))
L2_distance = ((total0-total1)**2).sum(2)
if fix_sigma:
bandwidth = fix_sigma
else:
bandwidth = torch.sum(L2_distance.data) / (n_samples**2-n_samples)
bandwidth /= kernel_mul ** (kernel_num // 2)
bandwidth_list = [bandwidth * (kernel_mul**i) for i in range(kernel_num)]
kernel_val = [torch.exp(-L2_distance / bandwidth_temp) for bandwidth_temp in bandwidth_list]
return sum(kernel_val)
def forward(self, source, target):
batch_size = int(source.size(0))
kernels = self.guassian_kernel(source, target, kernel_mul=self.kernel_mul, kernel_num=self.kernel_num, fix_sigma=self.fix_sigma)
XX = kernels[:batch_size, :batch_size]
YY = kernels[batch_size:, batch_size:]
XY = kernels[:batch_size, batch_size:]
YX = kernels[batch_size:, :batch_size]
loss = torch.mean(XX + YY - XY -YX)
return loss
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
Arguments:
netD (network) -- discriminator network
real_data (tensor array) -- real images
fake_data (tensor array) -- generated images from the generator
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
type (str) -- if we mix real and fake data or not [real | fake | mixed].
constant (float) -- the constant used in formula ( | |gradient||_2 - constant)^2
lambda_gp (float) -- weight for this loss
Returns the gradient penalty loss
"""
if lambda_gp > 0.0:
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
interpolatesv = real_data
elif type == 'fake':
interpolatesv = fake_data
elif type == 'mixed':
alpha = torch.rand(real_data.shape[0], 1, device=device)
alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
else:
raise NotImplementedError('{} not implemented'.format(type))
interpolatesv.requires_grad_(True)
disc_interpolates = netD(interpolatesv)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)
gradients = gradients[0].view(real_data.size(0), -1) # flat the data
gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
return gradient_penalty, gradients
else:
return 0.0, None
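
# --- Illustrative sketch (not part of the original repo) ---------------------
# How cal_gradient_penalty is typically used in a WGAN-GP critic update: score
# fake and real batches, add the penalty on the interpolates, then backprop.
# netD, real, fake, device and optimizer_D are hypothetical placeholders
# supplied by the surrounding training loop.
def _demo_wgangp_step(netD, real, fake, device, optimizer_D):
    loss_D = netD(fake.detach()).mean() - netD(real).mean()
    gp, _ = cal_gradient_penalty(netD, real, fake.detach(), device,
                                 type='mixed', constant=1.0, lambda_gp=10.0)
    loss_D = loss_D + gp
    optimizer_D.zero_grad()
    loss_D.backward()
    optimizer_D.step()
    return loss_D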
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
# model += [nn.Tanh()]
model += [nn.Sigmoid()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
class UnetGenerator(nn.Module):
"""Create a Unet-based generator"""
def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
an image of size 128x128 will become of size 1x1 at the bottleneck
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
We construct the U-Net from the innermost layer to the outermost layer.
It is a recursive process.
"""
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer
for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
# gradually reduce the number of filters from ngf * 8 to ngf
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer
def forward(self, input):
"""Standard forward"""
return self.model(input)
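# Illustrative usage sketch (assumption, not part of the original file): with num_downs == 7
# a 128x128 input shrinks to 1x1 at the bottleneck, so the input spatial size must be
# divisible by 2 ** num_downs. The outermost block ends with Tanh, so outputs lie in [-1, 1].
def _demo_unet_generator():
    net = UnetGenerator(input_nc=3, output_nc=3, num_downs=7, ngf=64, use_dropout=True)
    out = net(torch.randn(2, 3, 128, 128))
    assert out.shape == (2, 3, 128, 128)
    return out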
class UnetSkipConnectionBlock(nn.Module):
"""Defines the Unet submodule with skip connection.
X -------------------identity----------------------
|-- downsampling -- |submodule| -- upsampling --|
"""
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet submodule with skip connections.
Parameters:
outer_nc (int) -- the number of filters in the outer conv layer
inner_nc (int) -- the number of filters in the inner conv layer
input_nc (int) -- the number of channels in input images/features
submodule (UnetSkipConnectionBlock) -- previously defined submodules
outermost (bool) -- if this module is the outermost module
innermost (bool) -- if this module is the innermost module
norm_layer -- normalization layer
use_dropout (bool) -- whether to use dropout layers.
"""
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else: # add skip connections
return torch.cat([x, self.model(x)], 1)
class NLayerDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.model(input)
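# Illustrative usage sketch (assumption, not part of the original file): with the default
# n_layers=3 this matches the 70x70 PatchGAN of pix2pix. Each output element scores one
# overlapping input patch, so a 256x256 image yields a 30x30 map of real/fake logits
# instead of a single scalar.
def _demo_patchgan_discriminator():
    netD = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3)
    logits = netD(torch.randn(2, 3, 256, 256))
    assert logits.shape == (2, 1, 30, 30)
    return logits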
class NLayerProjDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, num_classes=2, norm_layer=nn.BatchNorm2d, activation=F.relu):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerProjDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.activation = activation
kw = 4
padw = 1
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
self.model = nn.Sequential(*sequence)
self.l_h = nn.Conv2d(ndf * nf_mult, 1, kernel_size=1, stride=1, padding=0) # output 1 channel prediction map
if num_classes > 0:
self.l_y = nn.Embedding(num_classes, ndf * nf_mult)
def forward(self, x, y=None):
"""Standard forward."""
h = self.model(x)
output = self.l_h(h)
if y is not None:
output += torch.sum(self.l_y(y).unsqueeze(-1).unsqueeze(-1) * h, dim=1, keepdim=True)
return output
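# Illustrative usage sketch (assumption, not part of the original file): projection-style
# class conditioning in the spirit of Miyato & Koyama (2018), applied per spatial location --
# the class embedding is dotted with the feature map and added to the unconditional logits.
# The map here is 31x31 because the prediction head is a 1x1 convolution on the features.
def _demo_projection_discriminator():
    netD = NLayerProjDiscriminator(input_nc=3, ndf=64, n_layers=3, num_classes=2)
    x = torch.randn(2, 3, 256, 256)
    y = torch.tensor([0, 1])  # one class label per image in the batch
    logits = netD(x, y)
    assert logits.shape == (2, 1, 31, 31)
    return logits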
class FCDiscriminator(nn.Module):
def __init__(self, feature_dim=2048, ndf = 64):
super(FCDiscriminator, self).__init__()
self.fc1 = nn.Linear(feature_dim, ndf)
self.fc2 = nn.Linear(ndf, ndf*2)
self.fc3 = nn.Linear(ndf*2, ndf*4)
self.fc4 = nn.Linear(ndf*4, ndf*8)
self.classifier = nn.Linear(ndf*8, 1)
self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
def forward(self, x):
x = self.fc1(x)
x = self.leaky_relu(x)
x = self.fc2(x)
x = self.leaky_relu(x)
x = self.fc3(x)
x = self.leaky_relu(x)
x = self.fc4(x)
x = self.leaky_relu(x)
x = self.classifier(x)
return x
class PixelDiscriminator(nn.Module):
"""Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
"""Construct a 1x1 PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
"""
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
self.net = nn.Sequential(*self.net)
def forward(self, input):
"""Standard forward."""
return self.net(input)
| [
"torch.nn.Linear",
"torch.cat",
"torch.optim.lr_scheduler.StepLR",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.nn.LeakyReLU",
"torch.nn.init.kaiming_normal_",
"torch.cuda.is_available",
"torch.exp",
"torch.nn.DataParallel",
"torch.sum",
"torch.nn.init.constant_",
"torch.nn.ConvTranspose2d",
"torch.nn.init.normal_",
"torch.nn.ReflectionPad2d",
"torch.nn.init.orthogonal_",
"torch.nn.init.xavier_normal_",
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.nn.ReplicationPad2d",
"torch.nn.Sequential",
"torch.nn.Tanh",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.mean",
"torch.rand",
"torch.nn.Dropout",
"torch.nn.Sigmoid",
"torch.nn.functional.mse_loss",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.optim.lr_scheduler.LambdaLR",
"torch.nn.Embedding"
] | 1.7.1 | zhoufengfan/MMT-plus | e95db1452d3480518a851dd7ffa07208522f2614 |
1.1 | import logging
import os
import random
import subprocess
import numpy as np
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from mmcv.runner import get_dist_info
def init_dist(launcher, backend='nccl', **kwargs):
if mp.get_start_method(allow_none=True) is None:
mp.set_start_method('spawn')
if launcher == 'pytorch':
_init_dist_pytorch(backend, **kwargs)
elif launcher == 'mpi':
_init_dist_mpi(backend, **kwargs)
elif launcher == 'slurm':
_init_dist_slurm(backend, **kwargs)
else:
raise ValueError('Invalid launcher type: {}'.format(launcher))
def _init_dist_pytorch(backend, **kwargs):
# TODO: use local_rank instead of rank % num_gpus
rank = int(os.environ['RANK'])
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(rank % num_gpus)
dist.init_process_group(backend=backend, **kwargs)
def _init_dist_mpi(backend, **kwargs):
raise NotImplementedError
def _init_dist_slurm(backend, port=29500, **kwargs):
proc_id = int(os.environ['SLURM_PROCID'])
ntasks = int(os.environ['SLURM_NTASKS'])
node_list = os.environ['SLURM_NODELIST']
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(proc_id % num_gpus)
addr = subprocess.getoutput(
'scontrol show hostname {} | head -n1'.format(node_list))
os.environ['MASTER_PORT'] = str(port)
os.environ['MASTER_ADDR'] = addr
os.environ['WORLD_SIZE'] = str(ntasks)
os.environ['RANK'] = str(proc_id)
dist.init_process_group(backend=backend)
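# Illustrative driver-side sketch (assumption, not part of the original module): typical use
# inside an sbatch/srun job. _init_dist_slurm above derives MASTER_ADDR/PORT, WORLD_SIZE and
# RANK from the SLURM environment, so this only works under a SLURM allocation.
def _demo_slurm_setup(seed=42):
    init_dist('slurm', backend='nccl', port=29500)
    set_random_seed(seed)                   # seed python, numpy and torch on every rank
    logger = get_root_logger(logging.INFO)  # non-zero ranks are silenced below ERROR
    return logger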
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def get_root_logger(log_level=logging.INFO):
logger = logging.getLogger()
if not logger.hasHandlers():
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(message)s',
level=log_level)
rank, _ = get_dist_info()
if rank != 0:
logger.setLevel('ERROR')
return logger | [
"torch.cuda.manual_seed_all",
"torch.distributed.init_process_group",
"torch.multiprocessing.set_start_method",
"torch.multiprocessing.get_start_method",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.cuda.set_device"
] | 1.1 | dingmyu/mmdetection | 705dc91ca43ea62f4f69355a81271d5bd81268ca |
1.8 | from copy import deepcopy
import torch
from torch import nn
from onnx2torch.node_converters.registry import add_converter
from onnx2torch.onnx_graph import OnnxGraph
from onnx2torch.onnx_node import OnnxNode
from onnx2torch.utils.common import OnnxToTorchModule
from onnx2torch.utils.common import OperationConverterResult
from onnx2torch.utils.common import onnx_mapping_from_node
def _arbitrary_dim_shift_and_insert_zero(
input_tensor: torch.Tensor,
insert_dim: int,
) -> torch.Tensor:
# single item shift
# build two independent index lists so that mutating one cannot affect the other
slice_index = [slice(None)] * len(input_tensor.shape)
insertion = [slice(None)] * len(input_tensor.shape)
insert_dim_size = input_tensor.shape[insert_dim]
slice_index[insert_dim] = slice(0, -1)
slice_index = tuple(slice_index)
tensor_slice = input_tensor[slice_index]
insert_index = torch.arange(start=1, end=insert_dim_size, dtype=torch.int64, device=input_tensor.device)
index_shape = [1] * len(input_tensor.shape)
index_shape[insert_dim] = insert_dim_size - 1
insert_index = torch.reshape(insert_index, index_shape)
insert_index = insert_index + torch.zeros_like(tensor_slice, dtype=torch.int64, device=input_tensor.device)
input_tensor = torch.scatter(
input=input_tensor,
dim=insert_dim,
index=insert_index,
src=tensor_slice,
)
insertion[insert_dim] = slice(0, 1)
insertion = tuple(insertion)
input_tensor[insertion] = 0
return input_tensor
class OnnxCumSum(nn.Module, OnnxToTorchModule):
def __init__(
self,
exclusive: bool = False,
reverse: bool = False,
):
super().__init__()
self.exclusive = exclusive
self.reverse = reverse
def forward(self, input_tensor: torch.Tensor, axis: torch.Tensor) -> torch.Tensor:
axis = axis.item()
if self.reverse:
input_tensor = torch.flip(input_tensor, dims=(axis,))
if self.exclusive:
input_tensor = _arbitrary_dim_shift_and_insert_zero(input_tensor, insert_dim=axis)
input_tensor = torch.cumsum(input_tensor, dim=axis)
if self.reverse:
input_tensor = torch.flip(input_tensor, dims=(axis,))
return input_tensor
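# Illustrative sanity check (assumption, not part of the converter): the exclusive and
# reverse flags should reproduce ONNX CumSum semantics on top of torch.cumsum.
def _demo_cumsum_modes():
    x = torch.tensor([[1., 2., 3.]])
    axis = torch.tensor(1)
    assert torch.equal(OnnxCumSum()(x, axis), torch.tensor([[1., 3., 6.]]))
    assert torch.equal(OnnxCumSum(exclusive=True)(x, axis), torch.tensor([[0., 1., 3.]]))
    assert torch.equal(OnnxCumSum(reverse=True)(x, axis), torch.tensor([[6., 5., 3.]]))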
@add_converter(operation_type='CumSum', version=11)
@add_converter(operation_type='CumSum', version=14)
def _(node: OnnxNode, graph: OnnxGraph) -> OperationConverterResult: # pylint: disable=unused-argument
node_attributes = node.attributes
exclusive = bool(node_attributes.get('exclusive', 0))
reverse = bool(node_attributes.get('reverse', 0))  # ONNX defaults both flags to 0
return OperationConverterResult(
torch_module=OnnxCumSum(exclusive, reverse),
onnx_mapping=onnx_mapping_from_node(node),
)
| [
"torch.reshape",
"torch.arange",
"torch.scatter",
"torch.zeros_like",
"torch.flip",
"torch.cumsum"
] | 1.8.0 | ENOT-AutoDL/onnx2torch | 2391987b3349bed1670ac3c1bc9062a37323abe3 |
1.10 | # pylint: disable-all
import argparse
from animus import EarlyStoppingCallback, IExperiment
from animus.torch.callbacks import TorchCheckpointerCallback
from apto.utils.report import get_classification_report
from catalyst import utils
import numpy as np
import optuna
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from tqdm.auto import tqdm
from src.settings import LOGS_ROOT, UTCNOW
from src.ts import load_balanced_OASIS, TSQuantileTransformer
import wandb
class ResidualBlock(nn.Module):
def __init__(self, block):
super().__init__()
self.block = block
def forward(self, x: torch.Tensor):
return self.block(x) + x
class MLP(nn.Module):
def __init__(
self,
input_size: int,
output_size: int,
dropout: float = 0.5,
hidden_size: int = 128,
num_layers: int = 0,
):
super(MLP, self).__init__()
layers = [
nn.LayerNorm(input_size),
nn.Dropout(p=dropout),
nn.Linear(input_size, hidden_size),
nn.ReLU(),
]
for _ in range(num_layers):
layers.append(
ResidualBlock(
nn.Sequential(
nn.LayerNorm(hidden_size),
nn.Dropout(p=dropout),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
)
)
)
layers.append(
nn.Sequential(
nn.LayerNorm(hidden_size),
nn.Dropout(p=dropout),
nn.Linear(hidden_size, output_size),
)
)
self.fc = nn.Sequential(*layers)
def forward(self, x):
bs, ln, fs = x.shape
fc_output = self.fc(x.reshape(-1, fs))
fc_output = fc_output.reshape(bs, ln, -1).mean(1) # .squeeze(1)
return fc_output
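# Illustrative shape check (assumption, not part of the experiment): the MLP scores every
# timestep of the [batch, seq_len, features] input independently and averages the
# per-timestep logits over the sequence, giving one prediction per subject.
def _demo_mlp_shapes():
    model = MLP(input_size=53, output_size=2, hidden_size=128, num_layers=2, dropout=0.5)
    x = torch.randn(4, 156, 53)  # (batch, seq_len, n_features); 53 components assumed as in the experiment
    logits = model(x)
    assert logits.shape == (4, 2)
    return logits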
class Experiment(IExperiment):
def __init__(self, quantile: bool, max_epochs: int, logdir: str) -> None:
super().__init__()
assert not quantile, "Not implemented yet"
self._quantile: bool = quantile
self._trial: optuna.Trial = None
self.max_epochs = max_epochs
self.logdir = logdir
def on_tune_start(self, trial, k):
self.k = k
self.trial = trial
features, labels = load_balanced_OASIS()
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42 + trial)
skf.get_n_splits(features, labels)
train_index, test_index = list(skf.split(features, labels))[self.k]
X_train, X_test = features[train_index], features[test_index]
y_train, y_test = labels[train_index], labels[test_index]
X_train = np.swapaxes(X_train, 1, 2) # [n_samples; seq_len; n_features]
X_test = np.swapaxes(X_test, 1, 2)
self._train_ds = TensorDataset(
torch.tensor(X_train, dtype=torch.float32),
torch.tensor(y_train, dtype=torch.int64),
)
self._valid_ds = TensorDataset(
torch.tensor(X_test, dtype=torch.float32),
torch.tensor(y_test, dtype=torch.int64),
)
def on_experiment_start(self, exp: "IExperiment"):
# init wandb logger
self.wandb_logger: wandb.run = wandb.init(
project="mlp_oasis_cv_1", name=f"{UTCNOW}-k_{self.k}-trial_{self.trial}"
)
super().on_experiment_start(exp)
# # setup experiment
# self.num_epochs = self._trial.suggest_int("exp.num_epochs", 20, self.max_epochs)
# # setup data
# self.batch_size = self._trial.suggest_int("data.batch_size", 4, 32, log=True)
# self.datasets = {
# "train": DataLoader(
# self._train_ds, batch_size=self.batch_size, num_workers=0, shuffle=True
# ),
# "valid": DataLoader(
# self._valid_ds, batch_size=self.batch_size, num_workers=0, shuffle=False
# ),
# }
# # setup model
# hidden_size = self._trial.suggest_int("mlp.hidden_size", 32, 256, log=True)
# num_layers = self._trial.suggest_int("mlp.num_layers", 0, 4)
# dropout = self._trial.suggest_uniform("mlp.dropout", 0.1, 0.9)
# self.model = MLP(
# input_size=53, # PRIOR
# output_size=2, # PRIOR
# hidden_size=hidden_size,
# num_layers=num_layers,
# dropout=dropout,
# )
# best tune
# model = MLP(
# input_size=53, # PRIOR
# output_size=2, # PRIOR
# hidden_size=997,
# num_layers=4,
# dropout=0.20352535084272705,
# )
# best cv
self.num_epochs = 32
# setup data
self.batch_size = 6
self.datasets = {
"train": DataLoader(
self._train_ds, batch_size=self.batch_size, num_workers=0, shuffle=True
),
"valid": DataLoader(
self._valid_ds, batch_size=self.batch_size, num_workers=0, shuffle=False
),
}
# setup model
hidden_size = 142
num_layers = 2
dropout = 0.15847198018446662
self.model = MLP(
input_size=53, # PRIOR
output_size=2, # PRIOR
hidden_size=hidden_size,
num_layers=num_layers,
dropout=dropout,
)
# lr = self._trial.suggest_float("adam.lr", 1e-5, 1e-3, log=True)
lr = 0.0002222585782420201
self.criterion = nn.CrossEntropyLoss()
self.optimizer = optim.Adam(
self.model.parameters(),
lr=lr,
)
# setup callbacks
self.callbacks = {
"early-stop": EarlyStoppingCallback(
minimize=False,
patience=5,
dataset_key="valid",
metric_key="score",
min_delta=0.001,
),
"checkpointer": TorchCheckpointerCallback(
exp_attr="model",
logdir=f"{self.logdir}/{self._trial.number:04d}",
dataset_key="valid",
metric_key="score",
minimize=False,
),
}
self.wandb_logger.config.update(
{
"num_epochs": self.num_epochs,
"batch_size": self.batch_size,
"hidden_size": hidden_size,
"num_layers": num_layers,
"dropout": dropout,
"lr": lr,
}
)
def run_dataset(self) -> None:
all_scores, all_targets = [], []
total_loss = 0.0
self.model.train(self.is_train_dataset)
with torch.set_grad_enabled(self.is_train_dataset):
for self.dataset_batch_step, (data, target) in enumerate(tqdm(self.dataset)):
self.optimizer.zero_grad()
logits = self.model(data)
loss = self.criterion(logits, target)
score = torch.softmax(logits, dim=-1)
all_scores.append(score.cpu().detach().numpy())
all_targets.append(target.cpu().detach().numpy())
total_loss += loss.sum().item()
if self.is_train_dataset:
loss.backward()
self.optimizer.step()
total_loss /= self.dataset_batch_step
y_test = np.hstack(all_targets)
y_score = np.vstack(all_scores)
y_pred = np.argmax(y_score, axis=-1).astype(np.int32)
report = get_classification_report(y_true=y_test, y_pred=y_pred, y_score=y_score, beta=0.5)
for stats_type in [0, 1, "macro", "weighted"]:
stats = report.loc[stats_type]
for key, value in stats.items():
if "support" not in key:
self._trial.set_user_attr(f"{key}_{stats_type}", float(value))
self.dataset_metrics = {
"score": report["auc"].loc["weighted"],
"loss": total_loss,
}
def on_epoch_end(self, exp: "IExperiment") -> None:
super().on_epoch_end(self)
self.wandb_logger.log(
{
"train_score": self.epoch_metrics["train"]["score"],
"train_loss": self.epoch_metrics["train"]["loss"],
"valid_score": self.epoch_metrics["valid"]["score"],
"valid_loss": self.epoch_metrics["valid"]["loss"],
},
)
def on_experiment_end(self, exp: "IExperiment") -> None:
super().on_experiment_end(exp)
self._score = self.callbacks["early-stop"].best_score
wandb.summary["valid_score"] = self._score
self.wandb_logger.finish()
def _objective(self, trial) -> float:
self._trial = trial
self.run()
return self._score
def tune(self, n_trials: int):
for trial in range(n_trials):
for k in range(5):
self.on_tune_start(trial, k)
self.study = optuna.create_study(direction="maximize")
self.study.optimize(self._objective, n_trials=1, n_jobs=1)
logfile = f"{self.logdir}/optuna.csv"
df = self.study.trials_dataframe()
df.to_csv(logfile, index=False)
if __name__ == "__main__":
import warnings
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser()
utils.boolean_flag(parser, "quantile", default=False)
parser.add_argument("--max-epochs", type=int, default=1)
parser.add_argument("--num-trials", type=int, default=1)
args = parser.parse_args()
Experiment(
quantile=args.quantile,
max_epochs=args.max_epochs,
logdir=f"{LOGS_ROOT}/{UTCNOW}-ts-mlp-oasis-q{args.quantile}/",
).tune(n_trials=args.num_trials)
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.nn.Sequential",
"torch.set_grad_enabled",
"torch.softmax",
"torch.nn.ReLU",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.nn.CrossEntropyLoss"
] | 1.10.0 | paavalipopov/introspection | ee486a9e8c8b6ddb7ab257eae9e14aac5d637527 |
1.3 | """GaussianMLPModule."""
import abc
import torch
from torch import nn
from torch.distributions import Normal
from torch.distributions.independent import Independent
from garage.torch.distributions import TanhNormal
from garage.torch.modules.mlp_module import MLPModule
from garage.torch.modules.multi_headed_mlp_module import MultiHeadedMLPModule
class GaussianMLPBaseModule(nn.Module):
"""Base of GaussianMLPModel.
Args:
input_dim (int): Input dimension of the model.
output_dim (int): Output dimension of the model.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for mean. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a torch.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
torch.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
torch.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a torch.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
torch.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
torch.Tensor.
learn_std (bool): Is std trainable.
init_std (float): Initial value for std.
(plain value - not log or exponentiated).
std_hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for std. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
min_std (float): If not None, the std is at least the value of min_std,
to avoid numerical issues (plain value - not log or exponentiated).
max_std (float): If not None, the std is at most the value of max_std,
to avoid numerical issues (plain value - not log or exponentiated).
std_hidden_nonlinearity (callable): Nonlinearity for each hidden layer
in the std network.
std_hidden_w_init (callable): Initializer function for the weight
of hidden layer (s).
std_hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s).
std_output_nonlinearity (callable): Activation function for output
dense layer in the std network. It should return a torch.Tensor.
Set it to None to maintain a linear activation.
std_output_w_init (callable): Initializer function for the weight
of output dense layer(s) in the std network.
std_parameterization (str): How the std should be parametrized. There
are two options:
- exp: the logarithm of the std will be stored, and an exponential
transformation will be applied.
- softplus: the std will be computed as log(1+exp(x)).
layer_normalization (bool): Bool for using layer normalization or not.
normal_distribution_cls (torch.distribution): normal distribution class
to be constructed and returned by a call to forward. By default, is
`torch.distributions.Normal`.
"""
def __init__(self,
input_dim,
output_dim,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
hidden_w_init=nn.init.xavier_uniform_,
hidden_b_init=nn.init.zeros_,
output_nonlinearity=None,
output_w_init=nn.init.xavier_uniform_,
output_b_init=nn.init.zeros_,
learn_std=True,
init_std=1.0,
min_std=1e-6,
max_std=None,
std_hidden_sizes=(32, 32),
std_hidden_nonlinearity=torch.tanh,
std_hidden_w_init=nn.init.xavier_uniform_,
std_hidden_b_init=nn.init.zeros_,
std_output_nonlinearity=None,
std_output_w_init=nn.init.xavier_uniform_,
std_parameterization='exp',
layer_normalization=False,
normal_distribution_cls=Normal):
super().__init__()
self._input_dim = input_dim
self._hidden_sizes = hidden_sizes
self._action_dim = output_dim
self._learn_std = learn_std
self._std_hidden_sizes = std_hidden_sizes
self._min_std = min_std
self._max_std = max_std
self._std_hidden_nonlinearity = std_hidden_nonlinearity
self._std_hidden_w_init = std_hidden_w_init
self._std_hidden_b_init = std_hidden_b_init
self._std_output_nonlinearity = std_output_nonlinearity
self._std_output_w_init = std_output_w_init
self._std_parameterization = std_parameterization
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._layer_normalization = layer_normalization
self._norm_dist_class = normal_distribution_cls
if self._std_parameterization not in ('exp', 'softplus'):
raise NotImplementedError
init_std_param = torch.Tensor([init_std]).log()
if self._learn_std:
self._init_std = torch.nn.Parameter(init_std_param)
else:
self._init_std = init_std_param
self.register_buffer('init_std', self._init_std)
self._min_std_param = self._max_std_param = None
if min_std is not None:
self._min_std_param = torch.Tensor([min_std]).log()
self.register_buffer('min_std_param', self._min_std_param)
if max_std is not None:
self._max_std_param = torch.Tensor([max_std]).log()
self.register_buffer('max_std_param', self._max_std_param)
def to(self, *args, **kwargs):
"""Move the module to the specified device.
Args:
*args: args to pytorch to function.
**kwargs: keyword args to pytorch to function.
"""
super().to(*args, **kwargs)
buffers = dict(self.named_buffers())
if not isinstance(self._init_std, torch.nn.Parameter):
self._init_std = buffers['init_std']
self._min_std_param = buffers['min_std_param']
self._max_std_param = buffers['max_std_param']
@abc.abstractmethod
def _get_mean_and_log_std(self, *inputs):
pass
def forward(self, *inputs):
"""Forward method.
Args:
*inputs: Input to the module.
Returns:
torch.Tensor: Module output.
"""
mean, log_std_uncentered = self._get_mean_and_log_std(*inputs)
if self._min_std_param or self._max_std_param:
log_std_uncentered = log_std_uncentered.clamp(
min=self._to_scalar_if_not_none(self._min_std_param),
max=self._to_scalar_if_not_none(self._max_std_param))
if self._std_parameterization == 'exp':
std = log_std_uncentered.exp()
else:
std = log_std_uncentered.exp().exp().add(1.).log()
dist = self._norm_dist_class(mean, std)
# This control flow is needed because if a TanhNormal distribution is
# wrapped by torch.distributions.Independent, then custom functions
# such as rsample_with_pretanh_value of the TanhNormal distribution
# are not accessible.
if not isinstance(dist, TanhNormal):
# Makes it so that a sample from the distribution is treated as a
# single sample and not dist.batch_shape samples.
dist = Independent(dist, 1)
return dist
# pylint: disable=no-self-use
def _to_scalar_if_not_none(self, tensor):
"""Convert torch.Tensor of a single value to a Python number.
Args:
tensor (torch.Tensor): A torch.Tensor of a single value.
Returns:
float: The value of tensor.
"""
return None if tensor is None else tensor.item()
class GaussianMLPModule(GaussianMLPBaseModule):
"""GaussianMLPModule that mean and std share the same network.
Args:
input_dim (int): Input dimension of the model.
output_dim (int): Output dimension of the model.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for mean. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a torch.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
torch.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
torch.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a torch.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
torch.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
torch.Tensor.
learn_std (bool): Is std trainable.
init_std (float): Initial value for std.
(plain value - not log or exponentiated).
min_std (float): If not None, the std is at least the value of min_std,
to avoid numerical issues (plain value - not log or exponentiated).
max_std (float): If not None, the std is at most the value of max_std,
to avoid numerical issues (plain value - not log or exponentiated).
std_parameterization (str): How the std should be parametrized. There
are two options:
- exp: the logarithm of the std will be stored, and an exponential
transformation will be applied
- softplus: the std will be computed as log(1+exp(x))
layer_normalization (bool): Bool for using layer normalization or not.
normal_distribution_cls (torch.distribution): normal distribution class
to be constructed and returned by a call to forward. By default, is
`torch.distributions.Normal`.
"""
def __init__(self,
input_dim,
output_dim,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
hidden_w_init=nn.init.xavier_uniform_,
hidden_b_init=nn.init.zeros_,
output_nonlinearity=None,
output_w_init=nn.init.xavier_uniform_,
output_b_init=nn.init.zeros_,
learn_std=True,
init_std=1.0,
min_std=1e-6,
max_std=None,
std_parameterization='exp',
layer_normalization=False,
normal_distribution_cls=Normal):
super(GaussianMLPModule,
self).__init__(input_dim=input_dim,
output_dim=output_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
learn_std=learn_std,
init_std=init_std,
min_std=min_std,
max_std=max_std,
std_parameterization=std_parameterization,
layer_normalization=layer_normalization,
normal_distribution_cls=normal_distribution_cls)
self._mean_module = MLPModule(
input_dim=self._input_dim,
output_dim=self._action_dim,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
layer_normalization=self._layer_normalization)
def _get_mean_and_log_std(self, *inputs):
"""Get mean and std of Gaussian distribution given inputs.
Args:
*inputs: Input to the module.
Returns:
torch.Tensor: The mean of Gaussian distribution.
torch.Tensor: The variance of Gaussian distribution.
"""
assert len(inputs) == 1
mean = self._mean_module(*inputs)
broadcast_shape = list(inputs[0].shape[:-1]) + [self._action_dim]
uncentered_log_std = torch.zeros(*broadcast_shape) + self._init_std
return mean, uncentered_log_std
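# Illustrative usage sketch (assumption, not part of garage): forward returns a
# torch.distributions object, so actions and log-probabilities come from the distribution
# rather than from raw network outputs.
def _demo_gaussian_mlp_module():
    module = GaussianMLPModule(input_dim=4, output_dim=2, hidden_sizes=(32, 32))
    obs = torch.ones(8, 4)
    dist = module(obs)                  # Independent(Normal) with batch_shape (8,)
    actions = dist.rsample()            # (8, 2) reparameterized sample
    log_probs = dist.log_prob(actions)  # (8,)
    return actions, log_probs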
class GaussianMLPIndependentStdModule(GaussianMLPBaseModule):
"""GaussianMLPModule which has two different mean and std network.
Args:
input_dim (int): Input dimension of the model.
output_dim (int): Output dimension of the model.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for mean. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a torch.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
torch.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
torch.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a torch.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
torch.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
torch.Tensor.
learn_std (bool): Is std trainable.
init_std (float): Initial value for std.
(plain value - not log or exponentiated).
min_std (float): If not None, the std is at least the value of min_std,
to avoid numerical issues (plain value - not log or exponentiated).
max_std (float): If not None, the std is at most the value of max_std,
to avoid numerical issues (plain value - not log or exponentiated).
std_hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for std. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
std_hidden_nonlinearity (callable): Nonlinearity for each hidden layer
in the std network.
std_hidden_w_init (callable): Initializer function for the weight
of hidden layer (s).
std_hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s).
std_output_nonlinearity (callable): Activation function for output
dense layer in the std network. It should return a torch.Tensor.
Set it to None to maintain a linear activation.
std_output_w_init (callable): Initializer function for the weight
of output dense layer(s) in the std network.
std_parameterization (str): How the std should be parametrized. There
are two options:
- exp: the logarithm of the std will be stored, and an exponential
transformation will be applied
- softplus: the std will be computed as log(1+exp(x))
layer_normalization (bool): Bool for using layer normalization or not.
normal_distribution_cls (torch.distribution): normal distribution class
to be constructed and returned by a call to forward. By default, is
`torch.distributions.Normal`.
"""
def __init__(self,
input_dim,
output_dim,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
hidden_w_init=nn.init.xavier_uniform_,
hidden_b_init=nn.init.zeros_,
output_nonlinearity=None,
output_w_init=nn.init.xavier_uniform_,
output_b_init=nn.init.zeros_,
learn_std=True,
init_std=1.0,
min_std=1e-6,
max_std=None,
std_hidden_sizes=(32, 32),
std_hidden_nonlinearity=torch.tanh,
std_hidden_w_init=nn.init.xavier_uniform_,
std_hidden_b_init=nn.init.zeros_,
std_output_nonlinearity=None,
std_output_w_init=nn.init.xavier_uniform_,
std_parameterization='exp',
layer_normalization=False,
normal_distribution_cls=Normal):
super(GaussianMLPIndependentStdModule,
self).__init__(input_dim=input_dim,
output_dim=output_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
learn_std=learn_std,
init_std=init_std,
min_std=min_std,
max_std=max_std,
std_hidden_sizes=std_hidden_sizes,
std_hidden_nonlinearity=std_hidden_nonlinearity,
std_hidden_w_init=std_hidden_w_init,
std_hidden_b_init=std_hidden_b_init,
std_output_nonlinearity=std_output_nonlinearity,
std_output_w_init=std_output_w_init,
std_parameterization=std_parameterization,
layer_normalization=layer_normalization,
normal_distribution_cls=normal_distribution_cls)
self._mean_module = MLPModule(
input_dim=self._input_dim,
output_dim=self._action_dim,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
layer_normalization=self._layer_normalization)
self._log_std_module = MLPModule(
input_dim=self._input_dim,
output_dim=self._action_dim,
hidden_sizes=self._std_hidden_sizes,
hidden_nonlinearity=self._std_hidden_nonlinearity,
hidden_w_init=self._std_hidden_w_init,
hidden_b_init=self._std_hidden_b_init,
output_nonlinearity=self._std_output_nonlinearity,
output_w_init=self._std_output_w_init,
output_b_init=self._init_std_b,
layer_normalization=self._layer_normalization)
def _init_std_b(self, b):
"""Default bias initialization function.
Args:
b (torch.Tensor): The bias tensor.
Returns:
torch.Tensor: The bias tensor itself.
"""
return nn.init.constant_(b, self._init_std.item())
def _get_mean_and_log_std(self, *inputs):
"""Get mean and std of Gaussian distribution given inputs.
Args:
*inputs: Input to the module.
Returns:
torch.Tensor: The mean of Gaussian distribution.
torch.Tensor: The variance of Gaussian distribution.
"""
return self._mean_module(*inputs), self._log_std_module(*inputs)
class GaussianMLPTwoHeadedModule(GaussianMLPBaseModule):
"""GaussianMLPModule which has only one mean network.
Args:
input_dim (int): Input dimension of the model.
output_dim (int): Output dimension of the model.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for mean. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a torch.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
torch.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
torch.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a torch.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
torch.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
torch.Tensor.
learn_std (bool): Is std trainable.
init_std (float): Initial value for std.
(plain value - not log or exponentiated).
min_std (float): If not None, the std is at least the value of min_std,
to avoid numerical issues (plain value - not log or exponentiated).
max_std (float): If not None, the std is at most the value of max_std,
to avoid numerical issues (plain value - not log or exponentiated).
std_parameterization (str): How the std should be parametrized. There
are two options:
- exp: the logarithm of the std will be stored, and an exponential
transformation will be applied
- softplus: the std will be computed as log(1+exp(x))
layer_normalization (bool): Bool for using layer normalization or not.
normal_distribution_cls (torch.distribution): normal distribution class
to be constructed and returned by a call to forward. By default, is
`torch.distributions.Normal`.
"""
def __init__(self,
input_dim,
output_dim,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
hidden_w_init=nn.init.xavier_uniform_,
hidden_b_init=nn.init.zeros_,
output_nonlinearity=None,
output_w_init=nn.init.xavier_uniform_,
output_b_init=nn.init.zeros_,
learn_std=True,
init_std=1.0,
min_std=1e-6,
max_std=None,
std_parameterization='exp',
layer_normalization=False,
normal_distribution_cls=Normal):
super(GaussianMLPTwoHeadedModule,
self).__init__(input_dim=input_dim,
output_dim=output_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
learn_std=learn_std,
init_std=init_std,
min_std=min_std,
max_std=max_std,
std_parameterization=std_parameterization,
layer_normalization=layer_normalization,
normal_distribution_cls=normal_distribution_cls)
self._shared_mean_log_std_network = MultiHeadedMLPModule(
n_heads=2,
input_dim=self._input_dim,
output_dims=self._action_dim,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearities=self._output_nonlinearity,
output_w_inits=self._output_w_init,
output_b_inits=[
nn.init.zeros_,
lambda x: nn.init.constant_(x, self._init_std.item())
],
layer_normalization=self._layer_normalization)
def _get_mean_and_log_std(self, *inputs):
"""Get mean and std of Gaussian distribution given inputs.
Args:
*inputs: Input to the module.
Returns:
torch.Tensor: The mean of Gaussian distribution.
torch.Tensor: The variance of Gaussian distribution.
"""
return self._shared_mean_log_std_network(*inputs)
| [
"torch.zeros",
"torch.distributions.independent.Independent",
"torch.Tensor",
"torch.nn.Parameter"
] | 1.3.0 | igor-krawczuk/garage | aa86ce710c6d01380477d6feddc0e38427b1e3b4 |
0.4 | """
A ``TextField`` represents a string of text, the kind that you might want to represent with
standard word vectors, or pass through an LSTM.
"""
import IPython as ipy
from typing import Dict, List, Optional, Iterator
import textwrap
from overrides import overrides
from spacy.tokens import Token as SpacyToken
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.data.fields.sequence_field import SequenceField
from allennlp.data.tokenizers.token import Token
from allennlp.data.token_indexers.token_indexer import TokenIndexer, TokenType
from allennlp.data.vocabulary import Vocabulary
from allennlp.nn import util
TokenList = List[TokenType] # pylint: disable=invalid-name
class TextField(SequenceField[Dict[str, torch.Tensor]]):
"""
This ``Field`` represents a list of string tokens. Before constructing this object, you need
to tokenize raw strings using a :class:`~allennlp.data.tokenizers.tokenizer.Tokenizer`.
Because string tokens can be represented as indexed arrays in a number of ways, we also take a
dictionary of :class:`~allennlp.data.token_indexers.token_indexer.TokenIndexer`
objects that will be used to convert the tokens into indices.
Each ``TokenIndexer`` could represent each token as a single ID, or a list of character IDs, or
something else.
This field will get converted into a dictionary of arrays, one for each ``TokenIndexer``. A
``SingleIdTokenIndexer`` produces an array of shape (num_tokens,), while a
``TokenCharactersIndexer`` produces an array of shape (num_tokens, num_characters).
"""
def __init__(self, tokens: List[Token], token_indexers: Dict[str, TokenIndexer]) -> None:
self.tokens = tokens
self._token_indexers = token_indexers
self._indexed_tokens: Optional[Dict[str, TokenList]] = None
self._indexer_name_to_indexed_token: Optional[Dict[str, List[str]]] = None
if not all([isinstance(x, (Token, SpacyToken)) for x in tokens]):
raise ConfigurationError("TextFields must be passed Tokens. "
"Found: {} with types {}.".format(tokens, [type(x) for x in tokens]))
# Sequence[Token] methods
def __iter__(self) -> Iterator[Token]:
return iter(self.tokens)
def __getitem__(self, idx: int) -> Token:
return self.tokens[idx]
def __len__(self) -> int:
return len(self.tokens)
@overrides
def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):
for indexer in self._token_indexers.values():
for token in self.tokens:
indexer.count_vocab_items(token, counter)
@overrides
def index(self, vocab: Vocabulary):
token_arrays: Dict[str, TokenList] = {}
indexer_name_to_indexed_token: Dict[str, List[str]] = {}
for indexer_name, indexer in self._token_indexers.items():
token_indices = indexer.tokens_to_indices(self.tokens, vocab, indexer_name)
token_arrays.update(token_indices)
indexer_name_to_indexed_token[indexer_name] = list(token_indices.keys())
self._indexed_tokens = token_arrays
self._indexer_name_to_indexed_token = indexer_name_to_indexed_token
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
"""
The ``TextField`` has a list of ``Tokens``, and each ``Token`` gets converted into arrays by
(potentially) several ``TokenIndexers``. This method gets the max length (over tokens)
associated with each of these arrays.
"""
# Our basic outline: we will iterate over `TokenIndexers`, and aggregate lengths over tokens
# for each indexer separately. Then we will combine the results for each indexer into a single
# dictionary, resolving any (unlikely) key conflicts by taking a max.
lengths = []
if self._indexed_tokens is None:
raise ConfigurationError("You must call .index(vocabulary) on a "
"field before determining padding lengths.")
# Each indexer can return a different sequence length, and for indexers that return
# multiple arrays each can have a different length. We'll keep track of them here.
for indexer_name, indexer in self._token_indexers.items():
indexer_lengths = {}
for indexed_tokens_key in self._indexer_name_to_indexed_token[indexer_name]:
# This is a list of dicts, one for each token in the field.
token_lengths = [indexer.get_padding_lengths(token)
for token in self._indexed_tokens[indexed_tokens_key]]
if not token_lengths:
# This is a padding edge case and occurs when we want to pad a ListField of
# TextFields. In order to pad the list field, we need to be able to have an
# _empty_ TextField, but if this is the case, token_lengths will be an empty
# list, so we add the default empty padding dictionary to the list instead.
token_lengths = [{}]
# Iterate over the keys and find the maximum token length.
# It's fine to iterate over the keys of the first token since all tokens have the same keys.
for key in token_lengths[0]:
indexer_lengths[key] = max(x[key] if key in x else 0 for x in token_lengths)
lengths.append(indexer_lengths)
indexer_sequence_lengths = {key: len(val) for key, val in self._indexed_tokens.items()}
# Get the padding lengths for sequence lengths.
if len(set(indexer_sequence_lengths.values())) == 1:
# This is the default case where all indexers return the same length.
# Keep the existing 'num_tokens' key for backward compatibility with existing config files.
padding_lengths = {'num_tokens': list(indexer_sequence_lengths.values())[0]}
else:
# The indexers return different lengths.
padding_lengths = indexer_sequence_lengths
# Get all keys which have been used for padding for each indexer and take the max if there are duplicates.
padding_keys = {key for d in lengths for key in d.keys()}
for padding_key in padding_keys:
padding_lengths[padding_key] = max(x[padding_key] if padding_key in x else 0 for x in lengths)
return padding_lengths
@overrides
def sequence_length(self) -> int:
return len(self.tokens)
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> Dict[str, torch.Tensor]:
tensors = {}
num_tokens = padding_lengths.get('num_tokens')
for indexer_name, indexer in self._token_indexers.items():
if num_tokens is None:
# The indexers return different lengths.
# Get the desired_num_tokens for this indexer.
desired_num_tokens = {
indexed_tokens_key: padding_lengths[indexed_tokens_key]
for indexed_tokens_key in self._indexer_name_to_indexed_token[indexer_name]
}
else:
desired_num_tokens = {indexer_name: num_tokens}
indices_to_pad = {indexed_tokens_key: self._indexed_tokens[indexed_tokens_key]
for indexed_tokens_key in self._indexer_name_to_indexed_token[indexer_name]}
padded_array = indexer.pad_token_sequence(indices_to_pad,
desired_num_tokens, padding_lengths)
# We use the key of the indexer to recognise what the tensor corresponds to within the
# field (i.e. the result of word indexing, or the result of character indexing, for
# example).
# TODO(mattg): we might someday have a TokenIndexer that needs to use something other
# than a LongTensor here, and it's not clear how to signal that. Maybe we'll need to
# add a class method to TokenIndexer to tell us the type? But we can worry about that
# when there's a compelling use case for it.
try:
indexer_tensors = {key: torch.LongTensor(array) for key, array in padded_array.items()}
except Exception as exc:
print("\n\n",exc,"\n")
ipy.embed()
raise(exc)
tensors.update(indexer_tensors)
return tensors
@overrides
def empty_field(self):
# pylint: disable=protected-access
text_field = TextField([], self._token_indexers)
text_field._indexed_tokens = {}
text_field._indexer_name_to_indexed_token = {}
for indexer_name, indexer in self._token_indexers.items():
array_keys = indexer.get_keys(indexer_name)
for key in array_keys:
text_field._indexed_tokens[key] = []
text_field._indexer_name_to_indexed_token[indexer_name] = array_keys
return text_field
@overrides
def batch_tensors(self, tensor_list: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
# pylint: disable=no-self-use
# This is creating a dict of {token_indexer_key: batch_tensor} for each token indexer used
# to index this field.
return util.batch_tensor_dicts(tensor_list)
def __str__(self) -> str:
indexers = {name: indexer.__class__.__name__ for name, indexer in self._token_indexers.items()}
# Double tab to indent under the header.
formatted_text = "".join(["\t\t" + text + "\n"
for text in textwrap.wrap(repr(self.tokens), 100)])
return f"TextField of length {self.sequence_length()} with " \
f"text: \n {formatted_text} \t\tand TokenIndexers : {indexers}"
| [
"torch.LongTensor"
] | 0.4.1 | pmulcaire/rosita | fffe45fb450d79cf36e0a3e2625300dc95249367 |
1.7 | """
BSD 3-Clause License
Copyright (c) 2018, NVIDIA Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import numpy as np
from collections import namedtuple
import torch
Inputs = namedtuple("Inputs", ["text", "mels", "gate", "text_len", "mel_len"])
InputsCTC = namedtuple("InputsCTC", ["text", "length"])
Outputs = namedtuple("Outputs", ["mels", "mels_postnet", "gate", "alignments"])
OutputsGST = namedtuple("OutputsGST", ["style_emb", "gst_weights"])
def calculate_global_mean(data_loader, path=None):
"""
Based on https://github.com/bfs18/tacotron2
"""
sums = []
frames = []
print("Calculating global mean...")
for i, batch in enumerate(data_loader):
print("\rProcessing batch #{} out of {}".format(i + 1, len(data_loader)), end="")
inputs, *_ = batch
# padded values are 0.
sums.append(inputs.mels.double().sum(dim=(0, 2)))
frames.append(inputs.mel_len.double().sum())
global_mean = (sum(sums) / sum(frames)).float()
if path is not None:
np.save(path, global_mean.numpy())
return to_gpu(global_mean)
def load_global_mean(path):
assert os.path.exists(path)
global_mean = np.load(path)
return to_gpu(torch.tensor(global_mean))
def get_mask_from_lengths(lengths):
max_len = lengths.max()
ids = torch.arange(max_len, device=lengths.device)
mask = ids < lengths.unsqueeze(1)
return mask
def get_mask_3d(widths, heights):
mask_width = get_mask_from_lengths(widths)
mask_height = get_mask_from_lengths(heights)
mask_3d = mask_width.unsqueeze(2) & mask_height.unsqueeze(1)
return mask_3d
def get_drop_frame_mask_from_lengths(lengths, drop_frame_rate):
"""
Based on https://github.com/bfs18/tacotron2
"""
batch_size = lengths.size(0)
max_len = torch.max(lengths).item()
mask = get_mask_from_lengths(lengths).float()
drop_mask = torch.empty([batch_size, max_len], device=lengths.device).uniform_(0., 1.) < drop_frame_rate
drop_mask = drop_mask.float() * mask
return drop_mask
def dropout_frame(mels, global_mean, mel_lengths, drop_frame_rate):
"""
Based on https://github.com/bfs18/tacotron2
"""
drop_mask = get_drop_frame_mask_from_lengths(mel_lengths, drop_frame_rate)
dropped_mels = (mels * (1.0 - drop_mask).unsqueeze(1) +
global_mean[None, :, None] * drop_mask.unsqueeze(1))
return dropped_mels
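# Illustrative usage sketch (assumption, not part of the original file): the global mean frame
# replaces a random drop_frame_rate fraction of the valid (unpadded) frames in each mel batch,
# following the bfs18/tacotron2 drop-frame augmentation referenced above.
def _demo_dropout_frame():
    mels = torch.randn(2, 80, 100)         # (batch, n_mel_channels, max_frames)
    mel_lengths = torch.tensor([100, 60])  # true lengths before padding
    global_mean = torch.zeros(80)          # normally produced by calculate_global_mean
    return dropout_frame(mels, global_mean, mel_lengths, drop_frame_rate=0.2)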
def load_filepaths_and_text(filename, split="|"):
with open(filename, encoding='utf-8') as f:
filepaths_and_text = [line.strip().split(split) for line in f]
return filepaths_and_text
def to_gpu(x):
x = x.contiguous()
if torch.cuda.is_available():
x = x.cuda(non_blocking=True)
return torch.autograd.Variable(x)
def to_numpy(tensor):
return tensor.data.cpu().numpy() | [
"torch.arange",
"torch.max",
"torch.autograd.Variable",
"torch.cuda.is_available",
"torch.tensor",
"torch.empty"
] | 1.7.1 | NoVarlok/sova-tts-engine | 1b7c0b3591bb7f823be648093de279881e194d05 |